| repo_name (stringlengths 5–100) | ref (stringlengths 12–67) | path (stringlengths 4–244) | copies (stringlengths 1–8) | content (stringlengths 0–1.05M, nullable ⌀) |
|---|---|---|---|---|
gkawamoto/yowsup
|
refs/heads/master
|
yowsup/demos/sendclient/stack.py
|
9
|
from yowsup.stacks import YowStack
from .layer import SendLayer
from yowsup.layers import YowLayerEvent
from yowsup.layers.auth import YowCryptLayer, YowAuthenticationProtocolLayer, AuthError
from yowsup.layers.coder import YowCoderLayer
from yowsup.layers.network import YowNetworkLayer
from yowsup.layers.protocol_messages import YowMessagesProtocolLayer
from yowsup.layers.stanzaregulator import YowStanzaRegulator
from yowsup.layers.protocol_receipts import YowReceiptProtocolLayer
from yowsup.layers.protocol_acks import YowAckProtocolLayer
from yowsup.layers.logger import YowLoggerLayer
from yowsup.layers import YowParallelLayer
class YowsupSendStack(object):
def __init__(self, credentials, messages, encryptionEnabled = False):
"""
:param credentials:
:param messages: list of (jid, message) tuples
:param encryptionEnabled:
:return:
"""
if encryptionEnabled:
from yowsup.layers.axolotl import YowAxolotlLayer
layers = (
SendLayer,
YowParallelLayer([YowAuthenticationProtocolLayer, YowMessagesProtocolLayer, YowReceiptProtocolLayer, YowAckProtocolLayer]),
YowAxolotlLayer,
YowLoggerLayer,
YowCoderLayer,
YowCryptLayer,
YowStanzaRegulator,
YowNetworkLayer
)
else:
layers = (
SendLayer,
YowParallelLayer([YowAuthenticationProtocolLayer, YowMessagesProtocolLayer, YowReceiptProtocolLayer, YowAckProtocolLayer]),
YowLoggerLayer,
YowCoderLayer,
YowCryptLayer,
YowStanzaRegulator,
YowNetworkLayer
)
self.stack = YowStack(layers)
self.stack.setProp(SendLayer.PROP_MESSAGES, messages)
self.stack.setProp(YowAuthenticationProtocolLayer.PROP_PASSIVE, True)
self.stack.setCredentials(credentials)
def start(self):
self.stack.broadcastEvent(YowLayerEvent(YowNetworkLayer.EVENT_STATE_CONNECT))
try:
self.stack.loop()
except AuthError as e:
print("Authentication Error: %s" % e.message)
|
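A minimal driver for the stack above might look like this; the phone number, password, and recipient jid are placeholders (the actual demo reads them from the command line):

credentials = ("491234567890", "base64encodedpassword=")    # (phone, password) placeholders
messages = [("491234567891", "Hello from yowsup")]          # list of (jid, message) tuples

stack = YowsupSendStack(credentials, messages)
stack.start()    # runs stack.loop(); AuthError is caught and reported inside start()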
kingosticks/mopidy
|
refs/heads/develop
|
tests/models/test_legacy.py
|
4
|
import unittest
from mopidy.models import ImmutableObject
class Model(ImmutableObject):
uri = None
name = None
models = frozenset()
def __init__(self, *args, **kwargs):
self.__dict__["models"] = frozenset(kwargs.pop("models", None) or [])
super().__init__(self, *args, **kwargs)
class SubModel(ImmutableObject):
uri = None
name = None
class GenericCopyTest(unittest.TestCase):
def compare(self, orig, other):
assert orig == other
assert id(orig) != id(other)
def test_copying_model(self):
model = Model()
self.compare(model, model.replace())
def test_copying_model_with_basic_values(self):
model = Model(name="foo", uri="bar")
other = model.replace(name="baz")
assert "baz" == other.name
assert "bar" == other.uri
def test_copying_model_with_missing_values(self):
model = Model(uri="bar")
other = model.replace(name="baz")
assert "baz" == other.name
assert "bar" == other.uri
def test_copying_model_with_private_internal_value(self):
model = Model(models=[SubModel(name=123)])
other = model.replace(models=[SubModel(name=345)])
assert SubModel(name=345) in other.models
def test_copying_model_with_invalid_key(self):
with self.assertRaises(TypeError):
Model().replace(invalid_key=True)
def test_copying_model_to_remove(self):
model = Model(name="foo").replace(name=None)
assert model == Model()
class ModelTest(unittest.TestCase):
def test_uri(self):
uri = "an_uri"
model = Model(uri=uri)
assert model.uri == uri
with self.assertRaises(AttributeError):
model.uri = None
def test_name(self):
name = "a name"
model = Model(name=name)
assert model.name == name
with self.assertRaises(AttributeError):
model.name = None
def test_submodels(self):
models = [SubModel(name=123), SubModel(name=456)]
model = Model(models=models)
assert set(model.models) == set(models)
with self.assertRaises(AttributeError):
model.models = None
def test_models_none(self):
assert set() == Model(models=None).models
def test_invalid_kwarg(self):
with self.assertRaises(TypeError):
Model(foo="baz")
def test_repr_without_models(self):
assert "Model(name='name', uri='uri')" == repr(
Model(uri="uri", name="name")
)
def test_repr_with_models(self):
assert (
"Model(models=[SubModel(name=123)], name='name', uri='uri')"
== repr(Model(uri="uri", name="name", models=[SubModel(name=123)]))
)
def test_serialize_without_models(self):
self.assertDictEqual(
{"__model__": "Model", "uri": "uri", "name": "name"},
Model(uri="uri", name="name").serialize(),
)
def test_serialize_with_models(self):
submodel = SubModel(name=123)
self.assertDictEqual(
{
"__model__": "Model",
"uri": "uri",
"name": "name",
"models": [submodel.serialize()],
},
Model(uri="uri", name="name", models=[submodel]).serialize(),
)
def test_eq_uri(self):
model1 = Model(uri="uri1")
model2 = Model(uri="uri1")
assert model1 == model2
assert hash(model1) == hash(model2)
def test_eq_name(self):
model1 = Model(name="name1")
model2 = Model(name="name1")
assert model1 == model2
assert hash(model1) == hash(model2)
def test_eq_models(self):
models = [SubModel()]
model1 = Model(models=models)
model2 = Model(models=models)
assert model1 == model2
assert hash(model1) == hash(model2)
def test_eq_models_order(self):
submodel1 = SubModel(name="name1")
submodel2 = SubModel(name="name2")
model1 = Model(models=[submodel1, submodel2])
model2 = Model(models=[submodel2, submodel1])
assert model1 == model2
assert hash(model1) == hash(model2)
def test_eq_none(self):
assert Model() is not None
def test_eq_other(self):
assert Model() != "other"
def test_ne_uri(self):
model1 = Model(uri="uri1")
model2 = Model(uri="uri2")
assert model1 != model2
assert hash(model1) != hash(model2)
def test_ne_name(self):
model1 = Model(name="name1")
model2 = Model(name="name2")
assert model1 != model2
assert hash(model1) != hash(model2)
def test_ne_models(self):
model1 = Model(models=[SubModel(name="name1")])
model2 = Model(models=[SubModel(name="name2")])
assert model1 != model2
assert hash(model1) != hash(model2)
def test_ignores_values_with_default_value_none(self):
model1 = Model(name="name1")
model2 = Model(name="name1", uri=None)
assert model1 == model2
assert hash(model1) == hash(model2)
|
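As a quick illustration of the copy-on-write behaviour these tests pin down (using the Model and SubModel classes defined above):

original = Model(uri="bar", name="foo", models=[SubModel(name=123)])
changed = original.replace(name="baz")                  # replace() returns a new object
assert changed.uri == "bar" and changed.name == "baz"
assert original.name == "foo"                           # the original instance is untouched
assert Model(name="x", uri=None) == Model(name="x")     # fields left at their None default are ignored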
spirosmastorakis/ns-3-dev-ndnSIM
|
refs/heads/master
|
doc/models/source/conf.py
|
90
|
# -*- coding: utf-8 -*-
#
# ns-3 documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 14 09:00:39 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ns-3'
copyright = u'2011, ns-3 project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'ns-3-dev'
# The full version, including alpha/beta/rc tags.
release = 'ns-3-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'ns3_html_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../..']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Model Library'
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'Models'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ns-3doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ns-3-model-library.tex', u'ns-3 Model Library',
u'ns-3 project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '../../ns3_html_theme/static/ns-3.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ns-3-model-library', u'ns-3 Model Library',
[u'ns-3 project'], 1)
]
|
codrut3/tensorflow
|
refs/heads/master
|
tensorflow/contrib/model_pruning/python/learning.py
|
32
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper around tf-slim's training code contrib/slim/python/slim/learning.py
to support training of pruned models
*******************************************************************
* A simple working training script with support for model pruning *
*******************************************************************
# Load data and create the model:
images, labels = LoadData(...)
predictions = MyModel(images)
# Define the loss:
slim.losses.log_loss(predictions, labels)
total_loss = slim.losses.get_total_loss()
# Define the optimizer:
optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Set up sparsity
sparsity = pruning.setup_gradual_sparsity(self.global_step)
# Create mask update op
mask_update_op = pruning.add_mask_update_ip(sparsity)
# Run training.
learning.train(train_op,
my_log_dir,
mask_update_op)
see contrib/slim/python/slim/learning.py for additional examples
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import slim as _slim
_USE_DEFAULT = 0
train_step = _slim.learning.train_step
def train(train_op,
logdir,
mask_update_op,
train_step_fn=train_step,
train_step_kwargs=_USE_DEFAULT,
log_every_n_steps=1,
graph=None,
master='',
is_chief=True,
global_step=None,
number_of_steps=None,
init_op=_USE_DEFAULT,
init_feed_dict=None,
local_init_op=_USE_DEFAULT,
init_fn=None,
ready_op=_USE_DEFAULT,
summary_op=_USE_DEFAULT,
save_summaries_secs=600,
summary_writer=_USE_DEFAULT,
startup_delay_steps=0,
saver=None,
save_interval_secs=600,
sync_optimizer=None,
session_config=None,
trace_every_n_steps=None):
"""Wrapper around tf-slim's train function.
Runs a training loop using a TensorFlow supervisor.
When the sync_optimizer is supplied, gradient updates are applied
synchronously. Otherwise, gradient updates are applied asynchronously.
Args:
train_op: A `Tensor` that, when executed, will apply the gradients and
return the loss value.
logdir: The directory where training logs are written to. If None, model
checkpoints and summaries will not be written.
mask_update_op: Operation that upon execution updates the weight masks and
thresholds.
train_step_fn: The function to call in order to execute a single gradient
step. The function must take exactly four arguments: the current
session, the `train_op` `Tensor`, a global step `Tensor` and a dictionary.
train_step_kwargs: A dictionary which is passed to the `train_step_fn`. By
default, two `Boolean`, scalar ops called "should_stop" and "should_log"
are provided.
log_every_n_steps: The frequency, in terms of global steps, that the loss
and global step are logged.
graph: The graph to pass to the supervisor. If no graph is supplied the
default graph is used.
master: The address of the tensorflow master.
is_chief: Specifies whether or not the training is being run by the primary
replica during replica training.
global_step: The `Tensor` representing the global step. If left as `None`,
then slim.variables.get_or_create_global_step() is used.
number_of_steps: The max number of gradient steps to take during training,
as measured by 'global_step': training will stop if global_step is
greater than 'number_of_steps'. If the value is left as None, training
proceeds indefinitely.
init_op: The initialization operation. If left to its default value, then
the session is initialized by calling `tf.global_variables_initializer()`.
init_feed_dict: A feed dictionary to use when executing the `init_op`.
local_init_op: The local initialization operation. If left to its default
value, then the session is initialized by calling
`tf.local_variables_initializer()` and `tf.tables_initializer()`.
init_fn: An optional callable to be executed after `init_op` is called. The
callable must accept one argument, the session being initialized.
ready_op: Operation to check if the model is ready to use. If left to its
default value, then the session checks for readiness by calling
`tf.report_uninitialized_variables()`.
summary_op: The summary operation.
save_summaries_secs: How often, in seconds, to save summaries.
summary_writer: `SummaryWriter` to use. Can be `None`
to indicate that no summaries should be written. If unset, we
create a SummaryWriter.
startup_delay_steps: The number of steps to wait for before beginning. Note
that this must be 0 if a sync_optimizer is supplied.
saver: Saver to save checkpoints. If None, a default one will be created
and used.
save_interval_secs: How often, in seconds, to save the model to `logdir`.
sync_optimizer: an instance of tf.train.SyncReplicasOptimizer, or a list of
them. If the argument is supplied, gradient updates will be synchronous.
If left as `None`, gradient updates will be asynchronous.
session_config: An instance of `tf.ConfigProto` that will be used to
configure the `Session`. If left as `None`, the default will be used.
trace_every_n_steps: produce and save a `Timeline` in Chrome trace format
and add it to the summaries every `trace_every_n_steps`. If None, no trace
information will be produced or saved.
Returns:
the value of the loss function after training.
Raises:
ValueError: if `train_op` is empty or if `startup_delay_steps` is
non-zero when `sync_optimizer` is supplied, if `number_of_steps` is
negative, or if `trace_every_n_steps` is not `None` and no `logdir` is
provided.
"""
def train_step_with_pruning_fn(sess, train_op, global_step,
train_step_kwargs):
total_loss, should_stop = train_step_fn(sess, train_op, global_step,
train_step_kwargs)
sess.run(mask_update_op)
return total_loss, should_stop
total_loss, _ = _slim.learning.train(
train_op,
logdir,
train_step_fn=train_step_with_pruning_fn,
train_step_kwargs=train_step_kwargs,
log_every_n_steps=log_every_n_steps,
graph=graph,
master=master,
is_chief=is_chief,
global_step=global_step,
number_of_steps=number_of_steps,
init_op=init_op,
init_feed_dict=init_feed_dict,
local_init_op=local_init_op,
init_fn=init_fn,
ready_op=ready_op,
summary_op=summary_op,
save_summaries_secs=save_summaries_secs,
summary_writer=summary_writer,
startup_delay_steps=startup_delay_steps,
saver=saver,
save_interval_secs=save_interval_secs,
sync_optimizer=sync_optimizer,
session_config=session_config,
trace_every_n_steps=trace_every_n_steps)
return total_loss
|
BCVisin/PhotoViewer
|
refs/heads/master
|
app_motion.py
|
1
|
from Tkinter import *
import random
import get_photos
class MyApp(Tk):
def __init__(self):
Tk.__init__(self)
#self.attributes('-zoomed', True)
self.configure(bg='black')
self.attributes("-fullscreen", True)
self.configure(cursor="none")
self.move_counter = 0
self.play_show = True
self.screen_width = self.winfo_screenwidth()
self.screen_height = self.winfo_screenheight()
self.photo = None
self.screen_width, self.screen_height = (1920 + 100, 1080 + 100)
self.photos = get_photos.get_photos(max_w=self.screen_width, max_h=self.screen_height)
#self.w = Label(self, background='black')
self.w = Canvas(self, width=self.screen_width, height=self.screen_height, background='black', highlightthickness=0)
self.l = Label(self.w, background='black')
self.l.pack(expand=True, fill="both")
self.bind("<Escape>", self.quit)
self.bind("s", self.start_stop_show)
self.bind("<space>", self.start_stop_show)
self.bind("<Left>", self.previous_photo)
self.bind("<Right>", self.next_photo)
self.bind('q', self.quit)
self.show()
def quit(self, event=None):
self.destroy()
def start_stop_show(self, event=None):
if self.play_show:
self.play_show = False
else:
self.play_show = True
self.show()
def previous_photo(self, event=None):
self.play_show = False
self.display_photo(self.photos.get_previous())
def next_photo(self, event=None):
self.play_show = False
self.display_photo(self.photos.get_next())
def display_photo(self, photo):
#anchor = self.get_random_anchor()
#self.w.pack(expand=True, fill="both")
#if self.photo:
# self.w.delete(self.photo)
#self.photo = self.w.create_image(anchor[1][0], anchor[1][1] , image=photo, anchor=anchor[0])
self.l.configure(image=photo, anchor=self.get_random_anchor())
self.l.pack()
self.w.pack()
self.move_counter = 0
self.x_pos = bool(random.getrandbits(1))
self.y_pos = bool(random.getrandbits(1))
self.move_widget()
def get_random_anchor(self):
anchors = ['n', 'e', 's', 'w', 'ne', 'nw', 'se', 'sw', 'center']
return anchors[random.randint(0, len(anchors) - 1)]
def get_random_anchor2(self):
w = self.screen_width - 50
h = self.screen_height - 50
half_w = w / 2
half_h = h / 2
return ('center', (half_w, half_h))
#anchors = [('n', (half_w, -50)), ('e', (w, half_h)), ('s', (half_w, h)), ('w', (-50, half_h)), ('center', (half_w, half_h))]
#return anchors[random.randint(0, len(anchors) - 1)]
def move_widget(self):
if self.play_show:
self.move_counter += 1
x = 1 if self.x_pos else -1
y = 1 if self.y_pos else -1
self.w.move(self.l, x, y)
self.move_counter += 1
self.after(500, self.move_widget)
def show(self):
if self.play_show:
self.display_photo(self.photos.get_next())
self.after(5000, self.show)
if __name__ == "__main__":
root = MyApp()
root.mainloop()
|
gboudreau/CouchPotato
|
refs/heads/master
|
library/sqlalchemy/queue.py
|
18
|
"""An adaptation of Py2.3/2.4's Queue module which supports reentrant
behavior, using RLock instead of Lock for its mutex object.
This is to support the connection pool's usage of weakref callbacks to return
connections to the underlying Queue, which can in extremely
rare cases be invoked within the ``get()`` method of the Queue itself,
producing a ``put()`` inside the ``get()`` and therefore a reentrant
condition."""
from collections import deque
from time import time as _time
from sqlalchemy.util import threading
__all__ = ['Empty', 'Full', 'Queue']
class Empty(Exception):
"Exception raised by Queue.get(block=0)/get_nowait()."
pass
class Full(Exception):
"Exception raised by Queue.put(block=0)/put_nowait()."
pass
class Queue:
def __init__(self, maxsize=0):
"""Initialize a queue object with a given maximum size.
If `maxsize` is <= 0, the queue size is infinite.
"""
self._init(maxsize)
# mutex must be held whenever the queue is mutating. All methods
# that acquire mutex must release it before returning. mutex
# is shared between the two conditions, so acquiring and
# releasing the conditions also acquires and releases mutex.
self.mutex = threading.RLock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self.not_empty = threading.Condition(self.mutex)
# Notify not_full whenever an item is removed from the queue;
# a thread waiting to put is notified then.
self.not_full = threading.Condition(self.mutex)
def qsize(self):
"""Return the approximate size of the queue (not reliable!)."""
self.mutex.acquire()
n = self._qsize()
self.mutex.release()
return n
def empty(self):
"""Return True if the queue is empty, False otherwise (not
reliable!)."""
self.mutex.acquire()
n = self._empty()
self.mutex.release()
return n
def full(self):
"""Return True if the queue is full, False otherwise (not
reliable!)."""
self.mutex.acquire()
n = self._full()
self.mutex.release()
return n
def put(self, item, block=True, timeout=None):
"""Put an item into the queue.
If optional args `block` is True and `timeout` is None (the
default), block if necessary until a free slot is
available. If `timeout` is a positive number, it blocks at
most `timeout` seconds and raises the ``Full`` exception if no
free slot was available within that time. Otherwise (`block`
is false), put an item on the queue if a free slot is
immediately available, else raise the ``Full`` exception
(`timeout` is ignored in that case).
"""
self.not_full.acquire()
try:
if not block:
if self._full():
raise Full
elif timeout is None:
while self._full():
self.not_full.wait()
else:
if timeout < 0:
raise ValueError("'timeout' must be a positive number")
endtime = _time() + timeout
while self._full():
remaining = endtime - _time()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self._put(item)
self.not_empty.notify()
finally:
self.not_full.release()
def put_nowait(self, item):
"""Put an item into the queue without blocking.
Only enqueue the item if a free slot is immediately available.
Otherwise raise the ``Full`` exception.
"""
return self.put(item, False)
def get(self, block=True, timeout=None):
"""Remove and return an item from the queue.
If optional args `block` is True and `timeout` is None (the
default), block if necessary until an item is available. If
`timeout` is a positive number, it blocks at most `timeout`
seconds and raises the ``Empty`` exception if no item was
available within that time. Otherwise (`block` is false),
return an item if one is immediately available, else raise the
``Empty`` exception (`timeout` is ignored in that case).
"""
self.not_empty.acquire()
try:
if not block:
if self._empty():
raise Empty
elif timeout is None:
while self._empty():
self.not_empty.wait()
else:
if timeout < 0:
raise ValueError("'timeout' must be a positive number")
endtime = _time() + timeout
while self._empty():
remaining = endtime - _time()
if remaining <= 0.0:
raise Empty
self.not_empty.wait(remaining)
item = self._get()
self.not_full.notify()
return item
finally:
self.not_empty.release()
def get_nowait(self):
"""Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the ``Empty`` exception.
"""
return self.get(False)
# Override these methods to implement other queue organizations
# (e.g. stack or priority queue).
# These will only be called with appropriate locks held
# Initialize the queue representation
def _init(self, maxsize):
self.maxsize = maxsize
self.queue = deque()
def _qsize(self):
return len(self.queue)
# Check whether the queue is empty
def _empty(self):
return not self.queue
# Check whether the queue is full
def _full(self):
return self.maxsize > 0 and len(self.queue) == self.maxsize
# Put a new item in the queue
def _put(self, item):
self.queue.append(item)
# Get an item from the queue
def _get(self):
return self.queue.popleft()
|
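For reference, the reentrant Queue keeps the familiar Queue interface, so a short illustrative round trip (not part of the SQLAlchemy module) looks like:

q = Queue(maxsize=2)
q.put("conn-1")
q.put("conn-2")
try:
    q.put("conn-3", block=False)    # queue is at maxsize, so a non-blocking put raises Full
except Full:
    pass
assert q.get() == "conn-1"          # FIFO order via the underlying deque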
metarelate/metarelate
|
refs/heads/master
|
lib/metarelate/thread.py
|
1
|
# (C) British Crown Copyright 2015, Met Office
#
# This file is part of metarelate.
#
# metarelate is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# metarelate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with metarelate. If not, see <http://www.gnu.org/licenses/>.
from threading import Thread
import metarelate
# maximum number of threads for multi-threading code
MAXTHREADS = metarelate.site_config.get('num_workers')
class WorkerThread(Thread):
"""
A :class:threading.Thread which moves objects from an input queue to an
output deque using a 'dowork' method, as defined by a subclass.
"""
def __init__(self, aqueue, adeque, fu_p=None, service=None):
self.queue = aqueue
self.deque = adeque
self.fuseki_process = fu_p
self.service = service
Thread.__init__(self)
self.daemon = True
def run(self):
while not self.queue.empty():
resource = self.queue.get()
try:
self.dowork(resource)
self.deque.append(resource)
except Exception, e:
print e
self.queue.task_done()
|
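A small sketch of how a WorkerThread subclass is intended to be used; the TagWorker class, queue contents, and worker count below are hypothetical, not part of metarelate:

from collections import deque
from Queue import Queue          # Python 2, matching the module above

class TagWorker(WorkerThread):
    def dowork(self, resource):
        resource['status'] = 'processed'

inqueue, results = Queue(), deque()
for i in range(8):
    inqueue.put({'id': i})
workers = [TagWorker(inqueue, results) for _ in range(4)]
for worker in workers:
    worker.start()
inqueue.join()                   # run() calls task_done() for every item it pulls off the queue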
wesokes/django-query-builder
|
refs/heads/develop
|
querybuilder/tests/field_tests.py
|
2
|
import datetime
from fleming import unix_time, floor
from querybuilder.fields import SimpleField, LagDifferenceField, LeadDifferenceField, AllTime, SumField, Week
from querybuilder.query import Query, QueryWindow
from querybuilder.tests.models import Order
from querybuilder.tests.query_tests import QueryTestCase
class FieldTest(QueryTestCase):
def test_cast(self):
"""
Tests that the cast sql is generated and that the resulting value is of that type
"""
query = Query().from_table(
table=Order,
fields=[SimpleField(field='revenue', cast='INT')]
).order_by('revenue').limit(1)
expected_query = (
'SELECT CAST(querybuilder_tests_order.revenue AS INT) FROM querybuilder_tests_order '
'ORDER BY revenue ASC LIMIT 1'
)
self.assertEqual(expected_query, query.get_sql())
rows = query.select()
self.assertEqual(1, len(rows))
self.assertEqual(type(rows[0]['revenue']), int)
def test_get_alias(self):
"""
Tests the different cases of getting the alias of a field
"""
field = SimpleField(field='revenue')
query = Query().from_table(table=Order, fields=[field])
expected_query = 'SELECT querybuilder_tests_order.revenue FROM querybuilder_tests_order'
self.assertEqual(expected_query, query.get_sql())
field.auto_alias = 'my_auto_alias'
query = Query().from_table(table=Order, fields=[field])
expected_query = 'SELECT querybuilder_tests_order.revenue AS "my_auto_alias" FROM querybuilder_tests_order'
self.assertEqual(expected_query, query.get_sql())
field.alias = 'my_alias'
query = Query().from_table(table=Order, fields=[field])
expected_query = 'SELECT querybuilder_tests_order.revenue AS "my_alias" FROM querybuilder_tests_order'
self.assertEqual(expected_query, query.get_sql())
query = Query().from_table(
table=Order,
fields=[field],
prefix_fields=True,
field_prefix='my_field_prefix',
)
expected_query = (
'SELECT querybuilder_tests_order.revenue AS "my_field_prefix__my_alias" FROM querybuilder_tests_order'
)
self.assertEqual(expected_query, query.get_sql())
field.alias = None
field.auto_alias = None
query = Query().from_table(
table=Order,
fields=[field],
prefix_fields=True,
field_prefix='my_field_prefix',
)
expected_query = (
'SELECT querybuilder_tests_order.revenue AS "my_field_prefix__revenue" FROM querybuilder_tests_order'
)
self.assertEqual(expected_query, query.get_sql())
def lead_lag_difference_test(self):
query = Query().from_table(
table=Order,
fields=[
'margin',
LagDifferenceField('margin', over=QueryWindow().order_by('-margin')),
LeadDifferenceField('margin', over=QueryWindow().order_by('-margin')),
]
)
expected_query = (
'SELECT querybuilder_tests_order.margin, '
'((querybuilder_tests_order.margin) - (LAG(querybuilder_tests_order.margin, 1) '
'OVER (ORDER BY margin DESC))) AS "margin_lag", '
'((querybuilder_tests_order.margin) - (LEAD(querybuilder_tests_order.margin, 1) '
'OVER (ORDER BY margin DESC))) AS "margin_lead" '
'FROM querybuilder_tests_order'
)
self.assertEqual(expected_query, query.get_sql())
rows = query.select()
self.assertEqual(4, len(rows))
self.assertEqual(None, rows[0]['margin_lag'])
self.assertEqual(500.0, rows[0]['margin_lead'])
self.assertEqual(-75.0, rows[3]['margin_lag'])
self.assertEqual(None, rows[3]['margin_lead'])
def date_part_field_test(self):
"""
Tests the different options of DatePartField objects
"""
# test with no cast
query = Query().from_table(
table=Order,
fields=[
AllTime('time'),
SumField('margin')
]
)
expected_query = (
'SELECT CAST(0 AS INT) AS "time__epoch", '
'SUM(querybuilder_tests_order.margin) AS "margin_sum" '
'FROM querybuilder_tests_order'
)
self.assertEqual(expected_query, query.get_sql())
rows = query.select()
self.assertEqual(1, len(rows))
self.assertEqual(825.0, rows[0]['margin_sum'])
self.assertEqual(0, rows[0]['time__epoch'])
# def test_cast(self):
# """
# Verified a value is casted to a float
# """
# query = Query().from_table(
# table=Order,
# fields=[
# AllTime('time', cast='FLOAT'),
# SumField('margin')
# ]
# )
# expected_query = (
# 'SELECT CAST(0 AS FLOAT) AS time__epoch, '
# 'SUM(querybuilder_tests_order.margin) AS margin_sum '
# 'FROM querybuilder_tests_order'
# )
# self.assertEqual(expected_query, query.get_sql())
# rows = query.select()
# self.assertEqual(1, len(rows))
# self.assertEqual(825.0, rows[0]['margin_sum'])
# self.assertEqual(float, type(rows[0]['time__epoch']))
# self.assertEqual(0.0, rows[0]['time__epoch'])
def test_week_grouping(self):
"""
Verifies that the week grouping query and result is correct
"""
query = Query().from_table(
table=Order,
fields=[
Week('time', auto=True),
SumField('margin')
]
)
expected_query = (
'SELECT CAST(EXTRACT(year FROM querybuilder_tests_order.time) AS INT) AS "time__year", '
'CAST(EXTRACT(week FROM querybuilder_tests_order.time) AS INT) AS "time__week", '
'CAST(EXTRACT(epoch FROM date_trunc(\'week\', querybuilder_tests_order.time)) AS INT) AS "time__epoch", '
'SUM(querybuilder_tests_order.margin) AS "margin_sum" '
'FROM querybuilder_tests_order '
'GROUP BY time__year, time__week, time__epoch '
'ORDER BY time__epoch ASC'
)
self.assertEqual(expected_query, query.get_sql())
rows = query.select()
self.assertEqual(1, len(rows))
week_dt = datetime.datetime(2012, 10, 19)
week_unix_time = unix_time(floor(week_dt, week=1))
self.assertEqual(825.0, rows[0]['margin_sum'])
self.assertEqual(week_unix_time, rows[0]['time__epoch'])
self.assertEqual(2012, rows[0]['time__year'])
self.assertEqual(42, rows[0]['time__week'])
|
caslei/TfModels
|
refs/heads/master
|
inception/inception/slim/scopes.py
|
28
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the new arg_scope used for TF-Slim ops.
Allows one to define models much more compactly by eliminating boilerplate
code. This is accomplished through the use of argument scoping (arg_scope).
Example of how to use scopes.arg_scope:
with scopes.arg_scope(ops.conv2d, padding='SAME',
stddev=0.01, weight_decay=0.0005):
net = ops.conv2d(inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
net = ops.conv2d(net, 256, [5, 5], scope='conv2')
The first call to conv2d will overwrite padding:
ops.conv2d(inputs, 64, [11, 11], 4, padding='VALID',
stddev=0.01, weight_decay=0.0005, scope='conv1')
The second call to Conv will use predefined args:
ops.conv2d(inputs, 256, [5, 5], padding='SAME',
stddev=0.01, weight_decay=0.0005, scope='conv2')
Example of how to reuse an arg_scope:
with scopes.arg_scope(ops.conv2d, padding='SAME',
stddev=0.01, weight_decay=0.0005) as conv2d_arg_scope:
net = ops.conv2d(net, 256, [5, 5], scope='conv1')
....
with scopes.arg_scope(conv2d_arg_scope):
net = ops.conv2d(net, 256, [5, 5], scope='conv2')
Example of how to use scopes.add_arg_scope:
@scopes.add_arg_scope
def conv2d(*args, **kwargs)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import functools
from tensorflow.python.framework import ops
_ARGSTACK_KEY = ("__arg_stack",)
_DECORATED_OPS = set()
def _get_arg_stack():
stack = ops.get_collection(_ARGSTACK_KEY)
if stack:
return stack[0]
else:
stack = [{}]
ops.add_to_collection(_ARGSTACK_KEY, stack)
return stack
def _current_arg_scope():
stack = _get_arg_stack()
return stack[-1]
def _add_op(op):
key_op = (op.__module__, op.__name__)
if key_op not in _DECORATED_OPS:
_DECORATED_OPS.add(key_op)
@contextlib.contextmanager
def arg_scope(list_ops_or_scope, **kwargs):
"""Stores the default arguments for the given set of list_ops.
For usage, please see examples at top of the file.
Args:
list_ops_or_scope: List or tuple of operations to set argument scope for or
a dictionary containing the current scope. When list_ops_or_scope is a dict,
kwargs must be empty. When list_ops_or_scope is a list or tuple, then
every op in it needs to be decorated with @add_arg_scope to work.
**kwargs: keyword=value that will define the defaults for each op in
list_ops. All the ops need to accept the given set of arguments.
Yields:
the current_scope, which is a dictionary of {op: {arg: value}}
Raises:
TypeError: if list_ops is not a list or a tuple.
ValueError: if any op in list_ops has not been decorated with @add_arg_scope.
"""
if isinstance(list_ops_or_scope, dict):
# Assumes that list_ops_or_scope is a scope that is being reused.
if kwargs:
raise ValueError("When attempting to re-use a scope by suppling a"
"dictionary, kwargs must be empty.")
current_scope = list_ops_or_scope.copy()
try:
_get_arg_stack().append(current_scope)
yield current_scope
finally:
_get_arg_stack().pop()
else:
# Assumes that list_ops_or_scope is a list/tuple of ops with kwargs.
if not isinstance(list_ops_or_scope, (list, tuple)):
raise TypeError("list_ops_or_scope must either be a list/tuple or reused"
"scope (i.e. dict)")
try:
current_scope = _current_arg_scope().copy()
for op in list_ops_or_scope:
key_op = (op.__module__, op.__name__)
if not has_arg_scope(op):
raise ValueError("%s is not decorated with @add_arg_scope", key_op)
if key_op in current_scope:
current_kwargs = current_scope[key_op].copy()
current_kwargs.update(kwargs)
current_scope[key_op] = current_kwargs
else:
current_scope[key_op] = kwargs.copy()
_get_arg_stack().append(current_scope)
yield current_scope
finally:
_get_arg_stack().pop()
def add_arg_scope(func):
"""Decorates a function with args so it can be used within an arg_scope.
Args:
func: function to decorate.
Returns:
The decorated function func_with_args().
"""
@functools.wraps(func)
def func_with_args(*args, **kwargs):
current_scope = _current_arg_scope()
current_args = kwargs
key_func = (func.__module__, func.__name__)
if key_func in current_scope:
current_args = current_scope[key_func].copy()
current_args.update(kwargs)
return func(*args, **current_args)
_add_op(func)
return func_with_args
def has_arg_scope(func):
"""Checks whether a func has been decorated with @add_arg_scope or not.
Args:
func: function to check.
Returns:
a boolean.
"""
key_op = (func.__module__, func.__name__)
return key_op in _DECORATED_OPS
|
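A compact end-to-end sketch of the machinery above; fully_connected is a stand-in op defined only for this example (a TensorFlow default graph is assumed, as in the module itself):

@add_arg_scope
def fully_connected(inputs, num_units, activation='relu', stddev=0.1):
    return (inputs, num_units, activation, stddev)

with arg_scope([fully_connected], stddev=0.01, activation='tanh'):
    print(fully_connected('x', 64))                # ('x', 64, 'tanh', 0.01) -- scope defaults injected
    print(fully_connected('x', 64, stddev=0.5))    # ('x', 64, 'tanh', 0.5)  -- explicit kwargs still win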
edisonlz/fruit
|
refs/heads/master
|
web_project/base/site-packages/pygments/styles/emacs.py
|
75
|
# -*- coding: utf-8 -*-
"""
pygments.styles.emacs
~~~~~~~~~~~~~~~~~~~~~
A highlighting style for Pygments, inspired by Emacs.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class EmacsStyle(Style):
"""
The default style (inspired by Emacs 22).
"""
background_color = "#f8f8f8"
default_style = ""
styles = {
Whitespace: "#bbbbbb",
Comment: "italic #008800",
Comment.Preproc: "noitalic",
Comment.Special: "noitalic bold",
Keyword: "bold #AA22FF",
Keyword.Pseudo: "nobold",
Keyword.Type: "bold #00BB00",
Operator: "#666666",
Operator.Word: "bold #AA22FF",
Name.Builtin: "#AA22FF",
Name.Function: "#00A000",
Name.Class: "#0000FF",
Name.Namespace: "bold #0000FF",
Name.Exception: "bold #D2413A",
Name.Variable: "#B8860B",
Name.Constant: "#880000",
Name.Label: "#A0A000",
Name.Entity: "bold #999999",
Name.Attribute: "#BB4444",
Name.Tag: "bold #008000",
Name.Decorator: "#AA22FF",
String: "#BB4444",
String.Doc: "italic",
String.Interpol: "bold #BB6688",
String.Escape: "bold #BB6622",
String.Regex: "#BB6688",
String.Symbol: "#B8860B",
String.Other: "#008000",
Number: "#666666",
Generic.Heading: "bold #000080",
Generic.Subheading: "bold #800080",
Generic.Deleted: "#A00000",
Generic.Inserted: "#00A000",
Generic.Error: "#FF0000",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold #000080",
Generic.Output: "#888",
Generic.Traceback: "#04D",
Error: "border:#FF0000"
}
|
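The style is picked up through the normal Pygments formatter API; a brief illustrative usage:

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

html = highlight('print("hello")', PythonLexer(), HtmlFormatter(style='emacs'))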
kaitheslayer/developingprojects
|
refs/heads/master
|
Iris/Main File.py
|
1
|
import re
import win32com.client as wincl
import pyowm
import math
ans = ''
preans = ''
def tr(x):
return {
'how are you': 'speak("I do not feel emotions.")',
'add': 'add(ine)',
'multiply': 'multiply(ine)',
'divide': 'divide(ine)',
'power': 'po(ine)',
'minus': 'minus(ine)',
'weather': 'weather(ine)'
}[x]
def speak(x):
wincl.Dispatch("SAPI.SpVoice").Speak(x)
def output (group, mode, x):
# Group 1 = Mathematical Answer, Group 2 = String Output
if mode == 0:
if group == 1:
answer(x)
speak ("The answer is " + str(x))
if group == 2:
speak (x)
def answer(x):
global ans
global preans
preans = ans
ans = x
def minus(v):
v = re.findall(r'\b\d+\b', v)
t = 0
c = []
for _ in v:
c.append(float(_))
y = len(c)
a = c[0] - sum(c[1:y])
output(1, 0, a)
def e(x):
exec(x)
def add(x):
t = 0
c = re.findall(r'\b\d+\b', x)
for _ in c:
t = float(_) + t
print (t)
output(1, 0, t)
def multiply(x):
t = 1
c = re.findall(r'\b\d+\b', x)
for _ in c:
t = float(_) * t
print (t)
output(1, 0, t)
def divide(x):
c = re.findall(r'\b\d+\b', x)
p = c[0]
del(c[0])
for _ in c:
p = float(p) / float(_)
output(1, 0, p)
def po(x):
t = 1
c = re.findall(r'\b\d+\b', x)
t = float(c[0]) ** float(c[1])
output(1, 0, t)
def weather(lc):
b = re.findall(r'(?<!\.\s)\b[A-Z][a-z]*\b', lc)
owm = pyowm.OWM('ae0bcff60dfaaa49d582c81a517da9c0')
location = ' '.join(b)
observation = owm.weather_at_place(location)
w = observation.get_weather()
b = "It is currently %s With a temperature of %s °C \n a max of %s °C and a min of %s °C " % ( w.get_status(), w.get_temperature(unit='celsius')['temp'], w.get_temperature(unit='celsius')['temp_max'],
w.get_temperature(unit='celsius')['temp_min'])
output(2, 0, b)
ine = "what is the weather in London"
words = ['add', 'subtract', 'divide', 'minus', 'multiply', 'weather', 'how are you', 'weather', 'power', 'answer to universe']
word_exp = '|'.join(words)
inputi = re.findall(word_exp, ine, re.M)
output = e(tr(inputi[0]))
|
ThinkboxSoftware/Deadline
|
refs/heads/master
|
Custom/events/Zabbix/API/requests/packages/urllib3/util/connection.py
|
1
|
import socket
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
def is_connection_dropped(conn): # Platform-specific
"""
Returns True if the connection is dropped and should be closed.
:param conn:
:class:`httplib.HTTPConnection` object.
Note: For platforms like AppEngine, this will always return ``False`` to
let the platform handle connection recycling transparently for us.
"""
sock = getattr(conn, 'sock', False)
if sock is False: # Platform-specific: AppEngine
return False
if sock is None: # Connection already closed (such as by httplib).
return True
if not poll:
if not select: # Platform-specific: AppEngine
return False
try:
return select([sock], [], [], 0.0)[0]
except socket.error:
return True
# This version is better on platforms that support it.
p = poll()
p.register(sock, POLLIN)
for (fno, ev) in p.poll(0.0):
if fno == sock.fileno():
# Either data is buffered (bad), or the connection is dropped.
return True
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. Added to its signature is only `socket_options`.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, socket_options=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
A host of '' or port 0 tells the OS to use the default.
"""
host, port = address
err = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
# If provided, set socket level options before connecting.
# This is the only addition urllib3 makes to this function.
_set_socket_options(sock, socket_options)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise socket.error("getaddrinfo returns an empty list")
def _set_socket_options(sock, options):
if options is None:
return
for opt in options:
sock.setsockopt(*opt)
|
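An illustrative call of the helper above (hypothetical host; socket_options takes the (level, optname, value) tuples that _set_socket_options applies):

import socket

sock = create_connection(
    ("example.com", 80),
    timeout=3.0,
    socket_options=[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
)
sock.close()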
JioEducation/edx-platform
|
refs/heads/master
|
lms/djangoapps/instructor_analytics/tests/test_distributions.py
|
165
|
""" Tests for analytics.distributions """
from django.test import TestCase
from nose.tools import raises
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from instructor_analytics.distributions import profile_distribution, AVAILABLE_PROFILE_FEATURES
class TestAnalyticsDistributions(TestCase):
'''Test analytics distribution gathering.'''
def setUp(self):
super(TestAnalyticsDistributions, self).setUp()
self.course_id = SlashSeparatedCourseKey('robot', 'course', 'id')
self.users = [UserFactory(
profile__gender=['m', 'f', 'o'][i % 3],
profile__level_of_education=['a', 'hs', 'el'][i % 3],
profile__year_of_birth=i + 1930
) for i in xrange(30)]
self.ces = [CourseEnrollment.enroll(user, self.course_id)
for user in self.users]
@raises(ValueError)
def test_profile_distribution_bad_feature(self):
feature = 'robot-not-a-real-feature'
self.assertNotIn(feature, AVAILABLE_PROFILE_FEATURES)
profile_distribution(self.course_id, feature)
def test_profile_distribution_easy_choice(self):
feature = 'gender'
self.assertIn(feature, AVAILABLE_PROFILE_FEATURES)
distribution = profile_distribution(self.course_id, feature)
self.assertEqual(distribution.type, 'EASY_CHOICE')
self.assertEqual(distribution.data['no_data'], 0)
self.assertEqual(distribution.data['m'], len(self.users) / 3)
self.assertEqual(distribution.choices_display_names['m'], 'Male')
def test_profile_distribution_open_choice(self):
feature = 'year_of_birth'
self.assertIn(feature, AVAILABLE_PROFILE_FEATURES)
distribution = profile_distribution(self.course_id, feature)
print distribution
self.assertEqual(distribution.type, 'OPEN_CHOICE')
self.assertTrue(hasattr(distribution, 'choices_display_names'))
self.assertEqual(distribution.choices_display_names, None)
self.assertNotIn('no_data', distribution.data)
self.assertEqual(distribution.data[1930], 1)
def test_gender_count(self):
course_enrollments = CourseEnrollment.objects.filter(
course_id=self.course_id, user__profile__gender='m'
)
distribution = profile_distribution(self.course_id, "gender")
self.assertEqual(distribution.data['m'], len(course_enrollments))
course_enrollments[0].deactivate()
distribution = profile_distribution(self.course_id, "gender")
self.assertEqual(distribution.data['m'], len(course_enrollments) - 1)
def test_level_of_education_count(self):
course_enrollments = CourseEnrollment.objects.filter(
course_id=self.course_id, user__profile__level_of_education='hs'
)
distribution = profile_distribution(self.course_id, "level_of_education")
self.assertEqual(distribution.data['hs'], len(course_enrollments))
course_enrollments[0].deactivate()
distribution = profile_distribution(self.course_id, "level_of_education")
self.assertEqual(distribution.data['hs'], len(course_enrollments) - 1)
class TestAnalyticsDistributionsNoData(TestCase):
'''Test analytics distribution gathering.'''
def setUp(self):
super(TestAnalyticsDistributionsNoData, self).setUp()
self.course_id = SlashSeparatedCourseKey('robot', 'course', 'id')
self.users = [UserFactory(
profile__year_of_birth=i + 1930,
) for i in xrange(5)]
self.nodata_users = [UserFactory(
profile__year_of_birth=None,
profile__gender=[None, ''][i % 2]
) for i in xrange(4)]
self.users += self.nodata_users
self.ces = tuple(CourseEnrollment.enroll(user, self.course_id)
for user in self.users)
def test_profile_distribution_easy_choice_nodata(self):
feature = 'gender'
self.assertIn(feature, AVAILABLE_PROFILE_FEATURES)
distribution = profile_distribution(self.course_id, feature)
print distribution
self.assertEqual(distribution.type, 'EASY_CHOICE')
self.assertTrue(hasattr(distribution, 'choices_display_names'))
self.assertNotEqual(distribution.choices_display_names, None)
self.assertIn('no_data', distribution.data)
self.assertEqual(distribution.data['no_data'], len(self.nodata_users))
def test_profile_distribution_open_choice_nodata(self):
feature = 'year_of_birth'
self.assertIn(feature, AVAILABLE_PROFILE_FEATURES)
distribution = profile_distribution(self.course_id, feature)
print distribution
self.assertEqual(distribution.type, 'OPEN_CHOICE')
self.assertTrue(hasattr(distribution, 'choices_display_names'))
self.assertEqual(distribution.choices_display_names, None)
self.assertIn('no_data', distribution.data)
self.assertEqual(distribution.data['no_data'], len(self.nodata_users))
|
vipul-sharma20/oh-mainline
|
refs/heads/master
|
vendor/packages/kombu/funtests/tests/test_sqla.py
|
30
|
from nose import SkipTest
from funtests import transport
class test_sqla(transport.TransportCase):
transport = 'sqlalchemy'
prefix = 'sqlalchemy'
event_loop_max = 10
connection_options = {'hostname': 'sqla+sqlite://'}
def before_connect(self):
try:
import sqlalchemy # noqa
except ImportError:
raise SkipTest('sqlalchemy not installed')
|
dchud/sentinel
|
refs/heads/master
|
canary/ui/stats/__init__.py
|
1
|
# $Id$
_q_exports = [
'_q_index',
'query',
]
from quixote.errors import AccessError
from canary.qx_defs import NotLoggedInError
from canary.ui.stats import stats_ui
_q_index = stats_ui._q_index
query = stats_ui.query
def _q_access (request):
if request.session.user == None:
raise NotLoggedInError('Authorized access only.')
if not (request.session.user.is_admin \
or request.session.user.is_editor \
or request.session.user.is_assistant):
raise AccessError("You don't have access to this page.")
|
P1sec/suricata
|
refs/heads/master
|
rust/gen-c-headers.py
|
2
|
#! /usr/bin/env python2
# This script will scan Rust source files looking for extern "C"
# functions and generate C header files from them with a filename
# based on the Rust filename.
#
# Usage: From the top suricata source directory:
#
# ./rust/gen-c-headers.py
#
from __future__ import print_function
import sys
import os
import re
from io import StringIO
template = """/* Copyright (C) 2017 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/*
* DO NOT EDIT. This file is automatically generated.
*/
#ifndef __%(name)s__
#define __%(name)s__
%(prototypes)s
#endif /* ! __%(name)s__ */
"""
# Map of Rust types to C types.
type_map = {
"bool": "bool",
"i8": "int8_t",
"i16" :"int16_t",
"i32" :"int32_t",
"i64" :"int64_t",
"u8": "uint8_t",
"u16" :"uint16_t",
"u32" :"uint32_t",
"u64" :"uint64_t",
"libc::c_void": "void",
"libc::c_char": "char",
"libc::c_int": "int",
"c_int": "int",
"libc::int8_t": "int8_t",
"libc::uint8_t": "uint8_t",
"libc::uint16_t": "uint16_t",
"libc::uint32_t": "uint32_t",
"libc::uint64_t": "uint64_t",
"SuricataContext": "SuricataContext",
"SuricataFileContext": "SuricataFileContext",
"FileContainer": "FileContainer",
"core::Flow": "Flow",
"Flow": "Flow",
"DNSState": "RSDNSState",
"DNSTransaction": "RSDNSTransaction",
"NFSState": "NFSState",
"NFSTransaction": "NFSTransaction",
"NTPState": "NTPState",
"NTPTransaction": "NTPTransaction",
"JsonT": "json_t",
"DetectEngineState": "DetectEngineState",
"core::DetectEngineState": "DetectEngineState",
"core::AppLayerDecoderEvents": "AppLayerDecoderEvents",
"AppLayerDecoderEvents": "AppLayerDecoderEvents",
"core::AppLayerEventType": "AppLayerEventType",
"AppLayerEventType": "AppLayerEventType",
"CLuaState": "lua_State",
"Store": "Store",
}
def convert_type(rs_type):
m = re.match("^[^\s]+$", rs_type)
if m:
if rs_type in type_map:
return type_map[rs_type]
m = re.match("^(.*)(\s[^\s]+)$", rs_type)
if m:
mod = m.group(1).strip()
rtype = m.group(2).strip()
if rtype in type_map:
if mod in [
"*mut",
"* mut",
"&mut",
"&'static mut",
]:
return "%s *" % (type_map[rtype])
elif mod in [
"*const",
"* const"]:
return "const %s *" % (type_map[rtype])
elif mod in [
"*mut *const",
"*mut*const"]:
return "%s **" % (type_map[rtype])
else:
raise Exception("Unknown modifier '%s' in '%s'." % (
mod, rs_type))
else:
raise Exception("Unknown type: %s" % (rtype))
raise Exception("Failed to parse Rust type: %s" % (rs_type))
def make_output_filename(filename):
parts = filename.split(os.path.sep)[2:]
last = os.path.splitext(parts.pop())[0]
outpath = "./gen/c-headers/rust-%s-%s-gen.h" % (
"-".join(parts), last)
return outpath.replace("--", "-")
def write_header(fileobj, filename):
filename = os.path.basename(filename).replace(
"-", "_").replace(".", "_").upper()
fileobj.write(file_header % {"name": filename})
def should_regen(input_filename, output_filename):
"""Check if a file should be regenerated. If the output doesn't exist,
or the input is newer than the output return True. Otherwise
return False.
"""
if not os.path.exists(output_filename):
return True
if os.stat(input_filename).st_mtime > os.stat(output_filename).st_mtime:
return True
return False
def gen_headers(filename):
output_filename = make_output_filename(filename)
if not should_regen(filename, output_filename):
return
buf = open(filename).read()
writer = StringIO()
for fn in re.findall(
r"^pub extern \"C\" fn ([A_Za-z0-9_]+)\(([^{]+)?\)"
r"(\s+-> ([^{]+))?",
buf,
re.M | re.DOTALL):
args = []
fnName = fn[0]
for arg in fn[1].split(","):
if not arg:
continue
arg_name, rs_type = arg.split(":", 1)
arg_name = arg_name.strip()
rs_type = rs_type.strip()
c_type = convert_type(rs_type)
if arg_name != "_":
args.append("%s %s" % (c_type, arg_name))
else:
args.append(c_type)
if not args:
args.append("void")
retType = fn[3].strip()
if retType == "":
returns = "void"
else:
returns = convert_type(retType)
writer.write(u"%s %s(%s);\n" % (returns, fnName, ", ".join(args)))
if writer.tell() > 0:
print("Writing %s" % (output_filename))
if not os.path.exists(os.path.dirname(output_filename)):
os.makedirs(os.path.dirname(output_filename))
with open(output_filename, "w") as output:
output.write(template % {
"prototypes": writer.getvalue(),
"name": os.path.basename(output_filename).replace(
"-", "_").replace(".", "_").upper()
})
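# Illustrative round trip for a hypothetical exported Rust function:
#   pub extern "C" fn rs_example_get_count(state: *mut NFSState) -> u32
# would be emitted into the generated header as:
#   uint32_t rs_example_get_count(NFSState * state);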
def main():
rust_top = os.path.dirname(sys.argv[0])
os.chdir(rust_top)
for dirpath, dirnames, filenames in os.walk("./src"):
for filename in filenames:
if filename.endswith(".rs"):
path = os.path.join(dirpath, filename)
gen_headers(path)
if __name__ == "__main__":
sys.exit(main())
|
robovm/robovm-studio
|
refs/heads/master
|
python/testData/addImport/newFirstImportInProjectGroupWithExistingBlankLineAbove/main.py
|
75
|
import sys
import b
print(sys, a, b)
|
iulian787/spack
|
refs/heads/develop
|
var/spack/repos/builtin/packages/admixtools/package.py
|
2
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Admixtools(MakefilePackage):
"""The ADMIXTOOLS package implements 5 methods described in
Patterson et al. (2012) Ancient Admixture in Human History. Details
of the methods and algorithm can be found in this paper."""
homepage = "https://github.com/DReichLab/AdmixTools"
url = "https://github.com/DReichLab/AdmixTools/archive/v7.0.1.tar.gz"
version('7.0.1', sha256='182dd6f55109e9a1569b47843b0d1aa89fe4cf4a05f9292519b9811faea67a20')
version('7.0', sha256='c00faab626f02bbf9c25c6d2dcf661db225776e9ed61251f164e5edeb5a448e5')
version('6.0', sha256='8fcd6c6834c7b33afdd7188516856d9c66b53c33dc82e133b72b56714fb67ad5')
version('5.1', sha256='42b584cc785abfdfa9f39a341bdf81f800639737feaf3d07702de4a2e373557e')
version('5.0', sha256='9f00637eac84c1ca152b65313d803616ee62c4156c7c737a33f5b31aeeac1367')
version('1.0.1', sha256='ef3afff161e6a24c0857678373138edb1251c24d7b5308a07f10bdb0dedd44d0')
version('1.0', sha256='cf0d6950285e801e8a99c2a0b3dbbbc941a78e867af1767b1d002ec3f5803c4b')
depends_on('lapack')
depends_on('gsl')
build_directory = 'src'
def edit(self, spec, prefix):
makefile = FileFilter('src/Makefile')
lapackflags = spec['lapack'].libs.ld_flags
makefile.filter('override LDLIBS += -lgsl -lopenblas -lm -lnick',
'override LDLIBS += -lgsl -lm -lnick ' + lapackflags)
makefile.filter('TOP=../bin', 'TOP=./bin')
def install(self, spec, prefix):
with working_dir('src'):
make('install')
install_tree('bin', prefix.bin)
install('twtable', prefix.bin)
|
gurneyalex/hr
|
refs/heads/8.0
|
__unported__/hr_worked_days_hourly_rate/tests/test_hr_worked_days.py
|
28
|
# -*- coding:utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Odoo Canada. All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
class test_worked_days(common.TransactionCase):
def setUp(self):
super(test_worked_days, self).setUp()
self.employee_model = self.registry('hr.employee')
self.user_model = self.registry("res.users")
self.payslip_model = self.registry("hr.payslip")
self.worked_days_model = self.registry("hr.payslip.worked_days")
self.contract_model = self.registry("hr.contract")
self.context = self.user_model.context_get(self.cr, self.uid)
# Create an employee
self.employee_id = self.employee_model.create(
self.cr, self.uid, {'name': 'Employee 1'}, context=self.context
)
# Create a contract for the employee
self.contract_id = self.contract_model.create(
self.cr, self.uid,
{
'employee_id': self.employee_id,
'name': 'Contract 1',
'wage': 50000,
},
context=self.context
)
# Create a payslip
self.payslip_id = self.payslip_model.create(
self.cr, self.uid,
{
'employee_id': self.employee_id,
'contract_id': self.contract_id,
'date_from': '2014-01-01',
'date_to': '2014-01-31',
},
context=self.context,
)
def tearDown(self):
self.payslip_model.unlink(
self.cr, self.uid, [self.payslip_id], context=self.context)
self.contract_model.unlink(
self.cr, self.uid, [self.contract_id], context=self.context)
self.employee_model.unlink(
self.cr, self.uid, [self.employee_id], context=self.context)
super(test_worked_days, self).tearDown()
def test_total(self):
worked_days_id = self.worked_days_model.create(
self.cr, self.uid,
{
'date_from': '2014-01-01',
'date_to': '2014-01-05',
'number_of_hours': 40,
'hourly_rate': 25,
'rate': 150,
'payslip_id': self.payslip_id,
'code': 'test',
'name': 'test',
'contract_id': self.contract_id,
},
context=self.context,
)
worked_days = self.worked_days_model.browse(
self.cr, self.uid, worked_days_id, context=self.context)
self.assertEqual(worked_days.total, 40 * 25 * 1.5)
|
rhertzog/django
|
refs/heads/master
|
tests/test_client/auth_backends.py
|
315
|
from django.contrib.auth.backends import ModelBackend
class TestClientBackend(ModelBackend):
pass
|
cielpy/shadowsocks
|
refs/heads/master
|
shadowsocks/shell.py
|
652
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import json
import sys
import getopt
import logging
from shadowsocks.common import to_bytes, to_str, IPNetwork
from shadowsocks import encrypt
VERBOSE_LEVEL = 5
verbose = 0
def check_python():
info = sys.version_info
if info[0] == 2 and not info[1] >= 6:
print('Python 2.6+ required')
sys.exit(1)
elif info[0] == 3 and not info[1] >= 3:
print('Python 3.3+ required')
sys.exit(1)
elif info[0] not in [2, 3]:
print('Python version not supported')
sys.exit(1)
def print_exception(e):
global verbose
logging.error(e)
if verbose > 0:
import traceback
traceback.print_exc()
def print_shadowsocks():
version = ''
try:
import pkg_resources
version = pkg_resources.get_distribution('shadowsocks').version
except Exception:
pass
print('Shadowsocks %s' % version)
def find_config():
config_path = 'config.json'
if os.path.exists(config_path):
return config_path
config_path = os.path.join(os.path.dirname(__file__), '../', 'config.json')
if os.path.exists(config_path):
return config_path
return None
def check_config(config, is_local):
if config.get('daemon', None) == 'stop':
# no need to specify configuration for daemon stop
return
if is_local and not config.get('password', None):
logging.error('password not specified')
print_help(is_local)
sys.exit(2)
if not is_local and not config.get('password', None) \
and not config.get('port_password', None) \
and not config.get('manager_address'):
logging.error('password or port_password not specified')
print_help(is_local)
sys.exit(2)
if 'local_port' in config:
config['local_port'] = int(config['local_port'])
if config.get('server_port', None) and type(config['server_port']) != list:
config['server_port'] = int(config['server_port'])
if config.get('local_address', '') in [b'0.0.0.0']:
logging.warn('warning: local set to listen on 0.0.0.0, it\'s not safe')
if config.get('server', '') in ['127.0.0.1', 'localhost']:
logging.warn('warning: server set to listen on %s:%s, are you sure?' %
(to_str(config['server']), config['server_port']))
if (config.get('method', '') or '').lower() == 'table':
logging.warn('warning: table is not safe; please use a safer cipher, '
'like AES-256-CFB')
if (config.get('method', '') or '').lower() == 'rc4':
logging.warn('warning: RC4 is not safe; please use a safer cipher, '
'like AES-256-CFB')
if config.get('timeout', 300) < 100:
logging.warn('warning: your timeout %d seems too short' %
int(config.get('timeout')))
if config.get('timeout', 300) > 600:
logging.warn('warning: your timeout %d seems too long' %
int(config.get('timeout')))
if config.get('password') in [b'mypassword']:
logging.error('DON\'T USE DEFAULT PASSWORD! Please change it in your '
'config.json!')
sys.exit(1)
if config.get('user', None) is not None:
if os.name != 'posix':
logging.error('user can be used only on Unix')
sys.exit(1)
encrypt.try_cipher(config['password'], config['method'])
def get_config(is_local):
global verbose
logging.basicConfig(level=logging.INFO,
format='%(levelname)-s: %(message)s')
if is_local:
shortopts = 'hd:s:b:p:k:l:m:c:t:vq'
longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'user=',
'version']
else:
shortopts = 'hd:s:p:k:m:c:t:vq'
longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'workers=',
'forbidden-ip=', 'user=', 'manager-address=', 'version']
try:
config_path = find_config()
optlist, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
for key, value in optlist:
if key == '-c':
config_path = value
if config_path:
logging.info('loading config from %s' % config_path)
with open(config_path, 'rb') as f:
try:
config = parse_json_in_str(f.read().decode('utf8'))
except ValueError as e:
logging.error('found an error in config.json: %s',
e)
sys.exit(1)
else:
config = {}
v_count = 0
for key, value in optlist:
if key == '-p':
config['server_port'] = int(value)
elif key == '-k':
config['password'] = to_bytes(value)
elif key == '-l':
config['local_port'] = int(value)
elif key == '-s':
config['server'] = to_str(value)
elif key == '-m':
config['method'] = to_str(value)
elif key == '-b':
config['local_address'] = to_str(value)
elif key == '-v':
v_count += 1
# '-vv' turns on more verbose mode
config['verbose'] = v_count
elif key == '-t':
config['timeout'] = int(value)
elif key == '--fast-open':
config['fast_open'] = True
elif key == '--workers':
config['workers'] = int(value)
elif key == '--manager-address':
config['manager_address'] = value
elif key == '--user':
config['user'] = to_str(value)
elif key == '--forbidden-ip':
config['forbidden_ip'] = to_str(value).split(',')
elif key in ('-h', '--help'):
if is_local:
print_local_help()
else:
print_server_help()
sys.exit(0)
elif key == '--version':
print_shadowsocks()
sys.exit(0)
elif key == '-d':
config['daemon'] = to_str(value)
elif key == '--pid-file':
config['pid-file'] = to_str(value)
elif key == '--log-file':
config['log-file'] = to_str(value)
elif key == '-q':
v_count -= 1
config['verbose'] = v_count
except getopt.GetoptError as e:
print(e, file=sys.stderr)
print_help(is_local)
sys.exit(2)
if not config:
logging.error('config not specified')
print_help(is_local)
sys.exit(2)
config['password'] = to_bytes(config.get('password', b''))
config['method'] = to_str(config.get('method', 'aes-256-cfb'))
config['port_password'] = config.get('port_password', None)
config['timeout'] = int(config.get('timeout', 300))
config['fast_open'] = config.get('fast_open', False)
config['workers'] = config.get('workers', 1)
config['pid-file'] = config.get('pid-file', '/var/run/shadowsocks.pid')
config['log-file'] = config.get('log-file', '/var/log/shadowsocks.log')
config['verbose'] = config.get('verbose', False)
config['local_address'] = to_str(config.get('local_address', '127.0.0.1'))
config['local_port'] = config.get('local_port', 1080)
if is_local:
if config.get('server', None) is None:
logging.error('server addr not specified')
print_local_help()
sys.exit(2)
else:
config['server'] = to_str(config['server'])
else:
config['server'] = to_str(config.get('server', '0.0.0.0'))
try:
config['forbidden_ip'] = \
IPNetwork(config.get('forbidden_ip', '127.0.0.0/8,::1/128'))
except Exception as e:
logging.error(e)
sys.exit(2)
config['server_port'] = config.get('server_port', None)
logging.getLogger('').handlers = []
logging.addLevelName(VERBOSE_LEVEL, 'VERBOSE')
if config['verbose'] >= 2:
level = VERBOSE_LEVEL
elif config['verbose'] == 1:
level = logging.DEBUG
elif config['verbose'] == -1:
level = logging.WARN
elif config['verbose'] <= -2:
level = logging.ERROR
else:
level = logging.INFO
verbose = config['verbose']
logging.basicConfig(level=level,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
check_config(config, is_local)
return config
def print_help(is_local):
if is_local:
print_local_help()
else:
print_server_help()
def print_local_help():
print('''usage: sslocal [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.
You can supply configurations via either config file or command line arguments.
Proxy options:
-c CONFIG path to config file
-s SERVER_ADDR server address
-p SERVER_PORT server port, default: 8388
-b LOCAL_ADDR local binding address, default: 127.0.0.1
-l LOCAL_PORT local port, default: 1080
-k PASSWORD password
-m METHOD encryption method, default: aes-256-cfb
-t TIMEOUT timeout in seconds, default: 300
--fast-open use TCP_FASTOPEN, requires Linux 3.7+
General options:
-h, --help show this help message and exit
-d start/stop/restart daemon mode
--pid-file PID_FILE pid file for daemon mode
--log-file LOG_FILE log file for daemon mode
--user USER username to run as
-v, -vv verbose mode
-q, -qq quiet mode, only show warnings/errors
--version show version information
Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def print_server_help():
print('''usage: ssserver [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.
You can supply configurations via either config file or command line arguments.
Proxy options:
-c CONFIG path to config file
-s SERVER_ADDR server address, default: 0.0.0.0
-p SERVER_PORT server port, default: 8388
-k PASSWORD password
-m METHOD encryption method, default: aes-256-cfb
-t TIMEOUT timeout in seconds, default: 300
--fast-open use TCP_FASTOPEN, requires Linux 3.7+
--workers WORKERS number of workers, available on Unix/Linux
--forbidden-ip IPLIST comma separated IP list forbidden to connect
--manager-address ADDR optional server manager UDP address, see wiki
General options:
-h, --help show this help message and exit
-d start/stop/restart daemon mode
--pid-file PID_FILE pid file for daemon mode
--log-file LOG_FILE log file for daemon mode
--user USER username to run as
-v, -vv verbose mode
-q, -qq quiet mode, only show warnings/errors
--version show version information
Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def _decode_list(data):
rv = []
for item in data:
if hasattr(item, 'encode'):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.items():
if hasattr(value, 'encode'):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
def parse_json_in_str(data):
# parse json and convert everything from unicode to str
return json.loads(data, object_hook=_decode_dict)
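# Illustrative config.json (keys inferred from get_config/check_config above;
# values are placeholders, not recommendations):
# {
#     "server": "0.0.0.0",
#     "server_port": 8388,
#     "local_address": "127.0.0.1",
#     "local_port": 1080,
#     "password": "your-password",
#     "method": "aes-256-cfb",
#     "timeout": 300
# }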
|
Azure/azure-linux-extensions
|
refs/heads/master
|
DSC/httpclientfactory.py
|
5
|
#!/usr/bin/env python2
#
# Copyright (C) Microsoft Corporation, All rights reserved.
import os
from curlhttpclient import CurlHttpClient
from urllib2httpclient import Urllib2HttpClient
PY_MAJOR_VERSION = 0
PY_MINOR_VERSION = 1
PY_MICRO_VERSION = 2
class HttpClientFactory:
"""Factory which returns the appropriate HttpClient based on the provided python version.
Targets :
[2.4.0 - 2.7.9[ : CurlHttpclient
2.7.9 and above : Urllib2Httpclient
This is due to the lack of built-in strict certificate verification prior to 2.7.9.
The ssl module was also unavailable for [2.4.0 - 2.6.0[.
"""
def __init__(self, cert, key, insecure=False):
self.cert = cert
self.key = key
self.insecure = insecure
self.proxy_configuration = None
def create_http_client(self, version_info):
"""Create a new instance of the appropriate HttpClient.
Args:
version_info : array, the built-in python version_info array.
insecure : bool, when set to True, httpclient will bypass certificate verification.
Returns:
An instance of CurlHttpClient if the installed Python version is below 2.7.9
An instance of Urllib2HttpClient if the installed Python version is 2.7.9 or above
"""
if version_info[PY_MAJOR_VERSION] == 2 and version_info[PY_MINOR_VERSION] < 7:
return CurlHttpClient(self.cert, self.key, self.insecure, self.proxy_configuration)
elif version_info[PY_MAJOR_VERSION] == 2 and version_info[PY_MINOR_VERSION] <= 7 and version_info[
PY_MICRO_VERSION] < 9:
return CurlHttpClient(self.cert, self.key, self.insecure, self.proxy_configuration)
else:
return Urllib2HttpClient(self.cert, self.key, self.insecure, self.proxy_configuration)
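# Illustrative usage sketch (hypothetical certificate paths; requires `import sys`
# in the caller):
#   factory = HttpClientFactory("/path/to/client.crt", "/path/to/client.key")
#   client = factory.create_http_client(sys.version_info)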
|
google/mirandum
|
refs/heads/master
|
alerts/extralife/apps.py
|
1
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.apps import AppConfig
class ExtralifeConfig(AppConfig):
name = 'extralife'
verbose_name = 'Extralife Application'
def ready(self):
import extralife.signals
|
EricSekyere/python
|
refs/heads/master
|
Silocean/0010/Test.py
|
37
|
__author__ = 'Tracy'
import Image, ImageDraw, ImageFont, ImageFilter
import random
image = Image.new('RGB', (50*4, 50), (255,255,255))
font = ImageFont.truetype('DejaVuSansMono.ttf', 24)
draw = ImageDraw.Draw(image)
def randColor():
return (random.randint(64,255), random.randint(64,255), random.randint(64,255))
def randColor2():
return (random.randint(32,127), random.randint(32,127), random.randint(32,127))
def randChar():
return chr(random.randint(65,90))
for x in range(50*4):
for y in range(50):
draw.point((x, y), randColor())
for x in range(4):
draw.text((50*x+10, 10), randChar(), randColor2(), font)
image = image.filter(ImageFilter.BLUR)
image.save('Result.jpg')
image.show()
|
kalafut/finiki
|
refs/heads/master
|
finiki.py
|
1
|
#!/usr/bin/env python3
import os
from collections import OrderedDict
from contextlib import contextmanager
from flask import Flask, redirect, render_template, request
import jinja2
import mistune
app = Flask(__name__)
markdown = mistune.Markdown(hard_wrap=True)
try:
ROOT = os.environ['FINIKI_ROOT']
except KeyError:
print('Set the FINIKI_ROOT environment variable to your document root.')
exit(1)
DEFAULT_EXT = 'md'
RECENT_CNT = 8
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>', methods=['GET', 'POST'])
def index(path):
isdir = os.path.isdir(tod(path))
if isdir and not path.endswith('/') and path != '':
return redirect(path + '/'), 303
if request.method == 'POST':
if 'delete' in request.form:
os.remove(tof(path))
return redirect('/')
else:
with opener(path, 'w') as f:
f.write(request.form['text'])
return redirect(path)
if isdir:
d, p = scan(path)
return render_template('dir.html', dirs=d, pages=p, recents=load_recent(skip_first=False))
action = request.args.get('action')
if action == 'edit':
with opener(path) as f:
contents = f.read()
return render_template('edit.html', text=contents, path=path, title=os.path.basename(path))
elif action == 'delete':
return render_template('delete.html', path=path)
try:
with opener(path) as f:
contents = f.read()
save_recent(path)
return render_template('show.html', text=markdown(contents), path=path, recents=load_recent(skip_first=True), title=os.path.basename(path))
except NotADirectoryError:
msg = "You cannot have paths under a page."
return render_template('error.html', message=msg)
except FileNotFoundError:
contents = 'New Page'
return render_template('edit.html', text=contents, path=path)
def scan(path):
d, p = [], []
for entry in os.scandir(tod(path)):
if not entry.name.startswith(('.', '__')):
if entry.is_dir():
d.append(entry.name)
else:
p.append(os.path.splitext(entry.name)[0])
return d, p
def load_recent(skip_first=False, recent_cnt=RECENT_CNT):
with open(tof('__system/recent')) as f:
lines = f.readlines()
start = 1 if skip_first else 0
return [x.strip() for x in lines[start:start + recent_cnt]]
def save_recent(path):
recents = OrderedDict.fromkeys([path] + load_recent())
with open(tof('__system/recent'), 'w') as f:
f.write('\n'.join(recents.keys()))
@contextmanager
def opener(path, mode='r'):
if mode == 'w':
os.makedirs(os.path.dirname(tof(path)), exist_ok=True)
with open(tof(path), mode) as f:
yield f
@app.template_filter('basename')
def reverse_filter(s):
return os.path.basename(s)
def tof(path):
return "{}.{}".format(os.path.join(ROOT, path), DEFAULT_EXT)
def tod(path):
return os.path.join(ROOT, path)
if __name__ == "__main__":
app.run()
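# Illustrative launch (FINIKI_ROOT must point at the markdown document root,
# as checked at import time above):
#   FINIKI_ROOT=~/wiki python3 finiki.py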
|
SNeuhausen/training_management
|
refs/heads/master
|
models/concrete_degree/concrete_degree.py
|
1
|
# -*- coding: utf-8 -*-
# /#############################################################################
#
# Stephan Neuhausen.
# Copyright (C) 2014-TODAY Stephan Neuhausen iad.de.
#
# /#############################################################################
from openerp import models, fields
from openerp.addons.training_management.models.model_names import ModelNames
class ConcreteDegree(models.Model):
_name = ModelNames.CONCRETE_DEGREE
CONTEXT_KEY__PUBLISHED_BY_IAD_ONLY = "published_by_iad_only"
abstract_degree_id = fields.Many2one(
comodel_name=ModelNames.ABSTRACT_DEGREE,
required=True,
domain=lambda self: self._domain__abstract_degree_id(),
ondelete="restrict",
string="Abschluss",
)
date = fields.Date(
string="Datum",
default=lambda self: fields.Date.today(),
)
evaluation = fields.Text(string="Beurteilung")
number_of_exams = fields.Integer(string=u"Anz. Prüfungen", default=0)
is_included_in_prints = fields.Boolean(string=u"Im Ausdruck enthalten", default=True)
education_plan_id = fields.Many2one(
comodel_name=ModelNames.EDUCATION_PLAN,
ondelete="cascade",
)
partner_id = fields.Many2one(
comodel_name=ModelNames.PARTNER,
ondelete="cascade",
)
course_id = fields.Many2one(
comodel_name=ModelNames.TEACHING,
ondelete="cascade",
)
def _domain__abstract_degree_id(self):
context = self.env.context
if context.get(self.CONTEXT_KEY__PUBLISHED_BY_IAD_ONLY, False):
return [('is_publishable_by_iad', '=', True)]
else:
return []
|
soerendip42/rdkit
|
refs/heads/master
|
rdkit/VLib/NodeLib/__init__.py
|
6
|
__all__=['SmartsMolFilter','SDSupply','SmartsRemover','SmilesDupeFilter',
'SmilesOutput']
|
SerpentCS/odoo
|
refs/heads/8.0
|
addons/l10n_es/__init__.py
|
307
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
VShangxiao/v2ex
|
refs/heads/master
|
mapreduce/operation/counters.py
|
21
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Counters-related operations."""
__all__ = ['Increment']
class Increment(object):
"""Increment counter operation."""
def __init__(self, counter_name, delta=1):
"""Constructor.
Args:
counter_name: name of the counter as string
delta: increment delta as int.
"""
self.counter_name = counter_name
self.delta = delta
def __call__(self, context):
"""Execute operation.
Args:
context: mapreduce context as context.Context.
"""
context.counters.increment(self.counter_name, self.delta)
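# Typical usage sketch (a map function yields the operation; the framework then
# invokes it with the current context):
#   yield Increment("rows_processed")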
|
yatish27/mase
|
refs/heads/master
|
python101/code/Tagger.py
|
14
|
"""This module contains code from
Think Python by Allen B. Downey
http://thinkpython.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import math
from Wobbler import *
class Missed(Exception):
"""Exception raised when a turtle tries to tag someone too far away."""
class Tagger(Wobbler):
"""Represents a Turtle that plays tag."""
def __init__(self, world, speed=1, clumsiness=60, color='red'):
# call the __init__ method from Wobbler to initialize
# world, speed, clumsiness, and color.
Wobbler.__init__(self, world, speed, clumsiness, color)
# then initialize the additional attributes that Taggers have
self.it = 0
self.sulking = 0
def steer(self):
"""Steers the Wobbler in the general direction it should go.
Postcondition: the Wobbler's heading may be changed, but
its position may not.
"""
# if sulking, decrement the sulking clock
if self.sulking > 0:
self.sulking -= 1
if self.sulking == 0:
self.color = 'red'
self.speed = self.old_speed
return
# if out of bounds, turn toward the center
if self.distance() > 200:
self.turn_toward(0, 0)
return
# if not it, just wander
if not self.it:
return
# if it, chase the closest player
target = self.closest(self.world.animals)
try:
self.apply_tag(target)
except Missed:
self.chase(target)
def turn_toward(self, x=0, y=0):
"""Turns to face the given point.
x, y: point to turn toward
"""
self.heading = self.away(x, y) + 180
self.redraw()
def away(self, x=0, y=0):
"""Computes the heading away from the given point.
x, y: point to face away from
"""
dx = self.x - x
dy = self.y - y
heading = math.atan2(dy, dx)
return heading * 180 / math.pi
def distance(self, x=0, y=0):
"""Computes the distance from this turtle to the given point.
x, y: point to find distance to
"""
dx = self.x - x
dy = self.y - y
return math.sqrt(dx**2 + dy**2)
def distance_from(self, other):
"""Computes the distance between turtles.
other: Turtle object
"""
return self.distance(other.x, other.y)
def youre_it(self):
"""Makes this turtle it."""
self.it = 1
self.old_speed = self.speed
self.old_color = self.color
self.speed = 0
self.color = 'blue'
self.sulking = 200
self.redraw()
def not_it(self):
"""Makes this turtle not it."""
self.it = 0
self.color = self.old_color
self.redraw()
def flee(self, other):
"""Faces away from the other turtle.
other: Turtle object
"""
self.heading = self.away(other.x, other.y)
def chase(self, other):
"""Faces the other turtle.
other: Turtle object
"""
self.turn_toward(other.x, other.y)
def closest(self, others):
"""Return closest animal in the list (other than self).
others: list of Animals
"""
t = [(self.distance_from(animal), animal)
for animal in others if animal is not self]
(distance, animal) = min(t)
return animal
def apply_tag(self, other):
"""Tried to tag the other turtle.
If it is too far away, raises an exception.
other: Turtle object
"""
if self.distance_from(other) < 10:
self.not_it()
other.youre_it()
else:
raise Missed
if __name__ == '__main__':
world = make_world(Tagger)
world.animals[0].youre_it()
world.mainloop()
|
idovear/odoo
|
refs/heads/master
|
addons/website_livechat/models/website.py
|
380
|
from openerp.osv import osv, fields
class website(osv.osv):
_inherit = "website"
_columns = {
'channel_id': fields.many2one('im_livechat.channel', string="Channel"),
}
class website_config_settings(osv.osv_memory):
_inherit = 'website.config.settings'
_columns = {
'channel_id': fields.related('website_id', 'channel_id', type='many2one', relation='im_livechat.channel', string='Live Chat Channel'),
}
|
dreamapplehappy/myblog
|
refs/heads/master
|
node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/modeline.py
|
292
|
# -*- coding: utf-8 -*-
"""
pygments.modeline
~~~~~~~~~~~~~~~~~
A simple modeline parser (based on pymodeline).
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
__all__ = ['get_filetype_from_buffer']
modeline_re = re.compile(r'''
(?: vi | vim | ex ) (?: [<=>]? \d* )? :
.* (?: ft | filetype | syn | syntax ) = ( [^:\s]+ )
''', re.VERBOSE)
def get_filetype_from_line(l):
m = modeline_re.search(l)
if m:
return m.group(1)
def get_filetype_from_buffer(buf, max_lines=5):
"""
Scan the buffer for modelines and return filetype if one is found.
"""
lines = buf.splitlines()
for l in lines[-1:-max_lines-1:-1]:
ret = get_filetype_from_line(l)
if ret:
return ret
for l in lines[max_lines:0:-1]:
ret = get_filetype_from_line(l)
if ret:
return ret
return None
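# Illustrative behaviour of the helpers above:
#   get_filetype_from_line("# vim: set ft=python :")              -> 'python'
#   get_filetype_from_buffer("print(1)\n# vim: syntax=python\n")  -> 'python'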
|
npiganeau/odoo
|
refs/heads/master
|
addons/product_email_template/__openerp__.py
|
65
|
# -*- coding: utf-8 -*-
{
'name': 'Product Email Template',
'depends': ['account'],
'author': 'OpenERP SA',
'category': 'Accounting & Finance',
'description': """
Add email templates to products to be send on invoice confirmation
==================================================================
With this module, link your products to a template to send complete information and tools to your customer.
For instance, when invoicing a training, the training agenda and materials will automatically be sent to your customers.
""",
'website': 'http://www.openerp.com',
'demo': [
'data/product_demo.xml',
],
'data': [
'views/product_view.xml',
'views/email_template_view.xml',
],
'installable': True,
'auto_install': False,
}
|
markroxor/gensim
|
refs/heads/develop
|
gensim/models/atmodel.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Radim Rehurek <radimrehurek@seznam.cz>
# Copyright (C) 2016 Olavur Mortensen <olavurmortensen@gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Author-topic model in Python.
This module trains the author-topic model on documents and corresponding author-document
dictionaries. The training is online and is constant in memory w.r.t. the number of
documents. The model is *not* constant in memory w.r.t. the number of authors.
The model can be updated with additional documents after training has been completed. It is
also possible to continue training on the existing data.
The model is closely related to Latent Dirichlet Allocation. The AuthorTopicModel class
inherits the LdaModel class, and its usage is thus similar.
Distributed computation and multiprocessing is not implemented at the moment, but may be
coming in the future.
The model was introduced by Rosen-Zvi and co-authors in 2004
(https://mimno.infosci.cornell.edu/info6150/readings/398.pdf).
A tutorial can be found at
https://github.com/RaRe-Technologies/gensim/tree/develop/docs/notebooks/atmodel_tutorial.ipynb.
"""
# TODO: this class inherits LdaModel and overwrites some methods. There is some code
# duplication still, and a refactor could be made to avoid this. Comments with "TODOs"
# are included in the code where this is the case, for example in the log_perplexity
# and do_estep methods.
import logging
import numpy as np # for arrays, array broadcasting etc.
from copy import deepcopy
from shutil import copyfile
from os.path import isfile
from os import remove
from gensim import utils
from gensim.models import LdaModel
from gensim.models.ldamodel import LdaState
from gensim.matutils import dirichlet_expectation
from gensim.corpora import MmCorpus
from itertools import chain
from scipy.special import gammaln # gamma function utils
from six.moves import xrange
import six
logger = logging.getLogger('gensim.models.atmodel')
class AuthorTopicState(LdaState):
"""
NOTE: distributed mode not available yet in the author-topic model. This AuthorTopicState
object is kept so that when the time comes to implement it, it will be easier.
Encapsulate information for distributed computation of AuthorTopicModel objects.
Objects of this class are sent over the network, so try to keep them lean to
reduce traffic.
"""
def __init__(self, eta, lambda_shape, gamma_shape):
self.eta = eta
self.sstats = np.zeros(lambda_shape)
self.gamma = np.zeros(gamma_shape)
self.numdocs = 0
self.dtype = np.float64 # To be compatible with LdaState
def construct_doc2author(corpus, author2doc):
"""Make a mapping from document IDs to author IDs."""
doc2author = {}
for d, _ in enumerate(corpus):
author_ids = []
for a, a_doc_ids in author2doc.items():
if d in a_doc_ids:
author_ids.append(a)
doc2author[d] = author_ids
return doc2author
def construct_author2doc(doc2author):
"""Make a mapping from author IDs to document IDs."""
# First get a set of all authors.
authors_ids = set()
for d, a_doc_ids in doc2author.items():
for a in a_doc_ids:
authors_ids.add(a)
# Now construct the dictionary.
author2doc = {}
for a in authors_ids:
author2doc[a] = []
for d, a_ids in doc2author.items():
if a in a_ids:
author2doc[a].append(d)
return author2doc
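# Illustrative round trip (hypothetical 2-document corpus):
#   author2doc = {'alice': [0, 1], 'bob': [1]}
#   construct_doc2author(corpus, author2doc)  -> {0: ['alice'], 1: ['alice', 'bob']}
#   construct_author2doc(doc2author)          -> {'alice': [0, 1], 'bob': [1]}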
class AuthorTopicModel(LdaModel):
"""
The constructor estimates the author-topic model parameters based
on a training corpus:
>>> model = AuthorTopicModel(corpus, num_topics=10, author2doc=author2doc, id2word=id2word)
The model can be updated (trained) with new documents via
>>> model.update(other_corpus, other_author2doc)
Model persistency is achieved through its `load`/`save` methods.
"""
def __init__(self, corpus=None, num_topics=100, id2word=None, author2doc=None, doc2author=None,
chunksize=2000, passes=1, iterations=50, decay=0.5, offset=1.0,
alpha='symmetric', eta='symmetric', update_every=1, eval_every=10,
gamma_threshold=0.001, serialized=False, serialization_path=None,
minimum_probability=0.01, random_state=None):
"""
If the iterable corpus and one of author2doc/doc2author dictionaries are given,
start training straight away. If not given, the model is left untrained
(presumably because you want to call the `update` method manually).
`num_topics` is the number of requested latent topics to be extracted from
the training corpus.
`id2word` is a mapping from word ids (integers) to words (strings). It is
used to determine the vocabulary size, as well as for debugging and topic
printing.
`author2doc` is a dictionary where the keys are the names of authors, and the
values are lists of documents that the author contributes to.
`doc2author` is a dictionary where the keys are document IDs (indexes to corpus)
and the values are lists of author names. I.e. this is the reverse mapping of
`author2doc`. Only one of the two, `author2doc` and `doc2author` have to be
supplied.
`passes` is the number of times the model makes a pass over the entire training
data.
`iterations` is the maximum number of times the model loops over each document
(M-step). The iterations stop when convergence is reached.
`chunksize` controls the size of the mini-batches.
`alpha` and `eta` are hyperparameters that affect sparsity of the author-topic
(theta) and topic-word (lambda) distributions. Both default to a symmetric
1.0/num_topics prior.
`alpha` can be set to an explicit array = prior of your choice. It also
supports special values of 'asymmetric' and 'auto': the former uses a fixed
normalized asymmetric 1.0/topicno prior, the latter learns an asymmetric
prior directly from your data.
`eta` can be a scalar for a symmetric prior over topic/word
distributions, or a vector of shape num_words, which can be used to
impose (user defined) asymmetric priors over the word distribution.
It also supports the special value 'auto', which learns an asymmetric
prior over words directly from your data. `eta` can also be a matrix
of shape num_topics x num_words, which can be used to impose
asymmetric priors over the word distribution on a per-topic basis
(can not be learned from data).
Calculate and log perplexity estimate from the latest mini-batch every
`eval_every` model updates. Set to None to disable perplexity estimation.
`decay` and `offset` parameters are the same as Kappa and Tau_0 in
Hoffman et al, respectively. `decay` controls how quickly old documents are
forgotten, while `offset` down-weights early iterations.
`minimum_probability` controls filtering the topics returned for a document (bow).
`random_state` can be an integer or a numpy.random.RandomState object. Set the
state of the random number generator inside the author-topic model, to ensure
reproducibility of your experiments, for example.
`serialized` indicates whether the input corpora to the model are simple
in-memory lists (`serialized = False`) or saved to the hard-drive
(`serialized = True`). Note that this behaviour is quite different from
other Gensim models. If your data is too large to fit in to memory, use
this functionality. Note that calling `AuthorTopicModel.update` with new
data may be cumbersome as it requires all the existing data to be
re-serialized.
`serialization_path` must be set to a filepath, if `serialized = True` is
used. Use, for example, `serialization_path = /tmp/serialized_model.mm` or use your
working directory by setting `serialization_path = serialized_model.mm`. An existing
file *cannot* be overwritten; either delete the old file or choose a different
name.
Example:
>>> model = AuthorTopicModel(corpus, num_topics=100, author2doc=author2doc, id2word=id2word) # train model
>>> model.update(corpus2) # update the author-topic model with additional documents
>>> model = AuthorTopicModel(
... corpus, num_topics=50, author2doc=author2doc, id2word=id2word, alpha='auto', eval_every=5)
"""
# NOTE: this doesn't call constructor of a base class, but duplicates most of this code
# so we have to set dtype to float64 default here
self.dtype = np.float64
# NOTE: as distributed version of this model is not implemented, "distributed" is set to false. Some of the
# infrastructure to implement a distributed author-topic model is already in place,
# such as the AuthorTopicState.
distributed = False
self.dispatcher = None
self.numworkers = 1
self.id2word = id2word
if corpus is None and self.id2word is None:
raise ValueError(
"at least one of corpus/id2word must be specified, to establish input space dimensionality"
)
if self.id2word is None:
logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
self.id2word = utils.dict_from_corpus(corpus)
self.num_terms = len(self.id2word)
elif len(self.id2word) > 0:
self.num_terms = 1 + max(self.id2word.keys())
else:
self.num_terms = 0
if self.num_terms == 0:
raise ValueError("cannot compute the author-topic model over an empty collection (no terms)")
logger.info('Vocabulary consists of %d words.', self.num_terms)
self.author2doc = {}
self.doc2author = {}
self.distributed = distributed
self.num_topics = num_topics
self.num_authors = 0
self.chunksize = chunksize
self.decay = decay
self.offset = offset
self.minimum_probability = minimum_probability
self.num_updates = 0
self.total_docs = 0
self.passes = passes
self.update_every = update_every
self.eval_every = eval_every
self.author2id = {}
self.id2author = {}
self.serialized = serialized
if serialized and not serialization_path:
raise ValueError(
"If serialized corpora are used, a the path to a folder "
"where the corpus should be saved must be provided (serialized_path)."
)
if serialized and serialization_path:
assert not isfile(serialization_path), \
"A file already exists at the serialization_path path; " \
"choose a different serialization_path, or delete the file."
self.serialization_path = serialization_path
# Initialize an empty self.corpus.
self.init_empty_corpus()
self.alpha, self.optimize_alpha = self.init_dir_prior(alpha, 'alpha')
assert self.alpha.shape == (self.num_topics,), \
"Invalid alpha shape. Got shape %s, but expected (%d, )" % (str(self.alpha.shape), self.num_topics)
if isinstance(eta, six.string_types):
if eta == 'asymmetric':
raise ValueError("The 'asymmetric' option cannot be used for eta")
self.eta, self.optimize_eta = self.init_dir_prior(eta, 'eta')
self.random_state = utils.get_random_state(random_state)
assert (self.eta.shape == (self.num_terms,) or self.eta.shape == (self.num_topics, self.num_terms)), (
"Invalid eta shape. Got shape %s, but expected (%d, 1) or (%d, %d)" %
(str(self.eta.shape), self.num_terms, self.num_topics, self.num_terms)
)
# VB constants
self.iterations = iterations
self.gamma_threshold = gamma_threshold
# Initialize the variational distributions q(beta|lambda) and q(theta|gamma)
self.state = AuthorTopicState(self.eta, (self.num_topics, self.num_terms), (self.num_authors, self.num_topics))
self.state.sstats = self.random_state.gamma(100., 1. / 100., (self.num_topics, self.num_terms))
self.expElogbeta = np.exp(dirichlet_expectation(self.state.sstats))
# if a training corpus was provided, start estimating the model right away
if corpus is not None and (author2doc is not None or doc2author is not None):
use_numpy = self.dispatcher is not None
self.update(corpus, author2doc, doc2author, chunks_as_numpy=use_numpy)
def __str__(self):
return "AuthorTopicModel(num_terms=%s, num_topics=%s, num_authors=%s, decay=%s, chunksize=%s)" % \
(self.num_terms, self.num_topics, self.num_authors, self.decay, self.chunksize)
def init_empty_corpus(self):
"""
Initialize an empty corpus. If the corpora are to be treated as lists, simply
initialize an empty list. If serialization is used, initialize an empty corpus
of the class `gensim.corpora.MmCorpus`.
"""
if self.serialized:
# Initialize the corpus as a serialized empty list.
# This corpus will be extended in self.update.
MmCorpus.serialize(self.serialization_path, []) # Serialize empty corpus.
self.corpus = MmCorpus(self.serialization_path) # Store serialized corpus object in self.corpus.
else:
# All input corpora are assumed to just be lists.
self.corpus = []
def extend_corpus(self, corpus):
"""
Add new documents in `corpus` to `self.corpus`. If serialization is used,
then the entire corpus (`self.corpus`) is re-serialized and the new documents
are added in the process. If serialization is not used, the corpus, as a list
of documents, is simply extended.
"""
if self.serialized:
# Re-serialize the entire corpus while appending the new documents.
if isinstance(corpus, MmCorpus):
# Check that we are not attempting to overwrite the serialized corpus.
assert self.corpus.input != corpus.input, \
'Input corpus cannot have the same file path as the model corpus (serialization_path).'
corpus_chain = chain(self.corpus, corpus) # A generator with the old and new documents.
# Make a temporary copy of the file where the corpus is serialized.
copyfile(self.serialization_path, self.serialization_path + '.tmp')
self.corpus.input = self.serialization_path + '.tmp' # Point the old corpus at this temporary file.
# Re-serialize the old corpus, and extend it with the new corpus.
MmCorpus.serialize(self.serialization_path, corpus_chain)
self.corpus = MmCorpus(self.serialization_path) # Store the new serialized corpus object in self.corpus.
remove(self.serialization_path + '.tmp') # Remove the temporary file again.
else:
# self.corpus and corpus are just lists, just extend the list.
# First check that corpus is actually a list.
assert isinstance(corpus, list), "If serialized == False, all input corpora must be lists."
self.corpus.extend(corpus)
def compute_phinorm(self, expElogthetad, expElogbetad):
"""Efficiently computes the normalizing factor in phi."""
expElogtheta_sum = expElogthetad.sum(axis=0)
phinorm = expElogtheta_sum.dot(expElogbetad) + 1e-100
return phinorm
def inference(self, chunk, author2doc, doc2author, rhot, collect_sstats=False, chunk_doc_idx=None):
"""
Given a chunk of sparse document vectors, update gamma (parameters
controlling the topic weights) for each author corresponding to the
documents in the chunk.
The whole input chunk of document is assumed to fit in RAM; chunking of
a large corpus must be done earlier in the pipeline.
If `collect_sstats` is True, also collect sufficient statistics needed
to update the model's topic-word distributions, and return a 2-tuple
`(gamma_chunk, sstats)`. Otherwise, return `(gamma_chunk, None)`.
`gamma_chunk` is of shape `len(chunk_authors) x self.num_topics`, where
`chunk_authors` is the number of authors in the documents in the
current chunk.
Avoids computing the `phi` variational parameter directly using the
optimization presented in **Lee, Seung: Algorithms for non-negative matrix factorization, NIPS 2001**.
"""
try:
len(chunk)
except TypeError:
# convert iterators/generators to plain list, so we have len() etc.
chunk = list(chunk)
if len(chunk) > 1:
logger.debug("performing inference on a chunk of %i documents", len(chunk))
# Initialize the variational distribution q(theta|gamma) for the chunk
if collect_sstats:
sstats = np.zeros_like(self.expElogbeta)
else:
sstats = None
converged = 0
# Stack all the computed gammas into this output array.
gamma_chunk = np.zeros((0, self.num_topics))
# Now, for each document d update gamma and phi w.r.t. all authors in those documents.
for d, doc in enumerate(chunk):
if chunk_doc_idx is not None:
doc_no = chunk_doc_idx[d]
else:
doc_no = d
# Get the IDs and counts of all the words in the current document.
# TODO: this is duplication of code in LdaModel. Refactor.
if doc and not isinstance(doc[0][0], six.integer_types + (np.integer,)):
# make sure the term IDs are ints, otherwise np will get upset
ids = [int(idx) for idx, _ in doc]
else:
ids = [idx for idx, _ in doc]
cts = np.array([cnt for _, cnt in doc])
# Get all authors in current document, and convert the author names to integer IDs.
authors_d = [self.author2id[a] for a in self.doc2author[doc_no]]
gammad = self.state.gamma[authors_d, :] # gamma of document d before update.
tilde_gamma = gammad.copy() # gamma that will be updated.
# Compute the expectation of the log of the Dirichlet parameters theta and beta.
Elogthetad = dirichlet_expectation(tilde_gamma)
expElogthetad = np.exp(Elogthetad)
expElogbetad = self.expElogbeta[:, ids]
# Compute the normalizing constant of phi for the current document.
phinorm = self.compute_phinorm(expElogthetad, expElogbetad)
# Iterate between gamma and phi until convergence
for _ in xrange(self.iterations):
lastgamma = tilde_gamma.copy()
# Update gamma.
# phi is computed implicitly below,
for ai, a in enumerate(authors_d):
tilde_gamma[ai, :] = self.alpha + len(self.author2doc[self.id2author[a]])\
* expElogthetad[ai, :] * np.dot(cts / phinorm, expElogbetad.T)
# Update gamma.
# Interpolation between document d's "local" gamma (tilde_gamma),
# and "global" gamma (gammad).
tilde_gamma = (1 - rhot) * gammad + rhot * tilde_gamma
# Update Elogtheta and Elogbeta, since gamma and lambda have been updated.
Elogthetad = dirichlet_expectation(tilde_gamma)
expElogthetad = np.exp(Elogthetad)
# Update the normalizing constant in phi.
phinorm = self.compute_phinorm(expElogthetad, expElogbetad)
# Check for convergence.
# Criterion is mean change in "local" gamma.
meanchange_gamma = np.mean(abs(tilde_gamma - lastgamma))
gamma_condition = meanchange_gamma < self.gamma_threshold
if gamma_condition:
converged += 1
break
# End of iterations loop.
# Store the updated gammas in the model state.
self.state.gamma[authors_d, :] = tilde_gamma
# Stack the new gammas into the output array.
gamma_chunk = np.vstack([gamma_chunk, tilde_gamma])
if collect_sstats:
# Contribution of document d to the expected sufficient
# statistics for the M step.
expElogtheta_sum_a = expElogthetad.sum(axis=0)
sstats[:, ids] += np.outer(expElogtheta_sum_a.T, cts / phinorm)
if len(chunk) > 1:
logger.debug(
"%i/%i documents converged within %i iterations",
converged, len(chunk), self.iterations
)
if collect_sstats:
# This step finishes computing the sufficient statistics for the
# M step, so that
# sstats[k, w] = \sum_d n_{dw} * \sum_a phi_{dwak}
# = \sum_d n_{dw} * exp{Elogtheta_{ak} + Elogbeta_{kw}} / phinorm_{dw}.
sstats *= self.expElogbeta
return gamma_chunk, sstats
def do_estep(self, chunk, author2doc, doc2author, rhot, state=None, chunk_doc_idx=None):
"""
Perform inference on a chunk of documents, and accumulate the collected
sufficient statistics in `state` (or `self.state` if None).
"""
# TODO: this method is somewhat similar to the one in LdaModel. Refactor if possible.
if state is None:
state = self.state
gamma, sstats = self.inference(
chunk, author2doc, doc2author, rhot,
collect_sstats=True, chunk_doc_idx=chunk_doc_idx
)
state.sstats += sstats
state.numdocs += len(chunk)
return gamma
def log_perplexity(self, chunk, chunk_doc_idx=None, total_docs=None):
"""
Calculate and return per-word likelihood bound, using the `chunk` of
documents as evaluation corpus. Also output the calculated statistics. incl.
perplexity=2^(-bound), to log at INFO level.
"""
# TODO: This method is very similar to the one in LdaModel. Refactor.
if total_docs is None:
total_docs = len(chunk)
corpus_words = sum(cnt for document in chunk for _, cnt in document)
subsample_ratio = 1.0 * total_docs / len(chunk)
perwordbound = self.bound(chunk, chunk_doc_idx, subsample_ratio=subsample_ratio) / \
(subsample_ratio * corpus_words)
logger.info(
"%.3f per-word bound, %.1f perplexity estimate based on a corpus of %i documents with %i words",
perwordbound, np.exp2(-perwordbound), len(chunk), corpus_words
)
return perwordbound
def update(self, corpus=None, author2doc=None, doc2author=None, chunksize=None, decay=None, offset=None,
passes=None, update_every=None, eval_every=None, iterations=None,
gamma_threshold=None, chunks_as_numpy=False):
"""
Train the model with new documents, by EM-iterating over `corpus` until
the topics converge (or until the maximum number of allowed iterations
is reached). `corpus` must be an iterable (repeatable stream of documents),
This update also supports updating an already trained model (`self`)
with new documents from `corpus`; the two models are then merged in
proportion to the number of old vs. new documents. This feature is still
experimental for non-stationary input streams.
For stationary input (no topic drift in new documents), on the other hand,
this equals the online update of Hoffman et al. and is guaranteed to
converge for any `decay` in (0.5, 1.0]. Additionally, for smaller
`corpus` sizes, an increasing `offset` may be beneficial (see
Table 1 in Hoffman et al.)
If update is called with authors that already exist in the model, it will
resume training on not only new documents for that author, but also the
previously seen documents. This is necessary for those authors' topic
distributions to converge.
Every time `update(corpus, author2doc)` is called, the new documents are
appended to all the previously seen documents, and author2doc is
combined with the previously seen authors.
To resume training on all the data seen by the model, simply call
`update()`.
It is not possible to add new authors to existing documents, as all
documents in `corpus` are assumed to be new documents.
Args:
corpus (gensim corpus): The corpus with which the author-topic model should be updated.
author2doc (dictionary): author to document mapping corresponding to indexes in input
corpus.
doc2author (dictionary): document to author mapping corresponding to indexes in input
corpus.
chunks_as_numpy (bool): Whether each chunk passed to `.inference` should be a np
array or not. np can in some settings turn the term IDs
into floats, these will be converted back into integers in
inference, which incurs a performance hit. For distributed
computing it may be desirable to keep the chunks as np
arrays.
For other parameter settings, see :class:`AuthorTopicModel` constructor.
"""
# use parameters given in constructor, unless user explicitly overrode them
if decay is None:
decay = self.decay
if offset is None:
offset = self.offset
if passes is None:
passes = self.passes
if update_every is None:
update_every = self.update_every
if eval_every is None:
eval_every = self.eval_every
if iterations is None:
iterations = self.iterations
if gamma_threshold is None:
gamma_threshold = self.gamma_threshold
# TODO: if deepcopy is not used here, something goes wrong. When unit tests are run (specifically "testPasses"),
# the process simply gets killed.
author2doc = deepcopy(author2doc)
doc2author = deepcopy(doc2author)
# TODO: it is not possible to add new authors to an existing document (all input documents are treated
# as completely new documents). Perhaps this functionality could be implemented.
# If it's absolutely necessary, the user can delete the documents that have new authors, and call update
# on them with the new and old authors.
if corpus is None:
# Just keep training on the already available data.
# Assumes self.update() has been called before with input documents and corresponding authors.
assert self.total_docs > 0, 'update() was called with no documents to train on.'
train_corpus_idx = [d for d in xrange(self.total_docs)]
num_input_authors = len(self.author2doc)
else:
if doc2author is None and author2doc is None:
raise ValueError(
'at least one of author2doc/doc2author must be specified, to establish input space dimensionality'
)
# If either doc2author or author2doc is missing, construct them from the other.
if doc2author is None:
doc2author = construct_doc2author(corpus, author2doc)
elif author2doc is None:
author2doc = construct_author2doc(doc2author)
# Number of authors that need to be updated.
num_input_authors = len(author2doc)
try:
len_input_corpus = len(corpus)
except TypeError:
logger.warning("input corpus stream has no len(); counting documents")
len_input_corpus = sum(1 for _ in corpus)
if len_input_corpus == 0:
logger.warning("AuthorTopicModel.update() called with an empty corpus")
return
self.total_docs += len_input_corpus
# Add new documents in corpus to self.corpus.
self.extend_corpus(corpus)
# Obtain a list of new authors.
new_authors = []
# Sorting the author names makes the model more reproducible.
for a in sorted(author2doc.keys()):
if not self.author2doc.get(a):
new_authors.append(a)
num_new_authors = len(new_authors)
# Add new authors do author2id/id2author dictionaries.
for a_id, a_name in enumerate(new_authors):
self.author2id[a_name] = a_id + self.num_authors
self.id2author[a_id + self.num_authors] = a_name
# Increment the number of total authors seen.
self.num_authors += num_new_authors
# Initialize the variational distributions q(theta|gamma)
gamma_new = self.random_state.gamma(100., 1. / 100., (num_new_authors, self.num_topics))
self.state.gamma = np.vstack([self.state.gamma, gamma_new])
# Combine author2doc with self.author2doc.
# First, increment the document IDs by the number of previously seen documents.
for a, doc_ids in author2doc.items():
doc_ids = [d + self.total_docs - len_input_corpus for d in doc_ids]
# For all authors in the input corpus, add the new documents.
for a, doc_ids in author2doc.items():
if self.author2doc.get(a):
# This is not a new author, append new documents.
self.author2doc[a].extend(doc_ids)
else:
# This is a new author, create index.
self.author2doc[a] = doc_ids
# Add all new documents to self.doc2author.
for d, a_list in doc2author.items():
self.doc2author[d] = a_list
# Train on all documents of authors in input_corpus.
train_corpus_idx = []
for _ in author2doc.keys(): # For all authors in input corpus.
for doc_ids in self.author2doc.values(): # For all documents in total corpus.
train_corpus_idx.extend(doc_ids)
# Make the list of training documents unique.
train_corpus_idx = list(set(train_corpus_idx))
# train_corpus_idx is only a list of indexes, so "len" is valid.
lencorpus = len(train_corpus_idx)
if chunksize is None:
chunksize = min(lencorpus, self.chunksize)
self.state.numdocs += lencorpus
if update_every:
updatetype = "online"
updateafter = min(lencorpus, update_every * self.numworkers * chunksize)
else:
updatetype = "batch"
updateafter = lencorpus
evalafter = min(lencorpus, (eval_every or 0) * self.numworkers * chunksize)
updates_per_pass = max(1, lencorpus / updateafter)
logger.info(
"running %s author-topic training, %s topics, %s authors, "
"%i passes over the supplied corpus of %i documents, updating model once "
"every %i documents, evaluating perplexity every %i documents, "
"iterating %ix with a convergence threshold of %f",
updatetype, self.num_topics, num_input_authors, passes, lencorpus, updateafter,
evalafter, iterations, gamma_threshold
)
if updates_per_pass * passes < 10:
logger.warning(
"too few updates, training might not converge; "
"consider increasing the number of passes or iterations to improve accuracy"
)
# rho is the "speed" of updating; TODO try other fncs
# pass_ + num_updates handles increasing the starting t for each pass,
# while allowing it to "reset" on the first pass of each update
def rho():
return pow(offset + pass_ + (self.num_updates / chunksize), -decay)
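        # Illustrative values, assuming the inherited defaults offset=1.0 and
        # decay=0.5: on the very first chunk rho() = (1 + 0 + 0) ** -0.5 = 1.0,
        # while after three passes with num_updates / chunksize == 10 it has
        # decayed to (1 + 3 + 10) ** -0.5 ~= 0.27.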
for pass_ in xrange(passes):
if self.dispatcher:
logger.info('initializing %s workers', self.numworkers)
self.dispatcher.reset(self.state)
else:
# gamma is not needed in "other", thus its shape is (0, 0).
other = AuthorTopicState(self.eta, self.state.sstats.shape, (0, 0))
dirty = False
reallen = 0
for chunk_no, chunk_doc_idx in enumerate(
utils.grouper(train_corpus_idx, chunksize, as_numpy=chunks_as_numpy)):
chunk = [self.corpus[d] for d in chunk_doc_idx]
reallen += len(chunk) # keep track of how many documents we've processed so far
if eval_every and ((reallen == lencorpus) or ((chunk_no + 1) % (eval_every * self.numworkers) == 0)):
# log_perplexity requires the indexes of the documents being evaluated, to know what authors
# correspond to the documents.
self.log_perplexity(chunk, chunk_doc_idx, total_docs=lencorpus)
if self.dispatcher:
# add the chunk to dispatcher's job queue, so workers can munch on it
logger.info(
"PROGRESS: pass %i, dispatching documents up to #%i/%i",
pass_, chunk_no * chunksize + len(chunk), lencorpus
)
# this will eventually block until some jobs finish, because the queue has a small finite length
self.dispatcher.putjob(chunk)
else:
logger.info(
"PROGRESS: pass %i, at document #%i/%i",
pass_, chunk_no * chunksize + len(chunk), lencorpus
)
# do_estep requires the indexes of the documents being trained on, to know what authors
# correspond to the documents.
gammat = self.do_estep(chunk, self.author2doc, self.doc2author, rho(), other, chunk_doc_idx)
if self.optimize_alpha:
self.update_alpha(gammat, rho())
dirty = True
del chunk
# perform an M step. determine when based on update_every, don't do this after every chunk
if update_every and (chunk_no + 1) % (update_every * self.numworkers) == 0:
if self.dispatcher:
# distributed mode: wait for all workers to finish
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
other = self.dispatcher.getstate()
self.do_mstep(rho(), other, pass_ > 0)
del other # frees up memory
if self.dispatcher:
logger.info('initializing workers')
self.dispatcher.reset(self.state)
else:
other = AuthorTopicState(self.eta, self.state.sstats.shape, (0, 0))
dirty = False
# endfor single corpus iteration
if reallen != lencorpus:
raise RuntimeError("input corpus size changed during training (don't use generators as input)")
if dirty:
# finish any remaining updates
if self.dispatcher:
# distributed mode: wait for all workers to finish
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
other = self.dispatcher.getstate()
self.do_mstep(rho(), other, pass_ > 0)
del other
def bound(self, chunk, chunk_doc_idx=None, subsample_ratio=1.0, author2doc=None, doc2author=None):
"""
Estimate the variational bound of documents from `corpus`:
E_q[log p(corpus)] - E_q[log q(corpus)]
There are basically two use cases of this method:
1. `chunk` is a subset of the training corpus, and `chunk_doc_idx` is provided,
indicating the indexes of the documents in the training corpus.
2. `chunk` is a test set (held-out data), and author2doc and doc2author
        corresponding to this test set are provided. There must not be any new authors
passed to this method. `chunk_doc_idx` is not needed in this case.
To obtain the per-word bound, compute:
>>> corpus_words = sum(cnt for document in corpus for _, cnt in document)
>>> model.bound(corpus, author2doc=author2doc, doc2author=doc2author) / corpus_words
"""
# TODO: enable evaluation of documents with new authors. One could, for example, make it
# possible to pass a list of documents to self.inference with no author dictionaries,
# assuming all the documents correspond to one (unseen) author, learn the author's
# gamma, and return gamma (without adding it to self.state.gamma). Of course,
# collect_sstats should be set to false, so that the model is not updated w.r.t. these
# new documents.
_lambda = self.state.get_lambda()
Elogbeta = dirichlet_expectation(_lambda)
expElogbeta = np.exp(Elogbeta)
gamma = self.state.gamma
if author2doc is None and doc2author is None:
# Evaluating on training documents (chunk of self.corpus).
author2doc = self.author2doc
doc2author = self.doc2author
if not chunk_doc_idx:
# If author2doc and doc2author are not provided, chunk is assumed to be a subset of
# self.corpus, and chunk_doc_idx is thus required.
raise ValueError(
'Either author dictionaries or chunk_doc_idx must be provided. '
'Consult documentation of bound method.'
)
elif author2doc is not None and doc2author is not None:
# Training on held-out documents (documents not seen during training).
# All authors in dictionaries must still be seen during training.
for a in author2doc.keys():
if not self.author2doc.get(a):
raise ValueError('bound cannot be called with authors not seen during training.')
if chunk_doc_idx:
raise ValueError(
'Either author dictionaries or chunk_doc_idx must be provided, not both. '
'Consult documentation of bound method.'
)
else:
raise ValueError(
'Either both author2doc and doc2author should be provided, or neither. '
'Consult documentation of bound method.'
)
Elogtheta = dirichlet_expectation(gamma)
expElogtheta = np.exp(Elogtheta)
word_score = 0.0
theta_score = 0.0
for d, doc in enumerate(chunk):
if chunk_doc_idx:
doc_no = chunk_doc_idx[d]
else:
doc_no = d
# Get all authors in current document, and convert the author names to integer IDs.
authors_d = [self.author2id[a] for a in self.doc2author[doc_no]]
ids = np.array([id for id, _ in doc]) # Word IDs in doc.
cts = np.array([cnt for _, cnt in doc]) # Word counts.
if d % self.chunksize == 0:
logger.debug("bound: at document #%i in chunk", d)
# Computing the bound requires summing over expElogtheta[a, k] * expElogbeta[k, v], which
# is the same computation as in normalizing phi.
phinorm = self.compute_phinorm(expElogtheta[authors_d, :], expElogbeta[:, ids])
word_score += np.log(1.0 / len(authors_d)) * sum(cts) + cts.dot(np.log(phinorm))
# Compensate likelihood for when `chunk` above is only a sample of the whole corpus. This ensures
# that the likelihood is always roughly on the same scale.
word_score *= subsample_ratio
# E[log p(theta | alpha) - log q(theta | gamma)]
for a in author2doc.keys():
a = self.author2id[a]
theta_score += np.sum((self.alpha - gamma[a, :]) * Elogtheta[a, :])
theta_score += np.sum(gammaln(gamma[a, :]) - gammaln(self.alpha))
theta_score += gammaln(np.sum(self.alpha)) - gammaln(np.sum(gamma[a, :]))
# theta_score is rescaled in a similar fashion.
# TODO: treat this in a more general way, similar to how it is done with word_score.
theta_score *= self.num_authors / len(author2doc)
# E[log p(beta | eta) - log q (beta | lambda)]
beta_score = 0.0
beta_score += np.sum((self.eta - _lambda) * Elogbeta)
beta_score += np.sum(gammaln(_lambda) - gammaln(self.eta))
sum_eta = np.sum(self.eta)
beta_score += np.sum(gammaln(sum_eta) - gammaln(np.sum(_lambda, 1)))
total_score = word_score + theta_score + beta_score
return total_score
def get_document_topics(self, word_id, minimum_probability=None):
"""
This method overwrites `LdaModel.get_document_topics` and simply raises an
exception. `get_document_topics` is not valid for the author-topic model,
use `get_author_topics` instead.
"""
raise NotImplementedError(
'Method "get_document_topics" is not valid for the author-topic model. '
'Use the "get_author_topics" method.'
)
def get_author_topics(self, author_name, minimum_probability=None):
"""
        Return the topic distribution for the given author, as a list of
(topic_id, topic_probability) 2-tuples.
Ignore topics with very low probability (below `minimum_probability`).
Obtaining topic probabilities of each word, as in LDA (via `per_word_topics`),
is not supported.
"""
author_id = self.author2id[author_name]
if minimum_probability is None:
minimum_probability = self.minimum_probability
minimum_probability = max(minimum_probability, 1e-8) # never allow zero values in sparse output
topic_dist = self.state.gamma[author_id, :] / sum(self.state.gamma[author_id, :])
author_topics = [
(topicid, topicvalue) for topicid, topicvalue in enumerate(topic_dist)
if topicvalue >= minimum_probability
]
return author_topics
def __getitem__(self, author_names, eps=None):
"""
Return topic distribution for input author as a list of
        (topic_id, topic_probability) 2-tuples.
        Ignores topics with probability less than `eps`.
Do not call this method directly, instead use `model[author_names]`.
"""
if isinstance(author_names, list):
items = []
for a in author_names:
items.append(self.get_author_topics(a, minimum_probability=eps))
else:
items = self.get_author_topics(author_names, minimum_probability=eps)
return items
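# A minimal usage sketch (the author names below are hypothetical, and the topic
# distributions shown in the comments only indicate the shape of the return value):
# >>> model.get_author_topics('jane_doe')          # e.g. [(0, 0.61), (3, 0.27)]
# >>> model['jane_doe']                            # same result via __getitem__
# >>> model[['jane_doe', 'john_roe']]              # one distribution per author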
|
ihsanudin/odoo
|
refs/heads/8.0
|
addons/portal_gamification/__init__.py
|
381
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
|
product-owner/2015scrum
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/binascii.py
|
620
|
"""A pure Python implementation of binascii.
Rather slow and buggy in corner cases.
PyPy provides an RPython version too.
"""
# borrowed from https://bitbucket.org/pypy/pypy/src/f2bf94943a41/lib_pypy/binascii.py
class Error(Exception):
pass
class Done(Exception):
pass
class Incomplete(Exception):
pass
def a2b_uu(s):
if not s:
return ''
length = (ord(s[0]) - 0x20) % 64
def quadruplets_gen(s):
while s:
try:
yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
except IndexError:
s += ' '
yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
return
s = s[4:]
try:
result = [''.join(
[chr((A - 0x20) << 2 | (((B - 0x20) >> 4) & 0x3)),
chr(((B - 0x20) & 0xf) << 4 | (((C - 0x20) >> 2) & 0xf)),
chr(((C - 0x20) & 0x3) << 6 | ((D - 0x20) & 0x3f))
]) for A, B, C, D in quadruplets_gen(s[1:].rstrip())]
except ValueError:
raise Error('Illegal char')
result = ''.join(result)
trailingdata = result[length:]
if trailingdata.strip('\x00'):
raise Error('Trailing garbage')
result = result[:length]
if len(result) < length:
result += ((length - len(result)) * '\x00')
return bytes(result, __BRYTHON__.charset)
def b2a_uu(s):
length = len(s)
if length > 45:
raise Error('At most 45 bytes at once')
def triples_gen(s):
while s:
try:
yield ord(s[0]), ord(s[1]), ord(s[2])
except IndexError:
s += '\0\0'
yield ord(s[0]), ord(s[1]), ord(s[2])
return
s = s[3:]
result = [''.join(
[chr(0x20 + (( A >> 2 ) & 0x3F)),
chr(0x20 + (((A << 4) | ((B >> 4) & 0xF)) & 0x3F)),
chr(0x20 + (((B << 2) | ((C >> 6) & 0x3)) & 0x3F)),
chr(0x20 + (( C ) & 0x3F))])
for A, B, C in triples_gen(s)]
return chr(ord(' ') + (length & 0o77)) + ''.join(result) + '\n'
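# A small, self-contained sketch of how the two uu helpers above fit together.
# It is illustrative only: a2b_uu() builds its result with __BRYTHON__.charset,
# so the decoding half is only expected to work inside the Brython runtime.
def _uu_example():
    encoded = b2a_uu('Cat')        # '#0V%T\n' -- '#' encodes the length (3)
    decoded = a2b_uu(encoded)      # b'Cat' under Brython
    return encoded, decoded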
table_a2b_base64 = {
'A': 0,
'B': 1,
'C': 2,
'D': 3,
'E': 4,
'F': 5,
'G': 6,
'H': 7,
'I': 8,
'J': 9,
'K': 10,
'L': 11,
'M': 12,
'N': 13,
'O': 14,
'P': 15,
'Q': 16,
'R': 17,
'S': 18,
'T': 19,
'U': 20,
'V': 21,
'W': 22,
'X': 23,
'Y': 24,
'Z': 25,
'a': 26,
'b': 27,
'c': 28,
'd': 29,
'e': 30,
'f': 31,
'g': 32,
'h': 33,
'i': 34,
'j': 35,
'k': 36,
'l': 37,
'm': 38,
'n': 39,
'o': 40,
'p': 41,
'q': 42,
'r': 43,
's': 44,
't': 45,
'u': 46,
'v': 47,
'w': 48,
'x': 49,
'y': 50,
'z': 51,
'0': 52,
'1': 53,
'2': 54,
'3': 55,
'4': 56,
'5': 57,
'6': 58,
'7': 59,
'8': 60,
'9': 61,
'+': 62,
'/': 63,
'=': 0,
}
def a2b_base64(s):
if not isinstance(s, (str, bytes)):
raise TypeError("expected string, got %r" % (s,))
s = s.rstrip()
# clean out all invalid characters, this also strips the final '=' padding
# check for correct padding
def next_valid_char(s, pos):
for i in range(pos + 1, len(s)):
c = s[i]
if c < '\x7f':
try:
table_a2b_base64[c]
return c
except KeyError:
pass
return None
quad_pos = 0
leftbits = 0
leftchar = 0
res = []
for i, c in enumerate(s):
if isinstance(c, int):
c = chr(c)
if c > '\x7f' or c == '\n' or c == '\r' or c == ' ':
continue
if c == '=':
if quad_pos < 2 or (quad_pos == 2 and next_valid_char(s, i) != '='):
continue
else:
leftbits = 0
break
try:
next_c = table_a2b_base64[c]
except KeyError:
continue
quad_pos = (quad_pos + 1) & 0x03
leftchar = (leftchar << 6) | next_c
leftbits += 6
if leftbits >= 8:
leftbits -= 8
res.append((leftchar >> leftbits & 0xff))
leftchar &= ((1 << leftbits) - 1)
if leftbits != 0:
raise Error('Incorrect padding')
return bytes(''.join([chr(i) for i in res]),__BRYTHON__.charset)
table_b2a_base64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"\
"0123456789+/"
def b2a_base64(s):
length = len(s)
final_length = length % 3
def triples_gen(s):
while s:
try:
yield s[0], s[1], s[2]
except IndexError:
s += b'\0\0'
yield s[0], s[1], s[2]
return
s = s[3:]
a = triples_gen(s[ :length - final_length])
result = [''.join(
[table_b2a_base64[( A >> 2 ) & 0x3F],
table_b2a_base64[((A << 4) | ((B >> 4) & 0xF)) & 0x3F],
table_b2a_base64[((B << 2) | ((C >> 6) & 0x3)) & 0x3F],
table_b2a_base64[( C ) & 0x3F]])
for A, B, C in a]
final = s[length - final_length:]
if final_length == 0:
snippet = ''
elif final_length == 1:
a = ord(final[0])
snippet = table_b2a_base64[(a >> 2 ) & 0x3F] + \
table_b2a_base64[(a << 4 ) & 0x3F] + '=='
else:
a = ord(final[0])
b = ord(final[1])
snippet = table_b2a_base64[(a >> 2) & 0x3F] + \
table_b2a_base64[((a << 4) | (b >> 4) & 0xF) & 0x3F] + \
table_b2a_base64[(b << 2) & 0x3F] + '='
return bytes(''.join(result) + snippet + '\n',__BRYTHON__.charset)
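# Illustrative round trip for the base64 helpers above. b2a_base64() expects a
# bytes-like object (indexing must yield integers), and both functions build their
# result with __BRYTHON__.charset, so this sketch assumes the Brython runtime.
def _base64_example():
    encoded = b2a_base64(b'Cat')   # b'Q2F0\n'
    decoded = a2b_base64('Q2F0')   # b'Cat'
    return encoded, decoded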
def a2b_qp(s, header=False):
inp = 0
odata = []
while inp < len(s):
if s[inp] == '=':
inp += 1
if inp >= len(s):
break
# Soft line breaks
if (s[inp] == '\n') or (s[inp] == '\r'):
if s[inp] != '\n':
while inp < len(s) and s[inp] != '\n':
inp += 1
if inp < len(s):
inp += 1
elif s[inp] == '=':
# broken case from broken python qp
odata.append('=')
inp += 1
elif s[inp] in hex_numbers and s[inp + 1] in hex_numbers:
ch = chr(int(s[inp:inp+2], 16))
inp += 2
odata.append(ch)
else:
odata.append('=')
elif header and s[inp] == '_':
odata.append(' ')
inp += 1
else:
odata.append(s[inp])
inp += 1
return bytes(''.join(odata), __BRYTHON__.charset)
def b2a_qp(data, quotetabs=False, istext=True, header=False):
"""quotetabs=True means that tab and space characters are always
quoted.
istext=False means that \r and \n are treated as regular characters
header=True encodes space characters with '_' and requires
real '_' characters to be quoted.
"""
MAXLINESIZE = 76
# See if this string is using CRLF line ends
lf = data.find('\n')
crlf = lf > 0 and data[lf-1] == '\r'
inp = 0
linelen = 0
odata = []
while inp < len(data):
c = data[inp]
if (c > '~' or
c == '=' or
(header and c == '_') or
(c == '.' and linelen == 0 and (inp+1 == len(data) or
data[inp+1] == '\n' or
data[inp+1] == '\r')) or
(not istext and (c == '\r' or c == '\n')) or
((c == '\t' or c == ' ') and (inp + 1 == len(data))) or
(c <= ' ' and c != '\r' and c != '\n' and
(quotetabs or (not quotetabs and (c != '\t' and c != ' '))))):
linelen += 3
if linelen >= MAXLINESIZE:
odata.append('=')
if crlf: odata.append('\r')
odata.append('\n')
linelen = 3
odata.append('=' + two_hex_digits(ord(c)))
inp += 1
else:
if (istext and
(c == '\n' or (inp+1 < len(data) and c == '\r' and
data[inp+1] == '\n'))):
linelen = 0
# Protect against whitespace on end of line
if (len(odata) > 0 and
(odata[-1] == ' ' or odata[-1] == '\t')):
ch = ord(odata[-1])
odata[-1] = '='
odata.append(two_hex_digits(ch))
if crlf: odata.append('\r')
odata.append('\n')
if c == '\r':
inp += 2
else:
inp += 1
else:
if (inp + 1 < len(data) and
data[inp+1] != '\n' and
(linelen + 1) >= MAXLINESIZE):
odata.append('=')
if crlf: odata.append('\r')
odata.append('\n')
linelen = 0
linelen += 1
if header and c == ' ':
c = '_'
odata.append(c)
inp += 1
return ''.join(odata)
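# A short sketch of the quoted-printable encoder above. '=' is the escape
# character, so it is emitted as '=3D'; plain ASCII passes through unchanged.
def _qp_example():
    return b2a_qp('hello=world')   # 'hello=3Dworld'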
hex_numbers = '0123456789ABCDEF'
def hex(n):
if n == 0:
return '0'
if n < 0:
n = -n
sign = '-'
else:
sign = ''
arr = []
def hex_gen(n):
""" Yield a nibble at a time. """
while n:
yield n % 0x10
            n = n // 0x10  # integer division; avoids float nibbles under Python 3
for nibble in hex_gen(n):
arr = [hex_numbers[nibble]] + arr
return sign + ''.join(arr)
def two_hex_digits(n):
    return hex_numbers[n // 0x10] + hex_numbers[n % 0x10]  # integer division for valid indexing
def strhex_to_int(s):
i = 0
for c in s:
i = i * 0x10 + hex_numbers.index(c)
return i
hqx_encoding = '!"#$%&\'()*+,-012345689@ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr'
DONE = 0x7f
SKIP = 0x7e
FAIL = 0x7d
table_a2b_hqx = [
#^@ ^A ^B ^C ^D ^E ^F ^G
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
#\b \t \n ^K ^L \r ^N ^O
FAIL, FAIL, SKIP, FAIL, FAIL, SKIP, FAIL, FAIL,
#^P ^Q ^R ^S ^T ^U ^V ^W
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
#^X ^Y ^Z ^[ ^\ ^] ^^ ^_
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
# ! " # $ % & '
FAIL, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
#( ) * + , - . /
0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, FAIL, FAIL,
#0 1 2 3 4 5 6 7
0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, FAIL,
#8 9 : ; < = > ?
0x14, 0x15, DONE, FAIL, FAIL, FAIL, FAIL, FAIL,
#@ A B C D E F G
0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D,
#H I J K L M N O
0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, FAIL,
#P Q R S T U V W
0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, FAIL,
#X Y Z [ \ ] ^ _
0x2C, 0x2D, 0x2E, 0x2F, FAIL, FAIL, FAIL, FAIL,
#` a b c d e f g
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, FAIL,
#h i j k l m n o
0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, FAIL, FAIL,
#p q r s t u v w
0x3D, 0x3E, 0x3F, FAIL, FAIL, FAIL, FAIL, FAIL,
#x y z { | } ~ ^?
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
]
def a2b_hqx(s):
result = []
def quadruples_gen(s):
t = []
for c in s:
res = table_a2b_hqx[ord(c)]
if res == SKIP:
continue
elif res == FAIL:
raise Error('Illegal character')
elif res == DONE:
yield t
raise Done
else:
t.append(res)
if len(t) == 4:
yield t
t = []
yield t
done = 0
try:
for snippet in quadruples_gen(s):
length = len(snippet)
if length == 4:
result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2)))
result.append(chr(((snippet[2] & 0x03) << 6) | (snippet[3])))
elif length == 3:
result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2)))
elif length == 2:
result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
except Done:
done = 1
except Error:
raise
return (''.join(result), done)
# should this return a bytes object?
#return (bytes(''.join(result), __BRYTHON__.charset), done)
def b2a_hqx(s):
result =[]
def triples_gen(s):
while s:
try:
yield ord(s[0]), ord(s[1]), ord(s[2])
except IndexError:
yield tuple([ord(c) for c in s])
s = s[3:]
for snippet in triples_gen(s):
length = len(snippet)
if length == 3:
result.append(
hqx_encoding[(snippet[0] & 0xfc) >> 2])
result.append(hqx_encoding[
((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)])
result.append(hqx_encoding[
(snippet[1] & 0x0f) << 2 | ((snippet[2] & 0xc0) >> 6)])
result.append(hqx_encoding[snippet[2] & 0x3f])
elif length == 2:
result.append(
hqx_encoding[(snippet[0] & 0xfc) >> 2])
result.append(hqx_encoding[
((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)])
result.append(hqx_encoding[
(snippet[1] & 0x0f) << 2])
elif length == 1:
result.append(
hqx_encoding[(snippet[0] & 0xfc) >> 2])
result.append(hqx_encoding[
((snippet[0] & 0x03) << 4)])
return ''.join(result)
crctab_hqx = [
0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
]
def crc_hqx(s, crc):
for c in s:
crc = ((crc << 8) & 0xff00) ^ crctab_hqx[((crc >> 8) & 0xff) ^ ord(c)]
return crc
def rlecode_hqx(s):
"""
Run length encoding for binhex4.
The CPython implementation does not do run length encoding
of \x90 characters. This implementation does.
"""
if not s:
return ''
result = []
prev = s[0]
count = 1
# Add a dummy character to get the loop to go one extra round.
# The dummy must be different from the last character of s.
# In the same step we remove the first character, which has
# already been stored in prev.
if s[-1] == '!':
s = s[1:] + '?'
else:
s = s[1:] + '!'
for c in s:
if c == prev and count < 255:
count += 1
else:
if count == 1:
if prev != '\x90':
result.append(prev)
else:
result.extend(['\x90', '\x00'])
elif count < 4:
if prev != '\x90':
result.extend([prev] * count)
else:
result.extend(['\x90', '\x00'] * count)
else:
if prev != '\x90':
result.extend([prev, '\x90', chr(count)])
else:
result.extend(['\x90', '\x00', '\x90', chr(count)])
count = 1
prev = c
return ''.join(result)
def rledecode_hqx(s):
s = s.split('\x90')
result = [s[0]]
prev = s[0]
for snippet in s[1:]:
count = ord(snippet[0])
if count > 0:
result.append(prev[-1] * (count-1))
prev = snippet
else:
result.append('\x90')
prev = '\x90'
result.append(snippet[1:])
return ''.join(result)
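# Run-length encoding sketch: four identical characters collapse to the
# three-character sequence value + '\x90' + count, and decoding restores them.
def _rle_example():
    encoded = rlecode_hqx('aaaa')     # 'a\x90\x04'
    decoded = rledecode_hqx(encoded)  # 'aaaa'
    return encoded, decoded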
crc_32_tab = [
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
0x2d02ef8d
]
def crc32(s, crc=0):
result = 0
crc = ~int(crc) & 0xffffffff
#crc = ~long(crc) & 0xffffffffL
for c in s:
crc = crc_32_tab[(crc ^ int(ord(c))) & 0xff] ^ (crc >> 8)
#crc = crc_32_tab[(crc ^ long(ord(c))) & 0xffL] ^ (crc >> 8)
#/* Note: (crc >> 8) MUST zero fill on left
result = crc ^ 0xffffffff
if result > 2**31:
result = ((result + 2**31) % 2**32) - 2**31
return result
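# Minimal usage sketch for the pure-Python crc32 above. For ASCII input it is
# intended to agree with the C implementation in the standard library, e.g.
# zlib.crc32(b'hello'); that equivalence is an expectation, not a guarantee.
def _crc32_example():
    return crc32('hello')   # expected to equal zlib.crc32(b'hello')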
def b2a_hex(s):
result = []
for char in s:
c = (ord(char) >> 4) & 0xf
if c > 9:
c = c + ord('a') - 10
else:
c = c + ord('0')
result.append(chr(c))
c = ord(char) & 0xf
if c > 9:
c = c + ord('a') - 10
else:
c = c + ord('0')
result.append(chr(c))
return ''.join(result)
hexlify = b2a_hex
table_hex = [
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1, -1,-1,-1,-1,
-1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1
]
def a2b_hex(t):
result = []
def pairs_gen(s):
while s:
try:
yield table_hex[ord(s[0])], table_hex[ord(s[1])]
except IndexError:
if len(s):
raise TypeError('Odd-length string')
return
s = s[2:]
for a, b in pairs_gen(t):
if a < 0 or b < 0:
raise TypeError('Non-hexadecimal digit found')
result.append(chr((a << 4) + b))
return bytes(''.join(result), __BRYTHON__.charset)
unhexlify = a2b_hex
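# Hexlify sketch: each input byte becomes two lowercase hex digits. b2a_hex()
# returns a plain string; a2b_hex() builds bytes via __BRYTHON__.charset, so the
# decoding half assumes the Brython runtime.
def _hex_example():
    encoded = b2a_hex('Cat')     # '436174'
    decoded = a2b_hex('436174')  # b'Cat' under Brython
    return encoded, decoded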
|
rbian/virt-test
|
refs/heads/master
|
virttest/bootstrap.py
|
2
|
import logging
import os
import glob
import shutil
from autotest.client.shared import logging_manager, error
from autotest.client import utils
import utils_misc
import data_dir
import asset
import cartesian_config
import utils_selinux
import defaults
basic_program_requirements = ['7za', 'tcpdump', 'nc', 'ip', 'arping']
recommended_programs = {'qemu': [('qemu-kvm', 'kvm'), ('qemu-img',),
('qemu-io',)],
'libvirt': [('virsh',), ('virt-install',),
('fakeroot',), ('semanage',),
('getfattr',), ('restorecon',)],
'openvswitch': [],
'lvsb': [('semanage',), ('getfattr',), ('restorecon',)],
'v2v': [],
'libguestfs': [('perl',)]}
mandatory_programs = {'qemu': basic_program_requirements + ['gcc'],
'libvirt': basic_program_requirements,
'openvswitch': basic_program_requirements,
'lvsb': ['virt-sandbox', 'virt-sandbox-service', 'virsh'],
'v2v': basic_program_requirements,
'libguestfs': basic_program_requirements}
mandatory_headers = {'qemu': ['Python.h', 'types.h', 'socket.h', 'unistd.h'],
'libvirt': [],
'openvswitch': [],
'v2v': [],
'lvsb': [],
'libguestfs': []}
first_subtest = {'qemu': ['unattended_install', 'steps'],
'libvirt': ['unattended_install'],
'openvswitch': ['unattended_install'],
'v2v': ['unattended_install'],
'libguestfs': ['unattended_install'],
'lvsb': []}
last_subtest = {'qemu': ['shutdown'],
'libvirt': ['shutdown', 'remove_guest'],
'openvswitch': ['shutdown'],
'v2v': ['shutdown'],
'libguestfs': ['shutdown'],
'lvsb': []}
test_filter = ['__init__', 'cfg', 'dropin.py']
def get_guest_os_info_list(test_name, guest_os):
"""
Returns a list of matching assets compatible with the specified test name
and guest OS
"""
os_info_list = []
cartesian_parser = cartesian_config.Parser()
cartesian_parser.parse_file(data_dir.get_backend_cfg_path(test_name, 'guest-os.cfg'))
cartesian_parser.only_filter(guest_os)
dicts = cartesian_parser.get_dicts()
for params in dicts:
image_name = params.get('image_name', 'image').split('/')[-1]
shortname = params.get('shortname', guest_os)
os_info_list.append({'asset': image_name, 'variant': shortname})
if not os_info_list:
logging.error("Could not find any assets compatible with %s for %s",
guest_os, test_name)
raise ValueError("Missing compatible assets for %s", guest_os)
return os_info_list
def _get_config_filter():
config_filter = ['__init__', ]
for provider_subdir in asset.get_test_provider_subdirs():
config_filter.append(os.path.join('%s' % provider_subdir, 'cfg'))
return config_filter
config_filter = _get_config_filter()
def verify_recommended_programs(t_type):
cmds = recommended_programs[t_type]
for cmd_aliases in cmds:
for cmd in cmd_aliases:
found = None
try:
found = utils_misc.find_command(cmd)
logging.info(found)
break
except ValueError:
pass
if found is None:
if len(cmd_aliases) == 1:
logging.info("Recommended command %s missing. You may "
"want to install it if not building from "
"source.", cmd_aliases[0])
else:
logging.info("Recommended command missing. You may "
"want to install it if not building it from "
"source. Aliases searched: %s", cmd_aliases)
def verify_mandatory_programs(t_type):
failed_cmds = []
cmds = mandatory_programs[t_type]
for cmd in cmds:
try:
logging.info(utils_misc.find_command(cmd))
except ValueError:
logging.error("Required command %s is missing. You must "
"install it", cmd)
failed_cmds.append(cmd)
includes = mandatory_headers[t_type]
available_includes = glob.glob('/usr/include/*/*')
for include in available_includes:
include_basename = os.path.basename(include)
if include_basename in includes:
logging.info(include)
includes.pop(includes.index(include_basename))
if includes:
for include in includes:
logging.error("Required include %s is missing. You may have to "
"install it", include)
failures = failed_cmds + includes
if failures:
raise ValueError('Missing (cmds/includes): %s' % " ".join(failures))
def write_subtests_files(config_file_list, output_file_object, test_type=None):
'''
    Writes a collection of individual subtest config files to one output file.
    Optionally, for tests whose type is known, write the 'virt_test_type'
    configuration automatically.
'''
if test_type is not None:
output_file_object.write(" - @type_specific:\n")
output_file_object.write(" variants subtest:\n")
for provider_name, config_path in config_file_list:
config_file = open(config_path, 'r')
write_test_type_line = False
write_provider_line = False
for line in config_file.readlines():
if line.startswith('- ') and provider_name is not None:
name, deps = line.split(":")
name = name[1:].strip()
if name[0] == "@":
name = name[1:]
line = "- %s.%s:%s" % (provider_name, name, deps)
# special virt_test_type line output
if test_type is not None:
if write_test_type_line:
type_line = (" virt_test_type = %s\n" %
test_type)
output_file_object.write(type_line)
provider_line = (" provider = %s\n" %
provider_name)
output_file_object.write(provider_line)
write_test_type_line = False
elif line.startswith('- '):
write_test_type_line = True
output_file_object.write(" %s" % line)
else:
if write_provider_line:
provider_line = (" provider = %s\n" %
provider_name)
output_file_object.write(provider_line)
write_provider_line = False
elif line.startswith('- '):
write_provider_line = True
# regular line output
output_file_object.write(" %s" % line)
config_file.close()
def get_directory_structure(rootdir, guest_file):
rootdir = rootdir.rstrip(os.sep)
start = rootdir.rfind(os.sep) + 1
previous_indent = 0
indent = 0
number_variants = 0
for path, subdirs, files in os.walk(rootdir):
folders = path[start:].split(os.sep)
folders = folders[1:]
indent = len(folders)
if indent > previous_indent:
guest_file.write("%svariants:\n" %
(4 * (indent + number_variants - 1) * " "))
number_variants += 1
elif indent < previous_indent:
number_variants = indent
indent += number_variants
try:
base_folder = folders[-1]
except IndexError:
base_folder = []
base_cfg = "%s.cfg" % base_folder
base_cfg_path = os.path.join(os.path.dirname(path), base_cfg)
if os.path.isfile(base_cfg_path):
base_file = open(base_cfg_path, 'r')
for line in base_file.readlines():
guest_file.write("%s%s" % ((4 * (indent - 1) * " "), line))
else:
if base_folder:
guest_file.write("%s- %s:\n" %
((4 * (indent - 1) * " "), base_folder))
variant_printed = False
if files:
files.sort()
for f in files:
if f.endswith(".cfg"):
bf = f[:len(f) - 4]
if bf not in subdirs:
if not variant_printed:
guest_file.write("%svariants:\n" %
((4 * (indent) * " ")))
variant_printed = True
base_file = open(os.path.join(path, f), 'r')
for line in base_file.readlines():
guest_file.write("%s%s" %
((4 * (indent + 1) * " "), line))
indent -= number_variants
previous_indent = indent
def create_guest_os_cfg(t_type):
root_dir = data_dir.get_root_dir()
guest_os_cfg_dir = os.path.join(root_dir, 'shared', 'cfg', 'guest-os')
guest_os_cfg_path = data_dir.get_backend_cfg_path(t_type, 'guest-os.cfg')
guest_os_cfg_file = open(guest_os_cfg_path, 'w')
get_directory_structure(guest_os_cfg_dir, guest_os_cfg_file)
def create_subtests_cfg(t_type):
root_dir = data_dir.get_root_dir()
specific_test_list = []
specific_file_list = []
specific_subdirs = asset.get_test_provider_subdirs(t_type)
provider_names_specific = asset.get_test_provider_names(t_type)
provider_info_specific = []
for specific_provider in provider_names_specific:
provider_info_specific.append(asset.get_test_provider_info(specific_provider))
for subdir in specific_subdirs:
specific_test_list += data_dir.SubdirGlobList(subdir,
'*.py',
test_filter)
specific_file_list += data_dir.SubdirGlobList(subdir,
'*.cfg',
config_filter)
shared_test_list = []
shared_file_list = []
shared_subdirs = asset.get_test_provider_subdirs('generic')
provider_names_shared = asset.get_test_provider_names('generic')
provider_info_shared = []
for shared_provider in provider_names_shared:
provider_info_shared.append(asset.get_test_provider_info(shared_provider))
if not t_type == 'lvsb':
for subdir in shared_subdirs:
shared_test_list += data_dir.SubdirGlobList(subdir,
'*.py',
test_filter)
shared_file_list += data_dir.SubdirGlobList(subdir,
'*.cfg',
config_filter)
all_specific_test_list = []
for test in specific_test_list:
for p in provider_info_specific:
provider_base_path = p['backends'][t_type]['path']
if provider_base_path in test:
provider_name = p['name']
break
basename = os.path.basename(test)
if basename != "__init__.py":
all_specific_test_list.append("%s.%s" %
(provider_name,
basename.split(".")[0]))
all_shared_test_list = []
for test in shared_test_list:
for p in provider_info_shared:
provider_base_path = p['backends']['generic']['path']
if provider_base_path in test:
provider_name = p['name']
break
basename = os.path.basename(test)
if basename != "__init__.py":
all_shared_test_list.append("%s.%s" %
(provider_name,
basename.split(".")[0]))
all_specific_test_list.sort()
all_shared_test_list.sort()
all_test_list = set(all_specific_test_list + all_shared_test_list)
first_subtest_file = []
last_subtest_file = []
non_dropin_tests = []
tmp = []
for shared_file in shared_file_list:
provider_name = None
for p in provider_info_shared:
provider_base_path = p['backends']['generic']['path']
if provider_base_path in shared_file:
provider_name = p['name']
break
shared_file_obj = open(shared_file, 'r')
for line in shared_file_obj.readlines():
line = line.strip()
if line.startswith("type"):
cartesian_parser = cartesian_config.Parser()
cartesian_parser.parse_string(line)
td = cartesian_parser.get_dicts().next()
values = td['type'].split(" ")
for value in values:
                    if value not in non_dropin_tests:
non_dropin_tests.append("%s.%s" %
(provider_name, value))
shared_file_name = os.path.basename(shared_file)
shared_file_name = shared_file_name.split(".")[0]
if shared_file_name in first_subtest[t_type]:
if [provider_name, shared_file] not in first_subtest_file:
first_subtest_file.append([provider_name, shared_file])
elif shared_file_name in last_subtest[t_type]:
if [provider_name, shared_file] not in last_subtest_file:
last_subtest_file.append([provider_name, shared_file])
else:
if [provider_name, shared_file] not in tmp:
tmp.append([provider_name, shared_file])
shared_file_list = tmp
tmp = []
for shared_file in specific_file_list:
provider_name = None
for p in provider_info_specific:
provider_base_path = p['backends'][t_type]['path']
if provider_base_path in shared_file:
provider_name = p['name']
break
shared_file_obj = open(shared_file, 'r')
for line in shared_file_obj.readlines():
line = line.strip()
if line.startswith("type"):
cartesian_parser = cartesian_config.Parser()
cartesian_parser.parse_string(line)
td = cartesian_parser.get_dicts().next()
values = td['type'].split(" ")
for value in values:
if value not in non_dropin_tests:
non_dropin_tests.append("%s.%s" %
(provider_name, value))
shared_file_name = os.path.basename(shared_file)
shared_file_name = shared_file_name.split(".")[0]
if shared_file_name in first_subtest[t_type]:
if [provider_name, shared_file] not in first_subtest_file:
first_subtest_file.append([provider_name, shared_file])
elif shared_file_name in last_subtest[t_type]:
if [provider_name, shared_file] not in last_subtest_file:
last_subtest_file.append([provider_name, shared_file])
else:
if [provider_name, shared_file] not in tmp:
tmp.append([provider_name, shared_file])
specific_file_list = tmp
non_dropin_tests.sort()
non_dropin_tests = set(non_dropin_tests)
dropin_tests = all_test_list - non_dropin_tests
dropin_file_list = []
tmp_dir = data_dir.get_tmp_dir()
if not os.path.isdir(tmp_dir):
os.makedirs(tmp_dir)
for dropin_test in dropin_tests:
provider = dropin_test.split(".")[0]
d_type = dropin_test.split(".")[-1]
autogen_cfg_path = os.path.join(tmp_dir,
'%s.cfg' % dropin_test)
autogen_cfg_file = open(autogen_cfg_path, 'w')
autogen_cfg_file.write("# Drop-in test - auto generated snippet\n")
autogen_cfg_file.write("- %s:\n" % dropin_test)
autogen_cfg_file.write(" virt_test_type = %s\n" % t_type)
autogen_cfg_file.write(" type = %s\n" % d_type)
autogen_cfg_file.close()
dropin_file_list.append([provider, autogen_cfg_path])
dropin_file_list_2 = []
dropin_tests = os.listdir(os.path.join(data_dir.get_root_dir(), "dropin"))
dropin_cfg_path = os.path.join(tmp_dir, 'dropin.cfg')
dropin_cfg_file = open(dropin_cfg_path, 'w')
dropin_cfg_file.write("# Auto generated snippet for dropin tests\n")
dropin_cfg_file.write("- dropin:\n")
dropin_cfg_file.write(" variants:\n")
for dropin_test in dropin_tests:
if dropin_test == "README":
continue
dropin_cfg_file.write(" - %s:\n" % dropin_test)
dropin_cfg_file.write(" virt_test_type = %s\n" % t_type)
dropin_cfg_file.write(" type = dropin\n")
dropin_cfg_file.write(" start_vm = no\n")
dropin_cfg_file.write(" dropin_path = %s\n" % dropin_test)
dropin_cfg_file.close()
dropin_file_list_2.append(['io-github-autotest-qemu', dropin_cfg_path])
subtests_cfg = os.path.join(root_dir, 'backends', t_type, 'cfg',
'subtests.cfg')
subtests_file = open(subtests_cfg, 'w')
subtests_file.write(
"# Do not edit, auto generated file from subtests config\n")
subtests_file.write("variants subtest:\n")
write_subtests_files(first_subtest_file, subtests_file)
write_subtests_files(specific_file_list, subtests_file, t_type)
write_subtests_files(shared_file_list, subtests_file)
write_subtests_files(dropin_file_list, subtests_file)
write_subtests_files(dropin_file_list_2, subtests_file)
write_subtests_files(last_subtest_file, subtests_file)
subtests_file.close()
def create_config_files(test_dir, shared_dir, interactive, step=None,
force_update=False):
def is_file_tracked(fl):
tracked_result = utils.run("git ls-files %s --error-unmatch" % fl,
ignore_status=True, verbose=False)
return (tracked_result.exit_status == 0)
if step is None:
step = 0
logging.info("")
step += 1
logging.info("%d - Generating config set", step)
config_file_list = data_dir.SubdirGlobList(os.path.join(test_dir, "cfg"),
"*.cfg",
config_filter)
config_file_list = [cf for cf in config_file_list if is_file_tracked(cf)]
config_file_list_shared = glob.glob(os.path.join(shared_dir, "cfg",
"*.cfg"))
# Handle overrides of cfg files. Let's say a test provides its own
# subtest.cfg.sample, this file takes precedence over the shared
# subtest.cfg.sample. So, yank this file from the cfg file list.
config_file_list_shared_keep = []
for cf in config_file_list_shared:
basename = os.path.basename(cf)
target = os.path.join(test_dir, "cfg", basename)
if target not in config_file_list:
config_file_list_shared_keep.append(cf)
config_file_list += config_file_list_shared_keep
for config_file in config_file_list:
src_file = config_file
dst_file = os.path.join(test_dir, "cfg", os.path.basename(config_file))
if not os.path.isfile(dst_file):
logging.debug("Creating config file %s from sample", dst_file)
shutil.copyfile(src_file, dst_file)
else:
diff_cmd = "diff -Naur %s %s" % (dst_file, src_file)
diff_result = utils.run(
diff_cmd, ignore_status=True, verbose=False)
if diff_result.exit_status != 0:
logging.info("%s result:\n %s",
diff_result.command, diff_result.stdout)
if interactive:
answer = utils.ask("Config file %s differs from %s."
"Overwrite?" % (dst_file, src_file))
elif force_update:
answer = "y"
else:
answer = "n"
if answer == "y":
logging.debug("Restoring config file %s from sample",
dst_file)
shutil.copyfile(src_file, dst_file)
else:
logging.debug("Preserving existing %s file", dst_file)
else:
logging.debug("Config file %s exists, not touching", dst_file)
def haz_defcon(datadir, imagesdir, isosdir, tmpdir):
"""
Compare current types from Defaults, or if default, compare on-disk type
"""
# Searching through default contexts is very slow.
# Exploit restorecon -n to find any defaults
try:
# First element is list, third tuple item is desired context
data_type = utils_selinux.diff_defcon(datadir, False)[0][2]
except IndexError: # object matches default, get current on-disk context
data_type = utils_selinux.get_context_of_file(datadir)
# Extract just the type component
data_type = utils_selinux.get_type_from_context(data_type)
try:
# Do not descend, we want to know the base-dir def. context
images_type = utils_selinux.diff_defcon(imagesdir, False)[0][2]
except IndexError:
images_type = utils_selinux.get_context_of_file(imagesdir)
images_type = utils_selinux.get_type_from_context(images_type)
try:
isos_type = utils_selinux.diff_defcon(isosdir, False)[0][2]
except IndexError:
isos_type = utils_selinux.get_context_of_file(isosdir)
isos_type = utils_selinux.get_type_from_context(isos_type)
try:
tmp_type = utils_selinux.diff_defcon(tmpdir, False)[0][2]
except IndexError:
tmp_type = utils_selinux.get_context_of_file(tmpdir)
tmp_type = utils_selinux.get_type_from_context(tmp_type)
    # hard-coded values b/c there are only four of them and they are widely used
if data_type == 'virt_var_lib_t':
if images_type == 'virt_image_t':
if isos_type == 'virt_content_t':
if tmp_type == 'user_tmp_t':
return True # No changes needed
return False
def set_defcon(datadir, imagesdir, isosdir, tmpdir):
"""
    Tries to set datadir default contexts; returns True if changed
"""
made_changes = False
try:
# Returns list of tuple(pathname, from, to) of context differences
# between on-disk and defaults. Only interested in top-level
# object [0] and the context it would change to [2]
data_type = utils_selinux.diff_defcon(datadir, False)[0][2]
        # Extract only the type
existing_data = utils_selinux.get_type_from_context(data_type)
except IndexError:
existing_data = None
try:
images_type = utils_selinux.diff_defcon(imagesdir, False)[0][2]
existing_images = utils_selinux.get_type_from_context(images_type)
except IndexError:
existing_images = None
try:
isos_type = utils_selinux.diff_defcon(isosdir, False)[0][2]
existing_isos = utils_selinux.get_type_from_context(isos_type)
except IndexError:
existing_isos = None
try:
tmp_type = utils_selinux.diff_defcon(tmpdir, False)[0][2]
existing_tmp = utils_selinux.get_type_from_context(tmp_type)
except IndexError:
existing_tmp = None
# Only print slow info message one time
could_be_slow = False
msg = "Defining default contexts, this could take a few seconds..."
# Changing default contexts is *slow*, avoid it if not necessary
    if existing_data is None or existing_data != 'virt_var_lib_t':
        # semanage gives errors if we don't treat /usr & /usr/local the same
data_regex = utils_selinux.transmogrify_usr_local(datadir)
logging.info(msg)
could_be_slow = True
# This applies only to datadir symlink, not sub-directories!
utils_selinux.set_defcon('virt_var_lib_t', data_regex)
made_changes = True
    if existing_images is None or existing_images != 'virt_image_t':
# Applies to imagesdir and everything below
images_regex = utils_selinux.transmogrify_usr_local(imagesdir)
images_regex = utils_selinux.transmogrify_sub_dirs(images_regex)
if not could_be_slow:
logging.info(msg)
could_be_slow = True
utils_selinux.set_defcon('virt_image_t', images_regex)
made_changes = True
    if existing_isos is None or existing_isos != 'virt_content_t':
# Applies to isosdir and everything below
isos_regex = utils_selinux.transmogrify_usr_local(isosdir)
isos_regex = utils_selinux.transmogrify_sub_dirs(isos_regex)
if not could_be_slow:
logging.info(msg)
could_be_slow = True
utils_selinux.set_defcon('virt_content_t', isos_regex)
made_changes = True
    if existing_tmp is None or existing_tmp != 'user_tmp_t':
tmp_regex = utils_selinux.transmogrify_usr_local(tmpdir)
tmp_regex = utils_selinux.transmogrify_sub_dirs(tmp_regex)
if not could_be_slow:
logging.info(msg)
could_be_slow = True
utils_selinux.set_defcon('user_tmp_t', tmp_regex)
made_changes = True
return made_changes
def verify_selinux(datadir, imagesdir, isosdir, tmpdir,
interactive, selinux=False):
"""
Verify/Set/Warn about SELinux and default file contexts for testing.
:param datadir: Abs. path to data-directory symlink
:param imagesdir: Abs. path to data/images directory
:param isosdir: Abs. path to data/isos directory
:param tmpdir: Abs. path to virt-test tmp dir
:param interactive: True if running from console
    :param selinux: Whether to set up SELinux contexts for shared/data
"""
# datadir can be a symlink, but these must not have any
imagesdir = os.path.realpath(imagesdir)
isosdir = os.path.realpath(isosdir)
tmpdir = os.path.realpath(tmpdir)
needs_relabel = None
try:
# Raise SeCmdError if selinux not installed
if utils_selinux.get_status() == 'enforcing':
# Check if default contexts are set
if not haz_defcon(datadir, imagesdir, isosdir, tmpdir):
if selinux:
answer = "y"
else:
if interactive:
answer = utils.ask("Setup all undefined default SE"
"Linux contexts for shared/data/?")
else:
answer = "n"
else:
answer = "n"
if answer.lower() == "y":
# Assume relabeling is needed if changes made
needs_relabel = set_defcon(datadir, imagesdir, isosdir, tmpdir)
# Only relabel if files/dirs don't match default
labels_ok = utils_selinux.verify_defcon(datadir, False)
labels_ok &= utils_selinux.verify_defcon(imagesdir, True)
labels_ok &= utils_selinux.verify_defcon(isosdir, True)
labels_ok &= utils_selinux.verify_defcon(tmpdir, True)
if labels_ok:
needs_relabel = False
else:
logging.warning("On-disk SELinux labels do not match defaults")
needs_relabel = True
# Disabled or Permissive mode is same result as not installed
else:
logging.info("SELinux in permissive or disabled, testing"
"in enforcing mode is highly encourraged.")
except utils_selinux.SemanageError:
logging.info("Could not set default SELinux contexts. Please")
logging.info("consider installing the semanage program then ")
logging.info("verifying and/or running running:")
# Paths must be transmogrified (changed) into regular expressions
logging.info("semanage fcontext --add -t virt_var_lib_t '%s'",
utils_selinux.transmogrify_usr_local(datadir))
logging.info("semanage fcontext --add -t virt_image_t '%s'",
utils_selinux.transmogrify_usr_local(
utils_selinux.transmogrify_sub_dirs(imagesdir)))
logging.info("semanage fcontext --add -t virt_content_t '%s'",
utils_selinux.transmogrify_usr_local(
utils_selinux.transmogrify_sub_dirs(isosdir)))
logging.info("semanage fcontext --add -t user_tmp_t '%s'",
utils_selinux.transmogrify_usr_local(
utils_selinux.transmogrify_sub_dirs(tmpdir)))
needs_relabel = None # Next run will catch if relabeling needed
except utils_selinux.SelinuxError: # Catchall SELinux related
logging.info("SELinux not available, or error in command/setup.")
logging.info("Please manually verify default file contexts before")
logging.info("testing with SELinux enabled and enforcing.")
if needs_relabel:
if selinux:
answer = "y"
else:
if interactive:
answer = utils.ask("Relabel from default contexts?")
else:
answer = "n"
if answer.lower() == 'y':
changes = utils_selinux.apply_defcon(datadir, False)
changes += utils_selinux.apply_defcon(imagesdir, True)
changes += utils_selinux.apply_defcon(isosdir, True)
changes += utils_selinux.apply_defcon(tmpdir, True)
logging.info("Corrected contexts on %d files/dirs",
len(changes))
def bootstrap(test_name, test_dir, base_dir, default_userspace_paths,
check_modules, online_docs_url, restore_image=False,
download_image=True, interactive=True, selinux=False,
verbose=False, update_providers=False,
guest_os=defaults.DEFAULT_GUEST_OS):
"""
Common virt test assistant module.
:param test_name: Test name, such as "qemu".
:param test_dir: Path with the test directory.
:param base_dir: Base directory used to hold images and isos.
:param default_userspace_paths: Important programs for a successful test
execution.
:param check_modules: Whether we want to verify if a given list of modules
is loaded in the system.
:param online_docs_url: URL to an online documentation system, such as a
wiki page.
:param restore_image: Whether to restore the image from the pristine.
:param interactive: Whether to ask for confirmation.
:param verbose: Verbose output.
    :param selinux: Whether to set up SELinux contexts for shared/data.
:param update_providers: Whether to update test providers if they are already
downloaded.
:param guest_os: Specify the guest image used for bootstrapping. By default
the JeOS image is used.
:raise error.CmdError: If JeOS image failed to uncompress
:raise ValueError: If 7za was not found
"""
if interactive:
logging_manager.configure_logging(utils_misc.VirtLoggingConfig(),
verbose=verbose)
logging.info("%s test config helper", test_name)
step = 0
logging.info("")
step += 1
logging.info("%d - Updating all test providers", step)
asset.download_all_test_providers(update_providers)
logging.info("")
step += 1
logging.info("%d - Checking the mandatory programs and headers", step)
verify_mandatory_programs(test_name)
logging.info("")
step += 1
logging.info("%d - Checking the recommended programs", step)
verify_recommended_programs(test_name)
logging.info("")
step += 1
logging.info("%d - Verifying directories", step)
shared_dir = os.path.dirname(data_dir.get_data_dir())
sub_dir_list = ["images", "isos", "steps_data", "gpg"]
for sub_dir in sub_dir_list:
sub_dir_path = os.path.join(base_dir, sub_dir)
if not os.path.isdir(sub_dir_path):
logging.debug("Creating %s", sub_dir_path)
os.makedirs(sub_dir_path)
else:
logging.debug("Dir %s exists, not creating",
sub_dir_path)
datadir = data_dir.get_data_dir()
if test_name == 'libvirt':
create_config_files(test_dir, shared_dir, interactive, step)
create_subtests_cfg(test_name)
create_guest_os_cfg(test_name)
# Don't bother checking if changes can't be made
if os.getuid() == 0:
verify_selinux(datadir,
os.path.join(datadir, 'images'),
os.path.join(datadir, 'isos'),
data_dir.get_tmp_dir(),
interactive, selinux)
# lvsb test doesn't use any shared configs
elif test_name == 'lvsb':
create_subtests_cfg(test_name)
if os.getuid() == 0:
# Don't bother checking if changes can't be made
verify_selinux(datadir,
os.path.join(datadir, 'images'),
os.path.join(datadir, 'isos'),
data_dir.get_tmp_dir(),
interactive, selinux)
else: # Some other test
create_config_files(test_dir, shared_dir, interactive, step)
create_subtests_cfg(test_name)
create_guest_os_cfg(test_name)
if download_image or restore_image:
logging.info("")
step += 2
logging.info("%s - Verifying (and possibly downloading) guest image",
step)
for os_info in get_guest_os_info_list(test_name, guest_os):
os_asset = os_info['asset']
asset.download_asset(os_asset, interactive=interactive,
restore_image=restore_image)
if check_modules:
logging.info("")
step += 1
logging.info("%d - Checking for modules %s", step,
", ".join(check_modules))
for module in check_modules:
if not utils.module_is_loaded(module):
logging.warning("Module %s is not loaded. You might want to "
"load it", module)
else:
logging.debug("Module %s loaded", module)
if online_docs_url:
logging.info("")
step += 1
logging.info("%d - If you wish, take a look at the online docs for "
"more info", step)
logging.info("")
logging.info(online_docs_url)
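# A hedged usage sketch; every concrete value below (paths, module names, URL) is
# made up for illustration and is not taken from the real virt-test configuration:
# >>> bootstrap(test_name='qemu',
# ...           test_dir='/path/to/virt-test/qemu',
# ...           base_dir=data_dir.get_data_dir(),
# ...           default_userspace_paths=None,
# ...           check_modules=['kvm', 'kvm_intel'],
# ...           online_docs_url='http://example.com/virt-test-docs',
# ...           interactive=False)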
|
hy-2013/scrapy
|
refs/heads/master
|
scrapy/utils/sitemap.py
|
146
|
"""
Module for processing Sitemaps.
Note: The main purpose of this module is to provide support for the
SitemapSpider; its API is subject to change without notice.
"""
import lxml.etree
class Sitemap(object):
"""Class to parse Sitemap (type=urlset) and Sitemap Index
(type=sitemapindex) files"""
def __init__(self, xmltext):
xmlp = lxml.etree.XMLParser(recover=True, remove_comments=True, resolve_entities=False)
self._root = lxml.etree.fromstring(xmltext, parser=xmlp)
rt = self._root.tag
self.type = self._root.tag.split('}', 1)[1] if '}' in rt else rt
def __iter__(self):
for elem in self._root.getchildren():
d = {}
for el in elem.getchildren():
tag = el.tag
name = tag.split('}', 1)[1] if '}' in tag else tag
if name == 'link':
if 'href' in el.attrib:
d.setdefault('alternate', []).append(el.get('href'))
else:
d[name] = el.text.strip() if el.text else ''
if 'loc' in d:
yield d
def sitemap_urls_from_robots(robots_text):
"""Return an iterator over all sitemap urls contained in the given
robots.txt file
"""
for line in robots_text.splitlines():
if line.lstrip().startswith('Sitemap:'):
yield line.split(':', 1)[1].strip()
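# Illustrative usage sketch (not part of the original module); the XML
# document and robots.txt text below are invented for demonstration only.
if __name__ == '__main__':
    example_sitemap = b"""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
  <url><loc>http://www.example.com/</loc><lastmod>2014-01-01</lastmod></url>
</urlset>"""
    s = Sitemap(example_sitemap)
    print("type: %s" % s.type)  # -> urlset
    for entry in s:
        # each entry is a dict keyed by tag name, e.g. 'loc' and 'lastmod'
        print("%s %s" % (entry['loc'], entry.get('lastmod')))
    example_robots = "User-agent: *\nSitemap: http://www.example.com/sitemap.xml"
    print(list(sitemap_urls_from_robots(example_robots)))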
|
anorfleet/turntable
|
refs/heads/master
|
test/lib/python2.7/site-packages/scipy/stats/tests/test_fit.py
|
22
|
from __future__ import division, print_function, absolute_import
import os
import numpy as np
from numpy.testing import dec
from scipy import stats
from test_continuous_basic import distcont
# this is not a proper statistical test for convergence, but only
# verifies that the estimate and true values don't differ by too much
fit_sizes = [1000, 5000] # sample sizes to try
thresh_percent = 0.25 # percent of true parameters for fail cut-off
thresh_min = 0.75 # minimum difference estimate - true to fail test
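# Illustrative note (not part of the original test): with these settings a
# true parameter vector of, say, [4.0, 0.0, 1.0] gives per-parameter
# tolerances of max(4.0 * 0.25, 0.75) = 1.0 for the shape parameter and
# 0.75 for the loc and scale entries; check_cont_fit later replaces the loc
# tolerance with max(|sample mean| * 0.25, 0.75).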
failing_fits = [
'burr',
'chi',
'chi2',
'gausshyper',
'genexpon',
'gengamma',
'ksone',
'mielke',
'ncf',
'ncx2',
'pearson3',
'powerlognorm',
'truncexpon',
'tukeylambda',
'vonmises',
'wrapcauchy',
'levy_stable'
]
# Don't run the fit test on these:
skip_fit = [
'erlang', # Subclass of gamma, generates a warning.
]
@dec.slow
def test_cont_fit():
# this tests the closeness of the estimated parameters to the true
# parameters with fit method of continuous distributions
# Note: is slow, some distributions don't converge with sample size <= 10000
for distname, arg in distcont:
if distname not in skip_fit:
yield check_cont_fit, distname,arg
def check_cont_fit(distname,arg):
if distname in failing_fits:
# Skip failing fits unless overridden
xfail = True
try:
xfail = not int(os.environ['SCIPY_XFAIL'])
except:
pass
if xfail:
msg = "Fitting %s doesn't work reliably yet" % distname
msg += " [Set environment variable SCIPY_XFAIL=1 to run this test nevertheless.]"
dec.knownfailureif(True, msg)(lambda: None)()
distfn = getattr(stats, distname)
truearg = np.hstack([arg,[0.0,1.0]])
diffthreshold = np.max(np.vstack([truearg*thresh_percent,
np.ones(distfn.numargs+2)*thresh_min]),0)
for fit_size in fit_sizes:
# Note that if a fit succeeds, the other fit_sizes are skipped
np.random.seed(1234)
with np.errstate(all='ignore'):
rvs = distfn.rvs(size=fit_size, *arg)
est = distfn.fit(rvs) # start with default values
diff = est - truearg
# threshold for location
diffthreshold[-2] = np.max([np.abs(rvs.mean())*thresh_percent,thresh_min])
if np.any(np.isnan(est)):
raise AssertionError('nan returned in fit')
else:
if np.all(np.abs(diff) <= diffthreshold):
break
else:
txt = 'parameter: %s\n' % str(truearg)
txt += 'estimated: %s\n' % str(est)
txt += 'diff : %s\n' % str(diff)
raise AssertionError('fit not very good in %s\n' % distfn.name + txt)
if __name__ == "__main__":
np.testing.run_module_suite()
|
theonlynexus/gui2py
|
refs/heads/master
|
gui/controls/combobox.py
|
14
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"gui2py's ComboBox control (uses wx.ComboBox - wx.Choice is not used by now)"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2013- Mariano Reingart" # where applicable
# Initial implementation was based on PythonCard's ComboBox component,
# but redesigned and overhauled a lot (specs renamed, events refactorized, etc.)
import wx
from ..event import FormEvent
from ..component import Control, Spec, EventSpec, InitSpec, StyleSpec
from .listbox import ItemContainerControl
from .. import images
class ComboBox(ItemContainerControl):
"A combobox control (textbox + listbox)"
_wx_class = wx.ComboBox
_style = wx.CB_DROPDOWN | wx.NO_FULL_REPAINT_ON_RESIZE | wx.CLIP_SIBLINGS
_image = images.combobox
_commandtype = wx.wxEVT_COMMAND_COMBOBOX_SELECTED
multiselect = False # required by ItemContainerControl
text = InitSpec(lambda self: self.wx_obj.GetValue(),
lambda self, value: self.wx_obj.SetValue(value),
default="", type="string", _name="_value")
readonly = StyleSpec(wx.CB_READONLY, default=False)
onclick = onselect = EventSpec('click',
binding=wx.EVT_COMBOBOX, kind=FormEvent)
onchange = EventSpec('change', binding=wx.EVT_TEXT, kind=FormEvent)
# On Windows the ComboBox cannot be clicked in the designer, so enable the facade (a fake screenshot image):
ComboBox._meta.facade = True
if __name__ == "__main__":
import sys
# basic test until unit_test
app = wx.App(redirect=False)
frame = wx.Frame(None)
c = ComboBox(frame, name="cboTest",
items={'datum1': 'a', 'datum2':'b', 'datum3':'c'},
readonly='--readonly' in sys.argv,
)
c.append("d")
c.append("e", "datum1")
c.data_selection = "datum2"
from pprint import pprint
# assign some event handlers:
c.onclick = lambda event: pprint("selection: %s" % str(event.target.selection))
c.onchange = lambda event: pprint("text: %s" % event.target.text)
print c.items
frame.Show()
app.MainLoop()
|
1stsetup/Marlin
|
refs/heads/Michel
|
buildroot/share/PlatformIO/scripts/mks_robin.py
|
10
|
import os
Import("env")
# Relocate firmware from 0x08000000 to 0x08007000
for define in env['CPPDEFINES']:
if define[0] == "VECT_TAB_ADDR":
env['CPPDEFINES'].remove(define)
env['CPPDEFINES'].append(("VECT_TAB_ADDR", "0x08007000"))
custom_ld_script = os.path.abspath("buildroot/share/PlatformIO/ldscripts/mks_robin.ld")
for i, flag in enumerate(env["LINKFLAGS"]):
if "-Wl,-T" in flag:
env["LINKFLAGS"][i] = "-Wl,-T" + custom_ld_script
elif flag == "-T":
env["LINKFLAGS"][i + 1] = custom_ld_script
# Encrypt ${PROGNAME}.bin and save it as 'Robin.bin'
def encrypt(source, target, env):
import sys
key = [0xA3, 0xBD, 0xAD, 0x0D, 0x41, 0x11, 0xBB, 0x8D, 0xDC, 0x80, 0x2D, 0xD0, 0xD2, 0xC4, 0x9B, 0x1E, 0x26, 0xEB, 0xE3, 0x33, 0x4A, 0x15, 0xE4, 0x0A, 0xB3, 0xB1, 0x3C, 0x93, 0xBB, 0xAF, 0xF7, 0x3E]
firmware = open(target[0].path, "rb")
robin = open(target[0].dir.path +'/Robin.bin', "wb")
length = os.path.getsize(target[0].path)
position = 0
try:
while position < length:
byte = firmware.read(1)
if position >= 320 and position < 31040:
byte = chr(ord(byte) ^ key[position & 31])
if sys.version_info[0] > 2:
byte = bytes(byte, 'latin1')
robin.write(byte)
position += 1
finally:
firmware.close()
robin.close()
env.AddPostAction("$BUILD_DIR/${PROGNAME}.bin", encrypt)
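# Illustrative sketch (not part of the Marlin build scripts): the XOR cipher
# above is symmetric, so applying the same key again to 'Robin.bin' restores
# the original firmware bytes in the encrypted window (offsets 320..31039).
# The file paths passed to this helper are assumptions for demonstration only.
def decrypt_robin(src_path, dst_path):
    key = [0xA3, 0xBD, 0xAD, 0x0D, 0x41, 0x11, 0xBB, 0x8D, 0xDC, 0x80, 0x2D, 0xD0, 0xD2, 0xC4, 0x9B, 0x1E, 0x26, 0xEB, 0xE3, 0x33, 0x4A, 0x15, 0xE4, 0x0A, 0xB3, 0xB1, 0x3C, 0x93, 0xBB, 0xAF, 0xF7, 0x3E]
    with open(src_path, "rb") as src, open(dst_path, "wb") as dst:
        data = bytearray(src.read())
        for position in range(len(data)):
            if 320 <= position < 31040:
                data[position] ^= key[position & 31]
        dst.write(bytes(data))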
|
111pontes/ydk-py
|
refs/heads/master
|
cisco-ios-xe/ydk/models/cisco_ios_xe/_meta/_CISCO_IETF_FRR_MIB.py
|
1
|
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'CiscoIetfFrrMib.Cmplsfrrscalars.CmplsfrrconstprotectionmethodEnum' : _MetaInfoEnum('CmplsfrrconstprotectionmethodEnum', 'ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB',
{
'oneToOneBackup':'oneToOneBackup',
'facilityBackup':'facilityBackup',
}, 'CISCO-IETF-FRR-MIB', _yang_ns._namespaces['CISCO-IETF-FRR-MIB']),
'CiscoIetfFrrMib.Cmplsfrrscalars' : {
'meta_info' : _MetaInfoClass('CiscoIetfFrrMib.Cmplsfrrscalars',
False,
[
_MetaInfoClassMember('cmplsFrrActProtectedIfs', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Indicates the number of interfaces currently being protected
by the FRR feature if mplsFrrConstProtectionMethod is set to
facilityBackup(1), otherwise this value should return 0 to
indicate that LSPs traversing any interface may be protected.
This value MUST be less than or equal to mplsFrrConfIfs.
''',
'cmplsfrractprotectedifs',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrActProtectedLSPs', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Indicates the number of LSPs currently protected by
the FRR feature. If mplsFrrConstProtectionMethod is set
to facilityBackup(1)this object MUST return 0.
''',
'cmplsfrractprotectedlsps',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrActProtectedTuns', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Indicates the number of bypass tunnels indicated in
mplsFrrConfProtectingTuns whose operStatus
is up(1) indicating that they are currently protecting
facilities on this LSR using the FRR feature. This
object MUST return 0 if mplsFrrConstProtectionMethod
is set to facilityBackup(1).
''',
'cmplsfrractprotectedtuns',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrConfProtectingTuns', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Indicates the number of bypass tunnels configured to
protect facilities on this LSR using the FRR feature
if mplsFrrConstProtectionMethod is set to
facilityBackup(1), otherwise this value MUST return
0.
''',
'cmplsfrrconfprotectingtuns',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrConstProtectionMethod', REFERENCE_ENUM_CLASS, 'CmplsfrrconstprotectionmethodEnum' , 'ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB', 'CiscoIetfFrrMib.Cmplsfrrscalars.CmplsfrrconstprotectionmethodEnum',
[], [],
''' Indicates which protection method is to be used for fast
reroute. Some devices may require a reboot of their routing
processors if this variable is changed. An agent which
does not wish to reboot or modify its FRR mode
MUST return an inconsistentValue error. Please
consult the device's agent capability statement
for more details.
''',
'cmplsfrrconstprotectionmethod',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrDetourIncoming', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of detour LSPs entering the device if
mplsFrrConstProtectionMethod is set to oneToOneBackup(0), or
or 0 if mplsFrrConstProtectionMethod is set to
facilityBackup(1).
''',
'cmplsfrrdetourincoming',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrDetourOriginating', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of detour LSPs originating at this PLR if
mplsFrrConstProtectionMethod is set to oneToOneBackup(0).
This object MUST return 0 if the mplsFrrConstProtectionMethod
is set to facilityBackup(1).
''',
'cmplsfrrdetouroriginating',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrDetourOutgoing', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of detour LSPs leaving the device if
mplsFrrConstProtectionMethod is set to oneToOneBackup(0),
or 0 if mplsFrrConstProtectionMethod is set to
to facilityBackup(1).
''',
'cmplsfrrdetouroutgoing',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrLogTableCurrEntries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Indicates the current number of entries in the FRR log
table.
''',
'cmplsfrrlogtablecurrentries',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrLogTableMaxEntries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Indicates the maximum number of entries allowed in the FRR
Log table. Agents receiving SETs for values that cannot be
used must return an inconsistent value error. If a manager
sets this value to 0, this indicates that no logging should
take place by the agent.
If this value is returned as 0, this indicates
that no additional log entries will be added to the current
table either because the table has been completely
filled or logging has been disabled. However, agents
may wish to not delete existing entries in the log table
so that managers may review them in the future.
It is implied that when mplsFrrLogTableCurrEntries
has reached the value of this variable, that logging
entries may not continue to be added to the table,
although existing ones may remain. Furthermore, an
agent may begin to delete existing (perhaps the
oldest entries) entries to make room for new ones.
''',
'cmplsfrrlogtablemaxentries',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrNotifMaxRate', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' This variable indicates the number of milliseconds
that must elapse between notification emissions. If
events occur more rapidly, the implementation may
simply fail to emit these notifications during that
period, or may queue them until an appropriate
time in the future. A value of 0 means no minimum
elapsed period is specified.
''',
'cmplsfrrnotifmaxrate',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrNotifsEnabled', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Enables or disables FRR notifications defined in this MIB
module. Notifications are disabled by default.
''',
'cmplsfrrnotifsenabled',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrNumOfConfIfs', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Indicates the number of MPLS interfaces configured for
protection by the FRR feature, otherwise this value
MUST return 0 to indicate that LSPs traversing any
interface may be protected.
''',
'cmplsfrrnumofconfifs',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrSwitchover', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of tunnel instances that are switched over to their
corresponding detour LSP if mplsFrrConstProtectionMethod is set
to oneToOneBackup(0), or tunnels being switched over if
mplsFrrConstProtectionMethod is set to facilityBackup(1).
''',
'cmplsfrrswitchover',
'CISCO-IETF-FRR-MIB', False),
],
'CISCO-IETF-FRR-MIB',
'cmplsFrrScalars',
_yang_ns._namespaces['CISCO-IETF-FRR-MIB'],
'ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB'
),
},
'CiscoIetfFrrMib.Cmplsfrrconsttable.Cmplsfrrconstentry' : {
'meta_info' : _MetaInfoClass('CiscoIetfFrrMib.Cmplsfrrconsttable.Cmplsfrrconstentry',
False,
[
_MetaInfoClassMember('cmplsFrrConstIfIndex', ATTRIBUTE, 'int' , None, None,
[('0', '2147483647')], [],
''' Uniquely identifies an interface for which fast reroute is
configured. Tabular entries indexed with a 0 value apply to all
interfaces on this device for which the FRR feature can operate
on.
''',
'cmplsfrrconstifindex',
'CISCO-IETF-FRR-MIB', True),
_MetaInfoClassMember('cmplsFrrConstTunnelIndex', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Uniquely identifies a tunnel for which fast reroute is
requested.
''',
'cmplsfrrconsttunnelindex',
'CISCO-IETF-FRR-MIB', True),
_MetaInfoClassMember('cmplsFrrConstTunnelInstance', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Uniquely identifies an instance of this tunnel for which fast
reroute is requested.
''',
'cmplsfrrconsttunnelinstance',
'CISCO-IETF-FRR-MIB', True),
_MetaInfoClassMember('cmplsFrrConstBandwidth', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' This variable represents the bandwidth for detour LSPs of this
tunnel, in units of thousands of bits per second (Kbps).
''',
'cmplsfrrconstbandwidth',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrConstExclAllAffinity', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' A link satisfies the exclude-all constraint if and only if the
link contains none of the administrative groups specified in the
constraint.
''',
'cmplsfrrconstexclallaffinity',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrConstHoldingPrio', ATTRIBUTE, 'int' , None, None,
[('0', '7')], [],
''' Indicates the holding priority for detour LSP.
''',
'cmplsfrrconstholdingprio',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrConstHopLimit', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' The maximum number of hops that the detour LSP may traverse.
''',
'cmplsfrrconsthoplimit',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrConstInclAllAffinity', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' A link satisfies the include-all constraint if and only if the
link contains all of the administrative groups specified in the
constraint.
''',
'cmplsfrrconstinclallaffinity',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrConstInclAnyAffinity', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' A link satisfies the include-any constraint if and only if the
constraint is zero, or the link and the constraint have a
resource class in common.
''',
'cmplsfrrconstinclanyaffinity',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrConstNumProtectedTunOnIf', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of tunnels protected on this interface.
''',
'cmplsfrrconstnumprotectedtunonif',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrConstNumProtectingTunOnIf', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of backup tunnels protecting the specified
interface.
''',
'cmplsfrrconstnumprotectingtunonif',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrConstRowStatus', REFERENCE_ENUM_CLASS, 'RowstatusEnum' , 'ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowstatusEnum',
[], [],
''' This object is used to create, modify, and/or delete a row in
this table.
''',
'cmplsfrrconstrowstatus',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrConstSetupPrio', ATTRIBUTE, 'int' , None, None,
[('0', '7')], [],
''' Indicates the setup priority of detour LSP.
''',
'cmplsfrrconstsetupprio',
'CISCO-IETF-FRR-MIB', False),
],
'CISCO-IETF-FRR-MIB',
'cmplsFrrConstEntry',
_yang_ns._namespaces['CISCO-IETF-FRR-MIB'],
'ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB'
),
},
'CiscoIetfFrrMib.Cmplsfrrconsttable' : {
'meta_info' : _MetaInfoClass('CiscoIetfFrrMib.Cmplsfrrconsttable',
False,
[
_MetaInfoClassMember('cmplsFrrConstEntry', REFERENCE_LIST, 'Cmplsfrrconstentry' , 'ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB', 'CiscoIetfFrrMib.Cmplsfrrconsttable.Cmplsfrrconstentry',
[], [],
''' An entry in this table represents detour LSP or bypass tunnel
setup constraints for a tunnel instance to be protected by
detour LSPs or a tunnel. Agents must allow entries in this table
to be created only for tunnel instances that require fast-reroute.
Entries indexed with mplsFrrConstIfIndex set to 0 apply to all
interfaces on this device for which the FRR feature can operate
on.
''',
'cmplsfrrconstentry',
'CISCO-IETF-FRR-MIB', False),
],
'CISCO-IETF-FRR-MIB',
'cmplsFrrConstTable',
_yang_ns._namespaces['CISCO-IETF-FRR-MIB'],
'ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB'
),
},
'CiscoIetfFrrMib.Cmplsfrrlogtable.Cmplsfrrlogentry.CmplsfrrlogeventtypeEnum' : _MetaInfoEnum('CmplsfrrlogeventtypeEnum', 'ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB',
{
'other':'other',
'protected':'protected',
}, 'CISCO-IETF-FRR-MIB', _yang_ns._namespaces['CISCO-IETF-FRR-MIB']),
'CiscoIetfFrrMib.Cmplsfrrlogtable.Cmplsfrrlogentry' : {
'meta_info' : _MetaInfoClass('CiscoIetfFrrMib.Cmplsfrrlogtable.Cmplsfrrlogentry',
False,
[
_MetaInfoClassMember('cmplsFrrLogIndex', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Uniquely identifies a fast reroute event entry.
''',
'cmplsfrrlogindex',
'CISCO-IETF-FRR-MIB', True),
_MetaInfoClassMember('cmplsFrrLogEventDuration', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' This object describes the duration of this event.
''',
'cmplsfrrlogeventduration',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrLogEventReasonString', ATTRIBUTE, 'str' , None, None,
[(128, None)], [],
''' This object contains an implementation-specific explanation
of the event.
''',
'cmplsfrrlogeventreasonstring',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrLogEventTime', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' This object provides the amount of time ticks since this
                event occurred.
''',
'cmplsfrrlogeventtime',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrLogEventType', REFERENCE_ENUM_CLASS, 'CmplsfrrlogeventtypeEnum' , 'ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB', 'CiscoIetfFrrMib.Cmplsfrrlogtable.Cmplsfrrlogentry.CmplsfrrlogeventtypeEnum',
[], [],
''' This object describes what type of fast reroute event
                occurred.
''',
'cmplsfrrlogeventtype',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrLogInterface', ATTRIBUTE, 'int' , None, None,
[('0', '2147483647')], [],
''' This object indicates which interface was affected by this
FRR event. This value may be set to 0 if
mplsFrrConstProtectionMethod is set to oneToOneBackup(0).
''',
'cmplsfrrloginterface',
'CISCO-IETF-FRR-MIB', False),
],
'CISCO-IETF-FRR-MIB',
'cmplsFrrLogEntry',
_yang_ns._namespaces['CISCO-IETF-FRR-MIB'],
'ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB'
),
},
'CiscoIetfFrrMib.Cmplsfrrlogtable' : {
'meta_info' : _MetaInfoClass('CiscoIetfFrrMib.Cmplsfrrlogtable',
False,
[
_MetaInfoClassMember('cmplsFrrLogEntry', REFERENCE_LIST, 'Cmplsfrrlogentry' , 'ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB', 'CiscoIetfFrrMib.Cmplsfrrlogtable.Cmplsfrrlogentry',
[], [],
''' An entry in this table is created to describe one fast
reroute event. Entries in this table are only created and
destroyed by the agent implementation. The maximum number
of entries in this log is governed by the scalar.
''',
'cmplsfrrlogentry',
'CISCO-IETF-FRR-MIB', False),
],
'CISCO-IETF-FRR-MIB',
'cmplsFrrLogTable',
_yang_ns._namespaces['CISCO-IETF-FRR-MIB'],
'ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB'
),
},
'CiscoIetfFrrMib.Cmplsfrrfacroutedbtable.Cmplsfrrfacroutedbentry.CmplsfrrfacrouteprotectedtunstatusEnum' : _MetaInfoEnum('CmplsfrrfacrouteprotectedtunstatusEnum', 'ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB',
{
'active':'active',
'ready':'ready',
'partial':'partial',
}, 'CISCO-IETF-FRR-MIB', _yang_ns._namespaces['CISCO-IETF-FRR-MIB']),
'CiscoIetfFrrMib.Cmplsfrrfacroutedbtable.Cmplsfrrfacroutedbentry.CmplsfrrfacrouteprotectingtunprotectiontypeEnum' : _MetaInfoEnum('CmplsfrrfacrouteprotectingtunprotectiontypeEnum', 'ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB',
{
'linkProtection':'linkProtection',
'nodeProtection':'nodeProtection',
}, 'CISCO-IETF-FRR-MIB', _yang_ns._namespaces['CISCO-IETF-FRR-MIB']),
'CiscoIetfFrrMib.Cmplsfrrfacroutedbtable.Cmplsfrrfacroutedbentry' : {
'meta_info' : _MetaInfoClass('CiscoIetfFrrMib.Cmplsfrrfacroutedbtable.Cmplsfrrfacroutedbentry',
False,
[
_MetaInfoClassMember('cmplsFrrFacRouteProtectedIfIndex', ATTRIBUTE, 'int' , None, None,
[('1', '2147483647')], [],
''' Uniquely identifies the interface configured for FRR protection.
''',
'cmplsfrrfacrouteprotectedifindex',
'CISCO-IETF-FRR-MIB', True),
_MetaInfoClassMember('cmplsFrrFacRouteProtectingTunIndex', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Uniquely identifies the mplsTunnelEntry primary index for
the tunnel head interface designated to protect the
interface as specified in the mplsFrrFacRouteIfProtectedIndex
(and all of the tunnels using this interface).
''',
'cmplsfrrfacrouteprotectingtunindex',
'CISCO-IETF-FRR-MIB', True),
_MetaInfoClassMember('cmplsFrrFacRouteProtectedTunIndex', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Uniquely identifies an mplsTunnelEntry that is
being protected by FRR.
''',
'cmplsfrrfacrouteprotectedtunindex',
'CISCO-IETF-FRR-MIB', True),
_MetaInfoClassMember('cmplsFrrFacRouteProtectedTunInstance', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Uniquely identifies an mplsTunnelEntry that is
being protected by FRR.
''',
'cmplsfrrfacrouteprotectedtuninstance',
'CISCO-IETF-FRR-MIB', True),
_MetaInfoClassMember('cmplsFrrFacRouteProtectedTunIngressLSRId', ATTRIBUTE, 'str' , None, None,
[(4, None)], [],
''' Uniquely identifies an mplsTunnelEntry that is
being protected by FRR.
''',
'cmplsfrrfacrouteprotectedtuningresslsrid',
'CISCO-IETF-FRR-MIB', True),
_MetaInfoClassMember('cmplsFrrFacRouteProtectedTunEgressLSRId', ATTRIBUTE, 'str' , None, None,
[(4, None)], [],
''' Uniquely identifies an mplsTunnelEntry that is
being protected by FRR.
''',
'cmplsfrrfacrouteprotectedtunegresslsrid',
'CISCO-IETF-FRR-MIB', True),
_MetaInfoClassMember('cmplsFrrFacRouteProtectedTunStatus', REFERENCE_ENUM_CLASS, 'CmplsfrrfacrouteprotectedtunstatusEnum' , 'ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB', 'CiscoIetfFrrMib.Cmplsfrrfacroutedbtable.Cmplsfrrfacroutedbentry.CmplsfrrfacrouteprotectedtunstatusEnum',
[], [],
''' Specifies the state of the protected tunnel.
                active - This tunnel's label has been placed in the
LFIB and is ready to be applied to incoming
packets.
ready - This tunnel's label entry has been created but is
not yet in the LFIB.
                partial - This tunnel's label entry has not been fully
created.
''',
'cmplsfrrfacrouteprotectedtunstatus',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrFacRouteProtectingTunProtectionType', REFERENCE_ENUM_CLASS, 'CmplsfrrfacrouteprotectingtunprotectiontypeEnum' , 'ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB', 'CiscoIetfFrrMib.Cmplsfrrfacroutedbtable.Cmplsfrrfacroutedbentry.CmplsfrrfacrouteprotectingtunprotectiontypeEnum',
[], [],
''' Indicates type of the resource protection.
''',
'cmplsfrrfacrouteprotectingtunprotectiontype',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrFacRouteProtectingTunResvBw', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Specifies the amount of bandwidth in megabytes per second
that is actually reserved by the backup tunnel for
facility backup. This value is repeated here from the MPLS-
TE MIB because the tunnel entry will reveal the bandwidth
reserved by the signaling protocol, which is typically 0
for backup tunnels so as to not over-book bandwidth.
However, internal reservations are typically made on the
PLR, thus this value should be revealed here.
''',
'cmplsfrrfacrouteprotectingtunresvbw',
'CISCO-IETF-FRR-MIB', False),
],
'CISCO-IETF-FRR-MIB',
'cmplsFrrFacRouteDBEntry',
_yang_ns._namespaces['CISCO-IETF-FRR-MIB'],
'ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB'
),
},
'CiscoIetfFrrMib.Cmplsfrrfacroutedbtable' : {
'meta_info' : _MetaInfoClass('CiscoIetfFrrMib.Cmplsfrrfacroutedbtable',
False,
[
_MetaInfoClassMember('cmplsFrrFacRouteDBEntry', REFERENCE_LIST, 'Cmplsfrrfacroutedbentry' , 'ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB', 'CiscoIetfFrrMib.Cmplsfrrfacroutedbtable.Cmplsfrrfacroutedbentry',
[], [],
''' An entry in the mplsFrrDBTable represents a single protected
LSP, protected by a backup tunnel and defined for a specific
protected interface. Note that for brevity, managers should
consult the mplsTunnelTable present in the MPLS-TE MIB for
additional information about the protecting and protected
tunnels, and the ifEntry in the IF-MIB for the protected
interface.
''',
'cmplsfrrfacroutedbentry',
'CISCO-IETF-FRR-MIB', False),
],
'CISCO-IETF-FRR-MIB',
'cmplsFrrFacRouteDBTable',
_yang_ns._namespaces['CISCO-IETF-FRR-MIB'],
'ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB'
),
},
'CiscoIetfFrrMib' : {
'meta_info' : _MetaInfoClass('CiscoIetfFrrMib',
False,
[
_MetaInfoClassMember('cmplsFrrConstTable', REFERENCE_CLASS, 'Cmplsfrrconsttable' , 'ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB', 'CiscoIetfFrrMib.Cmplsfrrconsttable',
[], [],
''' This table shows detour setup constraints.
''',
'cmplsfrrconsttable',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrFacRouteDBTable', REFERENCE_CLASS, 'Cmplsfrrfacroutedbtable' , 'ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB', 'CiscoIetfFrrMib.Cmplsfrrfacroutedbtable',
[], [],
''' The mplsFrrFacRouteDBTable provides information about the
fast reroute database. Each entry belongs to an interface,
protecting backup tunnel and protected tunnel. MPLS
interfaces defined on this node are protected by backup
tunnels and are indexed by mplsFrrFacRouteProtectedIndex.
Backup tunnels defined to protect the tunnels traversing an
interface, and are indexed by
mplsFrrFacRouteProtectingTunIndex. Note that the tunnel
instance index is not required, since it is implied to be 0,
which indicates the tunnel head interface for the protecting
tunnel. The protecting tunnel is defined to exist on the PLR
in the FRR specification. Protected tunnels are the LSPs that
traverse the protected link. These LSPs are uniquely
identified by mplsFrrFacRouteProtectedTunIndex,
mplsFrrFacRouteProtectedTunInstance,
mplsFrrFacRouteProtectedTunIngressLSRId, and
mplsFrrFacRouteProtectedTunEgressLSRId.
''',
'cmplsfrrfacroutedbtable',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrLogTable', REFERENCE_CLASS, 'Cmplsfrrlogtable' , 'ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB', 'CiscoIetfFrrMib.Cmplsfrrlogtable',
[], [],
''' The fast reroute log table records fast reroute events such
as protected links going up or down or the FRR feature
kicking in.
''',
'cmplsfrrlogtable',
'CISCO-IETF-FRR-MIB', False),
_MetaInfoClassMember('cmplsFrrScalars', REFERENCE_CLASS, 'Cmplsfrrscalars' , 'ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB', 'CiscoIetfFrrMib.Cmplsfrrscalars',
[], [],
''' ''',
'cmplsfrrscalars',
'CISCO-IETF-FRR-MIB', False),
],
'CISCO-IETF-FRR-MIB',
'CISCO-IETF-FRR-MIB',
_yang_ns._namespaces['CISCO-IETF-FRR-MIB'],
'ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB'
),
},
}
_meta_table['CiscoIetfFrrMib.Cmplsfrrconsttable.Cmplsfrrconstentry']['meta_info'].parent =_meta_table['CiscoIetfFrrMib.Cmplsfrrconsttable']['meta_info']
_meta_table['CiscoIetfFrrMib.Cmplsfrrlogtable.Cmplsfrrlogentry']['meta_info'].parent =_meta_table['CiscoIetfFrrMib.Cmplsfrrlogtable']['meta_info']
_meta_table['CiscoIetfFrrMib.Cmplsfrrfacroutedbtable.Cmplsfrrfacroutedbentry']['meta_info'].parent =_meta_table['CiscoIetfFrrMib.Cmplsfrrfacroutedbtable']['meta_info']
_meta_table['CiscoIetfFrrMib.Cmplsfrrscalars']['meta_info'].parent =_meta_table['CiscoIetfFrrMib']['meta_info']
_meta_table['CiscoIetfFrrMib.Cmplsfrrconsttable']['meta_info'].parent =_meta_table['CiscoIetfFrrMib']['meta_info']
_meta_table['CiscoIetfFrrMib.Cmplsfrrlogtable']['meta_info'].parent =_meta_table['CiscoIetfFrrMib']['meta_info']
_meta_table['CiscoIetfFrrMib.Cmplsfrrfacroutedbtable']['meta_info'].parent =_meta_table['CiscoIetfFrrMib']['meta_info']
|
Yoric/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/sslutils/pregenerated.py
|
470
|
class PregeneratedSSLEnvironment(object):
"""SSL environment to use with existing key/certificate files
e.g. when running on a server with a public domain name
"""
ssl_enabled = True
def __init__(self, logger, host_key_path, host_cert_path,
ca_cert_path=None):
self._ca_cert_path = ca_cert_path
self._host_key_path = host_key_path
self._host_cert_path = host_cert_path
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
pass
def host_cert_path(self, hosts):
"""Return the key and certificate paths for the host"""
return self._host_key_path, self._host_cert_path
def ca_cert_path(self):
"""Return the certificate path of the CA that signed the
host certificates, or None if that isn't known"""
return self._ca_cert_path
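# Illustrative usage sketch (not part of the module above); the logger name
# and certificate paths are placeholders, not values shipped with the harness.
if __name__ == "__main__":
    import logging
    env = PregeneratedSSLEnvironment(logging.getLogger("sslutils"),
                                     "/etc/ssl/private/wpt.key",
                                     "/etc/ssl/certs/wpt.pem")
    with env as ssl_env:
        key_path, cert_path = ssl_env.host_cert_path(["web-platform.test"])
        print("%s %s" % (key_path, cert_path))
        print(ssl_env.ca_cert_path())  # None unless a CA certificate was given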
|
gorjuce/odoo
|
refs/heads/8.0
|
addons/payment_transfer/models/__init__.py
|
437
|
# -*- coding: utf-8 -*-
import payment_acquirer
|
pocketbook-free/kernel_613
|
refs/heads/master
|
scripts/tracing/draw_functrace.py
|
14679
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions' names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time but not too much; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
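# Illustrative note (not part of the original script): parseLine expects raw
# function-tracer records shaped like the made-up example below and returns
# the captured timestamp, callee and caller, e.g.
#   <idle>-0     [001]  1234.567890: do_IRQ <-ret_from_intr
# would yield ('1234.567890', 'do_IRQ', 'ret_from_intr').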
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
bop/rango
|
refs/heads/master
|
lib/python2.7/site-packages/django/contrib/messages/tests/middleware.py
|
556
|
from django import http
from django.contrib.messages.middleware import MessageMiddleware
from django.utils import unittest
class MiddlewareTest(unittest.TestCase):
def setUp(self):
self.middleware = MessageMiddleware()
def test_response_without_messages(self):
"""
Makes sure that the response middleware is tolerant of messages not
existing on request.
"""
request = http.HttpRequest()
response = http.HttpResponse()
self.middleware.process_response(request, response)
|
mahak/ansible
|
refs/heads/devel
|
hacking/build_library/build_ansible/errors.py
|
63
|
# coding: utf-8
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class DependencyError(Exception):
"""A dependency was unmet"""
class MissingUserInput(Exception):
"""The user failed to provide input (via cli arg or interactively"""
class InvalidUserInput(Exception):
"""The user provided invalid input"""
|
sklnet/openhdf-enigma2
|
refs/heads/master
|
lib/python/Plugins/Extensions/GraphMultiEPG/plugin.py
|
21
|
from Plugins.Plugin import PluginDescriptor
from GraphMultiEpg import GraphMultiEPG
from Screens.ChannelSelection import BouquetSelector
from enigma import eServiceCenter, eServiceReference
from ServiceReference import ServiceReference
from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.MenuList import MenuList
from Components.config import config
Session = None
Servicelist = None
bouquetSel = None
epg_bouquet = None
epg = None
class SelectBouquet(Screen):
skin = """<screen name="SelectBouquet" position="center,center" size="300,240" title="Select a bouquet">
<widget name="menu" position="10,10" size="290,225" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session, bouquets, curbouquet, direction, enableWrapAround=True):
Screen.__init__(self, session)
self["actions"] = ActionMap(["OkCancelActions", "EPGSelectActions"],
{
"ok": self.okbuttonClick,
"cancel": self.cancelClick,
"nextBouquet": self.up,
"prevBouquet": self.down
})
entrys = [ (x[0], x[1]) for x in bouquets ]
self["menu"] = MenuList(entrys, enableWrapAround)
idx = 0
for x in bouquets:
if x[1] == curbouquet:
break
idx += 1
self.idx = idx
self.dir = direction
self.onShow.append(self.__onShow)
def __onShow(self):
self["menu"].moveToIndex(self.idx)
if self.dir == -1:
self.down()
else:
self.up()
def getCurrent(self):
cur = self["menu"].getCurrent()
return cur and cur[1]
def okbuttonClick(self):
self.close(self.getCurrent())
def up(self):
self["menu"].up()
def down(self):
self["menu"].down()
def cancelClick(self):
self.close(None)
def zapToService(service, preview = False, zapback = False):
if Servicelist.startServiceRef is None:
Servicelist.startServiceRef = Session.nav.getCurrentlyPlayingServiceReference()
if not service is None:
if not preview and not zapback:
if Servicelist.getRoot() != epg_bouquet:
Servicelist.clearPath()
if Servicelist.bouquet_root != epg_bouquet:
Servicelist.enterPath(Servicelist.bouquet_root)
Servicelist.enterPath(epg_bouquet)
Servicelist.setCurrentSelection(service)
if not zapback or preview:
Servicelist.zap(not preview, preview)
if (Servicelist.dopipzap or zapback) and not preview:
Servicelist.zapBack()
if not preview:
Servicelist.startServiceRef = None
Servicelist.startRoot = None
def getBouquetServices(bouquet):
services = [ ]
Servicelist = eServiceCenter.getInstance().list(bouquet)
if not Servicelist is None:
while True:
service = Servicelist.getNext()
if not service.valid(): #check if end of list
break
if service.flags & (eServiceReference.isDirectory | eServiceReference.isMarker): #ignore non playable services
continue
services.append(ServiceReference(service))
return services
def cleanup():
global Session
Session = None
global Servicelist
Servicelist = None
global bouquets
bouquets = None
global epg_bouquet
epg_bouquet = None
global epg
epg = None
def closed(ret=False):
cleanup()
def onSelectBouquetClose(bouquet):
if not bouquet is None:
services = getBouquetServices(bouquet)
if len(services):
global epg_bouquet
epg_bouquet = bouquet
epg.setServices(services)
epg.setTitle(ServiceReference(epg_bouquet).getServiceName())
def changeBouquetCB(direction, epgcall):
global epg
epg = epgcall
Session.openWithCallback(onSelectBouquetClose, SelectBouquet, bouquets, epg_bouquet, direction)
def main(session, servicelist = None, **kwargs):
global Session
Session = session
global Servicelist
Servicelist = servicelist
global bouquets
bouquets = Servicelist and Servicelist.getBouquetList()
global epg_bouquet
epg_bouquet = Servicelist and Servicelist.getRoot()
runGraphMultiEpg()
def runGraphMultiEpg():
global Servicelist
global bouquets
global epg_bouquet
if epg_bouquet is not None:
if len(bouquets) > 1 :
cb = changeBouquetCB
else:
cb = None
services = getBouquetServices(epg_bouquet)
Session.openWithCallback(reopen, GraphMultiEPG, services, zapToService, cb, ServiceReference(epg_bouquet).getServiceName())
def reopen(answer):
if answer is None:
runGraphMultiEpg()
else:
closed(answer)
def Plugins(**kwargs):
name = _("Graphical Multi EPG")
    descr = _("A graphical EPG for all services of a specific bouquet")
list = [(PluginDescriptor(name=name, description=descr, where = PluginDescriptor.WHERE_EVENTINFO, needsRestart = False, fnc=main))]
if config.misc.graph_mepg.extension_menu.value:
list.append(PluginDescriptor(name=name, description=descr, where = PluginDescriptor.WHERE_EXTENSIONSMENU, needsRestart = False, fnc=main))
return list
|
fabianofranz/docker-registry
|
refs/heads/master
|
docker_registry/lib/rqueue.py
|
35
|
# -*- coding: utf-8 -*-
# this module is a slight modification of Ted Nyman's QR
# https://raw.github.com/tnm/qr/master/qr.py
import logging
from docker_registry.core import compat
json = compat.json
class NullHandler(logging.Handler):
"""A logging handler that discards all logging records."""
def emit(self, record):
pass
# Clients can add handlers if they are interested.
log = logging.getLogger('qr')
log.addHandler(NullHandler())
class worker(object):
def __init__(self, q, *args, **kwargs):
self.q = q
self.err = kwargs.get('err', None)
self.args = args
self.kwargs = kwargs
def __call__(self, f):
def wrapped():
while True:
# Blocking pop
next = self.q.pop(block=True)
if not next:
continue
try:
# Try to execute the user's callback.
f(next, *self.args, **self.kwargs)
except Exception as e:
try:
# Failing that, let's call the user's
# err-back, which we should keep from
# ever throwing an exception
self.err(e, *self.args, **self.kwargs)
except Exception:
pass
return wrapped
class BaseQueue(object):
"""Base functionality common to queues."""
def __init__(self, r_conn, key, **kwargs):
self.serializer = json
self.redis = r_conn
self.key = key
def __len__(self):
"""Return the length of the queue."""
return self.redis.llen(self.key)
def __getitem__(self, val):
"""Get a slice or a particular index."""
try:
slice = self.redis.lrange(self.key, val.start, val.stop - 1)
return [self._unpack(i) for i in slice]
except AttributeError:
return self._unpack(self.redis.lindex(self.key, val))
except Exception as e:
log.error('Get item failed ** %s' % repr(e))
return None
def _pack(self, val):
"""Prepares a message to go into Redis."""
return self.serializer.dumps(val, 1)
def _unpack(self, val):
"""Unpacks a message stored in Redis."""
try:
return self.serializer.loads(val)
except TypeError:
return None
def dump(self, fobj):
"""Destructively dump the contents of the queue into fp."""
next = self.redis.rpop(self.key)
while next:
fobj.write(next)
next = self.redis.rpop(self.key)
def load(self, fobj):
"""Load the contents of the provided fobj into the queue."""
try:
while True:
val = self._pack(self.serializer.load(fobj))
self.redis.lpush(self.key, val)
except Exception:
return
def dumpfname(self, fname, truncate=False):
"""Destructively dump the contents of the queue into fname."""
if truncate:
with file(fname, 'w+') as f:
self.dump(f)
else:
with file(fname, 'a+') as f:
self.dump(f)
def loadfname(self, fname):
"""Load the contents of the contents of fname into the queue."""
with file(fname) as f:
self.load(f)
def extend(self, vals):
"""Extends the elements in the queue."""
with self.redis.pipeline(transaction=False) as pipe:
for val in vals:
pipe.lpush(self.key, self._pack(val))
pipe.execute()
def peek(self):
"""Look at the next item in the queue."""
return self[-1]
def elements(self):
"""Return all elements as a Python list."""
return [self._unpack(o) for o in self.redis.lrange(self.key, 0, -1)]
def elements_as_json(self):
"""Return all elements as JSON object."""
        return json.dumps(self.elements())
def clear(self):
"""Removes all the elements in the queue."""
self.redis.delete(self.key)
class CappedCollection(BaseQueue):
"""a bounded queue
Implements a capped collection (the collection never
gets larger than the specified size).
"""
def __init__(self, r_conn, key, size, **kwargs):
BaseQueue.__init__(self, r_conn, key, **kwargs)
self.size = size
def push(self, element):
size = self.size
with self.redis.pipeline() as pipe:
# ltrim is zero-indexed
val = self._pack(element)
pipe = pipe.lpush(self.key, val).ltrim(self.key, 0, size - 1)
pipe.execute()
def extend(self, vals):
"""Extends the elements in the queue."""
with self.redis.pipeline() as pipe:
for val in vals:
pipe.lpush(self.key, self._pack(val))
pipe.ltrim(self.key, 0, self.size - 1)
pipe.execute()
def pop(self, block=False):
if not block:
popped = self.redis.rpop(self.key)
else:
queue, popped = self.redis.brpop(self.key)
log.debug('Popped ** %s ** from key ** %s **' % (popped, self.key))
return self._unpack(popped)
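# Illustrative usage sketch (not part of the module above): assumes a Redis
# server reachable on localhost:6379; the key name and the sample payloads
# are invented for demonstration only.
if __name__ == '__main__':
    import redis
    r_conn = redis.StrictRedis(host='localhost', port=6379)
    events = CappedCollection(r_conn, 'demo:events', size=100)
    events.extend([{'action': 'push', 'repo': 'library/ubuntu'},
                   {'action': 'pull', 'repo': 'library/debian'}])
    print(events.elements())  # newest first, never more than `size` entries
    print(events.pop())       # non-blocking pop returns the oldest element
    # worker() turns a callback into an infinite blocking consumer loop;
    # calling handle() here would block waiting for new elements.
    @worker(events)
    def handle(event):
        log.info('handling %s', event)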
|
rollbar/pyrollbar
|
refs/heads/master
|
rollbar/test/starlette_tests/__init__.py
|
3
|
import sys
import unittest2
def _load_tests(loader, tests, pattern):
return unittest2.TestSuite()
if sys.version_info < (3, 6):
load_tests = _load_tests
|
geraintpalmer/ASQ
|
refs/heads/master
|
asq/tests/test_data_record.py
|
2
|
import unittest
import asq
class TestDataRecord(unittest.TestCase):
def test_init_method(self):
r = asq.DataRecord(2, 3, 2, 8, 1, 2)
self.assertEqual(r.arrival_date, 2)
self.assertEqual(r.wait, 0)
self.assertEqual(r.service_start_date, 2)
self.assertEqual(r.service_time, 3)
self.assertEqual(r.service_end_date, 5)
self.assertEqual(r.blocked, 3)
self.assertEqual(r.exit_date, 8)
self.assertEqual(r.node, 1)
self.assertEqual(r.customer_class, 2)
r = asq.DataRecord(5.7, 2.1, 8.2, 10.3, 1, 3)
self.assertEqual(r.arrival_date, 5.7)
self.assertEqual(round(r.wait, 1), 2.5)
self.assertEqual(r.service_start_date, 8.2)
self.assertEqual(r.service_time, 2.1)
self.assertEqual(round(r.service_end_date, 1), 10.3)
self.assertEqual(round(r.blocked, 1), 0.0)
self.assertEqual(r.exit_date, 10.3)
self.assertEqual(r.node, 1)
self.assertEqual(r.customer_class, 3)
|
rexshihaoren/algorithms
|
refs/heads/master
|
docs/conf.py
|
5
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# algorithms documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 8 22:36:00 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'algorithms'
copyright = '2015, Nic Young'
author = 'Nic Young'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'algorithmsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'algorithms.tex', 'algorithms Documentation',
'Nic Young', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'algorithms', 'algorithms Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'algorithms', 'algorithms Documentation',
author, 'algorithms', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
strint/tensorflow
|
refs/heads/master
|
tensorflow/python/debug/wrappers/framework.py
|
4
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Framework of debug wrapper sessions.
A debug wrapper session is a wrapper around a TensorFlow Python Session.
The wrapper preserves the Session interface, most importantly the run() method,
while providing abilities to:
a) Intercept a run() call to a wrapped session and insert debug tensor watches
according to externally-specified debug URLs.
b) Release control to an external (i.e., non-Session) object before and after
the run() call, so that the external object can perform actions such as
launching a UI to let users inspect the intermediate tensors and partition
graphs from the run() call.
c) (To be implemented) Intercept a run() call and give control to DebugStepper
to let it perform stepping / continuing-to actions on the graph.
b) (To be implemented in a future CL) Enter an instruction loop to let an
external object (e.g., remote client) launch run() and cont() calls
remotely.
*** The lifetime of a debug wrapper session: ***
1) The wrapper session is created by calling the constructor with a
wrapped (normal) session as the argument:
wrapper = FooDebugWrapperSession(sess)
wherein FooDebugWrapperSession is a concrete subclass implementing the
abstract BaseDebugWrapperSession class below.
2) Near the end of the constructor call, the on_session_init() callback is
   invoked, with an OnSessionInitRequest object as the argument. The object
carries the wrapped (normal) session object.
3) The callback handles the request and returns an OnSessionInitResponse
object with an action field, directing the wrapper session what to do next.
If the action field in the OnSessionInitResponse is PROCEED, the constructor
returns. Control is released back to the caller of the constructor, which can
invoke run() method of wrapper session with the same syntax as a non-wrapped
session, e.g.,:
wrapper.run(fetches, feed_dict=feeds, options=run_options)
Below, A1 - A2 is the lifetime of a wrapper run() call if the action is
PROCEED:
A1) Right at the start of each run() call, the on_run_start() callback is
invoked, with an OnRunStartRequest object carrying information such as
the fetches, the feed dict, the run options and run metadata used in
    this run call, along with a count of how many run calls have occurred
on this wrapper session. The callback then returns an OnRunStartResponse
object, of which the action field directs what the wrapper session
    will actually do with the run() call.
If the action is DEBUG_RUN, a debugged (tensor-watched) run will ensue,
with the debug URLs supplied in the debug_urls field of the response.
These can be file:// or grpc:// URLs, for example.
If the action is NON_DEBUG_RUN, a non-debug (normal) run will ensue.
If the action is INVOKE_STEPPER, no run() call will be issued to the
wrapped session. But instead, a DebugStepper (i.e., "continuation
debugger") will be used to perform stepping / continue-to actions on
the graph.
TODO(cais): The event loop for the DebugStepper will request additional
callbacks including on_cont_start() and on_cont_end(). Add those.
A2) Right before the run() returns, the on_run_end() callback is invoked,
with an OnRunEndRequest object as the argument, which carries information
    including the actual action performed in the wrapper run() call and the
run_metadata from the run() call.
However, if the action field in OnSessionInitResponse is
REMOTE_INSTR_LOOP, the constructor will automatically invoke an instruction loop
that gives the control to a remote caller.
In the remote instruction loop, the following steps will happen:
B1) Callback on_instr_start() is invoked. The callback will return an
OnInstrStartResponse object with an action field which can order one of
the following actions:
i) a run() call with fetches, feeds and debug_urls specified.
ii) a DebugStepper cont() call with target specified.
iii) value overrides in the cached tensors from the DebugStepper.
iv) exit the instruction loop.
B2) The wrapper session carries out the action specified above.
B3) If still in the instruction loop, the wrapper session invokes the
on_instr_end() callback. After the on_instr_end() callback returns, jump
back to B1.
TODO(cais): Implement the instruction loop in B1 - B3.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import stepper
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
# Helper function.
def _check_type(obj, expected_type):
"""Check if an object is of the expected type.
Args:
obj: The object being checked.
expected_type: (type) The expected type of obj.
Raises:
TypeError: If obj is not an instance of expected_type.
"""
if not isinstance(obj, expected_type):
raise TypeError("Expected type %s; got type %s" %
(expected_type, type(obj)))
class OnSessionInitRequest(object):
"""Request to an on-session-init callback.
This callback is invoked during the __init__ call to a debug-wrapper session.
"""
def __init__(self, sess):
"""Constructor.
Args:
sess: A tensorflow Session object.
"""
_check_type(sess, session.BaseSession)
self.session = sess
class OnSessionInitAction(object):
"""Enum-like values for possible action to take on session init."""
# Proceed, without special actions, in the wrapper session initialization.
# What action the wrapper session performs next is determined by the caller
# of the wrapper session. E.g., it can call run().
PROCEED = "proceed"
# Instead of letting the caller of the wrapper session determine what actions
# the wrapper session will perform next, enter a loop to receive instructions
# from a remote client.
# For example, TensorBoard visual debugger can use this action so that it can
# launch session.run() calls remotely.
REMOTE_INSTR_LOOP = "remote_instr_loop"
class OnSessionInitResponse(object):
"""Response from an on-session-init callback."""
def __init__(self, action):
"""Constructor.
Args:
action: (`OnSessionInitAction`) Debugger action to take on session init.
"""
_check_type(action, str)
self.action = action
class OnRunStartRequest(object):
"""Request to an on-run-start callback.
This callback is invoked during a run() call of the debug-wrapper
session, immediately after the run() call counter is incremented.
"""
def __init__(self, fetches, feed_dict, run_options, run_metadata,
run_call_count):
"""Constructor of `OnRunStartRequest`.
Args:
fetches: Fetch targets of the run() call.
feed_dict: The feed dictionary to the run() call.
run_options: RunOptions input to the run() call.
run_metadata: RunMetadata input to the run() call.
The above four arguments are identical to the input arguments to the
run() method of a non-wrapped TensorFlow session.
run_call_count: 1-based count of how many run calls (including this one)
        have been invoked.
"""
self.fetches = fetches
self.feed_dict = feed_dict
self.run_options = run_options
self.run_metadata = run_metadata
self.run_call_count = run_call_count
class OnRunStartAction(object):
"""Enum-like values for possible action to take on start of a run() call."""
# Run once with debug tensor-watching.
DEBUG_RUN = "debug_run"
# Run without debug tensor-watching.
NON_DEBUG_RUN = "non_debug_run"
# Instead of running the fetches as a whole, as would normally happen, invoke
# the (to-be-implemented) debug stepper.
# TODO(cais): Remove "to-be-implemented".
INVOKE_STEPPER = "invoke_stepper"
class OnRunStartResponse(object):
"""Request from an on-run-start callback.
The caller of the callback can use this response object to specify what
action the debug-wrapper session actually takes on the run() call.
"""
def __init__(self,
action,
debug_urls,
debug_ops="DebugIdentity",
node_name_regex_whitelist=None,
op_type_regex_whitelist=None,
tensor_dtype_regex_whitelist=None,
tolerate_debug_op_creation_failures=False):
"""Constructor of `OnRunStartResponse`.
Args:
action: (`OnRunStartAction`) the action actually taken by the wrapped
session for the run() call.
debug_urls: (`list` of `str`) debug_urls used in watching the tensors
during the run() call.
debug_ops: (`str` or `list` of `str`) Debug op(s) to be used by the
debugger.
node_name_regex_whitelist: Regular-expression whitelist for node
name.
op_type_regex_whitelist: Regular-expression whitelist for op type.
tensor_dtype_regex_whitelist: Regular-expression whitelist for tensor
dtype.
tolerate_debug_op_creation_failures: Whether debug op creation failures
are to be tolerated.
"""
_check_type(action, str)
self.action = action
_check_type(debug_urls, list)
self.debug_urls = debug_urls
self.debug_ops = debug_ops
self.node_name_regex_whitelist = node_name_regex_whitelist
self.op_type_regex_whitelist = op_type_regex_whitelist
self.tensor_dtype_regex_whitelist = tensor_dtype_regex_whitelist
self.tolerate_debug_op_creation_failures = (
tolerate_debug_op_creation_failures)
class OnRunEndRequest(object):
"""Request to an on-run-end callback.
The callback is invoked immediately before the wrapped run() call ends.
"""
def __init__(self,
performed_action,
run_metadata=None,
client_graph_def=None,
tf_error=None):
"""Constructor for `OnRunEndRequest`.
Args:
performed_action: (`OnRunStartAction`) Actually-performed action by the
debug-wrapper session.
run_metadata: run_metadata output from the run() call (if any).
client_graph_def: (GraphDef) GraphDef from the client side, i.e., from
the python front end of TensorFlow. Can be obtained with
session.graph.as_graph_def().
tf_error: (errors.OpError subtypes) TensorFlow OpError that occurred
during the run (if any).
"""
_check_type(performed_action, str)
self.performed_action = performed_action
if run_metadata is not None:
_check_type(run_metadata, config_pb2.RunMetadata)
self.run_metadata = run_metadata
self.client_graph_def = client_graph_def
self.tf_error = tf_error
class OnRunEndResponse(object):
"""Response from an on-run-end callback."""
def __init__(self):
# Currently only a placeholder.
pass
class BaseDebugWrapperSession(session.SessionInterface):
"""Base class of debug-wrapper session classes.
Concrete classes that inherit from this class need to implement the abstract
methods such as on_session_init, on_run_start and on_run_end.
"""
  # TODO(cais): Add on_cont_start and on_cont_end callbacks once the stepper
  # is available.
def __init__(self, sess):
"""Constructor of `BaseDebugWrapperSession`.
Args:
sess: An (unwrapped) TensorFlow session instance.
Raises:
ValueError: On invalid `OnSessionInitAction` value.
NotImplementedError: If a non-DirectSession sess object is received.
"""
_check_type(sess, session.BaseSession)
# TODO(cais): Remove this check once tfdbg is integrated with GrpcSession.
if sess.sess_str:
raise NotImplementedError(
"Non-DirectSession support is not available from TensorFlow "
"Debugger yet (sess_str=%s)" % sess.sess_str)
# The session being wrapped.
self._sess = sess
# Keeps track of number of run calls that have been performed on this
# debug-wrapper session.
self._run_call_count = 0
# Invoke on-session-init callback.
response = self.on_session_init(OnSessionInitRequest(self._sess))
_check_type(response, OnSessionInitResponse)
if response.action == OnSessionInitAction.PROCEED:
pass
elif response.action == OnSessionInitAction.REMOTE_INSTR_LOOP:
# TODO(cais): Implement REMOTE_INSTR_LOOP
raise NotImplementedError(
"OnSessionInitAction REMOTE_INSTR_LOOP has not been "
"implemented.")
else:
raise ValueError(
"Invalid OnSessionInitAction value: %s" % response.action)
@property
def graph(self):
return self._sess.graph
@property
def graph_def(self):
return self._sess.graph_def
@property
def sess_str(self):
return self._sess.sess_str
@property
def session(self):
return self._sess
def as_default(self):
return ops.default_session(self)
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Wrapper around Session.run() that inserts tensor watch options.
Args:
fetches: Same as the `fetches` arg to regular `Session.run()`.
feed_dict: Same as the `feed_dict` arg to regular `Session.run()`.
options: Same as the `options` arg to regular `Session.run()`.
run_metadata: Same as the `run_metadata` arg to regular `Session.run()`.
Returns:
Simply forwards the output of the wrapped `Session.run()` call.
Raises:
ValueError: On invalid `OnRunStartAction` value.
"""
self._run_call_count += 1
# Invoke on-run-start callback and obtain response.
run_start_resp = self.on_run_start(
OnRunStartRequest(fetches, feed_dict, options, run_metadata,
self._run_call_count))
_check_type(run_start_resp, OnRunStartResponse)
if run_start_resp.action == OnRunStartAction.DEBUG_RUN:
      # Decorate RunOptions to fill in debugger tensor watch specifications.
decorated_run_options = options or config_pb2.RunOptions()
run_metadata = run_metadata or config_pb2.RunMetadata()
self._decorate_run_options(
decorated_run_options,
run_start_resp.debug_urls,
debug_ops=run_start_resp.debug_ops,
node_name_regex_whitelist=run_start_resp.node_name_regex_whitelist,
op_type_regex_whitelist=run_start_resp.op_type_regex_whitelist,
tensor_dtype_regex_whitelist=(
run_start_resp.tensor_dtype_regex_whitelist),
tolerate_debug_op_creation_failures=(
run_start_resp.tolerate_debug_op_creation_failures))
# Invoke the run() method of the wrapped Session. Catch any TensorFlow
# runtime errors.
tf_error = None
try:
retvals = self._sess.run(fetches,
feed_dict=feed_dict,
options=decorated_run_options,
run_metadata=run_metadata)
except errors.OpError as op_error:
tf_error = op_error
retvals = op_error
run_end_req = OnRunEndRequest(
run_start_resp.action,
run_metadata=run_metadata,
client_graph_def=self._sess.graph.as_graph_def(),
tf_error=tf_error)
elif (run_start_resp.action == OnRunStartAction.NON_DEBUG_RUN or
run_start_resp.action == OnRunStartAction.INVOKE_STEPPER):
if run_start_resp.action == OnRunStartAction.INVOKE_STEPPER:
with stepper.NodeStepper(
self._sess, fetches, feed_dict) as node_stepper:
retvals = self.invoke_node_stepper(
node_stepper, restore_variable_values_on_exit=True)
      else:
        # Invoke run() method of the wrapped session.
        retvals = self._sess.run(
            fetches,
            feed_dict=feed_dict,
            options=options,
            run_metadata=run_metadata)
# Prepare arg for the on-run-end callback.
run_end_req = OnRunEndRequest(run_start_resp.action)
else:
raise ValueError(
"Invalid OnRunStartAction value: %s" % run_start_resp.action)
# Invoke on-run-end callback and obtain response.
run_end_resp = self.on_run_end(run_end_req)
_check_type(run_end_resp, OnRunEndResponse)
# Currently run_end_resp is only a placeholder. No action is taken on it.
return retvals
def partial_run_setup(self, fetches, feeds=None):
"""Sets up the feeds and fetches for partial runs in the session."""
raise NotImplementedError(
"partial_run_setup is not implemented for debug-wrapper sessions.")
def partial_run(self, handle, fetches, feed_dict=None):
raise NotImplementedError(
"partial_run is not implemented for debug-wrapper sessions.")
def _decorate_run_options(self,
run_options,
debug_urls,
debug_ops="DebugIdentity",
node_name_regex_whitelist=None,
op_type_regex_whitelist=None,
tensor_dtype_regex_whitelist=None,
tolerate_debug_op_creation_failures=False):
"""Modify a RunOptions object for debug tensor watching.
Specifies request for outputting partition graphs. Adds
debug_tensor_watch_opts with proper debug URLs.
Args:
      run_options: (RunOptions) the RunOptions object to be modified in place.
debug_urls: (list of str) debug URLs to be entered in run_options.
debug_tensor_watch_opts.
debug_ops: (str or list of str) debug op(s) to be used by the debugger.
node_name_regex_whitelist: Regular-expression whitelist for node
name.
op_type_regex_whitelist: Regular-expression whitelist for op type.
tensor_dtype_regex_whitelist: Regular-expression whitelist for tensor
dtype.
tolerate_debug_op_creation_failures: Whether debug op creation failures
are to be tolerated.
"""
run_options.output_partition_graphs = True
debug_utils.watch_graph(
run_options,
self._sess.graph,
debug_urls=debug_urls,
debug_ops=debug_ops,
node_name_regex_whitelist=node_name_regex_whitelist,
op_type_regex_whitelist=op_type_regex_whitelist,
tensor_dtype_regex_whitelist=tensor_dtype_regex_whitelist,
tolerate_debug_op_creation_failures=tolerate_debug_op_creation_failures)
@abc.abstractmethod
def on_session_init(self, request):
"""Callback invoked during construction of the debug-wrapper session.
This is a blocking callback.
The invocation happens right before the constructor ends.
Args:
request: (`OnSessionInitRequest`) callback request carrying information
such as the session being wrapped.
Returns:
An instance of `OnSessionInitResponse`.
"""
@abc.abstractmethod
def on_run_start(self, request):
"""Callback invoked on run() calls to the debug-wrapper session.
This is a blocking callback.
The invocation happens after the wrapper's run() call is entered,
    after the run call counter has been incremented.
Args:
request: (`OnRunStartRequest`) callback request object carrying
information about the run call such as the fetches, feed dict, run
options, run metadata, and how many `run()` calls to this wrapper
session have occurred.
Returns:
An instance of `OnRunStartResponse`, carrying information to
1) direct the wrapper session to perform a specified action (e.g., run
with or without debug tensor watching, invoking the stepper.)
2) debug URLs used to watch the tensors.
"""
@abc.abstractmethod
def on_run_end(self, request):
"""Callback invoked on run() calls to the debug-wrapper session.
This is a blocking callback.
The invocation happens right before the wrapper exits its run() call.
Args:
request: (`OnRunEndRequest`) callback request object carrying information
such as the actual action performed by the session wrapper for the
run() call.
Returns:
      An instance of `OnRunEndResponse`.
"""
def __enter__(self):
return self._sess.__enter__()
def __exit__(self, exec_type, exec_value, exec_tb):
self._sess.__exit__(exec_type, exec_value, exec_tb)
def close(self):
self._sess.close()
# TODO(cais): Add _node_name_regex_whitelist and
# _node_op_type_regex_whitelist.
@abc.abstractmethod
def invoke_node_stepper(self,
node_stepper,
restore_variable_values_on_exit=True):
"""Callback invoked when the client intends to step through graph nodes.
Args:
node_stepper: (stepper.NodeStepper) An instance of NodeStepper to be used
in this stepping session.
restore_variable_values_on_exit: (bool) Whether any variables whose values
have been altered during this node-stepper invocation should be restored
to their old values when this invocation ends.
Returns:
The same return values as the `Session.run()` call on the same fetches as
the NodeStepper.
"""
class WatchOptions(object):
"""Type for return values of watch_fn."""
def __init__(self,
debug_ops=None,
node_name_regex_whitelist=None,
op_type_regex_whitelist=None,
tensor_dtype_regex_whitelist=None,
tolerate_debug_op_creation_failures=False):
"""Constructor of WatchOptions: Debug watch options.
Used as return values of `watch_fn`s.
Args:
debug_ops: (`str` or `list of str`) Debug ops to be used.
node_name_regex_whitelist: Regular-expression whitelist for node_name,
e.g., `"(weight_[0-9]+|bias_.*)"`
op_type_regex_whitelist: Regular-expression whitelist for the op type of
nodes, e.g., `"(Variable|Add)"`.
If both `node_name_regex_whitelist` and `op_type_regex_whitelist`
are set, the two filtering operations will occur in a logical `AND`
relation. In other words, a node will be included if and only if it
hits both whitelists.
      tensor_dtype_regex_whitelist: Regular-expression whitelist for Tensor
data type, e.g., `"^int.*"`.
This whitelist operates in logical `AND` relations to the two whitelists
above.
tolerate_debug_op_creation_failures: (`bool`) whether debug op creation
failures (e.g., due to dtype incompatibility) are to be tolerated by not
throwing exceptions.
"""
if debug_ops:
self.debug_ops = debug_ops
else:
self.debug_ops = ["DebugIdentity"]
self.node_name_regex_whitelist = node_name_regex_whitelist
self.op_type_regex_whitelist = op_type_regex_whitelist
self.tensor_dtype_regex_whitelist = tensor_dtype_regex_whitelist
self.tolerate_debug_op_creation_failures = (
tolerate_debug_op_creation_failures)
def __repr__(self):
return ("WatchOptions(debug_ops=%r, node_name_regex_whitelist=%r, "
"op_type_regex_whitelist=%r, tensor_dtype_regex_whitelist=%r, "
"tolerate_debug_op_creation_failures=%r)" % (
self.debug_ops, self.node_name_regex_whitelist,
self.op_type_regex_whitelist, self.tensor_dtype_regex_whitelist,
self.tolerate_debug_op_creation_failures))
class NonInteractiveDebugWrapperSession(BaseDebugWrapperSession):
"""Base class for non-interactive (i.e., non-CLI) debug wrapper sessions."""
def __init__(self, sess, watch_fn=None):
"""Constructor of DumpingDebugWrapperSession.
Args:
sess: The TensorFlow `Session` object being wrapped.
watch_fn: (`Callable`) A Callable of the following signature:
```
def watch_fn(fetches, feeds):
# Args:
# fetches: the fetches to the `Session.run()` call.
# feeds: the feeds to the `Session.run()` call.
#
          # Returns: (debug_ops, node_name_regex_whitelist, op_type_regex_whitelist)
# debug_ops: (str or list of str) Debug op(s) to be used by the
# debugger in this run() call.
# node_name_regex_whitelist: Regular-expression whitelist for node
# name. Same as the corresponding arg to `debug_util.watch_graph`.
          #   op_type_regex_whitelist: Regular-expression whitelist for op type.
# Same as the corresponding arg to `debug_util.watch_graph`.
#
# Both or either can be None. If both are set, the two whitelists
# will operate in a logical AND relation. This is consistent with
# `debug_utils.watch_graph()`.
```
Raises:
TypeError: If a non-None `watch_fn` is specified and it is not callable.
"""
BaseDebugWrapperSession.__init__(self, sess)
self._watch_fn = None
if watch_fn is not None:
if not callable(watch_fn):
raise TypeError("watch_fn is not callable")
self._watch_fn = watch_fn
def on_session_init(self, request):
"""See doc of BaseDebugWrapperSession.on_run_start."""
return OnSessionInitResponse(OnSessionInitAction.PROCEED)
@abc.abstractmethod
def _prepare_run_debug_urls(self, fetches, feed_dict):
"""Abstract method to be implemented by concrete subclasses.
This method prepares the run-specific debug URL(s).
Args:
fetches: Same as the `fetches` argument to `Session.run()`
feed_dict: Same as the `feed_dict` argument to `Session.run()`
Returns:
debug_urls: (`str` or `list` of `str`) Debug URLs to be used in
this `Session.run()` call.
"""
def on_run_start(self, request):
"""See doc of BaseDebugWrapperSession.on_run_start."""
debug_urls, watch_opts = self._prepare_run_watch_config(
request.fetches, request.feed_dict)
return OnRunStartResponse(
OnRunStartAction.DEBUG_RUN,
debug_urls,
debug_ops=watch_opts.debug_ops,
node_name_regex_whitelist=watch_opts.node_name_regex_whitelist,
op_type_regex_whitelist=watch_opts.op_type_regex_whitelist,
tensor_dtype_regex_whitelist=watch_opts.tensor_dtype_regex_whitelist,
tolerate_debug_op_creation_failures=(
watch_opts.tolerate_debug_op_creation_failures))
def _prepare_run_watch_config(self, fetches, feed_dict):
"""Get the debug_urls, and node/op whitelists for the current run() call.
Args:
fetches: Same as the `fetches` argument to `Session.run()`.
feed_dict: Same as the `feed_dict argument` to `Session.run()`.
Returns:
debug_urls: (str or list of str) Debug URLs for the current run() call.
Currently, the list consists of only one URL that is a file:// URL.
watch_options: (WatchOptions) The return value of a watch_fn, containing
options including debug_ops, and whitelists.
"""
debug_urls = self._prepare_run_debug_urls(fetches, feed_dict)
if self._watch_fn is None:
watch_options = WatchOptions()
else:
watch_options = self._watch_fn(fetches, feed_dict)
if isinstance(watch_options, tuple):
# For legacy return type (tuples).
watch_options = WatchOptions(*watch_options)
return debug_urls, watch_options
def on_run_end(self, request):
"""See doc of BaseDebugWrapperSession.on_run_end."""
return OnRunEndResponse()
def invoke_node_stepper(self,
node_stepper,
restore_variable_values_on_exit=True):
"""See doc of BaseDebugWrapperSession.invoke_node_stepper."""
raise NotImplementedError(
"NonInteractiveDebugWrapperSession does not support node-stepper mode.")
|
llvm-mirror/openmp
|
refs/heads/master
|
runtime/test/affinity/format/check.py
|
13
|
import os
import sys
import argparse
import re
class Checks(object):
class CheckError(Exception):
pass
def __init__(self, filename, prefix):
self.checks = []
self.lines = []
self.check_no_output = False
self.filename = filename
self.prefix = prefix
def readStdin(self):
self.lines = [l.rstrip('\r\n') for l in sys.stdin.readlines()]
def readChecks(self):
with open(self.filename) as f:
for line in f:
match = re.search('{}: NO_OUTPUT'.format(self.prefix), line)
if match is not None:
self.check_no_output = True
return
match = re.search('{}: num_threads=([0-9]+) (.*)$'.format(self.prefix), line)
if match is not None:
num_threads = int(match.group(1))
for i in range(num_threads):
self.checks.append(match.group(2))
continue
def check(self):
# If no checks at all, then nothing to do
if len(self.checks) == 0 and not self.check_no_output:
print('Nothing to check for')
return
# Check if we are expecting no output
if self.check_no_output:
if len(self.lines) == 0:
return
else:
raise Checks.CheckError('{}: Output was found when expecting none.'.format(self.prefix))
# Run through each check line and see if it exists in the output
# If it does, then delete the line from output and look for the
# next check line.
# If you don't find the line then raise Checks.CheckError
# If there are extra lines of output then raise Checks.CheckError
for c in self.checks:
found = False
index = -1
for idx, line in enumerate(self.lines):
if re.search(c, line) is not None:
found = True
index = idx
break
if not found:
raise Checks.CheckError('{}: Did not find: {}'.format(self.prefix, c))
else:
del self.lines[index]
if len(self.lines) != 0:
raise Checks.CheckError('{}: Extra output: {}'.format(self.prefix, self.lines))
# Setup argument parsing
parser = argparse.ArgumentParser(description='''This script checks output of
a program against "CHECK" lines in filename''')
parser.add_argument('filename', default=None, help='filename to check against')
parser.add_argument('-c', '--check-prefix', dest='prefix',
default='CHECK', help='check prefix token default: %(default)s')
command_args = parser.parse_args()
# Do the checking
checks = Checks(command_args.filename, command_args.prefix)
checks.readStdin()
checks.readChecks()
checks.check()
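# Example (hypothetical) check file understood by readChecks() above, using the
# default "CHECK" prefix. A "num_threads=N" line expands into N copies of the
# regular expression that follows it, and "CHECK: NO_OUTPUT" asserts that the
# program printed nothing at all:
#
#   // CHECK: num_threads=4 OMP: .* thread [0-3] bound to OS proc set .*
#
# Typical invocation (illustrative): the test program's stdout is piped into
# this script together with the source file holding the CHECK lines:
#
#   ./affinity_format_test | python check.py affinity_format_test.c -c CHECK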
|
rafaeldelucena/waterbutler
|
refs/heads/develop
|
waterbutler/providers/figshare/settings.py
|
7
|
try:
from waterbutler import settings
except ImportError:
settings = {}
config = settings.get('FIGSHARE_PROVIDER_CONFIG', {})
BASE_URL = config.get('BASE_URL', 'http://api.figshare.com/v1/my_data')
VIEW_URL = config.get('VIEW_URL', 'http://figshare.com/')
|
detrout/pykolab
|
refs/heads/master
|
pykolab/cli/telemetry/cmd_examine_session.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2010-2012 Kolab Systems AG (http://www.kolabsys.com)
#
# Jeroen van Meeuwen (Kolab Systems) <vanmeeuwen a kolabsys.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 or, at your option, any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import sys
import pykolab
from pykolab.translate import _
log = pykolab.getLogger('pykolab.cli')
conf = pykolab.getConf()
from pykolab import telemetry
from pykolab.cli import commands
def __init__():
commands.register('examine_session', execute, group='telemetry', description="Examine a Telemetry session.")
def execute(*args, **kw):
    db = telemetry.init_db()
    # session_id may be passed as a keyword argument (see the recursion over a
    # user's sessions below); otherwise it is read from the CLI arguments.
    session_id = kw.get('session_id', None)
    wanted = False
    if session_id == None:
try:
wanted = conf.cli_args.pop(0)
except:
log.error(_("Unspecified session identifier"))
sys.exit(1)
if not wanted:
wanted = session_id
session_wanted = None
try:
_wanted = (int)(wanted)
session_wanted = _wanted
except:
user_wanted = wanted
if not session_wanted == None:
session = db.query(
telemetry.TelemetrySession
).filter_by(
id=session_wanted
).first()
if session == None:
log.error(_("Invalid session identifier"))
sys.exit(1)
user = db.query(
telemetry.TelemetryUser
).filter_by(
id=session.user_id
).first()
server = db.query(
telemetry.TelemetryServer
).filter_by(
id=session.server_id
).first()
else:
user = db.query(
telemetry.TelemetryUser
).filter_by(
sasl_username=user_wanted
).first()
sessions = db.query(
telemetry.TelemetrySession
).filter_by(
user_id=user.id
).order_by(
telemetry.telemetry_session_table.c.start
)
for session in sessions:
            # Recurse to examine each of this user's sessions individually.
            execute(session_id=session.id)
return
print _("Session by %s on server %s") % (user.sasl_username,server.fqdn)
command_issues = db.query(
telemetry.TelemetryCommandIssue
).filter_by(
session_id=session.id
)
for command_issue in command_issues:
command = db.query(
telemetry.TelemetryCommand
).filter_by(
id=command_issue.command_id
).first()
command_arg = db.query(
telemetry.TelemetryCommandArg
).filter_by(
id=command_issue.command_arg_id
).first()
print "Client(%d): %s %s %s" % (
command_issue.id,
command_issue.command_tag,
command.command,
command_arg.command_arg
)
server_responses = db.query(
telemetry.TelemetryServerResponse
).filter_by(
command_issue_id=command_issue.id
)
for server_response in server_responses:
        server_response_lines = server_response.response.split('\n')
for server_response_line in server_response_lines:
print "Server(%d): %s" % (
server_response.id,
server_response_line
)
|
gstiebler/odemis
|
refs/heads/master
|
src/odemis/util/driver.py
|
1
|
# -*- coding: utf-8 -*-
'''
Created on 5 Mar 2013
@author: Éric Piel
Copyright © 2013 Éric Piel, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms
of the GNU General Public License version 2 as published by the Free Software
Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
Odemis. If not, see http://www.gnu.org/licenses/.
'''
from __future__ import division
from Pyro4.errors import CommunicationError
import collections
import logging
import math
from odemis import model
import os
import re
import sys
import threading
def getSerialDriver(name):
"""
return (string): the name of the serial driver used for the given port
"""
# In linux, can be found as link of /sys/class/tty/tty*/device/driver
if sys.platform.startswith('linux'):
path = ("/sys/class/tty/" + os.path.basename(os.path.realpath(name))
+ "/device/driver")
try:
return os.path.basename(os.readlink(path))
except OSError:
return "Unknown"
else:
# TODO: Windows version
return "Unknown"
def get_linux_version():
"""
return (tuple of 3 int): major, minor, micro
    raise LookupError: if the version cannot be determined (e.g., not a Linux kernel)
"""
try:
lv = os.uname()[2] # version string
sv = re.match(r"\d+\.\d+\.\d+", lv).group() # get the raw version, without -XXX
return tuple(int(s) for s in sv.split("."))
except AttributeError: # No uname, or no match
raise LookupError("Failed to find Linux version")
# From http://code.activestate.com/recipes/286222/
_SCALE = {'KB': 2 ** 10, 'MB': 2 ** 20}
def _VmB(VmKey):
"""
Read the memory usage for a given type
Note: only supported on Linux
return (int): memory used in bytes
"""
proc_status = '/proc/%d/status' % os.getpid()
# get pseudo file /proc/<pid>/status
try:
t = open(proc_status)
v = t.read()
t.close()
except Exception:
raise NotImplementedError("Non POSIX system not supported")
try:
# get VmKey line e.g. 'VmRSS: 9999 kB\n ...'
i = v.index(VmKey + ":")
v = v[i:].split(None, 3) # whitespaces, 4 parts
if len(v) < 3:
raise ValueError("Failed to find memory key %s" % (VmKey,))
# convert to bytes
return int(v[1]) * _SCALE[v[2].upper()]
except (ValueError, TypeError, KeyError):
raise NotImplementedError("System not reporting memory key %s" % (VmKey,))
def readMemoryUsage():
"""
return (int): memory usage in bytes.
raises:
        NotImplementedError if OS is not supported
"""
try:
import psutil
process = psutil.Process(os.getpid())
if hasattr(process, "get_memory_info"):
# Old API (v1.0 and below)
mem = process.get_memory_info().rss
else:
# API for psutil v2+
mem = process.memory_info().rss
return mem
except ImportError:
return _VmB('VmRSS')
def estimateMoveDuration(distance, speed, accel):
"""
Compute the theoretical duration of a move given the maximum speed and
acceleration. It considers that the speed curve of the move will follow
a trapezoidal profile: first acceleration, then maximum speed, and then
deceleration.
distance (0 <= float): distance that will be travelled (in m)
speed (0 < float): maximum speed allowed (in m/s)
    accel (0 < float): acceleration and deceleration (in m/s²)
return (0 <= float): time in s
"""
    # Given the distance to be traveled, determine whether we have a
    # triangular or a trapezoidal motion profile.
    A = (2 * accel) / (accel ** 2)  # simplifies to 2 / accel
    s = 0.5 * A * speed ** 2  # distance covered by the accel + decel phases alone
if distance > s:
t1 = speed / accel
t2 = (distance - s) / speed
t3 = speed / accel
return t1 + t2 + t3
else:
vp = math.sqrt(2.0 * distance / A)
t1 = vp / accel
t2 = vp / accel
return t1 + t2
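# Worked example for estimateMoveDuration() (illustrative values): with
# speed = 0.5 m/s and accel = 1 m/s², the acceleration + deceleration phases
# alone cover speed²/accel = 0.25 m, so:
#   estimateMoveDuration(1.0, 0.5, 1.0) -> 0.5 + 1.5 + 0.5 = 2.5 s    (trapezoidal)
#   estimateMoveDuration(0.1, 0.5, 1.0) -> 2 * sqrt(0.1 / 1.0) ≈ 0.63 s (triangular)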
def checkLightBand(band):
"""
Check that the given object looks like a light band. It should either be
    a tuple of two floats representing light wavelengths in m, or a list of such tuples.
band (object): should be tuple of floats or list of tuple of floats
raise ValueError: if the band doesn't follow the convention
"""
if not isinstance(band, collections.Iterable) or len(band) == 0:
raise ValueError("Band %r is not a (list of a) list of 2 floats" % (band,))
# is it a list of list?
if isinstance(band[0], collections.Iterable):
# => set of 2-tuples
for sb in band:
if len(sb) != 2:
raise ValueError("Expected only 2 floats in band, found %d" % len(sb))
band = tuple(band)
else:
# 2-tuple
if len(band) != 2:
raise ValueError("Expected only 2 floats in band, found %d" % len(band))
band = (tuple(band),)
# Check the values are min/max and in m: typically within nm (< µm!)
max_val = 10e-6 # m
for low, high in band:
if low > high:
raise ValueError("Min of band %s must be first in list" % (band,))
if low < 0:
raise ValueError("Band %s must be 2 positive value in meters" % (band,))
if low > max_val or high > max_val:
raise ValueError("Band %s contains very high values for light "
"wavelength, ensure the value is in meters." % (band,))
# no error found
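# For instance (illustrative values), both of the following pass the checks in
# checkLightBand() above:
#   checkLightBand((480e-9, 520e-9))                       # a single band
#   checkLightBand([(480e-9, 520e-9), (580e-9, 600e-9)])   # multiple bands
# while checkLightBand((520e-9, 480e-9)) raises ValueError (min before max).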
# Special trick functions for speeding up Pyro start-up
def _speedUpPyroVAConnect(comp):
"""
Ensures that all the VAs of the component will be quick to access
comp (Component)
"""
# Force the creation of the connection
# If the connection already exists it's very fast, otherwise, we wait
# for the connection to be created in a separate thread
for name, va in model.getVAs(comp).items():
t = threading.Thread(name="Connection to VA %s.%s" % (comp.name, name),
target=va._pyroBind)
t.daemon = True
t.start()
def speedUpPyroConnect(comp):
"""
Ensures that all the children of the component will be quick to access.
It does nothing but speed up later access.
comp (Component)
"""
# each connection is pretty fast (~10ms) but when listing all the VAs of
# all the components, it can easily add up to 1s if done sequentially.
def bind_obj(obj):
# logging.debug("binding comp %s", obj.name)
obj._pyroBind()
speedUpPyroConnect(obj)
_speedUpPyroVAConnect(comp)
for child in comp.children.value:
t = threading.Thread(name="Connection to %s" % child.name, target=bind_obj, args=(child,))
t.start()
BACKEND_RUNNING = "RUNNING"
BACKEND_STARTING = "STARTING"
BACKEND_DEAD = "DEAD"
BACKEND_STOPPED = "STOPPED"
# TODO: support TERMINATING status?
def get_backend_status():
try:
model._core._microscope = None # force reset of the microscope
microscope = model.getMicroscope()
if not microscope.ghosts.value:
return BACKEND_RUNNING
else:
# Not all components are working => we are "starting" (or borked)
return BACKEND_STARTING
except (IOError, CommunicationError):
if os.path.exists(model.BACKEND_FILE):
logging.debug("No microscope found, it's sign the back-end is not responding")
return BACKEND_DEAD
else:
logging.debug("Back-end %s file doesn't exists", model.BACKEND_FILE)
return BACKEND_STOPPED
except:
logging.exception("Unresponsive back-end")
return BACKEND_DEAD
return BACKEND_DEAD
|
mozilla/stoneridge
|
refs/heads/master
|
python/src/Doc/includes/sqlite3/collation_reverse.py
|
44
|
import sqlite3
def collate_reverse(string1, string2):
return -cmp(string1, string2)
con = sqlite3.connect(":memory:")
con.create_collation("reverse", collate_reverse)
cur = con.cursor()
cur.execute("create table test(x)")
cur.executemany("insert into test(x) values (?)", [("a",), ("b",)])
cur.execute("select x from test order by x collate reverse")
for row in cur:
print row
con.close()
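# Expected output (reverse alphabetical order thanks to the custom collation):
#   (u'b',)
#   (u'a',)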
|
WilliamYi96/Machine-Learning
|
refs/heads/master
|
LeetCode/0012.py
|
1
|
# Greedy algorithm
class Solution:
def intToRoman(self, num):
        nums = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1]
        symbols = ['M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I']
mstr = ''
for i in range(len(nums)):
while num >= nums[i]:
mstr += symbols[i]
num -= nums[i]
return mstr
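# Quick sanity checks of the greedy conversion above (illustrative only):
if __name__ == '__main__':
    s = Solution()
    assert s.intToRoman(1994) == 'MCMXCIV'   # 1000 + 900 + 90 + 4
    assert s.intToRoman(58) == 'LVIII'       # 50 + 5 + 3
    print(s.intToRoman(3549))                # -> MMMDXLIX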
|
google/llvm-propeller
|
refs/heads/bb-clusters
|
compiler-rt/test/sanitizer_common/android_commands/android_common.py
|
14
|
import os, sys, subprocess, tempfile
import time
ANDROID_TMPDIR = '/data/local/tmp/Output'
ADB = os.environ.get('ADB', 'adb')
verbose = False
if os.environ.get('ANDROID_RUN_VERBOSE') == '1':
verbose = True
def host_to_device_path(path):
rel = os.path.relpath(path, "/")
dev = os.path.join(ANDROID_TMPDIR, rel)
return dev
def adb(args, attempts = 1):
if verbose:
print args
tmpname = tempfile.mktemp()
out = open(tmpname, 'w')
ret = 255
while attempts > 0 and ret != 0:
attempts -= 1
ret = subprocess.call([ADB] + args, stdout=out, stderr=subprocess.STDOUT)
if attempts != 0:
ret = 5
if ret != 0:
print "adb command failed", args
print tmpname
out.close()
out = open(tmpname, 'r')
print out.read()
out.close()
os.unlink(tmpname)
return ret
def pull_from_device(path):
tmp = tempfile.mktemp()
adb(['pull', path, tmp], 5)
text = open(tmp, 'r').read()
os.unlink(tmp)
return text
def push_to_device(path):
dst_path = host_to_device_path(path)
adb(['push', path, dst_path], 5)
|
ttingchan/MerchanDolibarr
|
refs/heads/master
|
htdocs/includes/jquery/plugins/jqueryFileTree/connectors/jqueryFileTree.py
|
157
|
#
# jQuery File Tree
# Python/Django connector script
# By Martin Skou
#
import os
import urllib
from django.http import HttpResponse
def dirlist(request):
r=['<ul class="jqueryFileTree" style="display: none;">']
try:
r=['<ul class="jqueryFileTree" style="display: none;">']
d=urllib.unquote(request.POST.get('dir','c:\\temp'))
for f in os.listdir(d):
ff=os.path.join(d,f)
if os.path.isdir(ff):
r.append('<li class="directory collapsed"><a href="#" rel="%s/">%s</a></li>' % (ff,f))
else:
e=os.path.splitext(f)[1][1:] # get .ext and remove dot
r.append('<li class="file ext_%s"><a href="#" rel="%s">%s</a></li>' % (e,ff,f))
r.append('</ul>')
except Exception,e:
r.append('Could not load directory: %s' % str(e))
r.append('</ul>')
return HttpResponse(''.join(r))
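# Example (illustrative) of the markup produced by dirlist() for a directory
# "/srv/files" containing "docs/" and "readme.txt":
#
#   <ul class="jqueryFileTree" style="display: none;">
#   <li class="directory collapsed"><a href="#" rel="/srv/files/docs/">docs</a></li>
#   <li class="file ext_txt"><a href="#" rel="/srv/files/readme.txt">readme.txt</a></li>
#   </ul>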
|
CodeDJ/qt5-hidpi
|
refs/heads/master
|
qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/runtests.py
|
121
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import platform
import sys
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
from webkitpy.common.system.executive import ScriptError
_log = logging.getLogger(__name__)
class RunTests(AbstractStep):
# FIXME: This knowledge really belongs in the commit-queue.
NON_INTERACTIVE_FAILURE_LIMIT_COUNT = 30
@classmethod
def options(cls):
return AbstractStep.options() + [
Options.build_style,
Options.test,
Options.non_interactive,
Options.quiet,
]
def run(self, state):
if not self._options.test:
return
if not self._options.non_interactive:
# FIXME: We should teach the commit-queue and the EWS how to run these tests.
python_unittests_command = self._tool.deprecated_port().run_python_unittests_command()
if python_unittests_command:
_log.info("Running Python unit tests")
self._tool.executive.run_and_throw_if_fail(python_unittests_command, cwd=self._tool.scm().checkout_root)
perl_unittests_command = self._tool.deprecated_port().run_perl_unittests_command()
if perl_unittests_command:
_log.info("Running Perl unit tests")
self._tool.executive.run_and_throw_if_fail(perl_unittests_command, cwd=self._tool.scm().checkout_root)
javascriptcore_tests_command = self._tool.deprecated_port().run_javascriptcore_tests_command()
if javascriptcore_tests_command:
_log.info("Running JavaScriptCore tests")
self._tool.executive.run_and_throw_if_fail(javascriptcore_tests_command, quiet=True, cwd=self._tool.scm().checkout_root)
bindings_tests_command = self._tool.deprecated_port().run_bindings_tests_command()
if bindings_tests_command:
_log.info("Running bindings generation tests")
args = bindings_tests_command
try:
self._tool.executive.run_and_throw_if_fail(args, cwd=self._tool.scm().checkout_root)
except ScriptError, e:
_log.info("Error running run-bindings-tests: %s" % e.message_with_output())
webkit_unit_tests_command = self._tool.deprecated_port().run_webkit_unit_tests_command()
if webkit_unit_tests_command:
_log.info("Running WebKit unit tests")
args = webkit_unit_tests_command
try:
self._tool.executive.run_and_throw_if_fail(args, cwd=self._tool.scm().checkout_root)
except ScriptError, e:
_log.info("Error running webkit_unit_tests: %s" % e.message_with_output())
_log.info("Running run-webkit-tests")
args = self._tool.deprecated_port().run_webkit_tests_command()
if self._options.non_interactive:
args.extend([
"--no-new-test-results",
"--no-show-results",
"--exit-after-n-failures=%s" % self.NON_INTERACTIVE_FAILURE_LIMIT_COUNT,
])
# old-run-webkit-tests does not support --skip-failing-tests
            # Using --quiet on Windows fails when we try to use /dev/null, disabling for now until we find a fix
if sys.platform != "cygwin":
args.append("--quiet")
args.append("--skip-failing-tests")
else:
args.append("--no-build");
if self._options.quiet:
args.append("--quiet")
self._tool.executive.run_and_throw_if_fail(args, cwd=self._tool.scm().checkout_root)
|
GoogleCloudPlatform/training-data-analyst
|
refs/heads/master
|
courses/machine_learning/deepdive2/structured/labs/serving/application/lib/werkzeug/wrappers/common_descriptors.py
|
10
|
from datetime import datetime
from datetime import timedelta
from .._compat import string_types
from ..datastructures import CallbackDict
from ..http import dump_age
from ..http import dump_header
from ..http import dump_options_header
from ..http import http_date
from ..http import parse_age
from ..http import parse_date
from ..http import parse_options_header
from ..http import parse_set_header
from ..utils import cached_property
from ..utils import environ_property
from ..utils import get_content_type
from ..utils import header_property
from ..wsgi import get_content_length
class CommonRequestDescriptorsMixin(object):
"""A mixin for :class:`BaseRequest` subclasses. Request objects that
mix this class in will automatically get descriptors for a couple of
HTTP headers with automatic type conversion.
.. versionadded:: 0.5
"""
content_type = environ_property(
"CONTENT_TYPE",
doc="""The Content-Type entity-header field indicates the media
type of the entity-body sent to the recipient or, in the case of
the HEAD method, the media type that would have been sent had
the request been a GET.""",
)
@cached_property
def content_length(self):
"""The Content-Length entity-header field indicates the size of the
entity-body in bytes or, in the case of the HEAD method, the size of
the entity-body that would have been sent had the request been a
GET.
"""
return get_content_length(self.environ)
content_encoding = environ_property(
"HTTP_CONTENT_ENCODING",
doc="""The Content-Encoding entity-header field is used as a
modifier to the media-type. When present, its value indicates
what additional content codings have been applied to the
entity-body, and thus what decoding mechanisms must be applied
in order to obtain the media-type referenced by the Content-Type
header field.
.. versionadded:: 0.9""",
)
content_md5 = environ_property(
"HTTP_CONTENT_MD5",
doc="""The Content-MD5 entity-header field, as defined in
RFC 1864, is an MD5 digest of the entity-body for the purpose of
providing an end-to-end message integrity check (MIC) of the
entity-body. (Note: a MIC is good for detecting accidental
modification of the entity-body in transit, but is not proof
against malicious attacks.)
.. versionadded:: 0.9""",
)
referrer = environ_property(
"HTTP_REFERER",
doc="""The Referer[sic] request-header field allows the client
to specify, for the server's benefit, the address (URI) of the
resource from which the Request-URI was obtained (the
"referrer", although the header field is misspelled).""",
)
date = environ_property(
"HTTP_DATE",
None,
parse_date,
doc="""The Date general-header field represents the date and
time at which the message was originated, having the same
semantics as orig-date in RFC 822.""",
)
max_forwards = environ_property(
"HTTP_MAX_FORWARDS",
None,
int,
doc="""The Max-Forwards request-header field provides a
mechanism with the TRACE and OPTIONS methods to limit the number
of proxies or gateways that can forward the request to the next
inbound server.""",
)
def _parse_content_type(self):
if not hasattr(self, "_parsed_content_type"):
self._parsed_content_type = parse_options_header(
self.environ.get("CONTENT_TYPE", "")
)
@property
def mimetype(self):
"""Like :attr:`content_type`, but without parameters (eg, without
charset, type etc.) and always lowercase. For example if the content
type is ``text/HTML; charset=utf-8`` the mimetype would be
``'text/html'``.
"""
self._parse_content_type()
return self._parsed_content_type[0].lower()
@property
def mimetype_params(self):
"""The mimetype parameters as dict. For example if the content
type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
"""
self._parse_content_type()
return self._parsed_content_type[1]
@cached_property
def pragma(self):
"""The Pragma general-header field is used to include
implementation-specific directives that might apply to any recipient
along the request/response chain. All pragma directives specify
optional behavior from the viewpoint of the protocol; however, some
systems MAY require that behavior be consistent with the directives.
"""
return parse_set_header(self.environ.get("HTTP_PRAGMA", ""))
class CommonResponseDescriptorsMixin(object):
"""A mixin for :class:`BaseResponse` subclasses. Response objects that
mix this class in will automatically get descriptors for a couple of
HTTP headers with automatic type conversion.
"""
@property
def mimetype(self):
"""The mimetype (content type without charset etc.)"""
ct = self.headers.get("content-type")
if ct:
return ct.split(";")[0].strip()
@mimetype.setter
def mimetype(self, value):
self.headers["Content-Type"] = get_content_type(value, self.charset)
@property
def mimetype_params(self):
"""The mimetype parameters as dict. For example if the
content type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
.. versionadded:: 0.5
"""
def on_update(d):
self.headers["Content-Type"] = dump_options_header(self.mimetype, d)
d = parse_options_header(self.headers.get("content-type", ""))[1]
return CallbackDict(d, on_update)
location = header_property(
"Location",
doc="""The Location response-header field is used to redirect
the recipient to a location other than the Request-URI for
completion of the request or identification of a new
resource.""",
)
age = header_property(
"Age",
None,
parse_age,
dump_age,
doc="""The Age response-header field conveys the sender's
estimate of the amount of time since the response (or its
revalidation) was generated at the origin server.
Age values are non-negative decimal integers, representing time
in seconds.""",
)
content_type = header_property(
"Content-Type",
doc="""The Content-Type entity-header field indicates the media
type of the entity-body sent to the recipient or, in the case of
the HEAD method, the media type that would have been sent had
the request been a GET.""",
)
content_length = header_property(
"Content-Length",
None,
int,
str,
doc="""The Content-Length entity-header field indicates the size
of the entity-body, in decimal number of OCTETs, sent to the
recipient or, in the case of the HEAD method, the size of the
entity-body that would have been sent had the request been a
GET.""",
)
content_location = header_property(
"Content-Location",
doc="""The Content-Location entity-header field MAY be used to
supply the resource location for the entity enclosed in the
message when that entity is accessible from a location separate
from the requested resource's URI.""",
)
content_encoding = header_property(
"Content-Encoding",
doc="""The Content-Encoding entity-header field is used as a
modifier to the media-type. When present, its value indicates
what additional content codings have been applied to the
entity-body, and thus what decoding mechanisms must be applied
in order to obtain the media-type referenced by the Content-Type
header field.""",
)
content_md5 = header_property(
"Content-MD5",
doc="""The Content-MD5 entity-header field, as defined in
RFC 1864, is an MD5 digest of the entity-body for the purpose of
providing an end-to-end message integrity check (MIC) of the
entity-body. (Note: a MIC is good for detecting accidental
modification of the entity-body in transit, but is not proof
against malicious attacks.)""",
)
date = header_property(
"Date",
None,
parse_date,
http_date,
doc="""The Date general-header field represents the date and
time at which the message was originated, having the same
semantics as orig-date in RFC 822.""",
)
expires = header_property(
"Expires",
None,
parse_date,
http_date,
doc="""The Expires entity-header field gives the date/time after
which the response is considered stale. A stale cache entry may
not normally be returned by a cache.""",
)
last_modified = header_property(
"Last-Modified",
None,
parse_date,
http_date,
doc="""The Last-Modified entity-header field indicates the date
and time at which the origin server believes the variant was
last modified.""",
)
@property
def retry_after(self):
"""The Retry-After response-header field can be used with a
503 (Service Unavailable) response to indicate how long the
service is expected to be unavailable to the requesting client.
Time in seconds until expiration or date.
"""
value = self.headers.get("retry-after")
if value is None:
return
elif value.isdigit():
return datetime.utcnow() + timedelta(seconds=int(value))
return parse_date(value)
@retry_after.setter
def retry_after(self, value):
if value is None:
if "retry-after" in self.headers:
del self.headers["retry-after"]
return
elif isinstance(value, datetime):
value = http_date(value)
else:
value = str(value)
self.headers["Retry-After"] = value
def _set_property(name, doc=None): # noqa: B902
def fget(self):
def on_update(header_set):
if not header_set and name in self.headers:
del self.headers[name]
elif header_set:
self.headers[name] = header_set.to_header()
return parse_set_header(self.headers.get(name), on_update)
def fset(self, value):
if not value:
del self.headers[name]
elif isinstance(value, string_types):
self.headers[name] = value
else:
self.headers[name] = dump_header(value)
return property(fget, fset, doc=doc)
vary = _set_property(
"Vary",
doc="""The Vary field value indicates the set of request-header
fields that fully determines, while the response is fresh,
whether a cache is permitted to use the response to reply to a
subsequent request without revalidation.""",
)
content_language = _set_property(
"Content-Language",
doc="""The Content-Language entity-header field describes the
natural language(s) of the intended audience for the enclosed
entity. Note that this might not be equivalent to all the
languages used within the entity-body.""",
)
allow = _set_property(
"Allow",
doc="""The Allow entity-header field lists the set of methods
supported by the resource identified by the Request-URI. The
purpose of this field is strictly to inform the recipient of
valid methods associated with the resource. An Allow header
field MUST be present in a 405 (Method Not Allowed)
response.""",
)
del _set_property
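# Illustrative usage (not part of this module): on a werkzeug ``Response``,
# which mixes in ``CommonResponseDescriptorsMixin``, the descriptors above read
# and write the actual HTTP headers with type conversion, e.g.:
#
#     from werkzeug.wrappers import Response
#     resp = Response("busy")
#     resp.retry_after = 120                # stored as "Retry-After: 120"
#     resp.content_language = ["en", "de"]  # dumped as a comma-separated header
#     resp.mimetype = "application/json"    # sets Content-Type via get_content_type()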
|
Fl0rianFischer/sme_odoo
|
refs/heads/9.0
|
addons/mail_tip/__openerp__.py
|
19
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Mail Tips',
'category': 'Usability',
'description': """
OpenERP link module for web tips.
=================================
""",
'version': '0.1',
'depends': ['web_tip', 'mail'],
'data': [
'views/mail_tip.xml',
],
'auto_install': True
}
|
socialwareinc/Diamond
|
refs/heads/master
|
src/collectors/smart/smart.py
|
56
|
# coding=utf-8
"""
Collect data from S.M.A.R.T.'s attribute reporting.
#### Dependencies
* [smartmontools](http://sourceforge.net/apps/trac/smartmontools/wiki)
"""
import diamond.collector
import subprocess
import re
import os
from diamond.collector import str_to_bool
class SmartCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(SmartCollector, self).get_default_config_help()
config_help.update({
'devices': "device regex to collect stats on",
'bin': 'The path to the smartctl binary',
'use_sudo': 'Use sudo?',
'sudo_cmd': 'Path to sudo',
})
return config_help
def get_default_config(self):
"""
Returns default configuration options.
"""
config = super(SmartCollector, self).get_default_config()
config.update({
'path': 'smart',
'bin': 'smartctl',
'use_sudo': False,
'sudo_cmd': '/usr/bin/sudo',
'devices': '^disk[0-9]$|^sd[a-z]$|^hd[a-z]$',
})
return config
def collect(self):
"""
Collect and publish S.M.A.R.T. attributes
"""
devices = re.compile(self.config['devices'])
for device in os.listdir('/dev'):
if devices.match(device):
command = [self.config['bin'], "-A", os.path.join('/dev',
device)]
if str_to_bool(self.config['use_sudo']):
command.insert(0, self.config['sudo_cmd'])
attributes = subprocess.Popen(
command,
stdout=subprocess.PIPE
).communicate()[0].strip().splitlines()
metrics = {}
start_line = self.find_attr_start_line(attributes)
for attr in attributes[start_line:]:
attribute = attr.split()
if attribute[1] != "Unknown_Attribute":
metric = "%s.%s" % (device, attribute[1])
else:
metric = "%s.%s" % (device, attribute[0])
# New metric? Store it
if metric not in metrics:
metrics[metric] = attribute[9]
                    # Duplicate metric? Only store it if it has a larger
                    # value. This happens semi-often with the
                    # Temperature_Celsius attribute: a PASS/FAIL value can
                    # appear after the real temperature, so only overwrite
                    # the stored value if the earlier one was a PASS/FAIL
                    # (0/1).
elif metrics[metric] == 0 and attribute[9] > 0:
metrics[metric] = attribute[9]
else:
continue
for metric in metrics.keys():
self.publish(metric, metrics[metric])
def find_attr_start_line(self, lines, min_line=4, max_line=9):
"""
Return line number of the first real attribute and value.
The first line is 0. If the 'ATTRIBUTE_NAME' header is not
found, return the index after max_line.
"""
for idx, line in enumerate(lines[min_line:max_line]):
col = line.split()
if len(col) > 1 and col[1] == 'ATTRIBUTE_NAME':
return idx + min_line + 1
self.log.warn('ATTRIBUTE_NAME not found in second column of'
' smartctl output between lines %d and %d.'
% (min_line, max_line))
return max_line + 1
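# Example of a `smartctl -A` attribute row parsed by collect() above
# (illustrative):
#
#   ID# ATTRIBUTE_NAME  FLAG    VALUE WORST THRESH TYPE    UPDATED WHEN_FAILED RAW_VALUE
#     9 Power_On_Hours  0x0032  099   099   000    Old_age Always  -           1234
#
# After attr.split(), attribute[1] is "Power_On_Hours" and attribute[9] is the
# raw value "1234"; for device "sda" this is published as the metric
# "sda.Power_On_Hours".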
|
nachandr/cfme_tests
|
refs/heads/master
|
cfme/tests/intelligence/test_download_report.py
|
2
|
import pytest
from cfme import test_requirements
from cfme.infrastructure.provider import InfraProvider
from cfme.markers.env_markers.provider import ONE
from cfme.utils.appliance.implementations.ui import navigate_to
pytestmark = [test_requirements.report]
@pytest.fixture(scope="module")
def report(appliance):
saved_report = appliance.collections.reports.instantiate(
type="Configuration Management",
subtype="Virtual Machines",
menu_name="Hardware Information for VMs",
).queue(wait_for_finish=True)
yield saved_report
saved_report.delete(cancel=False)
@pytest.mark.parametrize("filetype", ["txt", "csv", "pdf"])
@pytest.mark.provider([InfraProvider], selector=ONE, scope="module")
def test_download_report(setup_provider_modscope, report, filetype):
"""Download the report as a file.
Polarion:
assignee: pvala
casecomponent: Reporting
caseimportance: high
initialEstimate: 1/20h
"""
if filetype == "pdf":
view = navigate_to(report, "Details")
# since multiple window handling is not possible, we just assert that the option is enabled.
assert view.download.item_enabled("Print or export as PDF")
else:
report.download(filetype)
|
beck/django
|
refs/heads/master
|
tests/migrations/test_migrations_fake_split_initial/0001_initial.py
|
315
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
operations = [
migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=255)),
("slug", models.SlugField(null=True)),
("age", models.IntegerField(default=0)),
("silly_field", models.BooleanField(default=False)),
],
),
migrations.CreateModel(
"Tribble",
[
("id", models.AutoField(primary_key=True)),
("fluffy", models.BooleanField(default=True)),
],
),
migrations.AlterUniqueTogether(
name='author',
unique_together=set([('name', 'slug')]),
),
]
|
ttthy1/2017sejongAI
|
refs/heads/master
|
week13/regression/Linear Regression Example(random data).py
|
2
|
import numpy as np
import matplotlib.pyplot as plt
num_points = 50
vector_set = []
for i in range(num_points):
    x1 = np.random.normal(0.0, 0.55)
    y1 = x1 * 0.1 + 0.3 + np.random.normal(0.0, 0.03)
    vector_set.append([x1, y1])
x_data = [v[0] for v in vector_set]
y_data = [v[1] for v in vector_set]
plt.plot(x_data, y_data, 'ro', label='data')
plt.legend()
plt.show()
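# Follow-up sketch (not part of the original example): recover the slope and
# intercept used above (roughly 0.1 and 0.3) with a least-squares fit.
w, b = np.polyfit(x_data, y_data, 1)
print('fitted slope: %.3f, intercept: %.3f' % (w, b))
plt.plot(x_data, y_data, 'ro', label='data')
plt.plot(x_data, [w * x + b for x in x_data], 'b-', label='fit')
plt.legend()
plt.show()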
|
vrv/tensorflow
|
refs/heads/master
|
tensorflow/contrib/memory_stats/python/kernel_tests/memory_stats_ops_test.py
|
30
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for memory statistics ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.memory_stats.python.ops import memory_stats_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class MemoryStatsOpsTest(test_util.TensorFlowTestCase):
def testBytesLimit(self):
# AllocatorStats.bytes_limit is set to zero for CPU allocators, so we skip
# the check.
if not test.is_gpu_available():
return
with self.test_session(use_gpu=True) as sess:
bytes_limit = sess.run(memory_stats_ops.BytesLimit())
self.assertLess(0, bytes_limit)
# Tests the peak memory usage of the following computation.
# a b
# | / |
# c |
# \ |
# \ |
# d
# The memory for matrix "a" can be reused for matrix "d". Therefore, this
  # computation needs space for only three matrices plus some small overhead.
def testChainOfMatmul(self):
    # MaxBytesInUse is registered on GPU only. See kernels/memory_stats_ops.cc.
if not test.is_gpu_available():
return
with self.test_session(use_gpu=True) as sess:
matrix_size = 64
matrix_shape = tensor_shape.TensorShape([matrix_size, matrix_size])
dtype = dtypes.float32
matrix_size_in_bytes = matrix_shape.num_elements() * dtype.size
a = random_ops.random_uniform(matrix_shape, dtype=dtype)
b = random_ops.random_uniform(matrix_shape, dtype=dtype)
c = math_ops.matmul(a, b)
d = math_ops.matmul(c, b)
sess.run(d)
max_bytes_in_use = sess.run(memory_stats_ops.MaxBytesInUse())
self.assertGreaterEqual(max_bytes_in_use, matrix_size_in_bytes * 3)
self.assertLess(max_bytes_in_use, matrix_size_in_bytes * 4)
if __name__ == '__main__':
test.main()
|
yoer/hue
|
refs/heads/master
|
desktop/core/ext-py/Paste-2.0.1/paste/debug/wdg_validate.py
|
50
|
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Middleware that tests the validity of all generated HTML using the
`WDG HTML Validator <http://www.htmlhelp.com/tools/validator/>`_
"""
from cStringIO import StringIO
import subprocess
from paste.response import header_value
import re
import cgi
__all__ = ['WDGValidateMiddleware']
class WDGValidateMiddleware(object):
"""
Middleware that checks HTML and appends messages about the validity of
the HTML. Uses: http://www.htmlhelp.com/tools/validator/ -- interacts
with the command line client. Use the configuration ``wdg_path`` to
override the path (default: looks for ``validate`` in $PATH).
To install, in your web context's __init__.py::
def urlparser_wrap(environ, start_response, app):
return wdg_validate.WDGValidateMiddleware(app)(
environ, start_response)
Or in your configuration::
middleware.append('paste.wdg_validate.WDGValidateMiddleware')
"""
_end_body_regex = re.compile(r'</body>', re.I)
def __init__(self, app, global_conf=None, wdg_path='validate'):
self.app = app
self.wdg_path = wdg_path
def __call__(self, environ, start_response):
output = StringIO()
response = []
def writer_start_response(status, headers, exc_info=None):
response.extend((status, headers))
start_response(status, headers, exc_info)
return output.write
app_iter = self.app(environ, writer_start_response)
try:
for s in app_iter:
output.write(s)
finally:
if hasattr(app_iter, 'close'):
app_iter.close()
page = output.getvalue()
status, headers = response
v = header_value(headers, 'content-type') or ''
if (not v.startswith('text/html')
and not v.startswith('text/xhtml')
and not v.startswith('application/xhtml')):
# Can't validate
# @@: Should validate CSS too... but using what?
return [page]
ops = []
if v.startswith('text/xhtml+xml'):
ops.append('--xml')
# @@: Should capture encoding too
html_errors = self.call_wdg_validate(
self.wdg_path, ops, page)
if html_errors:
page = self.add_error(page, html_errors)[0]
headers.remove(
('Content-Length',
str(header_value(headers, 'content-length'))))
headers.append(('Content-Length', str(len(page))))
return [page]
def call_wdg_validate(self, wdg_path, ops, page):
if subprocess is None:
raise ValueError(
"This middleware requires the subprocess module from "
"Python 2.4")
proc = subprocess.Popen([wdg_path] + ops,
shell=False,
close_fds=True,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout = proc.communicate(page)[0]
proc.wait()
return stdout
def add_error(self, html_page, html_errors):
add_text = ('<pre style="background-color: #ffd; color: #600; '
'border: 1px solid #000;">%s</pre>'
% cgi.escape(html_errors))
match = self._end_body_regex.search(html_page)
if match:
return [html_page[:match.start()]
+ add_text
+ html_page[match.start():]]
else:
return [html_page + add_text]
def make_wdg_validate_middleware(
app, global_conf, wdg_path='validate'):
"""
Wraps the application in the WDG validator from
http://www.htmlhelp.com/tools/validator/
Validation errors are appended to the text of each page.
You can configure this by giving the path to the validate
executable (by default picked up from $PATH)
"""
return WDGValidateMiddleware(
app, global_conf, wdg_path=wdg_path)
|
djw8605/condor
|
refs/heads/add-force
|
src/condor_contrib/condor_pigeon/src/condor_pigeon_client/sendAll.py
|
10
|
#! /s/python-2.5/i386_rhel5/bin/python
#
# Starts a Skype File Transfer.
# The open file dialog is set to the current directory.
# The users to send to are searched through their Skype name and their
# full name (display name actually).
#
# Requires at least Skype 2.0.0.27.
#
# (c) Copyright 2007, Vincent Oberle, vincent@oberle.org
#
# This software may be used and distributed according to the terms
# of the GNU Public License, incorporated herein by reference.
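#
# Illustrative invocation (contact name and message text are made up):
#   ./sendAll.py echo123 hello from condor
# sends the message "hello from condor" to every contact whose Skype ID or
# display name contains "echo123".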
import os
import sys
import re
from optparse import OptionParser
import Skype4Py
parser = OptionParser('%prog user [user*]', version="%prog 0.1")
options, args = parser.parse_args()
if len(args) < 1:
parser.print_help()
sys.exit(0)
dest = args[1:]
d = args[0]
msg = ''
for m in dest:
msg +=m+' '
path = os.getcwd() # use current working directory
skype = Skype4Py.Skype()
skype.FriendlyName = 'file_sender'
skype.Attach() # attach to Skype client
ids = []
print dest
# quick-filter type of search:
# match the query as a substring of the Skype ID first, then of the display name
for user in skype.Friends:
#print 'Found exact match'
if user.Handle.lower().find(d.lower()) != -1: ids.append(user)
elif user.DisplayName.lower().find(d.lower()) != -1: ids.append(user)
#ids.append(user)
found = True
# break
# if found: continue
# for user in skype.Friends:
# print who we will send to; no need to ask for confirmation, as the user can
# still cancel the open file window
#msg = "Hi Condor Messaging Integration Test..good morning :)"
#msg = dest[0]
print 'Sending to:'
for i in ids:
if i.FullName: print ' ' + i.FullName + ' (' + i.Handle + ')'
else: print ' ' + i.Handle
skype.SendMessage(i.Handle, msg)
if ids: # ids is a list of Skype IDs
in_str = ''
if path: in_str = ' IN ' + path
ids_str = ids[0].Handle
for i in ids[1:]: ids_str = ids_str + ', ' + i.Handle
# OPEN FILETRANSFER echo123 IN C:\temp
cmd = 'OPEN FILETRANSFER ' + ids_str + in_str
# print cmd
# skype.SendCommand(skype.Command(cmd))
|
abo-abo/edx-platform
|
refs/heads/master
|
common/djangoapps/student/tests/test_auto_auth.py
|
11
|
from django.test import TestCase
from django.test.client import Client
from django.contrib.auth.models import User
from util.testing import UrlResetMixin
from mock import patch
from django.core.urlresolvers import reverse, NoReverseMatch
class AutoAuthEnabledTestCase(UrlResetMixin, TestCase):
"""
Tests for the Auto auth view that we have for load testing.
"""
@patch.dict("django.conf.settings.MITX_FEATURES", {"AUTOMATIC_AUTH_FOR_TESTING": True})
def setUp(self):
# Patching the settings.MITX_FEATURES['AUTOMATIC_AUTH_FOR_TESTING']
# value affects the contents of urls.py,
# so we need to call super.setUp() which reloads urls.py (because
# of the UrlResetMixin)
super(AutoAuthEnabledTestCase, self).setUp()
self.url = '/auto_auth'
self.cms_csrf_url = "signup"
self.lms_csrf_url = "signin_user"
self.client = Client()
def test_create_user(self):
"""
Test that user gets created when visiting the page.
"""
self.client.get(self.url)
qset = User.objects.all()
# assert user was created and is active
self.assertEqual(qset.count(), 1)
user = qset[0]
assert user.is_active
def test_create_defined_user(self):
"""
Test that the user gets created with the correct attributes
when they are passed as parameters on the auto-auth page.
"""
self.client.get(
self.url,
{'username': 'robot', 'password': 'test', 'email': 'robot@edx.org'}
)
qset = User.objects.all()
# assert user was created with the correct username and password
self.assertEqual(qset.count(), 1)
user = qset[0]
self.assertEqual(user.username, 'robot')
self.assertTrue(user.check_password('test'))
self.assertEqual(user.email, 'robot@edx.org')
@patch('student.views.random.randint')
def test_create_multiple_users(self, randint):
"""
Test to make sure multiple users are created.
"""
randint.return_value = 1
self.client.get(self.url)
randint.return_value = 2
self.client.get(self.url)
qset = User.objects.all()
# make sure that USER_1 and USER_2 were created correctly
self.assertEqual(qset.count(), 2)
user1 = qset[0]
self.assertEqual(user1.username, 'USER_1')
self.assertTrue(user1.check_password('PASS_1'))
self.assertEqual(user1.email, 'USER_1_dummy_test@mitx.mit.edu')
self.assertEqual(qset[1].username, 'USER_2')
@patch.dict("django.conf.settings.MITX_FEATURES", {"MAX_AUTO_AUTH_USERS": 1})
def test_login_already_created_user(self):
"""
Test that when we have reached the limit for automatic users
        a subsequent request results in an already existent one being
logged in.
"""
# auto-generate 1 user (the max)
url = '/auto_auth'
self.client.get(url)
# go to the site again
self.client.get(url)
qset = User.objects.all()
# make sure it is the same user
self.assertEqual(qset.count(), 1)
class AutoAuthDisabledTestCase(UrlResetMixin, TestCase):
"""
Test that the page is inaccessible with default settings
"""
@patch.dict("django.conf.settings.MITX_FEATURES", {"AUTOMATIC_AUTH_FOR_TESTING": False})
def setUp(self):
# Patching the settings.MITX_FEATURES['AUTOMATIC_AUTH_FOR_TESTING']
# value affects the contents of urls.py,
# so we need to call super.setUp() which reloads urls.py (because
# of the UrlResetMixin)
super(AutoAuthDisabledTestCase, self).setUp()
self.url = '/auto_auth'
self.client = Client()
def test_auto_auth_disabled(self):
"""
Make sure automatic authentication is disabled.
"""
response = self.client.get(self.url)
self.assertEqual(response.status_code, 404)
def test_csrf_enabled(self):
"""
test that when not load testing, csrf protection is on
"""
cms_csrf_url = "signup"
lms_csrf_url = "signin_user"
self.client = Client(enforce_csrf_checks=True)
try:
csrf_protected_url = reverse(cms_csrf_url)
response = self.client.post(csrf_protected_url)
except NoReverseMatch:
csrf_protected_url = reverse(lms_csrf_url)
response = self.client.post(csrf_protected_url)
self.assertEqual(response.status_code, 403)
|
veger/ansible
|
refs/heads/devel
|
lib/ansible/modules/remote_management/ucs/ucs_wwn_pool.py
|
46
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: ucs_wwn_pool
short_description: Configures WWNN or WWPN pools on Cisco UCS Manager
description:
- Configures WWNNs or WWPN pools on Cisco UCS Manager.
- Examples can be used with the UCS Platform Emulator U(https://communities.cisco.com/ucspe).
extends_documentation_fragment: ucs
options:
state:
description:
- If C(present), will verify WWNNs/WWPNs are present and will create if needed.
- If C(absent), will verify WWNNs/WWPNs are absent and will delete if needed.
choices: [present, absent]
default: present
name:
description:
- The name of the World Wide Node Name (WWNN) or World Wide Port Name (WWPN) pool.
- This name can be between 1 and 32 alphanumeric characters.
- "You cannot use spaces or any special characters other than - (hyphen), \"_\" (underscore), : (colon), and . (period)."
- You cannot change this name after the WWNN or WWPN pool is created.
required: yes
purpose:
description:
- Specify whether this is a node (WWNN) or port (WWPN) pool.
- Optional if state is absent.
choices: [node, port]
required: yes
description:
description:
- A description of the WWNN or WWPN pool.
- Enter up to 256 characters.
- "You can use any characters or spaces except the following:"
- "` (accent mark), \ (backslash), ^ (carat), \" (double quote), = (equal sign), > (greater than), < (less than), or ' (single quote)."
aliases: [ descr ]
order:
description:
- The Assignment Order field.
- "This can be one of the following:"
- "default - Cisco UCS Manager selects a random identity from the pool."
- "sequential - Cisco UCS Manager selects the lowest available identity from the pool."
choices: [default, sequential]
default: default
first_addr:
description:
- The first initiator in the World Wide Name (WWN) block.
- This is the From field in the UCS Manager Add WWN Blocks menu.
last_addr:
description:
        - The last initiator in the World Wide Name (WWN) block.
- This is the To field in the UCS Manager Add WWN Blocks menu.
- For WWxN pools, the pool size must be a multiple of ports-per-node + 1.
- For example, if there are 7 ports per node, the pool size must be a multiple of 8.
- If there are 63 ports per node, the pool size must be a multiple of 64.
org_dn:
description:
- Org dn (distinguished name)
default: org-root
requirements:
- ucsmsdk
author:
- David Soper (@dsoper2)
- CiscoUcs (@CiscoUcs)
version_added: '2.5'
'''
EXAMPLES = r'''
- name: Configure WWNN/WWPN pools
ucs_wwn_pool:
hostname: 172.16.143.150
username: admin
password: password
name: WWNN-Pool
purpose: node
first_addr: 20:00:00:25:B5:48:00:00
last_addr: 20:00:00:25:B5:48:00:0F
- ucs_wwn_pool:
hostname: 172.16.143.150
username: admin
password: password
name: WWPN-Pool-A
purpose: port
order: sequential
first_addr: 20:00:00:25:B5:48:0A:00
last_addr: 20:00:00:25:B5:48:0A:0F
- name: Remove WWNN/WWPN pools
ucs_wwn_pool:
hostname: 172.16.143.150
username: admin
password: password
name: WWNN-Pool
state: absent
- ucs_wwn_pool:
hostname: 172.16.143.150
username: admin
password: password
name: WWPN-Pool-A
state: absent
'''
RETURN = r'''
#
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.remote_management.ucs import UCSModule, ucs_argument_spec
def main():
argument_spec = ucs_argument_spec
argument_spec.update(
org_dn=dict(type='str', default='org-root'),
name=dict(type='str'),
purpose=dict(type='str', choices=['node', 'port']),
descr=dict(type='str'),
order=dict(type='str', default='default', choices=['default', 'sequential']),
first_addr=dict(type='str'),
last_addr=dict(type='str'),
state=dict(type='str', default='present', choices=['present', 'absent']),
wwn_list=dict(type='list'),
)
# Note that use of wwn_list is an experimental feature which allows multiple resource updates with a single UCSM connection.
# Support for wwn_list may change or be removed once persistent UCS connections are supported.
# Either wwn_list or name is required (user can specify either a list or single resource).
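    # Illustrative playbook task using wwn_list (sketch only; host, credentials
    # and pool values are copied from the EXAMPLES section above):
    #
    #   - ucs_wwn_pool:
    #       hostname: 172.16.143.150
    #       username: admin
    #       password: password
    #       wwn_list:
    #         - name: WWNN-Pool
    #           purpose: node
    #         - name: WWPN-Pool-A
    #           purpose: port
    #           first_addr: 20:00:00:25:B5:48:0A:00
    #           last_addr: 20:00:00:25:B5:48:0A:0F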
module = AnsibleModule(
argument_spec,
supports_check_mode=True,
required_one_of=[
['wwn_list', 'name']
],
mutually_exclusive=[
['wwn_list', 'name']
],
)
ucs = UCSModule(module)
err = False
from ucsmsdk.mometa.fcpool.FcpoolInitiators import FcpoolInitiators
from ucsmsdk.mometa.fcpool.FcpoolBlock import FcpoolBlock
changed = False
try:
# Only documented use is a single resource, but to also support experimental
# feature allowing multiple updates all params are converted to a wwn_list below.
if module.params['wwn_list']:
            # directly use the list (single resource and list are mutually exclusive)
wwn_list = module.params['wwn_list']
else:
# single resource specified, create list from the current params
wwn_list = [module.params]
for wwn in wwn_list:
mo_exists = False
props_match = False
# set default params. Done here to set values for lists which can't be done in the argument_spec
if not wwn.get('descr'):
wwn['descr'] = ''
if not wwn.get('order'):
wwn['order'] = 'default'
# dn is <org_dn>/wwn-pool-<name> for WWNN or WWPN
dn = module.params['org_dn'] + '/wwn-pool-' + wwn['name']
mo = ucs.login_handle.query_dn(dn)
if mo:
mo_exists = True
if module.params['state'] == 'absent':
if mo_exists:
if not module.check_mode:
ucs.login_handle.remove_mo(mo)
ucs.login_handle.commit()
changed = True
else:
# append purpose param with suffix used by UCSM
purpose_param = wwn['purpose'] + '-wwn-assignment'
if mo_exists:
# check top-level mo props
kwargs = dict(assignment_order=wwn['order'])
kwargs['descr'] = wwn['descr']
kwargs['purpose'] = purpose_param
if (mo.check_prop_match(**kwargs)):
# top-level props match, check next level mo/props
if 'last_addr' in wwn and 'first_addr' in wwn:
block_dn = dn + '/block-' + wwn['first_addr'].upper() + '-' + wwn['last_addr'].upper()
mo_1 = ucs.login_handle.query_dn(block_dn)
if mo_1:
props_match = True
else:
props_match = True
if not props_match:
if not module.check_mode:
# create if mo does not already exist
mo = FcpoolInitiators(
parent_mo_or_dn=module.params['org_dn'],
name=wwn['name'],
descr=wwn['descr'],
assignment_order=wwn['order'],
purpose=purpose_param,
)
if 'last_addr' in wwn and 'first_addr' in wwn:
mo_1 = FcpoolBlock(
parent_mo_or_dn=mo,
to=wwn['last_addr'],
r_from=wwn['first_addr'],
)
ucs.login_handle.add_mo(mo, True)
ucs.login_handle.commit()
changed = True
except Exception as e:
err = True
ucs.result['msg'] = "setup error: %s " % str(e)
ucs.result['changed'] = changed
if err:
module.fail_json(**ucs.result)
module.exit_json(**ucs.result)
if __name__ == '__main__':
main()
|
Acidburn0zzz/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/webdriver/tests/perform_actions/key_modifiers.py
|
41
|
import pytest
from tests.perform_actions.support.keys import Keys
@pytest.mark.parametrize("modifier", [Keys.SHIFT, Keys.R_SHIFT])
def test_shift_modifier_and_non_printable_keys(session, key_reporter, key_chain, modifier):
key_chain \
.send_keys("foo") \
.key_down(modifier) \
.key_down(Keys.BACKSPACE) \
.key_up(modifier) \
.key_up(Keys.BACKSPACE) \
.perform()
assert key_reporter.property("value") == "fo"
@pytest.mark.parametrize("modifier", [Keys.SHIFT, Keys.R_SHIFT])
def test_shift_modifier_generates_capital_letters(session, key_reporter, key_chain, modifier):
key_chain \
.send_keys("b") \
.key_down(modifier) \
.key_down("c") \
.key_up(modifier) \
.key_up("c") \
.key_down("d") \
.key_up("d") \
.key_down(modifier) \
.key_down("e") \
.key_up("e") \
.key_down("f") \
.key_up(modifier) \
.key_up("f") \
.perform()
assert key_reporter.property("value") == "bCdEF"
|
jamielennox/keystone
|
refs/heads/master
|
keystone/common/sql/migrate_repo/versions/054_add_actor_id_index.py
|
11
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
ASSIGNMENT_TABLE = 'assignment'
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
assignment = sql.Table(ASSIGNMENT_TABLE, meta, autoload=True)
idx = sql.Index('ix_actor_id', assignment.c.actor_id)
idx.create(migrate_engine)
|
sxlijin/tfs_updater
|
refs/heads/master
|
lib/chardet/sjisprober.py
|
1776
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(SJISSMModel)
self._mDistributionAnalyzer = SJISDistributionAnalysis()
self._mContextAnalyzer = SJISContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return self._mContextAnalyzer.get_charset_name()
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
- charLen], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
|
adam111316/SickGear
|
refs/heads/master
|
lib/sqlalchemy/connectors/pyodbc.py
|
80
|
# connectors/pyodbc.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import Connector
from .. import util
import sys
import re
class PyODBCConnector(Connector):
driver = 'pyodbc'
supports_sane_multi_rowcount = False
if util.py2k:
# PyODBC unicode is broken on UCS-4 builds
supports_unicode = sys.maxunicode == 65535
supports_unicode_statements = supports_unicode
supports_native_decimal = True
default_paramstyle = 'named'
# for non-DSN connections, this should
# hold the desired driver name
pyodbc_driver_name = None
# will be set to True after initialize()
# if the freetds.so is detected
freetds = False
# will be set to the string version of
# the FreeTDS driver if freetds is detected
freetds_driver_version = None
# will be set to True after initialize()
# if the libessqlsrv.so is detected
easysoft = False
def __init__(self, supports_unicode_binds=None, **kw):
super(PyODBCConnector, self).__init__(**kw)
self._user_supports_unicode_binds = supports_unicode_binds
@classmethod
def dbapi(cls):
return __import__('pyodbc')
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
opts.update(url.query)
keys = opts
query = url.query
connect_args = {}
for param in ('ansi', 'unicode_results', 'autocommit'):
if param in keys:
connect_args[param] = util.asbool(keys.pop(param))
if 'odbc_connect' in keys:
connectors = [util.unquote_plus(keys.pop('odbc_connect'))]
else:
dsn_connection = 'dsn' in keys or \
('host' in keys and 'database' not in keys)
if dsn_connection:
connectors = ['dsn=%s' % (keys.pop('host', '') or \
keys.pop('dsn', ''))]
else:
port = ''
if 'port' in keys and not 'port' in query:
port = ',%d' % int(keys.pop('port'))
connectors = ["DRIVER={%s}" %
keys.pop('driver', self.pyodbc_driver_name),
'Server=%s%s' % (keys.pop('host', ''), port),
'Database=%s' % keys.pop('database', '')]
user = keys.pop("user", None)
if user:
connectors.append("UID=%s" % user)
connectors.append("PWD=%s" % keys.pop('password', ''))
else:
connectors.append("Trusted_Connection=Yes")
# if set to 'Yes', the ODBC layer will try to automagically
# convert textual data from your database encoding to your
# client encoding. This should obviously be set to 'No' if
# you query a cp1253 encoded database from a latin1 client...
if 'odbc_autotranslate' in keys:
connectors.append("AutoTranslate=%s" %
keys.pop("odbc_autotranslate"))
connectors.extend(['%s=%s' % (k, v) for k, v in keys.items()])
return [[";".join(connectors)], connect_args]
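    # Illustrative translations performed above (the dialect prefix and all
    # values are made up for the example):
    #   mssql+pyodbc://user:pw@my_dsn
    #       -> "dsn=my_dsn;UID=user;PWD=pw"
    #   mssql+pyodbc://user:pw@host:1433/mydb?driver=FreeTDS
    #       -> "DRIVER={FreeTDS};Server=host,1433;Database=mydb;UID=user;PWD=pw"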
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.ProgrammingError):
return "The cursor's connection has been closed." in str(e) or \
'Attempt to use a closed connection.' in str(e)
elif isinstance(e, self.dbapi.Error):
return '[08S01]' in str(e)
else:
return False
def initialize(self, connection):
# determine FreeTDS first. can't issue SQL easily
# without getting unicode_statements/binds set up.
pyodbc = self.dbapi
dbapi_con = connection.connection
_sql_driver_name = dbapi_con.getinfo(pyodbc.SQL_DRIVER_NAME)
self.freetds = bool(re.match(r".*libtdsodbc.*\.so", _sql_driver_name
))
self.easysoft = bool(re.match(r".*libessqlsrv.*\.so", _sql_driver_name
))
if self.freetds:
self.freetds_driver_version = dbapi_con.getinfo(
pyodbc.SQL_DRIVER_VER)
self.supports_unicode_statements = (
not util.py2k or
(not self.freetds and not self.easysoft)
)
if self._user_supports_unicode_binds is not None:
self.supports_unicode_binds = self._user_supports_unicode_binds
elif util.py2k:
self.supports_unicode_binds = (
not self.freetds or self.freetds_driver_version >= '0.91'
) and not self.easysoft
else:
self.supports_unicode_binds = True
# run other initialization which asks for user name, etc.
super(PyODBCConnector, self).initialize(connection)
def _dbapi_version(self):
if not self.dbapi:
return ()
return self._parse_dbapi_version(self.dbapi.version)
def _parse_dbapi_version(self, vers):
m = re.match(
r'(?:py.*-)?([\d\.]+)(?:-(\w+))?',
vers
)
if not m:
return ()
vers = tuple([int(x) for x in m.group(1).split(".")])
if m.group(2):
vers += (m.group(2),)
return vers
def _get_server_version_info(self, connection):
dbapi_con = connection.connection
version = []
r = re.compile('[.\-]')
for n in r.split(dbapi_con.getinfo(self.dbapi.SQL_DBMS_VER)):
try:
version.append(int(n))
except ValueError:
version.append(n)
return tuple(version)
|
prakritish/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/nxos/nxos_snmp_community.py
|
19
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_snmp_community
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages SNMP community configs.
description:
- Manages SNMP community configuration.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
options:
community:
description:
- Case-sensitive community string.
required: true
access:
description:
- Access type for community.
required: false
default: null
choices: ['ro','rw']
group:
description:
- Group to which the community belongs.
required: false
default: null
acl:
description:
- ACL name to filter snmp requests.
required: false
default: 1
state:
description:
- Manage the state of the resource.
        required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# ensure snmp community is configured
- nxos_snmp_community:
community: TESTING7
group: network-operator
state: present
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"group": "network-operator"}
existing:
description: k/v pairs of existing snmp community
type: dict
sample: {}
end_state:
description: k/v pairs of snmp community after module execution
returned: always
type: dict or null
sample: {"acl": "None", "group": "network-operator"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["snmp-server community TESTING7 group network-operator"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
import re
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
if 'show run' not in command:
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = str(value)
else:
new_dict[new_key] = value
return new_dict
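# Illustrative use of apply_key_map() with a made-up row from the device:
#   apply_key_map({'grouporaccess': 'group', 'aclfilter': 'acl'},
#                 {'community_name': 'TESTING7',
#                  'grouporaccess': 'network-operator',
#                  'aclfilter': 'N/A'})
#   returns {'group': 'network-operator', 'acl': 'N/A'}; keys that are not in
#   the map (community_name here) are dropped.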
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_snmp_groups(module):
command = 'show snmp group'
data = execute_show_command(command, module)[0]
group_list = []
try:
group_table = data['TABLE_role']['ROW_role']
for group in group_table:
group_list.append(group['role_name'])
except (KeyError, AttributeError):
return group_list
return group_list
def get_snmp_community(module, find_filter=None):
command = 'show snmp community'
data = execute_show_command(command, module)[0]
community_dict = {}
community_map = {
'grouporaccess': 'group',
'aclfilter': 'acl'
}
try:
community_table = data['TABLE_snmp_community']['ROW_snmp_community']
for each in community_table:
community = apply_key_map(community_map, each)
key = each['community_name']
community_dict[key] = community
except (KeyError, AttributeError):
return community_dict
if find_filter:
find = community_dict.get(find_filter, None)
if find_filter is None or find is None:
return {}
else:
fix_find = {}
for (key, value) in find.items():
if isinstance(value, str):
fix_find[key] = value.strip()
else:
fix_find[key] = value
return fix_find
def config_snmp_community(delta, community):
CMDS = {
'group': 'snmp-server community {0} group {group}',
'acl': 'snmp-server community {0} use-acl {acl}'
}
commands = []
for k, v in delta.items():
cmd = CMDS.get(k).format(community, **delta)
if cmd:
commands.append(cmd)
cmd = None
return commands
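# Illustrative expansion of a delta into commands (community string taken from
# the RETURN sample above):
#   config_snmp_community({'group': 'network-operator'}, 'TESTING7')
#   -> ['snmp-server community TESTING7 group network-operator']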
def main():
argument_spec = dict(
community=dict(required=True, type='str'),
access=dict(choices=['ro', 'rw']),
group=dict(type='str'),
acl=dict(type='str'),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=[['access', 'group']],
mutually_exclusive=[['access', 'group']],
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
access = module.params['access']
group = module.params['group']
community = module.params['community']
acl = module.params['acl']
state = module.params['state']
if access:
if access == 'ro':
group = 'network-operator'
elif access == 'rw':
group = 'network-admin'
# group check - ensure group being configured exists on the device
configured_groups = get_snmp_groups(module)
if group not in configured_groups:
        module.fail_json(msg="group not on switch. "
                             "Please add before moving forward")
existing = get_snmp_community(module, community)
args = dict(group=group, acl=acl)
proposed = dict((k, v) for k, v in args.items() if v is not None)
delta = dict(set(proposed.items()).difference(existing.items()))
changed = False
end_state = existing
commands = []
if state == 'absent':
if existing:
command = "no snmp-server community {0}".format(community)
commands.append(command)
cmds = flatten_list(commands)
elif state == 'present':
if delta:
command = config_snmp_community(dict(delta), community)
commands.append(command)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
load_config(module, cmds)
end_state = get_snmp_community(module, community)
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
module.exit_json(**results)
if __name__ == '__main__':
main()
|
TobiasMue/paparazzi
|
refs/heads/master
|
sw/tools/stm32loader/stm32loader.py
|
79
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:si:et:enc=utf-8
# Author: Ivan A-R <ivan@tuxotronic.org>
# Project page: http://tuxotronic.org/wiki/projects/stm32loader
#
# This file is part of stm32loader.
#
# stm32loader is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3, or (at your option) any later
# version.
#
# stm32loader is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with stm32loader; see the file COPYING3. If not see
# <http://www.gnu.org/licenses/>.
import sys, getopt
import serial
import time
try:
from progressbar import *
usepbar = 1
except:
usepbar = 0
# Verbose level
QUIET = 20
def mdebug(level, message):
if(QUIET >= level):
print >> sys.stderr , message
class CmdException(Exception):
pass
class CommandInterface:
def open(self, aport='/dev/tty.usbserial-ftCYPMYJ', abaudrate=115200) :
self.sp = serial.Serial(
port=aport,
baudrate=abaudrate, # baudrate
bytesize=8, # number of databits
parity=serial.PARITY_EVEN,
stopbits=1,
            xonxoff=0, # disable software flow control
rtscts=0, # disable RTS/CTS flow control
timeout=5 # set a timeout value, None for waiting forever
)
def _wait_for_ask(self, info = ""):
# wait for ask
try:
ask = ord(self.sp.read())
except:
raise CmdException("Can't read port or timeout")
else:
if ask == 0x79:
# ACK
return 1
else:
if ask == 0x1F:
# NACK
raise CmdException("NACK "+info)
else:
                    # Unknown response
                    raise CmdException("Unknown response. "+info+": "+hex(ask))
def reset(self):
self.sp.setDTR(0)
time.sleep(0.1)
self.sp.setDTR(1)
time.sleep(0.5)
def initChip(self):
# Set boot
self.sp.setRTS(0)
self.reset()
self.sp.write("\x7F") # Syncro
return self._wait_for_ask("Syncro")
def releaseChip(self):
self.sp.setRTS(1)
self.reset()
def cmdGeneric(self, cmd):
self.sp.write(chr(cmd))
self.sp.write(chr(cmd ^ 0xFF)) # Control byte
return self._wait_for_ask(hex(cmd))
def cmdGet(self):
if self.cmdGeneric(0x00):
mdebug(10, "*** Get command");
len = ord(self.sp.read())
version = ord(self.sp.read())
mdebug(10, " Bootloader version: "+hex(version))
dat = map(lambda c: hex(ord(c)), self.sp.read(len))
mdebug(10, " Available commands: "+str(dat))
self._wait_for_ask("0x00 end")
return version
else:
raise CmdException("Get (0x00) failed")
def cmdGetVersion(self):
if self.cmdGeneric(0x01):
mdebug(10, "*** GetVersion command")
version = ord(self.sp.read())
self.sp.read(2)
self._wait_for_ask("0x01 end")
mdebug(10, " Bootloader version: "+hex(version))
return version
else:
raise CmdException("GetVersion (0x01) failed")
def cmdGetID(self):
if self.cmdGeneric(0x02):
mdebug(10, "*** GetID command")
len = ord(self.sp.read())
id = self.sp.read(len+1)
self._wait_for_ask("0x02 end")
return id
else:
raise CmdException("GetID (0x02) failed")
def _encode_addr(self, addr):
byte3 = (addr >> 0) & 0xFF
byte2 = (addr >> 8) & 0xFF
byte1 = (addr >> 16) & 0xFF
byte0 = (addr >> 24) & 0xFF
crc = byte0 ^ byte1 ^ byte2 ^ byte3
return (chr(byte0) + chr(byte1) + chr(byte2) + chr(byte3) + chr(crc))
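    # Illustrative encoding of the default flash base address used below
    # (four address bytes, MSB first, followed by their XOR checksum):
    #   _encode_addr(0x08000000) -> '\x08\x00\x00\x00\x08'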
def cmdReadMemory(self, addr, lng):
assert(lng <= 256)
if self.cmdGeneric(0x11):
mdebug(10, "*** ReadMemory command")
self.sp.write(self._encode_addr(addr))
self._wait_for_ask("0x11 address failed")
N = (lng - 1) & 0xFF
crc = N ^ 0xFF
self.sp.write(chr(N) + chr(crc))
self._wait_for_ask("0x11 length failed")
return map(lambda c: ord(c), self.sp.read(lng))
else:
raise CmdException("ReadMemory (0x11) failed")
def cmdGo(self, addr):
if self.cmdGeneric(0x21):
mdebug(10, "*** Go command")
self.sp.write(self._encode_addr(addr))
self._wait_for_ask("0x21 go failed")
else:
raise CmdException("Go (0x21) failed")
def cmdWriteMemory(self, addr, data):
assert(len(data) <= 256)
if self.cmdGeneric(0x31):
mdebug(10, "*** Write memory command")
self.sp.write(self._encode_addr(addr))
self._wait_for_ask("0x31 address failed")
#map(lambda c: hex(ord(c)), data)
lng = (len(data)-1) & 0xFF
mdebug(10, " %s bytes to write" % [lng+1]);
self.sp.write(chr(lng)) # len really
crc = 0xFF
for c in data:
crc = crc ^ c
self.sp.write(chr(c))
self.sp.write(chr(crc))
self._wait_for_ask("0x31 programming failed")
mdebug(10, " Write memory done")
else:
raise CmdException("Write memory (0x31) failed")
def cmdEraseMemory(self, sectors = None):
if self.cmdGeneric(0x43):
mdebug(10, "*** Erase memory command")
if sectors is None:
# Global erase
self.sp.write(chr(0xFF))
self.sp.write(chr(0x00))
else:
# Sectors erase
self.sp.write(chr((len(sectors)-1) & 0xFF))
crc = 0xFF
for c in sectors:
crc = crc ^ c
self.sp.write(chr(c))
self.sp.write(chr(crc))
self._wait_for_ask("0x43 erasing failed")
mdebug(10, " Erase memory done")
else:
raise CmdException("Erase memory (0x43) failed")
def cmdWriteProtect(self, sectors):
if self.cmdGeneric(0x63):
mdebug(10, "*** Write protect command")
self.sp.write(chr((len(sectors)-1) & 0xFF))
crc = 0xFF
for c in sectors:
crc = crc ^ c
self.sp.write(chr(c))
self.sp.write(chr(crc))
self._wait_for_ask("0x63 write protect failed")
mdebug(10, " Write protect done")
else:
raise CmdException("Write Protect memory (0x63) failed")
def cmdWriteUnprotect(self):
if self.cmdGeneric(0x73):
mdebug(10, "*** Write Unprotect command")
self._wait_for_ask("0x73 write unprotect failed")
self._wait_for_ask("0x73 write unprotect 2 failed")
mdebug(10, " Write Unprotect done")
else:
raise CmdException("Write Unprotect (0x73) failed")
def cmdReadoutProtect(self):
if self.cmdGeneric(0x82):
mdebug(10, "*** Readout protect command")
self._wait_for_ask("0x82 readout protect failed")
self._wait_for_ask("0x82 readout protect 2 failed")
mdebug(10, " Read protect done")
else:
raise CmdException("Readout protect (0x82) failed")
def cmdReadoutUnprotect(self):
if self.cmdGeneric(0x92):
mdebug(10, "*** Readout Unprotect command")
self._wait_for_ask("0x92 readout unprotect failed")
self._wait_for_ask("0x92 readout unprotect 2 failed")
mdebug(10, " Read Unprotect done")
else:
raise CmdException("Readout unprotect (0x92) failed")
# Complex commands section
def readMemory(self, addr, lng):
data = []
if usepbar:
widgets = ['Reading: ', Percentage(),', ', ETA(), ' ', Bar()]
pbar = ProgressBar(widgets=widgets,maxval=lng, term_width=79).start()
while lng > 256:
if usepbar:
pbar.update(pbar.maxval-lng)
else:
mdebug(5, "Read %(len)d bytes at 0x%(addr)X" % {'addr': addr, 'len': 256})
data = data + self.cmdReadMemory(addr, 256)
addr = addr + 256
lng = lng - 256
if usepbar:
pbar.update(pbar.maxval-lng)
pbar.finish()
else:
mdebug(5, "Read %(len)d bytes at 0x%(addr)X" % {'addr': addr, 'len': 256})
data = data + self.cmdReadMemory(addr, lng)
return data
def writeMemory(self, addr, data):
lng = len(data)
if usepbar:
widgets = ['Writing: ', Percentage(),' ', ETA(), ' ', Bar()]
pbar = ProgressBar(widgets=widgets, maxval=lng, term_width=79).start()
offs = 0
while lng > 256:
if usepbar:
pbar.update(pbar.maxval-lng)
else:
mdebug(5, "Write %(len)d bytes at 0x%(addr)X" % {'addr': addr, 'len': 256})
self.cmdWriteMemory(addr, data[offs:offs+256])
offs = offs + 256
addr = addr + 256
lng = lng - 256
if usepbar:
pbar.update(pbar.maxval-lng)
pbar.finish()
else:
mdebug(5, "Write %(len)d bytes at 0x%(addr)X" % {'addr': addr, 'len': 256})
self.cmdWriteMemory(addr, data[offs:offs+lng] + ([0xFF] * (256-lng)) )
def __init__(self) :
pass
def usage():
print """Usage: %s [-hqVewvr] [-l length] [-p port] [-b baud] [-a addr] [file.bin]
-h This help
-q Quiet
-V Verbose
-e Erase
-w Write
-v Verify
-r Read
-l length Length of read
-p port Serial port (default: /dev/tty.usbserial-ftCYPMYJ)
-b baud Baud speed (default: 115200)
-a addr Target address
./stm32loader.py -e -w -v example/main.bin
""" % sys.argv[0]
if __name__ == "__main__":
# Import Psyco if available
try:
import psyco
psyco.full()
print "Using Psyco..."
except ImportError:
pass
conf = {
'port': '/dev/tty.usbserial-ftCYPMYJ',
'baud': 115200,
'address': 0x08000000,
'erase': 0,
'write': 0,
'verify': 0,
'read': 0,
}
# http://www.python.org/doc/2.5.2/lib/module-getopt.html
try:
opts, args = getopt.getopt(sys.argv[1:], "hqVewvrp:b:a:l:")
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
QUIET = 5
for o, a in opts:
if o == '-V':
QUIET = 10
elif o == '-q':
QUIET = 0
elif o == '-h':
usage()
sys.exit(0)
elif o == '-e':
conf['erase'] = 1
elif o == '-w':
conf['write'] = 1
elif o == '-v':
conf['verify'] = 1
elif o == '-r':
conf['read'] = 1
elif o == '-p':
conf['port'] = a
elif o == '-b':
conf['baud'] = eval(a)
elif o == '-a':
conf['address'] = eval(a)
elif o == '-l':
conf['len'] = eval(a)
else:
assert False, "unhandled option"
cmd = CommandInterface()
cmd.open(conf['port'], conf['baud'])
mdebug(10, "Open port %(port)s, baud %(baud)d" % {'port':conf['port'], 'baud':conf['baud']})
try:
try:
cmd.initChip()
except:
print "Can't init. Ensure that BOOT0 is enabled and reset device"
bootversion = cmd.cmdGet()
mdebug(0, "Bootloader version %X" % bootversion)
mdebug(0, "Chip id `%s'" % str(map(lambda c: hex(ord(c)), cmd.cmdGetID())))
# cmd.cmdGetVersion()
# cmd.cmdGetID()
# cmd.cmdReadoutUnprotect()
# cmd.cmdWriteUnprotect()
# cmd.cmdWriteProtect([0, 1])
if (conf['write'] or conf['verify']):
data = map(lambda c: ord(c), file(args[0]).read())
if conf['erase']:
cmd.cmdEraseMemory()
if conf['write']:
cmd.writeMemory(conf['address'], data)
if conf['verify']:
verify = cmd.readMemory(conf['address'], len(data))
if(data == verify):
print "Verification OK"
else:
print "Verification FAILED"
print str(len(data)) + ' vs ' + str(len(verify))
for i in xrange(0, len(data)):
if data[i] != verify[i]:
print hex(i) + ': ' + hex(data[i]) + ' vs ' + hex(verify[i])
if not conf['write'] and conf['read']:
rdata = cmd.readMemory(conf['address'], conf['len'])
file(args[0], 'wb').write(''.join(map(chr,rdata)))
# cmd.cmdGo(addr + 0x04)
finally:
cmd.releaseChip()
|
Murillo/Hackerrank-Algorithms
|
refs/heads/master
|
Data Structures/Trees/tree-height-of-a-binary-tree.py
|
1
|
# Tree: Height of a Binary Tree
# Developer: Murillo Grubler
# https://www.hackerrank.com/challenges/tree-height-of-a-binary-tree/problem
# Time complexity: O(n)
class Node:
def __init__(self, info):
self.info = info
self.left = None
self.right = None
self.level = None
def __str__(self):
return str(self.info)
class BinarySearchTree:
def __init__(self):
self.root = None
def create(self, val):
if self.root == None:
self.root = Node(val)
else:
current = self.root
while True:
if val < current.info:
if current.left:
current = current.left
else:
current.left = Node(val)
break
elif val > current.info:
if current.right:
current = current.right
else:
current.right = Node(val)
break
else:
break
'''
class Node:
def __init__(self,info):
self.info = info
self.left = None
self.right = None
// this is a node of the tree , which contains info as data, left , right
'''
def calculate_height(root, current_level, height):
height = height + 1 if current_level > height else height
if root.left is not None:
height = calculate_height(root.left, current_level + 1, height)
if root.right is not None:
height = calculate_height(root.right, current_level + 1, height)
return height
def height(root):
if root is None:
return 0
return calculate_height(root, 0, 0)
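# Illustrative run (hand-picked input, not taken from the problem statement):
# inserting 3 5 2 1 4 6 7 builds a BST whose deepest node (7) sits three edges
# below the root, so height(tree.root) returns 3.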
tree = BinarySearchTree()
t = int(input())
arr = list(map(int, input().split()))
for i in range(t):
tree.create(arr[i])
print(height(tree.root))
|
BorgERP/borg-erp-6of3
|
refs/heads/master
|
verticals/garage61/acy_stock_sent_number/__init__.py
|
1
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Acysos S.L. (http://acysos.com) All Rights Reserved.
# Ignacio Ibeas <ignacio@acysos.com>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock
import sale
|
DirtyUnicorns/android_external_chromium_org
|
refs/heads/lollipop
|
tools/update_reference_build.py
|
41
|
#!/usr/bin/env python
#
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Updates the Chrome reference builds.
Before running this script, you should first verify that you are authenticated
for SVN. You can do this by running:
$ svn ls svn://svn.chromium.org/chrome/trunk/deps/reference_builds
You may need to get your SVN password from https://chromium-access.appspot.com/.
Usage:
$ cd /tmp
$ /path/to/update_reference_build.py VERSION # e.g. 37.0.2062.94
$ cd reference_builds/reference_builds
$ gcl change
$ gcl upload <change>
$ gcl commit <change>
"""
import logging
import optparse
import os
import shutil
import subprocess
import sys
import time
import urllib
import urllib2
import zipfile
# Google storage location (no public web URL's), example:
# gs://chrome-unsigned/desktop-*/30.0.1595.0/precise32/chrome-precise32.zip
CHROME_GS_URL_FMT = ('gs://chrome-unsigned/desktop-*/%s/%s/%s')
class BuildUpdater(object):
_CHROME_PLATFORM_FILES_MAP = {
'Win': [
'chrome-win.zip',
],
'Mac': [
'chrome-mac.zip',
],
'Linux': [
'chrome-precise32.zip',
],
'Linux_x64': [
'chrome-precise64.zip',
],
}
# Map of platform names to gs:// Chrome build names.
_BUILD_PLATFORM_MAP = {
'Linux': 'precise32',
'Linux_x64': 'precise64',
'Win': 'win',
'Mac': 'mac',
}
_PLATFORM_DEST_MAP = {
'Linux': 'chrome_linux',
'Linux_x64': 'chrome_linux64',
'Win': 'chrome_win',
'Mac': 'chrome_mac',
}
def __init__(self, version, options):
self._version = version
self._platforms = options.platforms.split(',')
@staticmethod
def _GetCmdStatusAndOutput(args, cwd=None, shell=False):
"""Executes a subprocess and returns its exit code and output.
Args:
args: A string or a sequence of program arguments.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
shell: Whether to execute args as a shell command.
Returns:
The tuple (exit code, output).
"""
logging.info(str(args) + ' ' + (cwd or ''))
p = subprocess.Popen(args=args, cwd=cwd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=shell)
stdout, stderr = p.communicate()
exit_code = p.returncode
if stderr:
logging.critical(stderr)
logging.info(stdout)
return (exit_code, stdout)
def _GetBuildUrl(self, platform, version, filename):
"""Returns the URL for fetching one file.
Args:
platform: Platform name, must be a key in |self._BUILD_PLATFORM_MAP|.
version: A Chrome version number, e.g. 30.0.1600.1.
filename: Name of the file to fetch.
Returns:
The URL for fetching a file. This may be a GS or HTTP URL.
"""
return CHROME_GS_URL_FMT % (
version, self._BUILD_PLATFORM_MAP[platform], filename)
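  # Illustrative result (version taken from the usage docstring above, platform
  # mapped through _BUILD_PLATFORM_MAP):
  #   _GetBuildUrl('Linux', '37.0.2062.94', 'chrome-precise32.zip')
  #   -> 'gs://chrome-unsigned/desktop-*/37.0.2062.94/precise32/chrome-precise32.zip'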
def _FindBuildVersion(self, platform, version, filename):
"""Searches for a version where a filename can be found.
Args:
platform: Platform name.
version: A Chrome version number, e.g. 30.0.1600.1.
filename: Filename to look for.
Returns:
A version where the file could be found, or None.
"""
# TODO(shadi): Iterate over official versions to find a valid one.
return (version
if self._DoesBuildExist(platform, version, filename) else None)
def _DoesBuildExist(self, platform, version, filename):
"""Checks whether a file can be found for the given Chrome version.
Args:
platform: Platform name.
version: Chrome version number, e.g. 30.0.1600.1.
filename: Filename to look for.
Returns:
True if the file could be found, False otherwise.
"""
url = self._GetBuildUrl(platform, version, filename)
return self._DoesGSFileExist(url)
def _DoesGSFileExist(self, gs_file_name):
"""Returns True if the GS file can be found, False otherwise."""
exit_code = BuildUpdater._GetCmdStatusAndOutput(
['gsutil', 'ls', gs_file_name])[0]
return not exit_code
def _GetPlatformFiles(self, platform):
"""Returns a list of filenames to fetch for the given platform."""
return BuildUpdater._CHROME_PLATFORM_FILES_MAP[platform]
def _DownloadBuilds(self):
for platform in self._platforms:
for filename in self._GetPlatformFiles(platform):
output = os.path.join('dl', platform,
'%s_%s_%s' % (platform,
self._version,
filename))
if os.path.exists(output):
          logging.info('%s already exists, skipping download', output)
continue
version = self._FindBuildVersion(platform, self._version, filename)
if not version:
logging.critical('Failed to find %s build for r%s\n', platform,
self._version)
sys.exit(1)
dirname = os.path.dirname(output)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
url = self._GetBuildUrl(platform, version, filename)
self._DownloadFile(url, output)
def _DownloadFile(self, url, output):
logging.info('Downloading %s, saving to %s', url, output)
BuildUpdater._GetCmdStatusAndOutput(['gsutil', 'cp', url, output])
def _FetchSvnRepos(self):
if not os.path.exists('reference_builds'):
os.makedirs('reference_builds')
BuildUpdater._GetCmdStatusAndOutput(
['gclient', 'config',
'svn://svn.chromium.org/chrome/trunk/deps/reference_builds'],
'reference_builds')
BuildUpdater._GetCmdStatusAndOutput(
['gclient', 'sync'], 'reference_builds')
def _UnzipFile(self, dl_file, dest_dir):
"""Unzips a file if it is a zip file.
Args:
dl_file: The downloaded file to unzip.
dest_dir: The destination directory to unzip to.
Returns:
True if the file was unzipped. False if it wasn't a zip file.
"""
if not zipfile.is_zipfile(dl_file):
return False
logging.info('Opening %s', dl_file)
with zipfile.ZipFile(dl_file, 'r') as z:
for content in z.namelist():
dest = os.path.join(dest_dir, content[content.find('/')+1:])
# Create dest parent dir if it does not exist.
if not os.path.isdir(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
# If dest is just a dir listing, do nothing.
if not os.path.basename(dest):
continue
if not os.path.isdir(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
with z.open(content) as unzipped_content:
logging.info('Extracting %s to %s (%s)', content, dest, dl_file)
with file(dest, 'wb') as dest_file:
dest_file.write(unzipped_content.read())
permissions = z.getinfo(content).external_attr >> 16
if permissions:
os.chmod(dest, permissions)
return True
def _ClearDir(self, dir):
"""Clears all files in |dir| except for hidden files and folders."""
for root, dirs, files in os.walk(dir):
# Skip hidden files and folders (like .svn and .git).
files = [f for f in files if f[0] != '.']
dirs[:] = [d for d in dirs if d[0] != '.']
for f in files:
os.remove(os.path.join(root, f))
def _ExtractBuilds(self):
for platform in self._platforms:
if os.path.exists('tmp_unzip'):
        os.unlink('tmp_unzip')
dest_dir = os.path.join('reference_builds', 'reference_builds',
BuildUpdater._PLATFORM_DEST_MAP[platform])
self._ClearDir(dest_dir)
for root, _, dl_files in os.walk(os.path.join('dl', platform)):
for dl_file in dl_files:
dl_file = os.path.join(root, dl_file)
if not self._UnzipFile(dl_file, dest_dir):
logging.info('Copying %s to %s', dl_file, dest_dir)
shutil.copy(dl_file, dest_dir)
def _SvnAddAndRemove(self):
svn_dir = os.path.join('reference_builds', 'reference_builds')
# List all changes without ignoring any files.
stat = BuildUpdater._GetCmdStatusAndOutput(['svn', 'stat', '--no-ignore'],
svn_dir)[1]
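# Typical `svn stat --no-ignore` lines look like (paths are illustrative):
#   ?       win/chrome-win32.zip     -> unversioned, needs `svn add`
#   I       mac/.DS_Store            -> ignored, added anyway because of --no-ignore
#   !       linux/old_build.zip      -> missing on disk, needs `svn delete`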
for line in stat.splitlines():
action, filename = line.split(None, 1)
# Add new and ignored files.
if action == '?' or action == 'I':
BuildUpdater._GetCmdStatusAndOutput(
['svn', 'add', filename], svn_dir)
elif action == '!':
BuildUpdater._GetCmdStatusAndOutput(
['svn', 'delete', filename], svn_dir)
filepath = os.path.join(svn_dir, filename)
if not os.path.isdir(filepath) and os.access(filepath, os.X_OK):
BuildUpdater._GetCmdStatusAndOutput(
['svn', 'propset', 'svn:executable', 'true', filename], svn_dir)
def DownloadAndUpdateBuilds(self):
self._DownloadBuilds()
self._FetchSvnRepos()
self._ExtractBuilds()
self._SvnAddAndRemove()
def ParseOptions(argv):
parser = optparse.OptionParser()
parser.set_usage('Usage: %prog VERSION [-p PLATFORMS]')
parser.add_option('-p', dest='platforms',
default='Win,Mac,Linux,Linux_x64',
help='Comma separated list of platforms to download '
'(as defined by the chromium builders).')
options, args = parser.parse_args(argv)
if len(args) != 2:
parser.print_help()
sys.exit(1)
version = args[1]
return version, options
def main(argv):
logging.getLogger().setLevel(logging.DEBUG)
version, options = ParseOptions(argv)
b = BuildUpdater(version, options)
b.DownloadAndUpdateBuilds()
logging.info('Successfully updated reference builds. Move to '
'reference_builds/reference_builds and make a change with gcl.')
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Pexego/alimentacion
|
refs/heads/7.0
|
rappel_management/rappel.py
|
2
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2012 Pexego Sistemas Informáticos All Rights Reserved
# $Marta Vázquez Rodríguez$ <marta@pexego.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
from tools.translate import _
import time
class rappel(osv.osv):
_name = 'rappel'
_columns = {
'name': fields.char('Name', size=60, required=True,readonly=True,states={'draft': [('readonly', False)]}),
'date_start': fields.date('Start Date', required=True,readonly=True,states={'draft': [('readonly', False)]}),
'date_stop': fields.date('Stop Date', required=True,readonly=True,states={'draft': [('readonly', False)]}),
'line_ids': fields.one2many('rappel.line','rappel_id', 'Rappel Lines',readonly=True,states={'draft': [('readonly', False)]}),
'state' : fields.selection([('draft','Draft'),('open','Open'),('cancel','Canceled'),('done','Done')],'State',readonly=True),
'journal_id': fields.many2one('account.journal', 'Refund Journal', domain="[('type','=','sale_refund')]", required=True,readonly=True,states={'draft': [('readonly', False)]}),
}
_defaults = {
'state': lambda *args: 'draft'
}
def action_open(self, cr, uid, ids, *args):
return True
def action_done(self, cr, uid, ids,group=True,type='out_refund', context=None):
# needs more thorough validation
# sale conditions remain to be checked
# needs improvement
invoice_obj = self.pool.get('account.invoice')
invoice_line_obj = self.pool.get('account.invoice.line')
partner_obj = self.pool.get('res.partner')
for rappel in self.browse(cr, uid, ids):
for line in rappel.line_ids:
qty = 0.0
price_unit = 0.0
query_params = (rappel.id,rappel.date_start,rappel.date_stop,)
query_cond = ""
if line.condition_product_id:
query_cond += " AND inv_line.product_id = %s"
query_params += (line.condition_product_id.id,)
if line.condition_category_id:
query_cond += " AND prod_template.categ_id = %s"
query_params += (line.condition_category_id.id,)
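# For each partner assigned to this rappel, aggregate the invoiced quantity and
# subtotal over open/paid customer invoices dated within the rappel period,
# optionally restricted to the line's product or category; max(invoice.id) is
# kept as a reference invoice for building the refund.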
cr.execute("""
SELECT max(invoice.id), sum(inv_line.quantity), sum(inv_line.price_subtotal)
FROM account_invoice invoice
LEFT JOIN res_partner partner ON (invoice.partner_id = partner.id)
LEFT JOIN account_invoice_line inv_line ON (invoice.id = inv_line.invoice_id)
LEFT JOIN product_product product ON (inv_line.product_id = product.id)
LEFT JOIN product_template prod_template ON (product.product_tmpl_id = prod_template.id)
WHERE partner.rappel_id = %s
AND (invoice.date_invoice BETWEEN %s AND %s)
AND invoice.type = 'out_invoice'
AND invoice.state in ('open','paid')
""" + query_cond + """
GROUP BY partner.id
""", query_params)
for res in cr.fetchall():
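# Settlement by quantity credits discount% of the invoiced quantity at the
# average unit price; settlement by amount credits a single line priced at
# discount% of the invoiced subtotal.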
if (line.qty_amount == 'qty' and res[1] >= line.condition_qty) or (line.qty_amount == 'amount' and res[2] >= line.condition_amount):
if line.qty_amount == 'qty':
qty = res[1] * line.discount / 100
price_unit = res[2] / res[1]
elif line.qty_amount == 'amount':
price_unit = res[2] * line.discount / 100
qty = 1.0
invoice_record = invoice_obj.browse(cr, uid, res[0], context)
new_invoice = {}
partner_id = invoice_record.partner_id.id
fpos = partner_obj.browse(cr, uid, partner_id).property_account_position
new_invoice.update({
'partner_id': partner_id,
'journal_id': rappel.journal_id.id,
'account_id': invoice_record.partner_id.property_account_receivable.id,
'address_contact_id': invoice_record.address_contact_id.id,
'address_invoice_id': invoice_record.address_invoice_id.id,
'type': 'out_refund',
'date_invoice': time.strftime('%Y-%m-%d'),
'state': 'draft',
'number': False,
'fiscal_position': fpos and fpos.id or False
})
invoice_id = invoice_obj.create(cr, uid, new_invoice,context=context)
account_id = line.condition_product_id and (line.condition_product_id.property_account_income and line.condition_product_id.property_account_income.id or (line.condition_product_id.categ_id.property_account_income_categ and line.condition_product_id.categ_id.property_account_income_categ.id or False)) or (line.condition_category_id.property_account_income_categ and line.condition_category_id.property_account_income_categ.id or False)
if not account_id:
account_id = rappel.journal_id.default_debit_account_id and rappel.journal_id.default_debit_account_id.id or False
if not account_id:
raise osv.except_osv(_('No account found'),_("OpenERP was not able to find an income account to put on the refund invoice line. Configure the default debit account on the selected refund journal."))
invoice_line_id = invoice_line_obj.create(cr, uid, {
'name': line.name,
'invoice_id': invoice_id,
'product_id': line.condition_product_id and line.condition_product_id.id or False,
'uos_id': line.condition_product_id and line.condition_product_id.uom_id.id or False,
'account_id': account_id,
'price_unit': price_unit,
'quantity': qty })
return True
rappel()
class rappel_line(osv.osv):
_name = 'rappel.line'
_columns = {
'name': fields.char('Name', size=60, required=True),
'sequence': fields.integer('Sequence', required=True),
'condition_category_id': fields.many2one('product.category', 'Category'),
'condition_product_id' : fields.many2one('product.product', 'Product'),
'qty_amount': fields.selection([('qty', 'By Qty.'),('amount', 'By Amount')], 'Settled', required=True),
'condition_amount' : fields.float('Min. Amount', required=True),
'condition_qty' : fields.float('Min. Quantity', required=True),
'discount' : fields.float('Discount (%)'),
'rappel_id': fields.many2one('rappel', 'Rappel'),
}
_defaults = {
'sequence': lambda *a: 5,
'condition_amount': lambda *a: 1.0,
'condition_qty': lambda *a: 1.0
}
rappel_line()
|
afbarnard/esal
|
refs/heads/master
|
esal/test/stories_test.py
|
1
|
# Tests features from stories
#
# Copyright (c) 2015 Aubrey Barnard. This is free software. See
# LICENSE for details.
import itertools as itools
import unittest
from . import data
from .. import engine
from .. import events
from .. import sequences
from .. import streams
# Selection predicates
def _has_warfarin(event):
return event.typ == 'd_warfarin'
def _is_unobserved(event):
return event.val is None
class StoryTests(unittest.TestCase):
def test_story001(self):
# Count all events
self.assertEqual(len(data.med_events),
engine.count(data.med_events))
# Count selected warfarin events
actual = engine.count(
engine.select(data.med_events, _has_warfarin))
self.assertEqual(2, actual)
def test_story002(self):
# Count distinct patients (sequence ID is field 0)
actual = engine.count(
engine.distinct(engine.project(data.med_events, (0,))))
self.assertEqual(10, actual)
# Count distinct event types (event type is field 3)
actual = engine.count(
engine.distinct(engine.project(data.med_events, (3,))))
self.assertEqual(len(data.drugs) + len(data.conds), actual)
def test_story003(self):
# Count distinct patients that have had warfarin
actual = engine.count(engine.distinct(engine.project(
engine.select(data.med_events, _has_warfarin),
(0,))))
self.assertEqual(2, actual)
# Count distinct types of unobserved events
actual = engine.count(engine.distinct(engine.project(
engine.select(data.med_events, _is_unobserved),
(3,))))
self.assertEqual(13, actual)
def test_story004(self):
# Order events in reverse by their name and then strip times to
# convert to sequences. Number events starting at 1.
rev_evs_to_seqs = sequences.make_timeline_to_sequence_flattener(
ordering=lambda evs: sorted(
evs, key=lambda e: e.typ, reverse=True),
start=1)
seqs = (
data.seq_concurrent_events,
data.seq_sorted,
data.seq_rand1_08,
)
index_maps = (
(2, 1, 0, 6, 5, 4, 3, 8, 7, 13, 12, 11, 10, 9),
(1, 2, 0, 3, 4, 6, 5, 9, 8, 7, 10),
(1, 0, 4, 3, 2, 5, 6, 7),
)
expected = []
for seq_idx, seq in enumerate(seqs):
for ev_num, ev_idx in enumerate(index_maps[seq_idx], 1):
ev = seq[ev_idx]
expected.append(events.Event(
ev.seq, ev_num, ev.end, ev.typ, ev.val))
actual = list(streams.map_sequences_as_events(
rev_evs_to_seqs, itools.chain.from_iterable(seqs)))
self.assertEqual(expected, actual)
|
jvkops/titanium_mobile
|
refs/heads/master
|
support/common/simplejson/tests/test_check_circular.py
|
414
|
from unittest import TestCase
import simplejson as json
def default_iterable(obj):
return list(obj)
class TestCheckCircular(TestCase):
def test_circular_dict(self):
dct = {}
dct['a'] = dct
self.assertRaises(ValueError, json.dumps, dct)
def test_circular_list(self):
lst = []
lst.append(lst)
self.assertRaises(ValueError, json.dumps, lst)
def test_circular_composite(self):
dct2 = {}
dct2['a'] = []
dct2['a'].append(dct2)
self.assertRaises(ValueError, json.dumps, dct2)
def test_circular_default(self):
json.dumps([set()], default=default_iterable)
self.assertRaises(TypeError, json.dumps, [set()])
def test_circular_off_default(self):
json.dumps([set()], default=default_iterable, check_circular=False)
self.assertRaises(TypeError, json.dumps, [set()], check_circular=False)
|
guymakam/Kodi-Israel
|
refs/heads/master
|
plugin.video.MakoTV/resources/lib/crypto/cipher/icedoll.py
|
7
|
# -*- coding: iso-8859-1 -*-
""" crypto.cipher.icedoll
Modification of Rijndael to provide infinite error extension.
The ith round of Rijndael is tapped and used to process the
subsequent block.
Changes to base Rijndael are marked with: '# --------------------------'
For Rijndael with N rounds, normally ECB mode is C[i] = Ek(N,P[i])
Modification is:
Fi = Ek(t,P[i-1]) ; Fi with i=0 is a nonce or a fixed value
C[i] = Fi^Ek(N,P[i]^Fi)
Copyright © (c) 2002 by Paul A. Lambert
Read LICENSE.txt for license information.
June 2002
February 2003 -> discovered Ron Rivest's "Tweakable Block Ciphers"
http://theory.lcs.mit.edu/~rivest/publications.html
These are about the same concept ....
"""
from crypto.cipher.base import BlockCipherWithIntegrity, padWithPadLen, noPadding
from crypto.cipher.rijndael import *
from binascii_plus import b2a_hex
from copy import deepcopy
class Icedoll(Rijndael):
""" IceDoll encryption algorithm
based on Rijndael, with added feedback for better integrity processing.
Note - no integrity check is built into Icedoll directly
"""
def __init__(self,key=None,padding=padWithPadLen(),keySize=16,blockSize=16,tapRound=6,extraRounds=6):
""" key, keysize, blockSize same as Rijndael, tapROund is feedback tap, """
self.tapRound = tapRound # <------- !!! change from Rijndael !!!
self.extraRounds = extraRounds # <------- !!! change from Rijndael !!!
self.name = 'ICEDOLL'
self.keySize = keySize
self.strength = keySize
self.blockSize = blockSize # blockSize is in bytes
self.padding = padding # change default to noPadding() to get normal ECB behavior
assert( keySize%4==0 and NrTable[4].has_key(keySize/4)),'key size must be 16,20,24,28 or 32 bytes'
assert( blockSize%4==0 and NrTable.has_key(blockSize/4)), 'block size must be 16,20,24,28 or 32 bytes'
self.Nb = self.blockSize/4 # Nb is number of columns of 32 bit words
self.Nk = keySize/4 # Nk is the key length in 32-bit words
self.Nr = NrTable[self.Nb][self.Nk]+extraRounds # <------- !!! change from Rijndael !!!
if key != None:
self.setKey(key)
def setKey(self, key):
""" Set a key and generate the expanded key """
assert( len(key) == (self.Nk*4) ), 'Key length must be same as keySize parameter'
self.__expandedKey = keyExpansion(self, key)
self.reset() # BlockCipher.reset()
def encryptBlock(self, plainTextBlock):
""" Encrypt a block, plainTextBlock must be a array of bytes [Nb by 4] """
self.state = self._toBlock(plainTextBlock)
if self.encryptBlockCount == 0: # first call, set feedback
self.priorFeedBack = self._toBlock(chr(0)*(4*self.Nb)) # <------- !!! change from Rijndael !!!
AddRoundKey(self, self.priorFeedBack) # <------- !!! change from Rijndael !!!
AddRoundKey(self, self.__expandedKey[0:self.Nb])
for round in range(1,self.Nr): #for round = 1 step 1 to Nr1
SubBytes(self)
ShiftRows(self)
MixColumns(self)
if round == self.tapRound:
nextFeedBack = deepcopy(self.state) # <------- !!! change from Rijndael !!!
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
SubBytes(self)
ShiftRows(self)
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
AddRoundKey(self, self.priorFeedBack) # <------- !!! change from Rijndael !!!
self.priorFeedBack = nextFeedBack # <------- !!! change from Rijndael !!!
return self._toBString(self.state)
def decryptBlock(self, encryptedBlock):
""" decrypt a block (array of bytes) """
self.state = self._toBlock(encryptedBlock)
if self.decryptBlockCount == 0: # first call, set feedback
self.priorFeedBack = self._toBlock( chr(0)*(4*self.Nb) ) # <------- !!! change from Rijndael !!!
AddRoundKey(self, self.priorFeedBack) # <------- !!! change from Rijndael !!!
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
for round in range(self.Nr-1,0,-1):
InvShiftRows(self)
InvSubBytes(self)
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
if round == self.tapRound:
nextFeedBack = deepcopy(self.state) # <------- !!! change from Rijndael !!!
InvMixColumns(self)
InvShiftRows(self)
InvSubBytes(self)
AddRoundKey(self, self.__expandedKey[0:self.Nb])
AddRoundKey(self, self.priorFeedBack) # <------- !!! change from Rijndael !!!
self.priorFeedBack = nextFeedBack # <------- !!! change from Rijndael !!!
return self._toBString(self.state)
|
saurabh6790/pow-app
|
refs/heads/master
|
utilities/doctype/address/templates/pages/addresses.py
|
60
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
no_cache = True
|
sparkslabs/kamaelia_
|
refs/heads/master
|
Sketches/PT/BouncingCatGame/likesprite.py
|
3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Proper likefile control of a sprite handler
#
from likefile import LikeFile, schedulerThread
import time, Axon, os, random, pygame, math
from Sprites.BasicSprite import BasicSprite
from Sprites.SpriteScheduler import SpriteScheduler
from Kamaelia.UI.Pygame.EventHandler import EventHandler
from Simplegame import cat_location, screensize, border, background, screen_surface, randomFromRangeExcludingZero
bg = schedulerThread(slowmo=0.01).start()
global spritescheduler
class MyGamesEvents(EventHandler):
def __init__(self, cat_args, trace=1, ):
self.trace = 0
self.cat_args = cat_args
def keydown(self, unicode, key, mod, where):
if key == 113: # "Q"
raise "QUIT"
class CatSprite(BasicSprite):
def main(self):
spritescheduler.allsprites.add(self)
while True:
self.pause()
yield 1
def make_cat(cat_location, screensize, border):
# Get the cat again!
files = list()
for x in os.listdir("pictures"):
if x not in ("README","CVS",".svn"):
files.append(x)
image_location = files[random.randint(0,len(files)-1)]
cat_surface = pygame.image.load("pictures/"+image_location)
cat = cat_surface.convert()
cat.set_colorkey((255,255,255), pygame.RLEACCEL)
newCat = CatSprite(image=cat)
return newCat
cat_args = (cat_location, screensize, border)
spritescheduler = SpriteScheduler(cat_args, [], background, screen_surface, MyGamesEvents).activate()
newcat = make_cat(*cat_args)
the_sun = LikeFile(make_cat(*cat_args), extrainboxes = ("translation", "imaging"))
the_sun.activate()
planet = LikeFile(make_cat(*cat_args), extrainboxes = ("translation", "rotator", "imaging"))
planet.activate()
sun_position = tuple([x/2 for x in screensize])
planet_position = (screensize[0]/4.0, screensize[1]/2)
planet_velocity = (0.0, 10)
# ugh, I should be using numpy but it works, that's the important thing
# This is merely a test of likefile. Really, kamaelia components should be written for a physics simulation like this.
def acceleration(pos_planet, pos_sun):
g = 200 # fudge factor
# F = ma, but F is proportional to distance ** -2
# neatly removing the need to calculate a square root for the distance
direction = (pos_planet[0] - pos_sun[0], pos_planet[1] - pos_sun[1])
magnitude = direction[0] ** 2 + direction[1] ** 2
return tuple([g * x/magnitude for x in direction])
def apply_acceleration_to_velocity(velocity, accn):
return (velocity[0] + accn[0], velocity[1] + accn[1])
def apply_velocity_to_position(position, velocity):
return (position[0] + velocity[0], position[1] + velocity[1])
the_sun.put(sun_position, "translation")
while True:
time.sleep(0.01)
planet.put(planet_position, "translation")
accn = acceleration(sun_position, planet_position)
planet_velocity = apply_acceleration_to_velocity(planet_velocity, accn)
planet_position = apply_velocity_to_position(planet_position, planet_velocity)
time.sleep(5)
|
jefftc/changlab
|
refs/heads/master
|
web2py/gluon/storage.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Provides:
- List; like list but returns None instead of raising IndexError for out-of-range indexes
- Storage; like a dictionary that also allows `obj.foo` as a synonym for `obj['foo']`
"""
import cPickle
import portalocker
__all__ = ['List', 'Storage', 'Settings', 'Messages',
'StorageList', 'load_storage', 'save_storage']
class Storage(dict):
"""
A Storage object is like a dictionary except `obj.foo` can be used
in addition to `obj['foo']`, and setting obj.foo = None deletes item foo.
>>> o = Storage(a=1)
>>> print o.a
1
>>> o['a']
1
>>> o.a = 2
>>> print o['a']
2
>>> del o.a
>>> print o.a
None
"""
__slots__ = ()
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
__getitem__ = dict.get
__getattr__ = dict.get
__repr__ = lambda self: '<Storage %s>' % dict.__repr__(self)
# http://stackoverflow.com/questions/5247250/why-does-pickle-getstate-accept-as-a-return-value-the-very-instance-it-requi
__getstate__ = lambda self: None
__copy__ = lambda self: Storage(self)
def getlist(self, key):
"""
Return a Storage value as a list.
If the value is a list it will be returned as-is.
If object is None, an empty list will be returned.
Otherwise, [value] will be returned.
Example output for a query string of ?x=abc&y=abc&y=def
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getlist('x')
['abc']
>>> request.vars.getlist('y')
['abc', 'def']
>>> request.vars.getlist('z')
[]
"""
value = self.get(key, [])
return value if not value else \
value if isinstance(value, (list, tuple)) else [value]
def getfirst(self, key, default=None):
"""
Return the first or only value when given a request.vars-style key.
If the value is a list, its first item will be returned;
otherwise, the value will be returned as-is.
Example output for a query string of ?x=abc&y=abc&y=def
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getfirst('x')
'abc'
>>> request.vars.getfirst('y')
'abc'
>>> request.vars.getfirst('z')
"""
values = self.getlist(key)
return values[0] if values else default
def getlast(self, key, default=None):
"""
Returns the last or only value when
given a request.vars-style key.
If the value is a list, the last item will be returned;
otherwise, the value will be returned as-is.
Simulated output with a query string of ?x=abc&y=abc&y=def
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getlast('x')
'abc'
>>> request.vars.getlast('y')
'def'
>>> request.vars.getlast('z')
"""
values = self.getlist(key)
return values[-1] if values else default
PICKABLE = (str, int, long, float, bool, list, dict, tuple, set)
class StorageList(Storage):
"""
like Storage but missing elements default to [] instead of None
"""
def __getitem__(self, key):
return self.__getattr__(key)
def __getattr__(self, key):
if key in self:
return self.get(key)
else:
r = []
setattr(self, key, r)
return r
def load_storage(filename):
fp = None
try:
fp = portalocker.LockedFile(filename, 'rb')
storage = cPickle.load(fp)
finally:
if fp:
fp.close()
return Storage(storage)
def save_storage(storage, filename):
fp = None
try:
fp = portalocker.LockedFile(filename, 'wb')
cPickle.dump(dict(storage), fp)
finally:
if fp:
fp.close()
class Settings(Storage):
def __setattr__(self, key, value):
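# Once lock_keys is set, assigning a key that does not already exist raises;
# once lock_values is set, no existing value may be changed. The lock flags
# themselves can still be toggled.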
if key != 'lock_keys' and self['lock_keys'] and key not in self:
raise SyntaxError('setting key \'%s\' does not exist' % key)
if key != 'lock_values' and self['lock_values']:
raise SyntaxError('setting value cannot be changed: %s' % key)
self[key] = value
class Messages(Settings):
def __init__(self, T):
Storage.__init__(self, T=T)
def __getattr__(self, key):
value = self[key]
if isinstance(value, str):
return str(self.T(value))
return value
class FastStorage(dict):
"""
Eventually this should replace class Storage but causes memory leak
because of http://bugs.python.org/issue1469629
>>> s = FastStorage()
>>> s.a = 1
>>> s.a
1
>>> s['a']
1
>>> s.b
>>> s['b']
>>> s['b']=2
>>> s['b']
2
>>> s.b
2
>>> isinstance(s,dict)
True
>>> dict(s)
{'a': 1, 'b': 2}
>>> dict(FastStorage(s))
{'a': 1, 'b': 2}
>>> import pickle
>>> s = pickle.loads(pickle.dumps(s))
>>> dict(s)
{'a': 1, 'b': 2}
>>> del s.b
>>> del s.a
>>> s.a
>>> s.b
>>> s['a']
>>> s['b']
"""
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.__dict__ = self
def __getattr__(self, key):
return getattr(self, key) if key in self else None
def __getitem__(self, key):
return dict.get(self, key, None)
def copy(self):
self.__dict__ = {}
s = FastStorage(self)
self.__dict__ = self
return s
def __repr__(self):
return '<Storage %s>' % dict.__repr__(self)
def __getstate__(self):
return dict(self)
def __setstate__(self, sdict):
dict.__init__(self, sdict)
self.__dict__ = self
def update(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.__dict__ = self
class List(list):
"""
Like a regular Python list, except that a[i] returns None instead of raising
IndexError when i is out of bounds
"""
def __call__(self, i, default=None, cast=None, otherwise=None):
"""
request.args(0,default=0,cast=int,otherwise='http://error_url')
request.args(0,default=0,cast=int,otherwise=lambda:...)
"""
n = len(self)
if 0 <= i < n or -n <= i < 0:
value = self[i]
else:
value = default
if cast:
try:
value = cast(value)
except (ValueError, TypeError):
from http import HTTP, redirect
if otherwise is None:
raise HTTP(404)
elif isinstance(otherwise, str):
redirect(otherwise)
elif callable(otherwise):
return otherwise()
else:
raise RuntimeError("invalid otherwise")
return value
if __name__ == '__main__':
import doctest
doctest.testmod()
|
baroquebobcat/pants
|
refs/heads/master
|
contrib/go/src/python/pants/contrib/go/tasks/go_compile.py
|
1
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import functools
import os
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_method
from pants.util.strutil import safe_shlex_split
from pants.contrib.go.targets.go_target import GoTarget
from pants.contrib.go.tasks.go_binary_fingerprint_strategy import GoBinaryFingerprintStrategy
from pants.contrib.go.tasks.go_workspace_task import GoWorkspaceTask
class GoCompile(GoWorkspaceTask):
"""Compiles a Go package into either a library binary or executable binary.
GoCompile will populate the "bin/" and "pkg/" directories of each target's Go
workspace (see GoWorkspaceTask) with executables and library binaries respectively.
"""
@classmethod
def implementation_version(cls):
return super(GoCompile, cls).implementation_version() + [('GoCompile', 1)]
@classmethod
def register_options(cls, register):
super(GoCompile, cls).register_options(register)
# Build flags fingerprint is handled by a custom strategy to enable
# merging with task-specific flags.
register('--build-flags', default='', help='Build flags to pass to Go compiler.')
@classmethod
def product_types(cls):
return ['exec_binary', 'deployable_archives']
def execute(self):
get_build_flags_func = functools.partial(self._get_build_flags,
self.get_options().build_flags,
self.get_options().is_flagged('build_flags'))
fingerprint_strategy = GoBinaryFingerprintStrategy(get_build_flags_func)
self.context.products.safe_create_data('exec_binary', lambda: {})
with self.invalidated(self.context.targets(self.is_go),
invalidate_dependents=True,
fingerprint_strategy=fingerprint_strategy,
topological_order=True) as invalidation_check:
# Maps each local/remote library target to its compiled binary.
lib_binary_map = {}
go_exec_binary = self.context.products.get_data('exec_binary')
go_deployable_archive = self.context.products.get('deployable_archives')
for vt in invalidation_check.all_vts:
gopath = self.get_gopath(vt.target)
if not isinstance(vt.target, GoTarget):
continue
if not vt.valid:
self.ensure_workspace(vt.target)
self._sync_binary_dep_links(vt.target, gopath, lib_binary_map)
build_flags = get_build_flags_func(vt.target)
self._go_install(vt.target, gopath, build_flags)
if self.is_binary(vt.target):
subdir, extension = self._get_cross_compiling_subdir_and_extension(gopath)
binary_path = os.path.join(gopath, 'bin', subdir, os.path.basename(vt.target.address.spec_path) + extension)
go_exec_binary[vt.target] = binary_path
go_deployable_archive.add(vt.target, os.path.dirname(binary_path)).append(os.path.basename(binary_path))
else:
lib_binary_map[vt.target] = os.path.join(gopath, 'pkg', self.goos_goarch,
vt.target.import_path + '.a')
@classmethod
@memoized_method
def _get_build_flags(cls, build_flags_from_option, is_flagged, target):
"""Merge build flags with global < target < command-line order
Build flags can be defined as globals (in `pants.ini`), as arguments to a Target, and
via the command-line.
"""
# If self.get_options().build_flags returns a quoted string, remove the outer quotes,
# which happens for flags passed from the command-line.
if (build_flags_from_option.startswith('\'') and build_flags_from_option.endswith('\'')) or \
(build_flags_from_option.startswith('"') and build_flags_from_option.endswith('"')):
bfo = build_flags_from_option[1:-1]
else:
bfo = build_flags_from_option
global_build_flags, ephemeral_build_flags = ('', bfo) if is_flagged else (bfo, '')
target_build_flags = target.build_flags if getattr(target, 'build_flags', None) else ''
joined_build_flags = ' '.join([global_build_flags, target_build_flags, ephemeral_build_flags])
return cls._split_build_flags(joined_build_flags)
@staticmethod
def _split_build_flags(build_flags):
return safe_shlex_split(build_flags) # Visible for testing
def _go_install(self, target, gopath, build_flags):
"""Create and execute a `go install` command."""
args = build_flags + [target.import_path]
result, go_cmd = self.go_dist.execute_go_cmd(
'install', gopath=gopath, args=args,
workunit_factory=self.context.new_workunit,
workunit_name='install {}'.format(target.import_path),
workunit_labels=[WorkUnitLabel.COMPILER])
if result != 0:
raise TaskError('{} failed with exit code {}'.format(go_cmd, result))
def _sync_binary_dep_links(self, target, gopath, lib_binary_map):
"""Syncs symlinks under gopath to the library binaries of target's transitive dependencies.
:param Target target: Target whose transitive dependencies must be linked.
:param str gopath: $GOPATH of target whose "pkg/" directory must be populated with links
to library binaries.
:param dict<Target, str> lib_binary_map: Dictionary mapping a remote/local Go library to the
path of the compiled binary (the ".a" file) of the
library.
Required links to binary dependencies under gopath's "pkg/" dir are either created if
non-existent, or refreshed if the link is older than the underlying binary. Any pre-existing
links within gopath's "pkg/" dir that do not correspond to a transitive dependency of target
are deleted.
"""
required_links = set()
for dep in target.closure():
if dep == target:
continue
if not isinstance(dep, GoTarget):
continue
lib_binary = lib_binary_map[dep]
lib_binary_link = os.path.join(gopath, os.path.relpath(lib_binary, self.get_gopath(dep)))
safe_mkdir(os.path.dirname(lib_binary_link))
if os.path.islink(lib_binary_link):
if os.stat(lib_binary).st_mtime > os.lstat(lib_binary_link).st_mtime:
# The binary under the link was updated after the link was created. Refresh
# the link so the mtime (modification time) of the link is greater than the
# mtime of the binary. This stops Go from needlessly re-compiling the library.
os.unlink(lib_binary_link)
os.symlink(lib_binary, lib_binary_link)
else:
os.symlink(lib_binary, lib_binary_link)
required_links.add(lib_binary_link)
self.remove_unused_links(os.path.join(gopath, 'pkg'), required_links)
def _get_cross_compiling_subdir_and_extension(self, gopath):
# Note that environment variables don't invalidate the build graph, so changes to GOOS or GOARCH
# require a clean-all.
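# For example (illustrative): on a linux/amd64 host with GOOS=windows GOARCH=386
# in the environment this returns ('windows_386', '.exe'), matching the
# bin/<GOOS>_<GOARCH>/ subdirectory the go tool uses when cross-compiling; when
# host and target match, the subdir is just '.'.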
host_goos = self.go_dist.create_go_cmd('env', gopath=gopath, args=["GOHOSTOS"]).check_output().strip()
target_goos = self.go_dist.create_go_cmd('env', gopath=gopath, args=["GOOS"]).check_output().strip()
host_arch = self.go_dist.create_go_cmd('env', gopath=gopath, args=["GOHOSTARCH"]).check_output().strip()
target_arch = self.go_dist.create_go_cmd('env', gopath=gopath, args=["GOARCH"]).check_output().strip()
host_pair = "{}_{}".format(host_goos, host_arch)
target_pair = "{}_{}".format(target_goos, target_arch)
ext = ".exe" if target_goos == "windows" else ""
if host_pair != target_pair:
return (target_pair, ext)
return (".", ext)
|
JetBrains/intellij-community
|
refs/heads/master
|
python/testData/codeInsight/controlflow/Assert.py
|
83
|
assert isinstance(s, basestring), isinstance(x, int)
|
sayoun/workalendar
|
refs/heads/master
|
workalendar/usa/nevada.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from workalendar.core import FRI
from .core import UnitedStates
class Nevada(UnitedStates):
"""Nevada"""
include_thanksgiving_friday = True
thanksgiving_friday_label = "Family Day"
include_columbus_day = False
def get_variable_days(self, year):
days = super(Nevada, self).get_variable_days(year)
days.append(
(self.get_last_weekday_in_month(year, 10, FRI), "Nevada Day")
)
return days
|
jeremycline/pulp
|
refs/heads/master
|
server/test/unit/server/managers/auth/user/test_cud.py
|
8
|
import mock
from ..... import base
from pulp.server.auth.ldap_connection import LDAPConnection
from pulp.server.db.model.auth import User, Role
from pulp.server.db.model.criteria import Criteria
from pulp.server.managers import factory as manager_factory
from pulp.server.managers.auth.cert.cert_generator import SerialNumber
from pulp.server.managers.auth.role.cud import SUPER_USER_ROLE
import pulp.server.exceptions as exceptions
class UserManagerTests(base.PulpServerTests):
def setUp(self):
super(UserManagerTests, self).setUp()
# Hardcoded to /var/lib/pulp, so change here to avoid permissions issues
self.default_sn_path = SerialNumber.PATH
SerialNumber.PATH = '/tmp/sn.dat'
sn = SerialNumber()
sn.reset()
self.user_manager = manager_factory.user_manager()
self.user_query_manager = manager_factory.user_query_manager()
self.role_manager = manager_factory.role_manager()
self.cert_generation_manager = manager_factory.cert_generation_manager()
def tearDown(self):
super(UserManagerTests, self).tearDown()
SerialNumber.PATH = self.default_sn_path
def clean(self):
base.PulpServerTests.clean(self)
User.get_collection().remove()
Role.get_collection().remove()
def _test_generate_user_certificate(self):
# Setup
admin_user = self.user_manager.create_user('test-admin')
# pretend the user is logged in
manager_factory.principal_manager().set_principal(admin_user)
# Test
cert = self.user_manager.generate_user_certificate()
# Verify
self.assertTrue(cert is not None)
certificate = manager_factory.certificate_manager(content=cert)
cn = certificate.subject()['CN']
username, id = self.cert_generation_manager.decode_admin_user(cn)
self.assertEqual(username, admin_user['login'])
self.assertEqual(id, admin_user['id'])
def test_create(self):
# Setup
login = 'login-test'
clear_txt_pass = 'some password'
# Test
user = self.user_manager.create_user(login, clear_txt_pass, name="King of the World",
roles=['test-role'])
# Verify
self.assertTrue(user is not None)
user = self.user_query_manager.find_by_login(login)
self.assertTrue(user is not None)
self.assertNotEqual(clear_txt_pass, user['password'])
def test_duplicate(self):
# Setup
login = 'dupe-test'
clear_txt_pass = 'some password'
self.user_manager.create_user(login, clear_txt_pass)
# Test and verify
try:
self.user_manager.create_user(login, clear_txt_pass)
self.fail('User with an existing login did not raise an exception')
except exceptions.DuplicateResource, e:
self.assertTrue(login in e)
def test_user_list(self):
# Setup
login = 'login-test'
password = 'some password'
self.user_manager.create_user(login, password)
# Test
users = self.user_query_manager.find_all()
# Verify
self.assertTrue(len(users) == 1)
def test_delete(self):
# Setup
login = 'login-test'
password = 'some password'
user = self.user_manager.create_user(login, password)
# test
self.user_manager.delete_user(login)
# Verify
user = self.user_query_manager.find_by_login(login)
self.assertTrue(user is None)
def test_delete_last_superuser(self):
# Setup
login = 'admin'
password = 'admin'
# test
self.role_manager.create_role(role_id=SUPER_USER_ROLE)
self.user_manager.create_user(login, password, roles=[SUPER_USER_ROLE])
try:
self.user_manager.delete_user(login)
self.fail('Last superuser delete did not raise an exception')
except exceptions.PulpDataException, e:
self.assertTrue("last superuser" in str(e))
self.assertTrue(login in str(e))
# Verify
user = self.user_query_manager.find_by_login(login)
self.assertTrue(user is not None)
def test_update_password(self):
# Setup
login = 'login-test'
password = 'some password'
user = self.user_manager.create_user(login, password)
# Test
changed_password = 'some other password'
d = dict(password=changed_password)
user = self.user_manager.update_user(login, delta=d)
# Verify
user = self.user_query_manager.find_by_login(login)
self.assertTrue(user is not None)
self.assertTrue(user['password'] is not None)
self.assertNotEqual(changed_password, user['password'])
@mock.patch('pulp.server.db.connection.PulpCollection.query')
def test_find_by_criteria(self, mock_query):
criteria = Criteria()
self.user_query_manager.find_by_criteria(criteria)
mock_query.assert_called_once_with(criteria)
def test_add_user_from_ldap(self):
ldap_connection = LDAPConnection()
ldap_login = 'test-ldap-login'
ldap_name = 'test-ldap-name'
user = ldap_connection._add_from_ldap(username=ldap_login, userdata=({},
{'gecos': ldap_name}))
self.assertEqual(user['login'], ldap_login)
self.assertEqual(user['name'], ldap_name)
def test_add_user_from_ldap_unsupported_gecos(self):
# Make sure that if gecos is not a basestring with user's name in it, we default it to user
# login without raising any error
ldap_connection = LDAPConnection()
ldap_login = 'test-ldap-login'
ldap_gecos = ['blah', 'blah']
user = ldap_connection._add_from_ldap(username=ldap_login, userdata=({},
{'gecos': ldap_gecos}))
self.assertEqual(user['login'], ldap_login)
self.assertEqual(user['name'], ldap_login)
def test_get_admins(self):
# Setup
self.role_manager.create_role(role_id=SUPER_USER_ROLE)
self.user_manager.create_user('admin', 'hunter2', roles=[SUPER_USER_ROLE])
# Test
admins = self.user_manager.get_admins()
self.assertEquals(len(admins), 1)
self.assertEquals(admins[0]['name'], 'admin')
def test_get_admins_no_admins(self):
self.assertEquals(self.user_manager.get_admins(), None)
|
rmk135/objects
|
refs/heads/master
|
examples/miniapps/django/githubnavigator/__init__.py
|
2
|
"""Project package."""
from .containers import Container
from . import settings
container = Container()
container.config.from_dict(settings.__dict__)
|
goodwinnk/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/localflavor/se/utils.py
|
108
|
import re
import datetime
def id_number_checksum(gd):
"""
Calculates a Swedish ID number checksum, using the
"Luhn"-algoritm
"""
n = s = 0
for c in (gd['year'] + gd['month'] + gd['day'] + gd['serial']):
tmp = ((n % 2) and 1 or 2) * int(c)
if tmp > 9:
tmp = sum([int(i) for i in str(tmp)])
s += tmp
n += 1
if (s % 10) == 0:
return 0
return (((s / 10) + 1) * 10) - s
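# Worked example (editor's illustration, digits chosen arbitrarily): for
# gd = {'year': '81', 'month': '12', 'day': '18', 'serial': '987'} the
# doubled-and-summed digits are 7+1+2+2+2+8+9+8+5 = 44, so the function
# returns ((44 / 10) + 1) * 10 - 44 = 6 as the expected checksum digit.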
def validate_id_birthday(gd, fix_coordination_number_day=True):
"""
Validates the birth_day and returns the datetime.date object for
the birth_day.
If the date is an invalid birth day, a ValueError will be raised.
"""
today = datetime.date.today()
day = int(gd['day'])
if fix_coordination_number_day and day > 60:
day -= 60
if gd['century'] is None:
# The century was not specified and needs to be calculated from today's date
current_year = today.year
year = int(today.strftime('%Y')) - int(today.strftime('%y')) + int(gd['year'])
if ('%s%s%02d' % (gd['year'], gd['month'], day)) > today.strftime('%y%m%d'):
year -= 100
# If the person is older than 100 years
if gd['sign'] == '+':
year -= 100
else:
year = int(gd['century'] + gd['year'])
# Make sure the year is valid
# There are no Swedish personal identity numbers where year < 1800
if year < 1800:
raise ValueError
# ValueError will be raised for invalid dates
birth_day = datetime.date(year, int(gd['month']), day)
# birth_day must not be in the future
if birth_day > today:
raise ValueError
return birth_day
def format_personal_id_number(birth_day, gd):
# birth_day.strftime cannot be used, since it does not support dates < 1900
return unicode(str(birth_day.year) + gd['month'] + gd['day'] + gd['serial'] + gd['checksum'])
def format_organisation_number(gd):
if gd['century'] is None:
century = ''
else:
century = gd['century']
return unicode(century + gd['year'] + gd['month'] + gd['day'] + gd['serial'] + gd['checksum'])
def valid_organisation(gd):
return gd['century'] in (None, '16') and \
int(gd['month']) >= 20 and \
gd['sign'] in (None, '-') and \
gd['year'][0] in ('2', '5', '7', '8', '9') # group identifier
|
fengbaicanhe/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/localflavor/se/utils.py
|
108
|
import re
import datetime
def id_number_checksum(gd):
"""
Calculates a Swedish ID number checksum, using the
"Luhn"-algoritm
"""
n = s = 0
for c in (gd['year'] + gd['month'] + gd['day'] + gd['serial']):
tmp = ((n % 2) and 1 or 2) * int(c)
if tmp > 9:
tmp = sum([int(i) for i in str(tmp)])
s += tmp
n += 1
if (s % 10) == 0:
return 0
return (((s / 10) + 1) * 10) - s
def validate_id_birthday(gd, fix_coordination_number_day=True):
"""
Validates the birth_day and returns the datetime.date object for
the birth_day.
If the date is an invalid birth day, a ValueError will be raised.
"""
today = datetime.date.today()
day = int(gd['day'])
if fix_coordination_number_day and day > 60:
day -= 60
if gd['century'] is None:
# The century was not specified and needs to be calculated from today's date
current_year = today.year
year = int(today.strftime('%Y')) - int(today.strftime('%y')) + int(gd['year'])
if ('%s%s%02d' % (gd['year'], gd['month'], day)) > today.strftime('%y%m%d'):
year -= 100
# If the person is older than 100 years
if gd['sign'] == '+':
year -= 100
else:
year = int(gd['century'] + gd['year'])
# Make sure the year is valid
# There are no Swedish personal identity numbers where year < 1800
if year < 1800:
raise ValueError
# ValueError will be raised for invalid dates
birth_day = datetime.date(year, int(gd['month']), day)
# birth_day must not be in the future
if birth_day > today:
raise ValueError
return birth_day
def format_personal_id_number(birth_day, gd):
# birth_day.strftime cannot be used, since it does not support dates < 1900
return unicode(str(birth_day.year) + gd['month'] + gd['day'] + gd['serial'] + gd['checksum'])
def format_organisation_number(gd):
if gd['century'] is None:
century = ''
else:
century = gd['century']
return unicode(century + gd['year'] + gd['month'] + gd['day'] + gd['serial'] + gd['checksum'])
def valid_organisation(gd):
return gd['century'] in (None, '16') and \
int(gd['month']) >= 20 and \
gd['sign'] in (None, '-') and \
gd['year'][0] in ('2', '5', '7', '8', '9') # group identifier
|
itsjeyd/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/header_control/__init__.py
|
66
|
"""
This middleware is used for adjusting the headers in a response before it is sent to the end user.
This middleware is intended to sit as close as possible to the top of the middleware list,
so that it is one of the last pieces of middleware to touch the response, and thus can most accurately
adjust/control the headers of the response.
"""
def remove_headers_from_response(response, *headers):
"""Removes the given headers from the response using the header_control middleware."""
response.remove_headers = headers
def force_header_for_response(response, header, value):
"""Forces the given header for the given response using the header_control middleware."""
force_headers = {}
if hasattr(response, 'force_headers'):
force_headers = response.force_headers
force_headers[header] = value
response.force_headers = force_headers
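# Illustrative usage (editor's sketch; 'my_view' and its template are hypothetical):
# a view that wants its response left uncached and its Vary header stripped could do:
#
#     from openedx.core.djangoapps.header_control import (
#         force_header_for_response, remove_headers_from_response)
#
#     def my_view(request):
#         response = render(request, 'page.html')
#         force_header_for_response(response, 'Cache-Control', 'no-cache')
#         remove_headers_from_response(response, 'Vary')
#         return response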
|
iamutkarshtiwari/sympy
|
refs/heads/master
|
sympy/utilities/magic.py
|
123
|
"""Functions that involve magic. """
from __future__ import print_function, division
def pollute(names, objects):
"""Pollute the global namespace with symbols -> objects mapping. """
from inspect import currentframe
frame = currentframe().f_back.f_back
try:
for name, obj in zip(names, objects):
frame.f_globals[name] = obj
finally:
del frame # break cyclic dependencies as stated in inspect docs
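# Editor's note: because the frame two levels up is targeted, pollute() is meant
# to be called from inside a helper function; the injected names then appear in
# the globals of whatever module called that helper (e.g. an interactive session
# setup routine), as in pollute(['x'], [Symbol('x')]).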
|
horance-liu/tensorflow
|
refs/heads/master
|
tensorflow/contrib/linear_optimizer/python/kernel_tests/sdca_ops_test.py
|
76
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SdcaModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel
from tensorflow.contrib.linear_optimizer.python.ops.sparse_feature_column import SparseFeatureColumn
from tensorflow.core.example import example_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_sdca_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import googletest
_MAX_ITERATIONS = 100
_SHARD_NUMBERS = [None, 1, 3, 10]
_NUM_LOSS_PARTITIONS = [2, 4]
def make_example_proto(feature_dict, target, value=1.0):
e = example_pb2.Example()
features = e.features
features.feature['target'].float_list.value.append(target)
for key, values in feature_dict.items():
features.feature[key + '_indices'].int64_list.value.extend(values)
features.feature[key + '_values'].float_list.value.extend([value] *
len(values))
return e
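# For instance (illustrative): make_example_proto({'age': [0], 'gender': [1]}, 1)
# yields an Example with features 'target' = [1.0], 'age_indices' = [0],
# 'age_values' = [1.0], 'gender_indices' = [1] and 'gender_values' = [1.0];
# make_example_dict below parses these back into SparseFeatureColumns.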
def make_example_dict(example_protos, example_weights):
def parse_examples(example_protos):
features = {
'target':
parsing_ops.FixedLenFeature(
shape=[1], dtype=dtypes.float32, default_value=0),
'age_indices':
parsing_ops.VarLenFeature(dtype=dtypes.int64),
'age_values':
parsing_ops.VarLenFeature(dtype=dtypes.float32),
'gender_indices':
parsing_ops.VarLenFeature(dtype=dtypes.int64),
'gender_values':
parsing_ops.VarLenFeature(dtype=dtypes.float32)
}
return parsing_ops.parse_example(
[e.SerializeToString() for e in example_protos], features)
parsed = parse_examples(example_protos)
sparse_features = [
SparseFeatureColumn(
array_ops.reshape(
array_ops.split(
value=parsed['age_indices'].indices,
num_or_size_splits=2,
axis=1)[0], [-1]),
array_ops.reshape(parsed['age_indices'].values, [-1]),
array_ops.reshape(parsed['age_values'].values, [-1])),
SparseFeatureColumn(
array_ops.reshape(
array_ops.split(
value=parsed['gender_indices'].indices,
num_or_size_splits=2,
axis=1)[0], [-1]),
array_ops.reshape(parsed['gender_indices'].values, [-1]),
array_ops.reshape(parsed['gender_values'].values, [-1]))
]
return dict(
sparse_features=sparse_features,
dense_features=[],
example_weights=example_weights,
example_labels=array_ops.reshape(parsed['target'], [-1]),
example_ids=['%d' % i for i in range(0, len(example_protos))])
def make_variable_dict(max_age, max_gender):
# TODO(sibyl-toe9oF2e): Figure out how to derive max_age & max_gender from
# examples_dict.
age_weights = variables_lib.Variable(
array_ops.zeros(
[max_age + 1], dtype=dtypes.float32))
gender_weights = variables_lib.Variable(
array_ops.zeros(
[max_gender + 1], dtype=dtypes.float32))
return dict(
sparse_features_weights=[age_weights, gender_weights],
dense_features_weights=[])
def make_dense_examples_and_variables_dicts(dense_features_values, weights,
labels):
"""Creates examples and variables dictionaries for dense features.
Variable shapes are inferred from the list of dense feature values passed as
arguments.
Args:
dense_features_values: The values of the dense features
weights: The example weights.
labels: The example labels.
Returns:
One dictionary for the examples and one for the variables.
"""
dense_tensors = []
dense_weights = []
for dense_feature in dense_features_values:
dense_tensor = ops.convert_to_tensor(dense_feature, dtype=dtypes.float32)
check_shape_op = control_flow_ops.Assert(
math_ops.less_equal(array_ops.rank(dense_tensor), 2),
['dense_tensor shape must be [batch_size, dimension] or [batch_size]'])
# Reshape to [batch_size, dense_column_dimension].
with ops.control_dependencies([check_shape_op]):
dense_tensor = array_ops.reshape(
dense_tensor, [dense_tensor.get_shape().as_list()[0], -1])
dense_tensors.append(dense_tensor)
# Add variables of shape [feature_column_dimension].
dense_weights.append(
variables_lib.Variable(
array_ops.zeros(
[dense_tensor.get_shape().as_list()[1]], dtype=dtypes.float32)))
examples_dict = dict(
sparse_features=[],
dense_features=dense_tensors,
example_weights=weights,
example_labels=labels,
example_ids=['%d' % i for i in range(0, len(labels))])
variables_dict = dict(
sparse_features_weights=[], dense_features_weights=dense_weights)
return examples_dict, variables_dict
def get_binary_predictions_for_logistic(predictions, cutoff=0.5):
return math_ops.cast(
math_ops.greater_equal(predictions,
array_ops.ones_like(predictions) * cutoff),
dtype=dtypes.int32)
def get_binary_predictions_for_hinge(predictions):
return math_ops.cast(
math_ops.greater_equal(predictions, array_ops.zeros_like(predictions)),
dtype=dtypes.int32)
# TODO(sibyl-Mooth6ku): Add tests that exercise L1 and Shrinking.
# TODO(sibyl-vie3Poto): Refactor tests to avoid repetition of boilerplate code.
class SdcaModelTest(TensorFlowTestCase):
"""Base SDCA optimizer test class for any loss type."""
def _single_threaded_test_session(self):
config = config_pb2.ConfigProto(
inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
return self.test_session(use_gpu=False, config=config)
class SdcaWithLogisticLossTest(SdcaModelTest):
"""SDCA optimizer test class for logistic loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# The high tolerance in unregularized_loss comparisons is due to the
# fact that it's possible to trade off unregularized_loss vs.
# regularization and still have a sum that is quite close to the
# optimal regularized_loss value. SDCA's duality gap only ensures that
# the regularized_loss is within 0.01 of optimal.
# 0.525457 is the optimal regularized_loss.
# 0.411608 is the unregularized_loss at that optimum.
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(
0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
def testDistributedSimple(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
for num_loss_partitions in _NUM_LOSS_PARTITIONS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss',
num_table_shards=num_shards,
num_loss_partitions=num_loss_partitions)
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
def Minimize():
with self._single_threaded_test_session():
for _ in range(_MAX_ITERATIONS):
train_op.run()
threads = []
for _ in range(num_loss_partitions):
threads.append(threading.Thread(target=Minimize))
threads[-1].start()
for t in threads:
t.join()
lr.update_weights(train_op).run()
# The high tolerance in unregularized_loss comparisons is due to the
# fact that it's possible to trade off unregularized_loss vs.
# regularization and still have a sum that is quite close to the
# optimal regularized_loss value. SDCA's duality gap only ensures
# that the regularized_loss is within 0.01 of optimal.
# 0.525457 is the optimal regularized_loss.
# 0.411608 is the unregularized_loss at that optimum.
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertTrue(lr.approximate_duality_gap().eval() < 0.02)
def testSimpleNoL2(self):
# Same as test above (so comments from above apply) but without an L2.
# The algorithm should behave as if we have an L2 of 1 in optimization but
# 0 in regularized_loss.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=0,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# There is neither L1 nor L2 loss, so regularized and unregularized
# losses should be exactly the same.
self.assertAllClose(0.40244, unregularized_loss.eval(), atol=0.01)
self.assertAllClose(0.40244, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(
0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
def testSomeUnweightedExamples(self):
# Setup test data with 4 examples, but should produce the same
# results as testSimple.
example_protos = [
# Will be used.
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
# Will be ignored.
make_example_proto({
'age': [1],
'gender': [0]
}, 0),
# Will be used.
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
# Will be ignored.
make_example_proto({
'age': [1],
'gender': [0]
}, 1),
]
example_weights = [1.0, 0.0, 1.0, 0.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
# Only use examples 0 and 2
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllClose([0, 1, 1, 1], predicted_labels.eval())
self.assertAllClose(
0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)

  def testFractionalExampleLabel(self):
# Setup test data with 1 positive, and 1 mostly-negative example.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0.1),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
with self.assertRaisesOpError(
'Only labels of 0.0 or 1.0 are supported right now.'):
lr.minimize().run()

  def testImbalanced(self):
# Setup test data with 1 positive, and 3 negative examples.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [2],
'gender': [0]
}, 0),
make_example_proto({
'age': [3],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0, 1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(3, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
self.assertAllClose(
0.226487 + 0.102902, unregularized_loss.eval(), atol=0.08)
self.assertAllClose(0.328394 + 0.131364, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval())
self.assertAllClose(
0.0, lr.approximate_duality_gap().eval(), rtol=2e-2, atol=1e-2)

  def testImbalancedWithExampleWeights(self):
# Setup test data with 1 positive, and 1 negative example.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [3.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
self.assertAllClose(0.284860, unregularized_loss.eval(), atol=0.08)
self.assertAllClose(0.408044, loss.eval(), atol=0.012)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(
0.0, lr.approximate_duality_gap().eval(), rtol=2e-2, atol=1e-2)

  def testInstancesOfOneClassOnly(self):
# Setup test data with 1 positive (ignored), and 1 negative example.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [0]
}, 1), # Shares gender with the instance above.
]
example_weights = [1.0, 0.0] # Second example "omitted" from training.
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 0], predicted_labels.eval())
self.assertAllClose(
0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)

  def testOutOfRangeSparseFeatures(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(0, 0)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
train_op = lr.minimize()
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
'indices.*'):
train_op.run()

  def testOutOfRangeDenseFeatures(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]],
weights=[20.0, 10.0],
labels=[1.0, 0.0])
# Replace with a variable of size 1 instead of 2.
variables['dense_features_weights'] = [
variables_lib.Variable(array_ops.zeros(
[1], dtype=dtypes.float32))
]
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
train_op = lr.minimize()
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'More dense features than we have parameters for.*'):
train_op.run()

  # TODO(katsiaspis): add a test for the case when examples at the end of an
  # epoch are repeated, since example id may be duplicated.


class SdcaWithLinearLossTest(SdcaModelTest):
"""SDCA optimizer test class for linear (squared) loss."""

  def testSimple(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, -10.0),
make_example_proto({
'age': [1],
'gender': [1]
}, 14.0),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
      # Predictions should be 2/3 of label due to minimizing regularized loss:
      #   (label - 2 * weight)^2 / 2 + L2 * weight^2
      # (both weights are equal, so the L2 penalty L2 * ||weights||^2 / 2
      # reduces to L2 * weight^2, and L2 = 1 here).
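      # Working it out: d/dw [(label - 2w)^2 / 2 + w^2] = -2 * (label - 2w) + 2w
      # vanishes at w = label / 3, so the prediction 2w is 2/3 of the label
      # (-20/3 and 28/3 below).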
self.assertAllClose(
[-20.0 / 3.0, 28.0 / 3.0], predictions.eval(), rtol=0.005)
# Approximate gap should be very close to 0.0. (In fact, because the gap
# is only approximate, it is likely that upon convergence the duality gap
# can have a tiny negative value).
self.assertAllClose(0.0, lr.approximate_duality_gap().eval(), atol=1e-2)

  def testL2Regularization(self):
# Setup test data
example_protos = [
# 2 identical examples
make_example_proto({
'age': [0],
'gender': [0]
}, -10.0),
make_example_proto({
'age': [0],
'gender': [0]
}, -10.0),
# 2 more identical examples
make_example_proto({
'age': [1],
'gender': [1]
}, 14.0),
make_example_proto({
'age': [1],
'gender': [1]
}, 14.0),
]
example_weights = [1.0, 1.0, 1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=16,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
      # Predictions should be 1/5 of label due to minimizing regularized loss:
      #   (label - 2 * weight)^2 + L2 * weight^2   with L2 = 16
      # (each pair of identical examples contributes (label - 2 * weight)^2 to
      # the data term).
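      # Working it out: d/dw [(label - 2w)^2 + 16 * w^2] = -4 * (label - 2w) + 32w
      # vanishes at w = label / 10, so the prediction 2w is label / 5.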
optimal1 = -10.0 / 5.0
optimal2 = 14.0 / 5.0
self.assertAllClose(
[optimal1, optimal1, optimal2, optimal2],
predictions.eval(),
rtol=0.01)

  def testL1Regularization(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, -10.0),
make_example_proto({
'age': [1],
'gender': [1]
}, 14.0),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=4.0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
prediction = lr.predictions(examples)
loss = lr.regularized_loss(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
      # Predictions should be -4.0, 20/3 due to minimizing regularized loss:
      #   (label - 2 * weight)^2 / 2 + L2 * weight^2 + L1 * 2 * |weight|
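      # Working it out for the positive label: -2 * (14 - 2w) + 2w + 8 = 0 gives
      # w = 10/3 and prediction 20/3; for the negative label the L1 term flips
      # sign and w = -2, giving prediction -4.0.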
self.assertAllClose([-4.0, 20.0 / 3.0], prediction.eval(), rtol=0.08)
      # The loss should be the per-example regularized loss from above at the
      # optimal weights, summed over examples and normalized by the sum of
      # example weights.
self.assertAllClose(308.0 / 6.0, loss.eval(), atol=0.01)

  def testFeatureValues(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, -10.0, -2.0),
make_example_proto({
'age': [1],
'gender': [1]
}, 14.0, 2.0),
]
example_weights = [5.0, 3.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# There are 4 (sparse) variable weights to be learned. 2 for age and 2 for
# gender. Let w_1, w_2 be age weights, w_3, w_4 be gender weights, y_1,
# y_2 be the labels for examples 1 and 2 respectively and s_1, s_2 the
# corresponding *example* weights. With the given feature values, the loss
# function is given by:
# s_1/2(y_1 + 2w_1 + 2w_3)^2 + s_2/2(y_2 - 2w_2 - 2w_4)^2
# + \lambda/2 (w_1^2 + w_2^2 + w_3^2 + w_4^2). Solving for the optimal, it
# can be verified that:
# w_1* = w_3* = -2.0 s_1 y_1/(\lambda + 8 s_1) and
# w_2* = w_4* = 2 \cdot s_2 y_2/(\lambda + 8 s_2). Equivalently, due to
      # regularization and example weights, each prediction equals its label
      # scaled by a factor of 8 \cdot s_i /(\lambda + 8 \cdot s_i).
self.assertAllClose(
[-10 * 40.0 / 41.0, 14.0 * 24 / 25.0], predictions.eval(), atol=0.01)

  def testDenseFeaturesWithDefaultWeights(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0], [0.0]], [0.0, 1.0]],
weights=[1.0, 1.0],
labels=[10.0, -5.0])
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# The loss function for these particular features is given by:
# 1/2(label_1-w_1)^2 + 1/2(label_2-w_2)^2 + \lambda/2 (w_1^2 + w_2^2). So,
# differentiating wrt to w_1, w_2 yields the following optimal values:
# w_1* = label_1/(\lambda + 1)= 10/2, w_2* =label_2/(\lambda + 1)= -5/2.
# In this case the (unnormalized regularized) loss will be:
# 1/2(10-5)^2 + 1/2(5-5/2)^2 + 1/2(5^2 + (5/2)^2) = 125.0/4. The actual
# loss should be further normalized by the sum of example weights.
self.assertAllClose([5.0, -2.5], predictions.eval(), rtol=0.01)
loss = lr.regularized_loss(examples)
self.assertAllClose(125.0 / 8.0, loss.eval(), atol=0.01)

  def testDenseFeaturesWithArbitraryWeights(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]],
weights=[20.0, 10.0],
labels=[10.0, -5.0])
options = dict(
symmetric_l2_regularization=5.0,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# The loss function for these particular features is given by:
# 1/2 s_1 (label_1-w_1)^2 + 1/2 s_2(label_2-w_2)^2 +
# \lambda/2 (w_1^2 + w_2^2) where s_1, s_2 are the *example weights. It
# turns out that the optimal (variable) weights are given by:
# w_1* = label_1 \cdot s_1/(\lambda + s_1)= 8.0 and
# w_2* =label_2 \cdot s_2/(\lambda + s_2)= -10/3.
# In this case the (unnormalized regularized) loss will be:
# s_1/2(8-10)^2 + s_2/2(5-10/3)^2 + 5.0/2(8^2 + (10/3)^2) = 2175.0/9. The
# actual loss should be further normalized by the sum of example weights.
self.assertAllClose([8.0, -10.0 / 3], predictions.eval(), rtol=0.01)
loss = lr.regularized_loss(examples)
self.assertAllClose(2175.0 / 270.0, loss.eval(), atol=0.01)


class SdcaWithHingeLossTest(SdcaModelTest):
"""SDCA optimizer test class for hinge loss."""

  def testSimple(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
# Before minimization, the weights default to zero. There is no loss due
# to regularization, only unregularized loss which is 0.5 * (1+1) = 1.0.
predictions = model.predictions(examples)
self.assertAllClose([0.0, 0.0], predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(1.0, unregularized_loss.eval())
self.assertAllClose(1.0, regularized_loss.eval())
      # After minimization, the model perfectly separates the data points. There
# are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender (say w3
# and w4). Solving the system w1 + w3 = 1.0, w2 + w4 = -1.0 and minimizing
# wrt to \|\vec{w}\|_2, gives w1=w3=1/2 and w2=w4=-1/2. This gives 0.0
# unregularized loss and 0.25 L2 loss.
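      # Concretely, with w1 = w3 = 1/2 and w2 = w4 = -1/2 the L2 term is
      # 1.0 * (4 * 0.25) / 2 = 0.5, which normalized by the sum of example
      # weights (2.0) gives the 0.25 regularized_loss asserted below.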
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
binary_predictions = get_binary_predictions_for_hinge(predictions)
self.assertAllEqual([-1.0, 1.0], predictions.eval())
self.assertAllEqual([0, 1], binary_predictions.eval())
self.assertAllClose(0.0, unregularized_loss.eval())
self.assertAllClose(0.25, regularized_loss.eval(), atol=0.05)

  def testDenseFeaturesPerfectlySeparable(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[1.0, 1.0], [1.0, -1.0]],
weights=[1.0, 1.0],
labels=[1.0, 0.0])
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
self.assertAllClose([1.0, -1.0], predictions.eval(), atol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
# (1.0, 1.0) and (1.0, -1.0) are perfectly separable by x-axis (that is,
# the SVM's functional margin >=1), so the unregularized loss is ~0.0.
# There is only loss due to l2-regularization. For these datapoints, it
# turns out that w_1~=0.0 and w_2~=1.0 which means that l2 loss is ~0.25.
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.0, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.25, regularized_loss.eval(), atol=0.02)

  def testDenseFeaturesSeparableWithinMargins(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0, 0.5], [1.0, -0.5]]],
weights=[1.0, 1.0],
labels=[1.0, 0.0])
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
# (1.0, 0.5) and (1.0, -0.5) are separable by x-axis but the datapoints
# are within the margins so there is unregularized loss (1/2 per example).
# For these datapoints, optimal weights are w_1~=0.0 and w_2~=1.0 which
# gives an L2 loss of ~0.25.
self.assertAllClose([0.5, -0.5], predictions.eval(), rtol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.5, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.75, regularized_loss.eval(), atol=0.02)

  def testDenseFeaturesWeightedExamples(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0], [1.0]], [[0.5], [-0.5]]],
weights=[3.0, 1.0],
labels=[1.0, 0.0])
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
# Point (1.0, 0.5) has higher weight than (1.0, -0.5) so the model will
# try to increase the margin from (1.0, 0.5). Due to regularization,
# (1.0, -0.5) will be within the margin. For these points and example
      # weights, the optimal weights are w_1~=0.4 and w_2~=1.2 which give an L2
      # loss of 0.5 * 0.25 * 1.6 = 0.2 (i.e. l2/2 * (w_1^2 + w_2^2) divided by
      # the sum of example weights). The binary predictions will be
# correct, but the boundary will be much closer to the 2nd point than the
# first one.
self.assertAllClose([1.0, -0.2], predictions.eval(), atol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.2, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.4, regularized_loss.eval(), atol=0.02)


class SdcaWithSmoothHingeLossTest(SdcaModelTest):
"""SDCA optimizer test class for smooth hinge loss."""

  def testSimple(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='smooth_hinge_loss')
model = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
# Before minimization, the weights default to zero. There is no loss due
# to regularization, only unregularized loss which is 0.5 * (1+1) = 1.0.
predictions = model.predictions(examples)
self.assertAllClose([0.0, 0.0], predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(1.0, unregularized_loss.eval())
self.assertAllClose(1.0, regularized_loss.eval())
      # After minimization, the model perfectly separates the data points. There
# are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender (say w3
# and w4). The minimization leads to w1=w3=1/3 and w2=w4=-1/3. This gives
# an unregularized hinge loss of 0.33 and a 0.11 L2 loss
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
binary_predictions = get_binary_predictions_for_hinge(predictions)
self.assertAllClose([-0.67, 0.67], predictions.eval(), atol=0.05)
self.assertAllEqual([0, 1], binary_predictions.eval())
self.assertAllClose(0.33, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.44, regularized_loss.eval(), atol=0.02)


class SdcaFprintTest(SdcaModelTest):
"""Tests for the SdcaFprint op.

  This is one way of enforcing the platform-agnostic nature of SdcaFprint.
  We check against exact values, so this test can run across different
  platforms. Note that it is fine for the expected values to change in the
  future if the implementation of SdcaFprint changes (i.e. this is *not* a
  frozen test).
  """

  def testFprint(self):
with self._single_threaded_test_session():
in_data = constant_op.constant(['abc', 'very looooooong string', 'def'])
out_data = gen_sdca_ops.sdca_fprint(in_data)
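      # sdca_fprint emits two int64 values per input string (presumably the two
      # 64-bit halves of a 128-bit fingerprint), hence the [3, 2] shaped
      # expected values below.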
self.assertAllEqual([[4143508125394299908, -6879828354153669051],
[5849691694103072671, -4874542629849009556],
[603227410218889250, 8762207001949257490]],
out_data.eval())


if __name__ == '__main__':
googletest.main()
|
ml-lab/NAMAS
|
refs/heads/master
|
tuning/SDecoder_test.py
|
9
|
#
# Copyright (c) 2015, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
# Author: Alexander M Rush <srush@seas.harvard.edu>
#         Sumit Chopra <spchopra@fb.com>
#         Jason Weston <jase@fb.com>

import os
import sys
#@lint-avoid-python-3-compatibility-imports
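
# Fixed decoder arguments: the source file and the trained model come from the
# command line; the generated title length is passed to run.lua as -length.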
d = {"src": sys.argv[1],
"model": sys.argv[2],
"title_len": 14}

for l in open("tuning/blank.params"):
f, val = l.strip().split()
d[f] = val
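
# The remaining tuning parameters (LM weight plus the unigram/bigram/trigram,
# length and unorder bonuses) are read from tuning/blank.params as
# "name value" pairs and substituted into the command template below.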
cmd = "cd $ABS; $CUTH $ABS/summary/run.lua -modelFilename {model} " + \
"-inputf {src} -recombine " + \
"-length {title_len} -blockRepeatWords " + \
"-lmWeight {LM} -unigramBonus {uni} -bigramBonus {bi} " + \
"-trigramBonus {tri} -lengthBonus {length} -unorderBonus {ooo} "
os.system(cmd.format(**d))
|