import os
from settings.app_settings import AppSettings
from settings.settings import Settings
from utils.convert import Convert
class Algorithm:
def __init__(self, parent=None):
self._children = []
self._output = None
if parent is not None:
parent.add_children(self)
self.lang = {}
self.config = {}
def name(self):
return self.__class__.__name__
def set_children(self, children):
self._children = Convert.to_list(children)
def get_children(self):
return self._children
def add_children(self, children):
for child in Convert.to_list(children):
if child not in self._children:
self._children += [child]
def update(self, data):
self._output = data
def get_output(self):
return self._output
def update_all(self, data):
self.update(data)
if self.get_output() is None:
return False
for child in self.get_children():
child.update_all(self.get_output())
return True
def load_settings(self, plugin_filename):
        filename = os.path.join(os.path.dirname(plugin_filename),
                                Convert.to_underscored(self.name()))
# Load language-specific settings and translations
lang_filename = filename + '.' + AppSettings.language.abbr + '.lang'
if os.path.isfile(lang_filename):
self.lang = Settings(lang_filename)
# Load plugin configuration (language-independent)
cfg_filename = filename + '.cfg'
if os.path.isfile(cfg_filename):
self.config = Settings(cfg_filename)
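# A minimal usage sketch (hypothetical, and assuming Convert.to_list wraps a
# single object into a list): update_all() pushes data through the tree, so
# every descendant receives its parent's output.
if __name__ == '__main__':
    root = Algorithm()
    child = Algorithm(parent=root)
    grandchild = Algorithm(parent=child)
    root.update_all([1, 2, 3])   # propagates to child, then grandchild
    assert grandchild.get_output() == [1, 2, 3]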
| {
"content_hash": "4c9d3081796c44ae7bc18297ea210059",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 95,
"avg_line_length": 30.444444444444443,
"alnum_prop": 0.6082725060827251,
"repo_name": "fpohtmeh/loki",
"id": "14cf47ccfb51471076f8dd545f7666728f2029be",
"size": "1644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algo/algorithm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23208"
}
],
"symlink_target": ""
} |
"""Make sure tests setup & fixtures are all fine"""
import requests
from .utils.http import check_response_ok
# todo: we should check all fixtures in here!
def test_site_read(ckan_url):
"""GET /site_read/ should return 200"""
api_url = ckan_url('/api/3/action/site_read')
response = requests.get(api_url)
data = check_response_ok(response)
assert data['result'] is True
# Call to an invalid URL should return 404
response = requests.get(ckan_url('/api/3/action/site_read/something'))
assert not response.ok
assert response.status_code == 404
| {
"content_hash": "c7ada70af8de4bcc2cc225054e85eca3",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 74,
"avg_line_length": 26.636363636363637,
"alnum_prop": 0.6877133105802048,
"repo_name": "opendatatrentino/ckan-api-client",
"id": "bce1f39fa960579110d9e95442e6cd33827175a8",
"size": "586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ckan_api_client/tests/test_00_startup_and_fixtures.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "7801"
},
{
"name": "Python",
"bytes": "250289"
},
{
"name": "Shell",
"bytes": "11387"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import pytest
@pytest.mark.models('en')
def test_issue1959(EN):
texts = ['Apple is looking at buying U.K. startup for $1 billion.']
# nlp = load_test_model('en_core_web_sm')
EN.add_pipe(clean_component, name='cleaner', after='ner')
doc = EN(texts[0])
    docs = list(EN.pipe(texts))
    assert doc == docs[0]
def clean_component(doc):
""" Clean up text. Make lowercase and remove punctuation and stopwords """
# Remove punctuation, symbols (#) and stopwords
doc = [tok.text.lower() for tok in doc if (not tok.is_stop
and tok.pos_ != 'PUNCT' and
tok.pos_ != 'SYM')]
doc = ' '.join(doc)
return doc
| {
"content_hash": "413897f9f67c5dea3ea009dec3de9620",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 78,
"avg_line_length": 36.31818181818182,
"alnum_prop": 0.571964956195244,
"repo_name": "aikramer2/spaCy",
"id": "0787af3b75a8f0e4eaddbfc69dd828c8941b53c6",
"size": "814",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "spacy/tests/regression/test_issue1959.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "103274"
},
{
"name": "C++",
"bytes": "161734"
},
{
"name": "CSS",
"bytes": "42943"
},
{
"name": "HTML",
"bytes": "902655"
},
{
"name": "JavaScript",
"bytes": "17993"
},
{
"name": "Python",
"bytes": "191529488"
},
{
"name": "Shell",
"bytes": "1068"
}
],
"symlink_target": ""
} |
"""Create sample PR curve summary data.
We have 3 classes: R, G, and B. We generate colors within RGB space from 3
normal distributions (1 at each corner of the color triangle: [255, 0, 0],
[0, 255, 0], and [0, 0, 255]).
The true label of each random color is associated with the normal distribution
that generated it.
Using 3 other normal distributions (over the distance each color is from a
corner of the color triangle - RGB), we then compute the probability that each
color belongs to the class. We use those probabilities to generate PR curves.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorboard.plugins.pr_curve import summary
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string('logdir', '/tmp/pr_curve_demo',
'Directory into which to write TensorBoard data.')
tf.flags.DEFINE_integer('steps', 10,
'Number of steps to generate for each PR curve.')
def start_runs(
logdir,
steps,
run_name,
thresholds,
mask_every_other_prediction=False):
"""Generate a PR curve with precision and recall evenly weighted.
Arguments:
logdir: The directory into which to store all the runs' data.
steps: The number of steps to run for.
run_name: The name of the run.
thresholds: The number of thresholds to use for PR curves.
mask_every_other_prediction: Whether to mask every other prediction by
alternating weights between 0 and 1.
"""
tf.reset_default_graph()
tf.set_random_seed(42)
# Create a normal distribution layer used to generate true color labels.
channel_distribution = tf.distributions.Normal(loc=0., scale=142.)
  # Sample the distribution to generate colors. Let's generate different
  # numbers of each color. The first dimension is the count of examples.
# Generate reds.
number_of_reds = 100
true_reds = tf.clip_by_value(
tf.concat([
255 - tf.abs(channel_distribution.sample([number_of_reds, 1])),
tf.abs(channel_distribution.sample([number_of_reds, 2]))
], axis=1),
0, 255)
# Generate greens.
number_of_greens = 200
true_greens = tf.clip_by_value(
tf.concat([
tf.abs(channel_distribution.sample([number_of_greens, 1])),
255 - tf.abs(channel_distribution.sample([number_of_greens, 1])),
tf.abs(channel_distribution.sample([number_of_greens, 1]))
], axis=1),
0, 255)
# Generate blues.
number_of_blues = 150
true_blues = tf.clip_by_value(
tf.concat([
tf.abs(channel_distribution.sample([number_of_blues, 2])),
255 - tf.abs(channel_distribution.sample([number_of_blues, 1]))
], axis=1),
0, 255)
# Assign each color a vector of 3 booleans based on its true label.
labels = tf.concat([
tf.tile(tf.constant([[True, False, False]]), (number_of_reds, 1)),
tf.tile(tf.constant([[False, True, False]]), (number_of_greens, 1)),
tf.tile(tf.constant([[False, False, True]]), (number_of_blues, 1)),
], axis=0)
# We introduce 3 normal distributions. They are used to predict whether a
# color falls under a certain class (based on distances from corners of the
# color triangle). The distributions vary per color. We have the distributions
# narrow over time.
initial_standard_deviations = [v + FLAGS.steps for v in (158, 200, 242)]
iteration = tf.placeholder(tf.int32, shape=[])
red_predictor = tf.distributions.Normal(
loc=0.,
scale=tf.cast(
initial_standard_deviations[0] - iteration,
dtype=tf.float32))
green_predictor = tf.distributions.Normal(
loc=0.,
scale=tf.cast(
initial_standard_deviations[1] - iteration,
dtype=tf.float32))
blue_predictor = tf.distributions.Normal(
loc=0.,
scale=tf.cast(
initial_standard_deviations[2] - iteration,
dtype=tf.float32))
# Make predictions (assign 3 probabilities to each color based on each color's
# distance to each of the 3 corners). We seek double the area in the right
# tail of the normal distribution.
examples = tf.concat([true_reds, true_greens, true_blues], axis=0)
probabilities_colors_are_red = (1 - red_predictor.cdf(
tf.norm(examples - tf.constant([255., 0, 0]), axis=1))) * 2
probabilities_colors_are_green = (1 - green_predictor.cdf(
tf.norm(examples - tf.constant([0, 255., 0]), axis=1))) * 2
probabilities_colors_are_blue = (1 - blue_predictor.cdf(
tf.norm(examples - tf.constant([0, 0, 255.]), axis=1))) * 2
predictions = (
probabilities_colors_are_red,
probabilities_colors_are_green,
probabilities_colors_are_blue
)
# This is the crucial piece. We write data required for generating PR curves.
# We create 1 summary per class because we create 1 PR curve per class.
for i, color in enumerate(('red', 'green', 'blue')):
description = ('The probabilities used to create this PR curve are '
'generated from a normal distribution. Its standard '
'deviation is initially %0.0f and decreases over time.' %
initial_standard_deviations[i])
weights = None
if mask_every_other_prediction:
# Assign a weight of 0 to every even-indexed prediction. Odd-indexed
# predictions are assigned a default weight of 1.
consecutive_indices = tf.reshape(
tf.range(tf.size(predictions[i])), tf.shape(predictions[i]))
weights = tf.cast(consecutive_indices % 2, dtype=tf.float32)
summary.op(
tag=color,
labels=labels[:, i],
predictions=predictions[i],
num_thresholds=thresholds,
weights=weights,
display_name='classifying %s' % color,
description=description)
merged_summary_op = tf.summary.merge_all()
events_directory = os.path.join(logdir, run_name)
sess = tf.Session()
writer = tf.summary.FileWriter(events_directory, sess.graph)
for step in xrange(steps):
feed_dict = {
iteration: step,
}
merged_summary = sess.run(merged_summary_op, feed_dict=feed_dict)
writer.add_summary(merged_summary, step)
writer.close()
def run_all(logdir, steps, thresholds, verbose=False):
"""Generate PR curve summaries.
Arguments:
logdir: The directory into which to store all the runs' data.
    steps: The number of steps to run for.
    thresholds: The number of thresholds to use for PR curves.
    verbose: Whether to print the names of runs to stdout during execution.
"""
# First, we generate data for a PR curve that assigns even weights for
# predictions of all classes.
run_name = 'colors'
if verbose:
print('--- Running: %s' % run_name)
start_runs(
logdir=logdir,
steps=steps,
run_name=run_name,
thresholds=thresholds)
# Next, we generate data for a PR curve that assigns arbitrary weights to
# predictions.
run_name = 'mask_every_other_prediction'
if verbose:
print('--- Running: %s' % run_name)
start_runs(
logdir=logdir,
steps=steps,
run_name=run_name,
thresholds=thresholds,
mask_every_other_prediction=True)
def main(unused_argv):
print('Saving output to %s.' % FLAGS.logdir)
run_all(FLAGS.logdir, FLAGS.steps, 50, verbose=True)
print('Done. Output saved to %s.' % FLAGS.logdir)
if __name__ == '__main__':
tf.app.run()
| {
"content_hash": "1d097e085cb79a3bead39268bcdfaed4",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 80,
"avg_line_length": 35.733333333333334,
"alnum_prop": 0.6671108742004265,
"repo_name": "agrubb/tensorboard",
"id": "5f9794fc248cbdf016068248086e4fb75f58bbbe",
"size": "8193",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorboard/plugins/pr_curve/pr_curve_demo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "744533"
},
{
"name": "Java",
"bytes": "26959"
},
{
"name": "JavaScript",
"bytes": "3438"
},
{
"name": "Protocol Buffer",
"bytes": "10291"
},
{
"name": "Python",
"bytes": "1120020"
},
{
"name": "Shell",
"bytes": "7322"
},
{
"name": "TypeScript",
"bytes": "838885"
}
],
"symlink_target": ""
} |
import urllib
import hashlib
from django import template
from django.conf import settings
from django.utils.safestring import mark_safe
from django.utils.encoding import force_bytes, force_text
from readthedocs.builds.models import Version
from readthedocs.projects.models import Project
from readthedocs.core.resolver import resolve
register = template.Library()
@register.filter
def gravatar(email, size=48):
"""hacked from djangosnippets.org, but basically given an email address
render an img tag with the hashed up bits needed for leetness
omgwtfstillreading
"""
url = "http://www.gravatar.com/avatar.php?%s" % urllib.urlencode({
'gravatar_id': hashlib.md5(email).hexdigest(),
'size': str(size)
})
return ('<img src="%s" width="%s" height="%s" alt="gravatar" '
'class="gravatar" border="0" />' % (url, size, size))
@register.simple_tag(name="doc_url")
def make_document_url(project, version=None, page=''):
if not project:
return ""
return resolve(project=project, version_slug=version, filename=page)
@register.filter(is_safe=True)
def restructuredtext(value, short=False):
try:
from docutils.core import publish_parts
except ImportError:
if settings.DEBUG:
raise template.TemplateSyntaxError(
"Error in 'restructuredtext' filter: "
"The Python docutils library isn't installed."
)
return force_text(value)
else:
docutils_settings = {
'raw_enabled': False,
'file_insertion_enabled': False,
}
docutils_settings.update(getattr(settings, 'RESTRUCTUREDTEXT_FILTER_SETTINGS', {}))
parts = publish_parts(source=force_bytes(value), writer_name="html4css1",
settings_overrides=docutils_settings)
out = force_text(parts["fragment"])
try:
if short:
out = out.split("\n")[0]
except IndexError:
pass
finally:
return mark_safe(out)
@register.filter
def get_project(slug):
    try:
        return Project.objects.get(slug=slug)
    except Project.DoesNotExist:
        return None
@register.filter
def get_version(slug):
    # Presumably meant to look up a Version, not a Project (copy-paste fix).
    try:
        return Version.objects.get(slug=slug)
    except Version.DoesNotExist:
        return None
@register.simple_tag
def url_replace(request, field, value):
dict_ = request.GET.copy()
dict_[field] = value
return dict_.urlencode()
@register.filter
def key(d, key_name):
return d[key_name]
| {
"content_hash": "e41acbe94aebd85d94b7b9e31b903079",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 91,
"avg_line_length": 27.395604395604394,
"alnum_prop": 0.636983553951063,
"repo_name": "techtonik/readthedocs.org",
"id": "1c07fd27255b6f9f6dd7bb3fd27111cd4eec21eb",
"size": "2493",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "readthedocs/core/templatetags/core_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "55493"
},
{
"name": "HTML",
"bytes": "194633"
},
{
"name": "JavaScript",
"bytes": "438957"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "893745"
},
{
"name": "Shell",
"bytes": "367"
}
],
"symlink_target": ""
} |
import constants.main_window_constants as const
import utils.utils as utils
import tkinter as tk
class MainGuiButtonsF(tk.Frame):
""" Main GUI buttons."""
def __init__(self,
parent,
info_eh,
create_data_set_eh,
train_eh,
test_eh,
help_eh,
exit_eh):
""" Creates and places the buttons"""
tk.Frame.__init__(self,
parent,
padx=const.MGB_PADX,
pady=const.MGB_PADY)
self._create_data_set_eh = create_data_set_eh
self._train_eh = train_eh
self._info_eh = info_eh
self._test_eh = test_eh
self._help_eh = help_eh
self._exit_eh = exit_eh
try:
self._load_icons()
except FileNotFoundError:
self._load_icons_failed()
self._create_frames()
self._create_buttons()
self._place_widgets()
#########################################################################
# Widget creation and placement
def _load_icons(self):
""" Loads the images that will be displayed in the buttons."""
self._img_info = utils.load_and_resize_image(
const.MGB_BTN_INFO_IMG_PATH,
const.MW_IMG_WIDTH,
const.MW_IMG_HEIGHT
)
self._img_data_set = utils.load_and_resize_image(
const.MGB_BTN_CREATE_IMG_PATH,
const.MW_IMG_WIDTH,
const.MW_IMG_HEIGHT
)
self._img_train_sess = utils.load_and_resize_image(
const.MGB_BTN_TRAIN_IMG_PATH,
const.MW_IMG_WIDTH,
const.MW_IMG_HEIGHT
)
self._img_test_sess = utils.load_and_resize_image(
const.MGB_BTN_TEST_IMG_PATH,
const.MW_IMG_WIDTH,
const.MW_IMG_HEIGHT
)
self._img_help = utils.load_and_resize_image(
const.MGB_BTN_HELP_IMG_PATH,
const.MW_IMG_WIDTH,
const.MW_IMG_HEIGHT
)
self._exit_image = utils.load_and_resize_image(
const.MGB_BTN_EXIT_IMG_PATH,
const.MW_IMG_WIDTH,
const.MW_IMG_HEIGHT
)
def _load_icons_failed(self):
""" Called when at least one of the images was not found."""
self._img_data_set = None
self._img_train_sess = None
self._img_test_sess = None
self._exit_image = None
self._img_help = None
self._img_info = None
def _create_frames(self):
""" Creates the frames."""
self._f_buttons = tk.Frame(
self,
padx=const.MW_FRAMES_PADX,
pady=const.MW_FRAMES_PADY
)
self._f_info = tk.Frame(
self._f_buttons,
padx=const.MW_FRAMES_PADX,
pady=const.MW_FRAMES_PADY
)
self._f_data_set = tk.Frame(
self._f_buttons,
padx=const.MW_FRAMES_PADX,
pady=const.MW_FRAMES_PADY
)
self._f_train = tk.Frame(
self._f_buttons,
padx=const.MW_FRAMES_PADX,
pady=const.MW_FRAMES_PADY
)
self._f_test = tk.Frame(
self._f_buttons,
padx=const.MW_FRAMES_PADX,
pady=const.MW_FRAMES_PADY
)
self._f_help = tk.Frame(
self._f_buttons,
padx=const.MW_FRAMES_PADX,
pady=const.MW_FRAMES_PADY
)
self._f_exit = tk.Frame(
self._f_buttons,
padx=const.MW_FRAMES_PADX,
pady=const.MW_FRAMES_PADY
)
def _create_buttons(self):
""" Creates the buttons."""
self._btn_info = tk.Button(
self._f_info,
state='disabled', # TODO
padx=const.MW_BTN_PADX,
pady=const.MW_BTN_PADY,
text=const.MGB_INFO_BTN,
image=self._img_info,
font=const.MW_FONT,
command=self._local_info_eh,
compound='left',
bd=3
)
self._btn_data_set = tk.Button(
self._f_data_set,
padx=const.MW_BTN_PADX,
pady=const.MW_BTN_PADY,
text=const.MGB_DATA_SET_BTN,
image=self._img_data_set,
font=const.MW_FONT,
command=self._local_create_data_set_eh,
compound='left',
bd=3
)
self._btn_train = tk.Button(
self._f_train,
padx=const.MW_BTN_PADX,
pady=const.MW_BTN_PADY,
text=const.MGB_TRAIN_BTN,
image=self._img_train_sess,
font=const.MW_FONT,
command=self._local_train_eh,
compound='left',
bd=3
)
self._btn_test = tk.Button(
self._f_test,
padx=const.MW_BTN_PADX,
pady=const.MW_BTN_PADY,
text=const.MGB_TEST_BTN,
image=self._img_test_sess,
font=const.MW_FONT,
command=self._local_test_eh,
compound='left',
bd=3
)
self._btn_help = tk.Button(
self._f_help,
state='disabled', # TODO
padx=const.MW_BTN_PADX,
pady=const.MW_BTN_PADY,
text=const.MGB_HELP_BTN,
image=self._img_help,
font=const.MW_FONT,
command=self._local_help_eh,
compound='left',
bd=3
)
self._btn_exit = tk.Button(
self._f_exit,
padx=const.MW_BTN_PADX,
pady=const.MW_BTN_PADY,
text=const.MGB_EXIT_BTN,
image=self._exit_image,
font=const.MW_FONT,
command=self._exit_eh,
compound='left',
bd=3
)
def _place_widgets(self):
self._btn_info.pack(fill='both',
expand=True)
self._btn_data_set.pack(fill='both',
expand=True)
self._btn_train.pack(fill='both',
expand=True)
self._btn_test.pack(fill='both',
expand=True)
self._btn_help.pack(fill='both',
expand=True)
self._btn_exit.pack(fill='both',
expand=True)
self._f_info.pack(side='left',
fill='both',
expand=True)
self._f_data_set.pack(side='left',
fill='both',
expand=True)
self._f_train.pack(side='left',
fill='both',
expand=True)
self._f_test.pack(side='left',
fill='both',
expand=True)
self._f_help.pack(side='left',
fill='both',
expand=True)
self._f_exit.pack(side='left',
fill='both',
expand=True)
self._f_buttons.pack(side='top',
fill='x')
def _raise_buttons(self):
self._btn_info.config(relief="raised")
self._btn_data_set.config(relief="raised")
self._btn_train.config(relief="raised")
self._btn_test.config(relief="raised")
self._btn_help.config(relief="raised")
#########################################################################
# Event handling
def _local_info_eh(self):
self._raise_buttons()
self._btn_info.config(relief="sunken")
self._info_eh()
def _local_create_data_set_eh(self):
self._raise_buttons()
self._btn_data_set.config(relief="sunken")
self._create_data_set_eh()
def _local_train_eh(self):
self._raise_buttons()
self._btn_train.config(relief="sunken")
self._train_eh()
def _local_test_eh(self):
self._raise_buttons()
self._btn_test.config(relief="sunken")
self._test_eh()
def _local_help_eh(self):
self._raise_buttons()
self._btn_help.config(relief="sunken")
self._help_eh()
#########################################################################
# Public methods
def enable(self):
self._btn_data_set.config(state='normal')
self._btn_train.config(state='normal')
# self._btn_info.config(state='normal') TODO
self._btn_test.config(state='normal')
# self._btn_help.config(state='normal') TODO
def disable(self):
self._btn_data_set.config(state='disabled')
self._btn_train.config(state='disabled')
# self._btn_info.config(state='disabled') TODO
self._btn_test.config(state='disabled')
# self._btn_help.config(state='disabled') TODO
#########################################################################
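# A minimal wiring sketch (handler names are hypothetical): the frame takes
# one callback per button and packs itself into any parent widget.
if __name__ == '__main__':
    root = tk.Tk()
    def noop(): pass
    buttons = MainGuiButtonsF(root, noop, noop, noop, noop, noop, root.destroy)
    buttons.pack(fill='x')
    root.mainloop()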
| {
"content_hash": "2937e5197a0ffbc9a1213cc276931e4e",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 77,
"avg_line_length": 28.246875,
"alnum_prop": 0.4673083305675407,
"repo_name": "dani-i/bachelor-project",
"id": "1f0a07fc16081f7118001ea79792d5439ff6e54a",
"size": "9039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphics/widgets/main_gui_buttons_f.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "566079"
}
],
"symlink_target": ""
} |
""" Module to manage dependencies between pythran types. """
import gast as ast
import itertools
import os
from pythran.analyses import GlobalDeclarations
from pythran.errors import PythranInternalError
from pythran.passmanager import ModuleAnalysis
from pythran.types.conversion import PYTYPE_TO_CTYPE_TABLE
from pythran.utils import get_variable
from pythran.typing import List, Set, Dict, NDArray, Tuple, Pointer, Fun
from pythran.graph import DiGraph
def pytype_to_deps_hpp(t):
"""python -> pythonic type hpp filename."""
if isinstance(t, List):
return {'list.hpp'}.union(pytype_to_deps_hpp(t.__args__[0]))
elif isinstance(t, Set):
return {'set.hpp'}.union(pytype_to_deps_hpp(t.__args__[0]))
elif isinstance(t, Dict):
tkey, tvalue = t.__args__
return {'dict.hpp'}.union(pytype_to_deps_hpp(tkey),
pytype_to_deps_hpp(tvalue))
elif isinstance(t, Tuple):
return {'tuple.hpp'}.union(*[pytype_to_deps_hpp(elt)
for elt in t.__args__])
elif isinstance(t, NDArray):
out = {'ndarray.hpp'}
# it's a transpose!
if t.__args__[1].start == -1:
out.add('numpy_texpr.hpp')
return out.union(pytype_to_deps_hpp(t.__args__[0]))
elif isinstance(t, Pointer):
return {'pointer.hpp'}.union(pytype_to_deps_hpp(t.__args__[0]))
elif isinstance(t, Fun):
return {'cfun.hpp'}.union(*[pytype_to_deps_hpp(a) for a in t.__args__])
elif t in PYTYPE_TO_CTYPE_TABLE:
return {'{}.hpp'.format(t.__name__)}
else:
raise NotImplementedError("{0}:{1}".format(type(t), t))
def pytype_to_deps(t):
""" python -> pythonic type header full path. """
res = set()
for hpp_dep in pytype_to_deps_hpp(t):
res.add(os.path.join('pythonic', 'types', hpp_dep))
res.add(os.path.join('pythonic', 'include', 'types', hpp_dep))
return res
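# Hedged example of the mapping (assuming str and int appear in
# PYTYPE_TO_CTYPE_TABLE, as primitive types normally do):
#
#   pytype_to_deps_hpp(Dict[str, List[int]])
#   # -> {'dict.hpp', 'str.hpp', 'list.hpp', 'int.hpp'}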
class TypeDependencies(ModuleAnalysis):
"""
Gathers the callees of each function required for type inference.
    This analysis produces a directed graph with functions as nodes and edges
between nodes when a function might call another.
Check usual behavior.
>>> import gast as ast
>>> from pythran import passmanager
>>> pm = passmanager.PassManager("test")
>>> node = ast.parse('''
... def foo(n):
... return 1 if copy(n) else copy(n)
... def copy(n):
... return n == 2''')
>>> res = pm.gather(TypeDependencies, node)
>>> len(res.edges)
3
    foo result depends on: NoDeps and copy
    copy result depends on: NoDeps
Check that content assignment is a dependency.
>>> node = ast.parse('''
... def foo(n):
... n[1] = copy(n)
... return 1 if copy(n) else n
... def copy(n):
... return n == 2''')
>>> res = pm.gather(TypeDependencies, node)
>>> len(res.edges)
3
    foo result depends on: NoDeps and copy
    copy result depends on: NoDeps
    Check that augassign adds a dependency but doesn't remove the old one.
>>> node = ast.parse('''
... def bar(n):
... return n
... def foo(n):
... n[1] = copy(n)
... n[1] += bar(1)
... return 1 if copy(n) else n
... def copy(n):
... return n == 2''')
>>> res = pm.gather(TypeDependencies, node)
>>> len(res.edges)
5
    bar result depends on: NoDeps
    foo result depends on: NoDeps, bar and copy
    copy depends on: NoDeps
    Check that an if statement handles both branches.
>>> node = ast.parse('''
... def bar(n):
... return n
... def foo(n):
... if n:
... n = bar()
... else:
... n = 4
... return 1 or n''')
>>> res = pm.gather(TypeDependencies, node)
>>> len(res.edges)
3
Check we do not add everything from a conditional statement.
>>> node = ast.parse('''
... def bar(n):
... return n
... def foo(n):
... if n:
... n = bar()
... n = 3
... else:
... n = 4
... return 1 or n''')
>>> res = pm.gather(TypeDependencies, node)
>>> len(res.edges)
2
    bar result depends on: NoDeps
    foo result depends on: NoDeps only
    Check dependency on the for-loop target variable.
>>> node = ast.parse('''
... def bar(n):
... return builtins.range(n)
... def foo(n):
... for i in bar(n):
... i = 2
... return i''')
>>> res = pm.gather(TypeDependencies, node)
>>> len(res.edges)
2
    bar result depends on: NoDeps
    foo result depends on: NoDeps
    Check dependency on the for-loop target variable, with no deps if the
    loop doesn't start.
>>> node = ast.parse('''
... def bar(n):
... return builtins.range(n)
... def foo(n):
... i = 4
... for i in bar(n):
... pass
... return i''')
>>> res = pm.gather(TypeDependencies, node)
>>> len(res.edges)
3
    bar result depends on: NoDeps
    foo result depends on: NoDeps and bar
    Check dependency on the for-loop target variable with deps.
>>> node = ast.parse('''
... def bar(n):
... return builtins.range(n)
... def foo(n):
... for i in bar(n):
... pass
... return i''')
>>> res = pm.gather(TypeDependencies, node)
>>> len(res.edges)
2
    bar result depends on: NoDeps
    foo result depends on: NoDeps and bar
Check conditional without else branch.
>>> node = ast.parse('''
... def foo(n):
... res = 3
... if n:
... res = foo(n - 1)
... return res''')
>>> res = pm.gather(TypeDependencies, node)
>>> len(res.edges)
2
    foo result depends on: NoDeps and foo
FIXME : We should use CFG to perform better function dependencies.
Check conditional without break
>> node = ast.parse('''
.. def bar2(n):
.. return builtins.range(n)
.. def bar(n):
.. return builtins.range(n)
.. def foo(n):
.. for i in bar(n):
.. if i:
.. j = bar(n)
.. break
.. j = bar2(n)
.. return j''')
>> res = pm.gather(TypeDependencies, node)
>> len(res.edges)
4
    bar result depends on: NoDeps
    bar2 result depends on: NoDeps
    foo result depends on: bar and bar2
"""
NoDeps = "None"
def __init__(self):
""" Create empty result graph and gather global declarations. """
self.result = DiGraph()
self.current_function = None
self.naming = dict() # variable to dependencies for current function.
# variable to dependencies for current conditional statement
self.in_cond = dict()
ModuleAnalysis.__init__(self, GlobalDeclarations)
def prepare(self, node):
"""
        Add a node for each global declaration in the result graph.
        No edges are added, as there are no builtin type dependencies.
"""
super(TypeDependencies, self).prepare(node)
for v in self.global_declarations.values():
self.result.add_node(v)
self.result.add_node(TypeDependencies.NoDeps)
def visit_any_conditionnal(self, node1, node2):
"""
        Set and restore the in_cond variable before visiting subnodes.
        Compute correct dependencies on a value, as both branches are
        possible paths.
"""
true_naming = false_naming = None
try:
tmp = self.naming.copy()
for expr in node1:
self.visit(expr)
true_naming = self.naming
self.naming = tmp
except KeyError:
pass
try:
tmp = self.naming.copy()
for expr in node2:
self.visit(expr)
false_naming = self.naming
self.naming = tmp
except KeyError:
pass
if true_naming and not false_naming:
self.naming = true_naming
elif false_naming and not true_naming:
self.naming = false_naming
elif true_naming and false_naming:
self.naming = false_naming
for k, v in true_naming.items():
if k not in self.naming:
self.naming[k] = v
else:
for dep in v:
if dep not in self.naming[k]:
self.naming[k].append(dep)
def visit_FunctionDef(self, node):
"""
        Initialize variables for the current function to add edges from calls.
        We compute variable-to-call dependencies and add edges when returns
        are reached.
"""
# Ensure there are no nested functions.
assert self.current_function is None
self.current_function = node
self.naming = dict()
self.in_cond = False # True when we are in a if, while or for
self.generic_visit(node)
self.current_function = None
def visit_Return(self, node):
"""
        Add an edge from each possible callee to the current function.
        Gather all the function calls that led to the creation of the
        returned expression and add an edge to each of these functions.
When visiting an expression, one returns a list of frozensets. Each
element of the list is linked to a possible path, each element of a
frozenset is linked to a dependency.
"""
if not node.value:
# Yielding function can't return values
return
for dep_set in self.visit(node.value):
if dep_set:
for dep in dep_set:
self.result.add_edge(dep, self.current_function)
else:
self.result.add_edge(TypeDependencies.NoDeps,
self.current_function)
visit_Yield = visit_Return
def visit_Assign(self, node):
"""
        In case of assignment, the assigned value depends on r-value type
        dependencies.
        It is valid for subscript: `a[i] = foo()` means the type of `a`
        depends on the return type of `foo`.
"""
value_deps = self.visit(node.value)
for target in node.targets:
name = get_variable(target)
if isinstance(name, ast.Name):
self.naming[name.id] = value_deps
def visit_AugAssign(self, node):
"""
        AugAssigned value depends on r-value type dependencies.
        It is valid for subscript: `a[i] += foo()` means the type of `a`
        depends on the return type of `foo` and on the previous types of
        `a` too.
"""
args = (self.naming[get_variable(node.target).id],
self.visit(node.value))
merge_dep = list({frozenset.union(*x)
for x in itertools.product(*args)})
self.naming[get_variable(node.target).id] = merge_dep
def visit_For(self, node):
"""
        Handle the iteration variable in for loops.
        The iteration variable may hold the correct value at the end of
        the loop.
"""
body = node.body
if node.target.id in self.naming:
body = [ast.Assign(targets=[node.target], value=node.iter,
type_comment=None)] + body
self.visit_any_conditionnal(body, node.orelse)
else:
iter_dep = self.visit(node.iter)
self.naming[node.target.id] = iter_dep
self.visit_any_conditionnal(body, body + node.orelse)
def visit_BoolOp(self, node):
""" Return type may come from any boolop operand. """
return sum((self.visit(value) for value in node.values), [])
def visit_BinOp(self, node):
""" Return type depend from both operand of the binary operation. """
args = [self.visit(arg) for arg in (node.left, node.right)]
return list({frozenset.union(*x) for x in itertools.product(*args)})
def visit_UnaryOp(self, node):
""" Return type depend on operand only. """
return self.visit(node.operand)
@staticmethod
def visit_Lambda(_):
""" Lambda have to be remove before. """
assert False
def visit_IfExp(self, node):
""" Return value depend on both if branch. """
return self.visit(node.body) + self.visit(node.orelse)
@staticmethod
def visit_Compare(_):
""" Comparison return a bool so there are no dependencies. """
return [frozenset()]
def visit_Call(self, node):
"""
        A function call depends on all functions used in the call.
        >> a = foo(bar(c) or foobar(d))
        Return type depends on [foo, bar] or [foo, foobar]
"""
args = [self.visit(arg) for arg in node.args]
func = self.visit(node.func)
params = args + [func or []]
return list({frozenset.union(*p) for p in itertools.product(*params)})
@staticmethod
def visit_Constant(_):
""" Return no dependencies on others functions. """
return [frozenset()]
@staticmethod
def visit_Attribute(_):
""" Return no dependencies on others functions. """
return [frozenset()]
def visit_Subscript(self, node):
"""
Return dependencies of the subscripted value.
        `a = foo()[0]` means `a` has a dependency on the return type of `foo`.
"""
return self.visit(node.value)
def visit_Name(self, node):
"""
        Return dependencies for the given variable.
        It has to be registered first.
"""
if node.id in self.naming:
return self.naming[node.id]
elif node.id in self.global_declarations:
return [frozenset([self.global_declarations[node.id]])]
elif isinstance(node.ctx, ast.Param):
deps = [frozenset()]
self.naming[node.id] = deps
return deps
else:
raise PythranInternalError("Variable '{}' used before assignment"
"".format(node.id))
def visit_List(self, node):
""" List construction depend on each elements type dependency. """
if node.elts:
return list(set(sum([self.visit(elt) for elt in node.elts], [])))
else:
return [frozenset()]
visit_Set = visit_List
def visit_Dict(self, node):
""" Dict construction depend on each element/value type dependency."""
if node.keys:
items = node.keys + node.values
return list(set(sum([self.visit(item) for item in items], [])))
else:
return [frozenset()]
visit_Tuple = visit_List
@staticmethod
def visit_Slice(_):
""" Slice are not part of return type dependency information. """
assert False
@staticmethod
def visit_Index(_):
""" Index are not part of return type dependency information. """
assert False
def visit_If(self, node):
""" Both if branches may be evaluate first. """
return self.visit_any_conditionnal(node.body, node.orelse)
def visit_While(self, node):
""" Both while branches may be evaluate first. """
return self.visit_any_conditionnal(node.body, node.orelse)
def visit_ExceptHandler(self, node):
""" Exception may declare a new variable. """
if node.name:
self.naming[node.name.id] = [frozenset()]
for stmt in node.body:
self.visit(stmt)
| {
"content_hash": "2dfc0681a54b6fc8355fe6c027c66bc0",
"timestamp": "",
"source": "github",
"line_count": 490,
"max_line_length": 79,
"avg_line_length": 31.569387755102042,
"alnum_prop": 0.561380826168466,
"repo_name": "pombredanne/pythran",
"id": "ac93c9a4634f7b6d4cbb92d5ad2d5c39d2652ad6",
"size": "15469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythran/types/type_dependencies.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "1366767"
},
{
"name": "Makefile",
"bytes": "1185"
},
{
"name": "Python",
"bytes": "1209572"
},
{
"name": "Shell",
"bytes": "264"
}
],
"symlink_target": ""
} |
__author__ = 'davidbyttow@google.com (David Byttow)'
import hashlib
import random
import time
import urlparse
from types import ListType
import data
import http
from opensocial import simplejson
def generate_uuid(*args):
"""Simple method for generating a unique identifier.
Args: Any arguments used to help make this uuid more unique.
Returns: A 128-bit hex identifier.
"""
t = long(time.time() * 1000)
r = long(random.random() * 1000000000000000L)
a = random.random() * 1000000000000000L
data = '%s %s %s %s' % (str(t), str(r), str(a), str(args))
return hashlib.md5(data).hexdigest()
class Request(object):
"""Represents an OpenSocial request that can be processed via RPC or REST."""
def __init__(self, rest_request, rpc_request, requestor=None):
self.rest_request = rest_request
self.rpc_request = rpc_request
self.set_requestor(requestor)
def get_requestor(self):
"""Get the requestor id for this request.
Returns: The requestor's id.
"""
return self.__requestor
def set_requestor(self, id):
"""Set the requestor id for this request.
This does not accept any keywords such as @me.
TODO: Refactor the id check out of here, it feels wrong.
Args:
id: str The requestor's id.
"""
    if id and id[0] != '@':
self.__requestor = id
else:
self.__requestor = None
def get_query_params(self):
"""Returns the query params string for this request."""
query_params = {}
if self.get_requestor():
query_params['xoauth_requestor_id'] = self.get_requestor()
return query_params
def make_rest_request(self, url_base):
"""Creates a RESTful HTTP request.
Args:
url_base: str The base REST URL.
"""
return self.rest_request.make_http_request(url_base,
self.get_query_params())
def get_rpc_body(self):
return self.rpc_request.get_rpc_body()
class FetchPeopleRequest(Request):
"""A request for handling fetching a collection of people."""
def __init__(self, user_id, group_id, fields=None, params=None):
params = params or {}
if fields:
params['fields'] = ','.join(fields)
rest_request = RestRequestInfo('/'.join(('people', user_id, group_id)),
params=params)
rpc_params = params.copy()
rpc_params.update({'userId': user_id,
'groupId': group_id})
rpc_request = RpcRequestInfo('people.get', params=rpc_params)
super(FetchPeopleRequest, self).__init__(rest_request,
rpc_request,
user_id)
def process_json(self, json):
"""Construct the appropriate OpenSocial object from a JSON dict.
Args:
json: dict The JSON structure.
Returns: a Collection of Person objects.
"""
return data.Collection.parse_json(json, data.Person)
class FetchPersonRequest(FetchPeopleRequest):
"""A request for handling fetching a single person by id."""
  def __init__(self, user_id, fields=None, params=None):
super(FetchPersonRequest, self).__init__(user_id,
'@self',
fields=fields,
params=params)
def process_json(self, json):
"""Construct the appropriate OpenSocial object from a JSON dict.
Args:
json: dict The JSON structure.
Returns: A Person object.
"""
return data.Person.parse_json(json)
class FetchAppDataRequest(Request):
"""A request for handling fetching app data."""
def __init__(self, user_id, group_id, app_id='@app', fields=None,
params=None):
params = params or {}
if fields:
params['fields'] = ','.join(fields)
rest_path = '/'.join(('appdata', user_id, group_id, app_id))
rest_request = RestRequestInfo(rest_path, params=params)
# TODO: Handle REST fields.
params.update({'userId': user_id,
'groupId': group_id,
'appId': app_id,
'keys': fields})
rpc_request = RpcRequestInfo('appdata.get', params=params)
super(FetchAppDataRequest, self).__init__(rest_request,
rpc_request,
user_id)
def process_json(self, json):
"""Construct the appropriate OpenSocial object from a JSON dict.
Args:
json: dict The JSON structure.
Returns: An AppData object.
"""
if type(json) == ListType:
return json
else:
return data.AppData.parse_json(json)
class UpdateAppDataRequest(Request):
"""A request for handling updating app data."""
  def __init__(self, user_id, group_id, app_id='@app', fields=None, data=None,
               params=None):
    params = params or {}
    if fields:
      params['fields'] = ','.join(fields)
    params['data'] = data or {}
#TODO: add support for rest
params.update({'userId': user_id,
'groupId': group_id,
'appId': app_id})
rpc_request = RpcRequestInfo('appdata.update', params=params)
super(UpdateAppDataRequest, self).__init__(None,
rpc_request,
user_id)
def process_json(self, json):
return json
class DeleteAppDataRequest(Request):
"""A request for handling deleting app data."""
def __init__(self, user_id, group_id, app_id='@app', fields=None,
params=None):
params = params or {}
if fields:
params['fields'] = ','.join(fields)
#TODO: add support for rest
params.update({'userId': user_id,
'groupId': group_id,
'appId': app_id,
                   'keys': params.get('fields')})
rpc_request = RpcRequestInfo('appdata.delete', params=params)
super(DeleteAppDataRequest, self).__init__(None,
rpc_request,
user_id)
def process_json(self, json):
return json
class RestRequestInfo(object):
"""Represents a pending REST request."""
def __init__(self, path, method='GET', params=None):
self.method = method
self.path = path
self.params = params or {}
def make_http_request(self, url_base, query_params=None):
"""Generates a http.Request object for the UrlFetch interface.
Args:
url_base: str The base REST URL.
Returns: The http.Request object.
"""
# Ensure that there is a path separator.
    if url_base[-1] != '/' and self.path[0] != '/':
url_base = url_base + '/'
url = url_base + self.path
if query_params:
self.params.update(query_params)
return http.Request(url, method=self.method, signed_params=self.params)
class TextRpcRequest(Request):
""" Represents an RPC request which is not configured with parameters, but
a raw text blob. Intended for debugging or developer tools."""
def __init__(self, rpc_body, requestor=None):
    self.__rpc_body = rpc_body
self.set_requestor(requestor)
def get_rpc_body(self):
return simplejson.loads(self.__rpc_body)
def get_requestor(self):
"""Get the requestor id for this request.
Returns: The requestor's id.
"""
return self.__requestor
def set_requestor(self, id):
"""Set the requestor id for this request.
This does not accept any keywords such as @me.
TODO: Refactor the id check out of here, it feels wrong.
Args:
id: str The requestor's id.
"""
    if id and id[0] != '@':
self.__requestor = id
else:
self.__requestor = None
def process_json(self, json):
return json
class RpcRequestInfo(object):
"""Represents a pending RPC request."""
def __init__(self, method, params, id=None):
self.method = method
self.params = params
self.id = id or generate_uuid(method)
def get_rpc_body(self):
"""Creates the JSON dict structure for thie RPC request.
Returns: dict The JSON body for this RPC.
"""
rpc_body = {
'params': self.params,
'method': self.method,
'id': self.id,
}
return rpc_body
class RequestBatch(object):
"""This class will manage the batching of requests."""
def __init__(self):
self.requests = {}
self.data = {}
def add_request(self, key, request):
"""Adds a request to this batch.
Args:
key: str A unique key to pair with the result of this request.
request: obj The request object.
"""
if key:
request.rpc_request.id = key
self.requests[key] = request
def get(self, key):
"""Get the result value for a given request key.
Args:
key: str The key to retrieve.
"""
return self.data.get(key)
def send(self, container):
"""Execute the batch with the specified container.
Args:
container: The container to execute this batch on.
"""
container.send_request_batch(self, False)
def _set_data(self, key, data):
self.data[key] = data
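# A minimal batching sketch (the container object is an assumption; this
# library expects something exposing send_request_batch()):
#
#   batch = RequestBatch()
#   batch.add_request('me', FetchPersonRequest('101'))
#   batch.add_request('friends', FetchPeopleRequest('101', '@friends'))
#   batch.send(container)
#   me, friends = batch.get('me'), batch.get('friends')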
| {
"content_hash": "228405ed3c7797c35cab89c5d08d2470",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 79,
"avg_line_length": 27.291176470588237,
"alnum_prop": 0.5870244638430865,
"repo_name": "CollabQ/CollabQ",
"id": "a41e773c42904c602f04b623456cf4674e21d3f7",
"size": "9887",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vendor/opensocial/request.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400472"
},
{
"name": "C++",
"bytes": "20"
},
{
"name": "JavaScript",
"bytes": "327809"
},
{
"name": "Python",
"bytes": "6590397"
},
{
"name": "R",
"bytes": "1277"
},
{
"name": "Shell",
"bytes": "5624"
}
],
"symlink_target": ""
} |
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import lib_openshift
from lib_openshift.rest import ApiException
from lib_openshift.models.v1_container_state import V1ContainerState
class TestV1ContainerState(unittest.TestCase):
""" V1ContainerState unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1ContainerState(self):
"""
Test V1ContainerState
"""
model = lib_openshift.models.v1_container_state.V1ContainerState()
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "397d93fe240471d5ab3d92284c854abc",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 76,
"avg_line_length": 25.03921568627451,
"alnum_prop": 0.6985121378230227,
"repo_name": "detiber/lib_openshift",
"id": "51185206e6ff998fadd96700847c2c90f3ecae5e",
"size": "1294",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_v1_container_state.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "61305"
},
{
"name": "Python",
"bytes": "6202851"
},
{
"name": "Shell",
"bytes": "2825"
}
],
"symlink_target": ""
} |
import os
import pytest
import sqlalchemy as sa
from sqlalchemy_continuum.dialects.postgresql import (
drop_trigger,
sync_trigger
)
from tests import (
get_dns_from_driver,
get_driver_name,
QueryPool,
uses_native_versioning
)
@pytest.mark.skipif('not uses_native_versioning()')
class TestTriggerSyncing(object):
def setup_method(self, method):
driver = os.environ.get('DB', 'sqlite')
self.driver = get_driver_name(driver)
self.engine = sa.create_engine(get_dns_from_driver(self.driver))
self.connection = self.engine.connect()
if driver == 'postgres-native':
self.connection.execute('CREATE EXTENSION IF NOT EXISTS hstore')
self.connection.execute(
'CREATE TABLE article '
'(id INT PRIMARY KEY, name VARCHAR(200), content TEXT)'
)
self.connection.execute(
'CREATE TABLE article_version '
'(id INT, transaction_id INT, name VARCHAR(200), '
'name_mod BOOLEAN, PRIMARY KEY (id, transaction_id))'
)
def teardown_method(self, method):
self.connection.execute('DROP TABLE IF EXISTS article')
self.connection.execute('DROP TABLE IF EXISTS article_version')
self.engine.dispose()
self.connection.close()
def test_sync_triggers(self):
sync_trigger(self.connection, 'article_version')
assert (
'DROP TRIGGER IF EXISTS article_trigger ON "article"'
in QueryPool.queries[-4]
)
assert 'DROP FUNCTION ' in QueryPool.queries[-3]
assert 'CREATE OR REPLACE FUNCTION ' in QueryPool.queries[-2]
assert 'CREATE TRIGGER ' in QueryPool.queries[-1]
sync_trigger(self.connection, 'article_version')
def test_drop_triggers(self):
drop_trigger(self.connection, 'article')
assert (
'DROP TRIGGER IF EXISTS article_trigger ON "article"'
in QueryPool.queries[-2]
)
assert 'DROP FUNCTION ' in QueryPool.queries[-1]
| {
"content_hash": "c8a2ddcc619820e3473f69e56b7d57d3",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 76,
"avg_line_length": 33.52459016393443,
"alnum_prop": 0.6327628361858191,
"repo_name": "rmoorman/sqlalchemy-continuum",
"id": "a9f96d6d630b2a0b3817086e41319cbf4f58ab0f",
"size": "2045",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/dialects/test_triggers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "304902"
}
],
"symlink_target": ""
} |
import os, struct, datetime
class HBNReader:
"""Reads a binary output file from an HSPF simulation."""
def all_occurences(self, s):
"""
        Yields the start index of each record header in byte string "s"
        (8 bytes before each occurrence of 'PERLND', 'IMPLND', or 'RCHRES').
"""
i = 0
while True:
perlnd = s.find(b'PERLND', i)
implnd = s.find(b'IMPLND', i)
rchres = s.find(b'RCHRES', i)
if all([perlnd == -1, implnd == -1, rchres == -1]):
return
else:
if perlnd == -1: perlnd = len(s)
if implnd == -1: implnd = len(s)
if rchres == -1: rchres = len(s)
i = min(perlnd, implnd, rchres)
yield i - 8
i += 6
def read(self, binfilename):
"""
Reads data in the file into a data structure and returns the structure.
The data are packaged in a series of dictionaries as follows:
-operation type (e.g., PERLND, IMPLND, RCHRES)
-operation number (e.g., 101, 102)
-operation section (e.g., PWATER, SEDMNT, HYDR)
-section variable name (e.g., PERO, SURO, SURS)
-list of (date, value) pairs (e.g., 10/01/2001, 3.4)
so for example, to get the values of PERO from PERLND 101 in a file:
results = hbnreader.read(file)
data = results['PERLND'][101]['PWATER']['PERO']
        the data are packaged as a list of (time, value) pairs.
"""
# read the file into memory (if the files were huge this might not be
# the most efficient way to do this, but most hbn files are small)
with open(binfilename, 'rb') as f: data = f.read()
# create data structures to organize the information in the file
results = {} # values
variables = {} # keeps track of the variable order in the records
# parse through the file to the end using the indices of all perlnd,
# implnd, and rchres byte strings in the data
for i in self.all_occurences(data):
# read the length of the record
r1, r2, r3, r4 = struct.unpack('4B', data[i:i+4])
# calculate the length
reclen = r1 // 4 + r2 * 2**6 + r3 * 2**14 + r4 * 2**22
# unpack information about the data set
rectype, op, no, sec = struct.unpack('I8sI8s', data[i+4:i+28])
op = op.strip().decode()
sec = sec.strip().decode()
# add an operation type dictionary to the data dictionary
if op not in results:
results[op] = {}
variables[op] = {}
# add operation number dictionary to the operation type dictionary
if no not in results[op]:
results[op][no] = {}
variables[op][no] = {}
# add the operation module to the operation number dictionary
if sec not in results[op][no]:
results[op][no][sec] = {}
variables[op][no][sec] = []
# rectype = 0 mean a list of the variables
if rectype == 0:
# loop through the variable names for the section
j = i + 32
while j + 4 < i + reclen:
# get the length of the variable name
l = struct.unpack('I', data[j-4:j])[0]
# read the variable name
v = struct.unpack('{}s'.format(l),data[j:j+l])[0].decode()
# add the variable name in the data structures
variables[op][no][sec].append(v)
results[op][no][sec][v] = []
j += 4 + l
# rectype = 1 means data values for the variables
if rectype == 1:
# read the date
u, l, yr, mo, da, hr, mi = struct.unpack('7I', data[i+28:i+56])
# Data record
n = len(variables[op][no][sec])
# adjust the HSPF time to real time
t = (datetime.datetime(yr, mo, da, 0, mi) +
datetime.timedelta(hours = hr))
# read the data
values = struct.unpack('{}f'.format(n), data[i+56:i+56+4*n])
# package up the data in the output dictionary
for n, v in zip(variables[op][no][sec], values):
results[op][no][sec][n].append((t, v))
return results
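# Usage sketch mirroring the read() docstring above (the file name here is
# hypothetical):
if __name__ == '__main__':
    reader = HBNReader()
    results = reader.read('example.hbn')
    for t, v in results['PERLND'][101]['PWATER']['PERO']:
        print(t, v)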
| {
"content_hash": "0fd4fb20a4bc2c74e3e3783318b6a9f3",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 79,
"avg_line_length": 30.85135135135135,
"alnum_prop": 0.49627682873412177,
"repo_name": "kbrannan/PyHSPF",
"id": "a8e7e7290c46729b6e8b1e5e7868a50457d789c0",
"size": "4772",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/pyhspf/core/hbnreader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "335"
},
{
"name": "C",
"bytes": "7355"
},
{
"name": "C++",
"bytes": "169875"
},
{
"name": "FORTRAN",
"bytes": "3848038"
},
{
"name": "PHP",
"bytes": "25231"
},
{
"name": "Pascal",
"bytes": "457"
},
{
"name": "Python",
"bytes": "1341946"
},
{
"name": "Shell",
"bytes": "4514"
},
{
"name": "SourcePawn",
"bytes": "4265"
}
],
"symlink_target": ""
} |
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2014, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.13-dev'
__license__ = 'MIT'
# The gevent and eventlet server adapters need to patch some modules before
# they are imported. This is why we parse the commandline parameters here but
# handle them later
if __name__ == '__main__':
from optparse import OptionParser
_cmd_parser = OptionParser(
usage="usage: %prog [options] package.module:app")
_opt = _cmd_parser.add_option
_opt("--version", action="store_true", help="show version number.")
_opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
_opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
_opt("-p", "--plugin",
action="append",
help="install additional plugin/s.")
_opt("--debug", action="store_true", help="start server in debug mode.")
_opt("--reload", action="store_true", help="auto-reload on file changes.")
_cmd_options, _cmd_args = _cmd_parser.parse_args()
if _cmd_options.server:
if _cmd_options.server.startswith('gevent'):
import gevent.monkey
gevent.monkey.patch_all()
elif _cmd_options.server.startswith('eventlet'):
import eventlet
eventlet.monkey_patch()
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, sys, tempfile, threading, time, warnings
from types import FunctionType
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from inspect import getargspec
from unicodedata import normalize
try:
from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try:
from json import dumps as json_dumps, loads as json_lds
except ImportError:
try:
from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
def json_dumps(data):
raise ImportError(
"JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
# It ain't pretty but it works... Sorry for the mess.
py = sys.version_info
py3k = py >= (3, 0, 0)
py25 = py < (2, 6, 0)
py31 = (3, 1, 0) <= py < (3, 2, 0)
# Workaround for the missing "as" keyword in py3k.
def _e():
return sys.exc_info()[1]
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (resticts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
from configparser import ConfigParser
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a):
raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
from ConfigParser import SafeConfigParser as ConfigParser
if py25:
msg = "Python 2.5 support may be dropped in future versions of Bottle."
warnings.warn(msg, DeprecationWarning)
from UserDict import DictMixin
def next(it):
return it.next()
bytes = str
else: # 2.6, 2.7
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
if isinstance(s, bytes):
return s.decode(enc, err)
else:
return unicode(s or ("" if s is None else s))
tonat = touni if py3k else tob
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
if py31:
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
def close(self):
pass # Keep wrapped buffer open.
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try:
functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError:
pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(message, strict=False):
warnings.warn(message, DeprecationWarning, stacklevel=3)
def makelist(data): # This is just too handy
if isinstance(data, (tuple, list, set, dict)):
return list(data)
elif data:
return [data]
else:
return []
class DictProperty(object):
""" Property that maps to a key in a local dict-like attribute. """
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
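# A minimal usage sketch for DictProperty (names are illustrative, not part
# of bottle): cache a computed value inside a dict attribute under a fixed
# key -- the same pattern BaseRequest uses with its `environ` dict below.
#     class Wrapper(object):
#         def __init__(self):
#             self.storage = {}
#         @DictProperty('storage', 'answer', read_only=True)
#         def answer(self):
#             return 42  # computed once, then served from self.storage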
class cached_property(object):
""" A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. """
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
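# cached_property sketch (names illustrative): the first access computes the
# value and stores it in the instance __dict__, shadowing the descriptor.
#     class Thing(object):
#         @cached_property
#         def data(self):
#             return expensive_setup()  # hypothetical; runs once per instance
#     # `del thing.data` removes the cached value and re-enables the property.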
class lazy_attribute(object):
""" A property that caches itself to the class object. """
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError):
pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router. """
class RouteBuildError(RouteError):
""" The route could not be built. """
def _re_flatten(p):
""" Turn all capturing groups in a regular expression pattern into
non-capturing groups. """
if '(' not in p:
return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))', lambda m: m.group(0) if
len(m.group(1)) % 2 else m.group(1) + '(?:', p)
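# Example (illustrative): both the named and the plain group lose their
# capturing behaviour; escaped parentheses are left alone.
#     _re_flatten(r'/(?P<name>[^/]+)/(\d+)')  # -> r'/(?:[^/]+)/(?:\d+)'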
class Router(object):
""" A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
        and an HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
"""
default_pattern = '[^/]+'
default_filter = 're'
#: The current CPython regexp implementation does not allow more
#: than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf: (_re_flatten(conf or self.default_pattern),
None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)
}
def add_filter(self, name, func):
""" Add a filter. The provided function is called with the configuration
            string as a parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. """
self.filters[name] = func
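    # A custom filter sketch (the 'hex' name and conversions are illustrative):
    # the returned tuple is (regexp, to_python, to_url).
    #     app.router.add_filter('hex', lambda conf: (
    #         r'[0-9a-fA-F]+',            # regexp fragment for the wildcard
    #         lambda x: int(x, 16),       # to_python: matched text -> int
    #         lambda x: '%x' % x))        # to_url: int -> path segment
    # A route may then use e.g. '/object/<id:hex>'.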
rule_syntax = re.compile('(\\\\*)'
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if len(g[0]) % 2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix:
yield prefix, None, None
name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix + rule[offset:], None, None
def add(self, rule, method, target, name=None):
""" Add a new rule or replace the target for an existing rule. """
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error:
raise RouteSyntaxError("Could not add Route: %s (%s)" %
(rule, _e()))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning)
self.dyna_routes[method][
self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x + maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
def build(self, _name, *anons, **query):
""" Build an URL by filling the wildcards in a rule. """
builder = self.builder.get(_name)
if not builder:
raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons):
query['anon%d' % i] = value
url = ''.join([f(query.pop(n)) if n else f for (n, f) in builder])
return url if not query else url + '?' + urlencode(query)
except KeyError:
raise RouteBuildError('Missing URL argument: %r' % _e().args[0])
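    # Build sketch (assumes a route named 'wiki' with rule '/wiki/<page>'):
    #     router.build('wiki', page='Start')           # -> '/wiki/Start'
    #     router.build('wiki', page='Start', rev='7')  # -> '/wiki/Start?rev=7'
    # Surplus keyword arguments become the query string.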
def match(self, environ):
""" Return a (target, url_args) tuple or raise HTTPError(400/404/405). """
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
allowed.add(verb)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
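# Router matching sketch (illustrative environ; only the two keys read by
# Router.match are shown):
#     router = Router()
#     router.add('/wiki/<page>', 'GET', target='wiki-handler')
#     target, args = router.match({'REQUEST_METHOD': 'GET',
#                                  'PATH_INFO': '/wiki/Start'})
#     # target == 'wiki-handler', args == {'page': 'Start'}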
class Route(object):
""" This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
        turning a URL path rule into a regular expression usable by the Router.
"""
def __init__(self, app, rule, method, callback,
name=None,
plugins=None,
skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/<page>``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = ConfigDict().load_dict(config)
@cached_property
def call(self):
""" The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests."""
return self._make_callback()
def reset(self):
""" Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. """
self.__dict__.pop('call', None)
def prepare(self):
""" Do all on-demand work immediately (useful for debugging)."""
self.call
def all_plugins(self):
""" Yield all Plugins affecting this route. """
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
callback = plugin.apply(callback, self)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
        if callback is not self.callback:
update_wrapper(callback, self.callback)
return callback
def get_undecorated_callback(self):
""" Return the callback. If the callback is a decorated function, try to
recover the original function. """
func = self.callback
func = getattr(func, '__func__' if py3k else 'im_func', func)
closure_attr = '__closure__' if py3k else 'func_closure'
while hasattr(func, closure_attr) and getattr(func, closure_attr):
attributes = getattr(func, closure_attr)
func = attributes[0].cell_contents
# in case of decorators with multiple arguments
if not isinstance(func, FunctionType):
# pick first FunctionType instance from multiple arguments
func = filter(lambda x: isinstance(x, FunctionType),
map(lambda x: x.cell_contents, attributes))
func = list(func)[0] # py3 support
return func
def get_callback_args(self):
""" Return a list of argument names the callback (most likely) accepts
as keyword arguments. If the callback is a decorated function, try
to recover the original function before inspection. """
return getargspec(self.get_undecorated_callback())[0]
def get_config(self, key, default=None):
""" Lookup a config field and return its value, first checking the
route.config, then route.app.config."""
for conf in (self.config, self.app.config):
if key in conf: return conf[key]
return default
def __repr__(self):
cb = self.get_undecorated_callback()
return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
def __init__(self, catchall=True, autojson=True):
#: A :class:`ConfigDict` for app specific configuration.
self.config = ConfigDict()
self.config._on_change = functools.partial(self.trigger_hook, 'config')
self.config.meta_set('autojson', 'validate', bool)
self.config.meta_set('catchall', 'validate', bool)
self.config['catchall'] = catchall
self.config['autojson'] = autojson
#: A :class:`ResourceManager` for application files
self.resources = ResourceManager()
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
if self.config['autojson']:
self.install(JSONPlugin())
self.install(TemplatePlugin())
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
catchall = DictProperty('config', 'catchall')
__hook_names = 'before_request', 'after_request', 'app_reset', 'config'
__hook_reversed = 'after_request'
@cached_property
def _hooks(self):
return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
""" Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
"""
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
""" Remove a callback from a hook. """
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
""" Trigger a hook and return a list of results. """
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
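    # Hook sketch (illustrative): normalize the path before routing happens.
    #     @app.hook('before_request')
    #     def strip_path():
    #         request.environ['PATH_INFO'] = \
    #             request.environ['PATH_INFO'].rstrip('/')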
def mount(self, prefix, app, **options):
""" Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call.
"""
segments = [p for p in prefix.split('/') if p]
if not segments: raise ValueError('Empty path prefix.')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = HTTPResponse([])
def start_response(status, headerlist, exc_info=None):
if exc_info:
_raise(*exc_info)
rs.status = status
for name, value in headerlist:
rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
rs.body = itertools.chain(rs.body, body) if rs.body else body
return rs
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'PROXY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
def merge(self, routes):
""" Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. """
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
""" Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
"""
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
""" Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. """
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
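    # Plugin sketch: a plugin may be a plain decorator (names illustrative;
    # assumes the module-level `time` import):
    #     def stopwatch(callback):
    #         def wrapper(*args, **kwargs):
    #             start = time.time()
    #             body = callback(*args, **kwargs)
    #             response.set_header('X-Exec-Time', str(time.time() - start))
    #             return body
    #         return wrapper
    #     app.install(stopwatch)    # applied to all routes after the reset
    #     app.uninstall(stopwatch)  # removes that exact instance again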
def reset(self, route=None):
""" Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. """
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes:
route.reset()
if DEBUG:
for route in routes:
route.prepare()
self.trigger_hook('app_reset')
def close(self):
""" Close the application and all installed plugins. """
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
def run(self, **kwargs):
""" Calls :func:`run` with the same parameters. """
run(self, **kwargs)
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
""" Add a route object, but do not change the :data:`Route.app`
attribute."""
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self,
path=None,
method='GET',
callback=None,
name=None,
apply=None,
skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/<name>')
def hello(name):
return 'Hello %s' % name
The ``<name>`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback,
name=name,
plugins=plugins,
skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def patch(self, path=None, method='PATCH', **options):
""" Equals :meth:`route` with a ``PATCH`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res))
def _handle(self, environ):
path = environ['bottle.raw_path'] = environ['PATH_INFO']
if py3k:
try:
environ['PATH_INFO'] = path.encode('latin1').decode('utf8')
except UnicodeError:
return HTTPError(400, 'Invalid path string. Expected UTF-8')
try:
environ['bottle.app'] = self
request.bind(environ)
response.bind()
try:
self.trigger_hook('before_request')
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
return route.call(**args)
finally:
self.trigger_hook('after_request')
except HTTPResponse:
return _e()
except RouteReset:
route.reset()
return self._handle(environ)
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
return HTTPError(500, "Internal Server Error", _e(), stacktrace)
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code,
self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.body)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
iout = iter(out)
first = next(iout)
while not first:
first = next(iout)
except StopIteration:
return self._cast('')
except HTTPResponse:
first = _e()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', _e(), format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
elif isinstance(first, bytes):
new_iter = itertools.chain([first], iout)
elif isinstance(first, unicode):
encoder = lambda x: x.encode(response.charset)
new_iter = imap(encoder, itertools.chain([first], iout))
else:
msg = 'Unsupported response type: %s' % type(first)
return self._cast(HTTPError(500, msg))
if hasattr(out, 'close'):
new_iter = _closeiter(new_iter, out.close)
return new_iter
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(_e())), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
return [tob(err)]
def __call__(self, environ, start_response):
""" Each instance of :class:'Bottle' is a WSGI application. """
return self.wsgi(environ, start_response)
def __enter__(self):
""" Use this application as default for all module-level shortcuts. """
default_app.push(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
default_app.pop()
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ', )
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
""" Bottle application handling this request. """
raise RuntimeError('This request is not connected to an application.')
@DictProperty('environ', 'bottle.route', read_only=True)
def route(self):
""" The bottle :class:`Route` object that matches this request. """
raise RuntimeError('This request is not connected to a route.')
@DictProperty('environ', 'route.url_args', read_only=True)
def url_args(self):
""" The arguments extracted from the URL. """
raise RuntimeError('This request is not connected to a route.')
@property
def path(self):
""" The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). """
return '/' + self.environ.get('PATH_INFO', '').lstrip('/')
@property
def method(self):
""" The ``REQUEST_METHOD`` value as an uppercase string. """
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
""" A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. """
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
""" Return the value of a request header, or a given default value. """
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE', '')).values()
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
""" The :attr:`query_string` parsed into a :class:`FormsDict`. These
            values are sometimes called "URL arguments" or "GET parameters".
            They should not be confused with the "URL wildcards" provided by
            the :class:`Router`. """
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
for key, value in pairs:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not isinstance(item, FileUpload):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from `multipart/form-data` encoded POST or PUT
request body. The values are instances of :class:`FileUpload`.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if isinstance(item, FileUpload):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
""" If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. """
ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
if ctype == 'application/json':
b = self._get_body_string()
if not b:
return None
return json_loads(b)
return None
def _iter_body(self, read, bufsize):
maxread = max(0, self.content_length)
while maxread:
part = read(min(maxread, bufsize))
if not part: break
yield part
maxread -= len(part)
@staticmethod
def _iter_chunked(read, bufsize):
err = HTTPError(400, 'Error while parsing chunked transfer body.')
rn, sem, bs = tob('\r\n'), tob(';'), tob('')
while True:
header = read(1)
while header[-2:] != rn:
c = read(1)
header += c
if not c: raise err
if len(header) > bufsize: raise err
size, _, _ = header.partition(sem)
try:
maxread = int(tonat(size.strip()), 16)
except ValueError:
raise err
if maxread == 0: break
buff = bs
while maxread > 0:
if not buff:
buff = read(min(maxread, bufsize))
part, buff = buff[:maxread], buff[maxread:]
if not part: raise err
yield part
maxread -= len(part)
if read(2) != rn:
raise err
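    # Wire format parsed above (illustrative): each chunk is a hex size line
    # followed by that many bytes and CRLF; a zero-size chunk terminates.
    #     b'4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n'  yields  b'Wiki', b'pedia'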
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
try:
read_func = self.environ['wsgi.input'].read
except KeyError:
self.environ['wsgi.input'] = BytesIO()
return self.environ['wsgi.input']
body_iter = self._iter_chunked if self.chunked else self._iter_body
body, body_size, is_temp_file = BytesIO(), 0, False
for part in body_iter(read_func, self.MEMFILE_MAX):
body.write(part)
body_size += len(part)
if not is_temp_file and body_size > self.MEMFILE_MAX:
body, tmp = TemporaryFile(mode='w+b'), body
body.write(tmp.getvalue())
del tmp
is_temp_file = True
self.environ['wsgi.input'] = body
body.seek(0)
return body
def _get_body_string(self):
""" read body until content-length or MEMFILE_MAX into a string. Raise
HTTPError(413) on requests that are to large. """
clen = self.content_length
if clen > self.MEMFILE_MAX:
raise HTTPError(413, 'Request entity too large')
if clen < 0: clen = self.MEMFILE_MAX + 1
data = self.body.read(clen)
if len(data) > self.MEMFILE_MAX: # Fail fast
raise HTTPError(413, 'Request entity too large')
return data
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
@property
def chunked(self):
""" True if Chunked transfer encoding was. """
return 'chunked' in self.environ.get(
'HTTP_TRANSFER_ENCODING', '').lower()
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
# We default to application/x-www-form-urlencoded for everything that
# is not multipart and take the fast path (also: 3.1 workaround)
if not self.content_type.startswith('multipart/'):
pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
for key, value in pairs:
post[key] = value
return post
safe_env = {'QUERY_STRING': ''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
if py31:
args['fp'] = NCTextIOWrapper(args['fp'],
encoding='utf8',
newline='\n')
elif py3k:
args['encoding'] = 'utf8'
data = cgi.FieldStorage(**args)
        self['_cgi.FieldStorage'] = data  # http://bugs.python.org/issue18394
data = data.list or []
for item in data:
if item.filename:
post[item.name] = FileUpload(item.file, item.name,
item.filename, item.headers)
else:
post[item.name] = item.value
return post
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
""" The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. """
env = self.environ
http = env.get('HTTP_X_FORWARDED_PROTO') \
or env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
""" The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
            called. This script path is returned with leading and trailing
slashes. """
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
""" Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
"""
script, path = path_shift(self.environ.get('SCRIPT_NAME', '/'), self.path, shift)
self['SCRIPT_NAME'], self['PATH_INFO'] = script, path
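    # path_shift sketch (illustrative values): with SCRIPT_NAME='/' and
    # PATH_INFO='/app/hello', request.path_shift(1) moves one segment so that
    # SCRIPT_NAME becomes '/app' and PATH_INFO becomes '/hello'.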
@property
def content_length(self):
""" The request body length as an integer. The client is responsible to
set this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. """
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
""" The Content-Type header as a lowercase-string (default: empty). """
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
""" True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). """
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH', '')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
""" Alias for :attr:`is_xhr`. "Ajax" is not the right term. """
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION', ''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
            the client IP and followed by zero or more proxies. This only
            works if all proxies support the ``X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None):
return self.environ.get(value, default)
def __getitem__(self, key):
return self.environ[key]
def __delitem__(self, key):
self[key] = ""
del (self.environ[key])
def __iter__(self):
return iter(self.environ)
def __len__(self):
return len(self.environ)
def keys(self):
return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.' + key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
""" Search in self.environ for additional user defined attributes. """
try:
var = self.environ['bottle.request.ext.%s' % name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
self.environ['bottle.request.ext.%s' % name] = value
def _hkey(s):
return s.title().replace('_', '-')
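# Example: _hkey('content_type') -> 'Content-Type' (used below to normalize
# header names for case-insensitive dict access).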
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=str, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, _):
if obj is None: return self
value = obj.headers.get(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj.headers[self.name] = self.writer(value)
def __delete__(self, obj):
del obj.headers[self.name]
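# HeaderProperty sketch: this is how BaseResponse exposes typed header
# attributes further below, e.g.
#     content_length = HeaderProperty('Content-Length', reader=int)
# Reading parses the raw header value with `reader`; writing serializes the
# value with `writer` (str by default).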
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
:param body: The response body as one of the supported types.
:param status: Either an HTTP status code (e.g. 200) or a status line
including the reason phrase (e.g. '200 OK').
:param headers: A dictionary or a list of name-value pairs.
Additional keyword arguments are added to the list of headers.
Underscores in the header name are replaced with dashes.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: set(('Content-Type', )),
304: set(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))
}
def __init__(self, body='', status=None, headers=None, **more_headers):
self._cookies = None
self._headers = {}
self.body = body
self.status = status or self.default_status
if headers:
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers:
self.add_header(name, value)
if more_headers:
for name, value in more_headers.items():
self.add_header(name, value)
def copy(self, cls=None):
""" Returns a copy of self. """
cls = cls or BaseResponse
assert issubclass(cls, BaseResponse)
copy = cls()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
if self._cookies:
copy._cookies = SimpleCookie()
copy._cookies.load(self._cookies.output(header=''))
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
""" The HTTP status line as a string (e.g. ``404 Not Found``)."""
return self._status_line
@property
def status_code(self):
""" The HTTP status code as an integer (e.g. 404)."""
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999:
raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(
_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
@property
def headers(self):
""" An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. """
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name):
return _hkey(name) in self._headers
def __delitem__(self, name):
del self._headers[_hkey(name)]
def __getitem__(self, name):
return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value):
self._headers[_hkey(name)] = [value if isinstance(value, unicode) else
str(value)]
def get_header(self, name, default=None):
""" Return the value of a previously defined header. If there is no
header with that name, return a default value. """
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
""" Create a new response header, replacing any previously defined
headers with the same name. """
self._headers[_hkey(name)] = [value if isinstance(value, unicode)
else str(value)]
def add_header(self, name, value):
""" Add an additional response header, not removing duplicates. """
self._headers.setdefault(_hkey(name), []).append(
value if isinstance(value, unicode) else str(value))
def iter_headers(self):
""" Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. """
return self.headerlist
@property
def headerlist(self):
""" WSGI conform list of (header, value) tuples. """
out = []
headers = list(self._headers.items())
if 'Content-Type' not in self._headers:
headers.append(('Content-Type', [self.default_content_type]))
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for (name, vals) in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', c.OutputString()))
if py3k:
return [(k, v.encode('utf8').decode('latin1')) for (k, v) in out]
else:
return [(k, v.encode('utf8') if isinstance(v, unicode) else v)
for (k, v) in out]
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
expires = HeaderProperty(
'Expires',
reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
writer=lambda x: http_date(x))
@property
def charset(self, default='UTF-8'):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return default
def set_cookie(self, name, value, secret=None, **options):
""" Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
            :param httponly: prevents client-side JavaScript from reading this
              cookie (default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
            cookie). The main intention is to make pickling and unpickling
            safe, not to store secret information on the client side.
"""
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((name, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
        if len(value) > 4096: raise ValueError('Cookie value too long.')
self._cookies[name] = value
for key, value in options.items():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
if key in ('secure', 'httponly') and not value:
continue
self._cookies[name][key.replace('_', '-')] = value
def delete_cookie(self, key, **kwargs):
""" Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. """
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
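    # Signed-cookie sketch (the secret value is illustrative; keep it private):
    #     response.set_cookie('account', {'id': 42}, secret='s3cr3t')
    #     ...
    #     request.get_cookie('account', secret='s3cr3t')  # -> {'id': 42}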
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
def _local_property():
ls = threading.local()
def fget(_):
try:
return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(_, value):
ls.var = value
def fdel(_):
del ls.var
return property(fget, fset, fdel, 'Thread-local property')
class LocalRequest(BaseRequest):
""" A thread-local subclass of :class:`BaseRequest` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`request`). If accessed during a
request/response cycle, this instance always refers to the *current*
request (even on a multithreaded server). """
bind = BaseRequest.__init__
environ = _local_property()
class LocalResponse(BaseResponse):
""" A thread-local subclass of :class:`BaseResponse` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
"""
bind = BaseResponse.__init__
_status_line = _local_property()
_status_code = _local_property()
_cookies = _local_property()
_headers = _local_property()
body = _local_property()
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
def __init__(self, body='', status=None, headers=None, **more_headers):
super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
def apply(self, other):
other._status_code = self._status_code
other._status_line = self._status_line
other._headers = self._headers
other._cookies = self._cookies
other.body = self.body
class HTTPError(HTTPResponse):
default_status = 500
def __init__(self,
status=None,
body=None,
exception=None,
traceback=None, **options):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, **options)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException):
pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, _):
dumps = self.json_dumps
if not dumps: return callback
def wrapper(*a, **ka):
try:
rv = callback(*a, **ka)
except HTTPError:
rv = _e()
if isinstance(rv, dict):
#Attempt to serialize, raises exception on failure
json_response = dumps(rv)
#Set content type only if serialization successful
response.content_type = 'application/json'
return json_response
elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
rv.body = dumps(rv.body)
rv.content_type = 'application/json'
return rv
return wrapper
class TemplatePlugin(object):
""" This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. """
name = 'template'
api = 2
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
""" Create a virtual package that redirects imports (see PEP 302). """
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({
'__file__': __file__,
'__path__': [],
'__all__': [],
'__loader__': self
})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname = fullname.rsplit('.', 1)[0]
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
modname = fullname.rsplit('.', 1)[1]
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
def __len__(self):
return len(self.dict)
def __iter__(self):
return iter(self.dict)
def __contains__(self, key):
return key in self.dict
def __delitem__(self, key):
del self.dict[key]
def __getitem__(self, key):
return self.dict[key][-1]
def __setitem__(self, key, value):
self.append(key, value)
def keys(self):
return self.dict.keys()
if py3k:
def values(self):
return (v[-1] for v in self.dict.values())
def items(self):
return ((k, v[-1]) for k, v in self.dict.items())
def allitems(self):
return ((k, v) for k, vl in self.dict.items() for v in vl)
iterkeys = keys
itervalues = values
iteritems = items
iterallitems = allitems
else:
def values(self):
return [v[-1] for v in self.dict.values()]
def items(self):
return [(k, v[-1]) for k, v in self.dict.items()]
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for k, v in self.dict.iteritems())
def iterallitems(self):
return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
def allitems(self):
return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
def get(self, key, default=None, index=-1, type=None):
""" Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
:param type: If defined, this callable is used to cast the value
                into a specific type. Exceptions are suppressed and result in
                the default value being returned.
"""
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception:
pass
return default
def append(self, key, value):
""" Add a new value to the list of values for this key. """
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
""" Replace the list of values with a single value. """
self.dict[key] = [value]
def getall(self, key):
""" Return a (possibly empty) list of values for a key. """
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
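# MultiDict sketch: item access returns the newest value; the extra methods
# expose the full list of values per key.
#     d = MultiDict(key=1)
#     d['key'] = 2             # appends instead of replacing
#     d['key']                 # -> 2 (newest value)
#     d.getall('key')          # -> [1, 2]
#     d.get('key', index=0)    # -> 1
#     d.get('key', type=str)   # -> '2'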
class FormsDict(MultiDict):
""" This :class:`MultiDict` subclass is used to store request form data.
        In addition to the normal dict-like item access methods (which return
unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de-
or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. """
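    # Illustrative behavior (comment only): for a missing key, item access
    # raises KeyError, while attribute access falls back to an empty string:
    #   forms['name'] -> KeyError;  forms.name -> ''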
#: Encoding used for attribute values.
input_encoding = 'utf8'
#: If true (default), unicode strings are first encoded with `latin1`
#: and then decoded to match :attr:`input_encoding`.
recode_unicode = True
def _fix(self, s, encoding=None):
if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
return s.encode('latin1').decode(encoding or self.input_encoding)
elif isinstance(s, bytes): # Python 2 WSGI
return s.decode(encoding or self.input_encoding)
else:
return s
def decode(self, encoding=None):
""" Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. """
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
""" Return the value as a unicode string, or the default. """
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default)
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key):
return _hkey(key) in self.dict
def __delitem__(self, key):
del self.dict[_hkey(key)]
def __getitem__(self, key):
return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value):
self.dict[_hkey(key)] = [value if isinstance(value, unicode) else
str(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(
value if isinstance(value, unicode) else str(value))
def replace(self, key, value):
self.dict[_hkey(key)] = [value if isinstance(value, unicode) else
str(value)]
def getall(self, key):
return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in [_hkey(n) for n in names]:
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
""" This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
"""
#: List of keys that do not have a ``HTTP_`` prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
""" Translate header field name to CGI/WSGI environ key. """
key = key.replace('-', '_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
""" Return the header value as is (may be bytes or unicode). """
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
val = self.environ[self._ekey(key)]
if py3k:
if isinstance(val, unicode):
val = val.encode('latin1').decode('utf8')
else:
val = val.decode('utf8')
return val
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield _hkey(key[5:])
elif key in self.cgikeys:
yield _hkey(key)
def keys(self):
return [x for x in self]
def __len__(self):
return len(self.keys())
def __contains__(self, key):
return self._ekey(key) in self.environ
class ConfigDict(dict):
""" A dict-like configuration storage with additional support for
namespaces, validators, meta-data, on_change listeners and more.
"""
__slots__ = ('_meta', '_on_change')
def __init__(self):
self._meta = {}
self._on_change = lambda name, value: None
def load_config(self, filename):
""" Load values from an ``*.ini`` style config file.
If the config file contains sections, their names are used as
namespaces for the values within. The two special sections
``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix).
"""
conf = ConfigParser()
conf.read(filename)
for section in conf.sections():
for key, value in conf.items(section):
if section not in ('DEFAULT', 'bottle'):
key = section + '.' + key
self[key] = value
return self
def load_dict(self, source, namespace=''):
""" Load values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> c = ConfigDict()
>>> c.load_dict({'some': {'namespace': {'key': 'value'} } })
{'some.namespace.key': 'value'}
"""
for key, value in source.items():
if isinstance(key, str):
nskey = (namespace + '.' + key).strip('.')
if isinstance(value, dict):
self.load_dict(value, namespace=nskey)
else:
self[nskey] = value
else:
raise TypeError('Key has type %r (not a string)' % type(key))
return self
def update(self, *a, **ka):
""" If the first parameter is a string, all keys are prefixed with this
namespace. Apart from that it works just as the usual dict.update().
Example: ``update('some.namespace', key='value')`` """
prefix = ''
if a and isinstance(a[0], str):
prefix = a[0].strip('.') + '.'
a = a[1:]
for key, value in dict(*a, **ka).items():
self[prefix + key] = value
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError('Key has type %r (not a string)' % type(key))
value = self.meta_get(key, 'filter', lambda x: x)(value)
if key in self and self[key] is value:
return
self._on_change(key, value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
self._on_change(key, None)
dict.__delitem__(self, key)
def meta_get(self, key, metafield, default=None):
""" Return the value of a meta field for a key. """
return self._meta.get(key, {}).get(metafield, default)
def meta_set(self, key, metafield, value):
""" Set the meta field for a key to a new value. This triggers the
on-change handler for existing keys. """
self._meta.setdefault(key, {})[metafield] = value
if key in self:
self[key] = self[key]
def meta_list(self, key):
""" Return an iterable of meta field names defined for a key. """
return self._meta.get(key, {}).keys()
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self[-1]
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024 * 64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
class _closeiter(object):
""" This only exists to be able to attach a .close method to iterators that
do not support attribute assignment (most of itertools). """
def __init__(self, iterator, close=None):
self.iterator = iterator
self.close_callbacks = makelist(close)
def __iter__(self):
return iter(self.iterator)
def close(self):
for func in self.close_callbacks:
func()
class ResourceManager(object):
""" This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
"""
def __init__(self, base='./', opener=open, cachemode='all'):
self.opener = opener
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
""" Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
to last index (appends to the list).
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
"""
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
""" Iterate over all existing files in all registered paths. """
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
""" Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
            returned. Symlinks are followed. The result is cached to speed up
future lookups. """
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
""" Find a resource and return a file object, or raise IOError. """
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(fname, mode=mode, *args, **kwargs)
class FileUpload(object):
def __init__(self, fileobj, name, filename, headers=None):
""" Wrapper for file uploads. """
#: Open file(-like) object (BytesIO buffer or temporary file)
self.file = fileobj
#: Name of the upload form field
self.name = name
#: Raw filename as sent by the client (may contain unsafe characters)
self.raw_filename = filename
#: A :class:`HeaderDict` with additional headers (e.g. content-type)
self.headers = HeaderDict(headers) if headers else HeaderDict()
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int, default=-1)
@cached_property
def filename(self):
""" Name of the file on the client file system, but normalized to ensure
file system compatibility. An empty filename is returned as 'empty'.
Only ASCII letters, digits, dashes, underscores and dots are
allowed in the final filename. Accents are removed, if possible.
            Whitespace is replaced by a single dash. Leading or trailing dots
or dashes are removed. The filename is limited to 255 characters.
"""
fname = self.raw_filename
if not isinstance(fname, unicode):
fname = fname.decode('utf8', 'ignore')
fname = normalize('NFKD', fname)
fname = fname.encode('ASCII', 'ignore').decode('ASCII')
fname = os.path.basename(fname.replace('\\', os.path.sep))
fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
return fname[:255] or 'empty'
def _copy_file(self, fp, chunk_size=2 ** 16):
read, write, offset = self.file.read, fp.write, self.file.tell()
while 1:
buf = read(chunk_size)
if not buf: break
write(buf)
self.file.seek(offset)
def save(self, destination, overwrite=False, chunk_size=2 ** 16):
""" Save file to disk or copy its content to an open file(-like) object.
If *destination* is a directory, :attr:`filename` is added to the
path. Existing files are not overwritten by default (IOError).
:param destination: File path, directory or file(-like) object.
:param overwrite: If True, replace existing files. (default: False)
:param chunk_size: Bytes to read at a time. (default: 64kb)
"""
if isinstance(destination, basestring): # Except file-likes here
if os.path.isdir(destination):
destination = os.path.join(destination, self.filename)
if not overwrite and os.path.exists(destination):
raise IOError('File exists.')
with open(destination, 'wb') as fp:
self._copy_file(fp, chunk_size)
else:
self._copy_file(destination, chunk_size)
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
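    # Illustrative call (comment only): redirect('/login') raises an
    # HTTPResponse, so no code after it runs in the calling handler.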
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res
def _file_iter_range(fp, offset, bytes, maxread=1024 * 1024):
""" Yield chunks from a range in a file. No chunk is bigger than maxread."""
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root,
mimetype='auto',
download=False,
charset='UTF-8'):
""" Open a file in a safe way and return :exc:`HTTPResponse` with status
        code 200, 304, 403 or 404. The ``Content-Type``, ``Content-Encoding``,
``Content-Length`` and ``Last-Modified`` headers are set if possible.
Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``
requests.
:param filename: Name or path of the file to send.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Defines the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset to use for files with a ``text/*``
mime-type. (default: UTF-8)
"""
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype == 'auto':
if download and download != True:
mimetype, encoding = mimetypes.guess_type(download)
else:
mimetype, encoding = mimetypes.guess_type(filename)
if encoding: headers['Content-Encoding'] = encoding
if mimetype:
if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download == True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
headers['Last-Modified'] = lm
ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
time.gmtime())
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
ranges = request.environ.get('HTTP_RANGE')
if 'HTTP_RANGE' in request.environ:
ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end - 1, clen)
headers["Content-Length"] = str(end - offset)
if body: body = _file_iter_range(body, offset, end - offset)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers)
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
if mode: warnings.simplefilter('default')
DEBUG = bool(mode)
def http_date(value):
if isinstance(value, (datedate, datetime)):
value = value.utctimetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
if not isinstance(value, basestring):
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
return value
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0, )) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':', 1)
return user, pwd
except (KeyError, ValueError):
return None
def parse_range_header(header, maxlen=0):
""" Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive."""
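    # Illustrative parses for maxlen=1000 (comment only):
    #   'bytes=0-499' -> (0, 500);  'bytes=-500' -> (500, 1000)
    #   'bytes=500-'  -> (500, 1000)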
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen - int(end)), maxlen
            elif not end:  # bytes=100- -> skip the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end) + 1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
def _parse_qsl(qs):
r = []
for pair in qs.replace(';', '&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
def _lscmp(a, b):
""" Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. """
return not sum(0 if x == y else 1
for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
""" Encode and sign a pickle-able object. Return a (byte) string """
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
""" Verify and decode an encoded string. Return an object or None."""
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
""" Return True if the argument looks like a encoded cookie."""
return bool(data.startswith(tob('!')) and tob('?') in data)
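# Illustrative round-trip (comment only): for any picklable obj and key,
# cookie_decode(cookie_encode(obj, key), key) returns obj; a wrong key or a
# tampered payload fails the HMAC check and yields None instead.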
def html_escape(string):
""" Escape HTML special characters ``&<>`` and quotes ``'"``. """
    return string.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')\
                 .replace('"', '&quot;').replace("'", '&#039;')
def html_quote(string):
""" Escape and quote a string to be used as an HTTP attribute."""
return '"%s"' % html_escape(string).replace('\n', ' ')\
.replace('\r', ' ').replace('\t', '	')
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
"""
path = '/' + func.__name__.replace('__', '/').lstrip('/')
spec = getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/<%s>' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/<%s>' % arg
yield path
def path_shift(script_name, path_info, shift=1):
""" Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
"""
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if 0 < shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif 0 > shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
def auth_basic(check, realm="private", text="Access denied"):
""" Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. """
def decorator(func):
@functools.wraps(func)
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator
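# Illustrative use of auth_basic (comment only, assumed credentials):
#   @route('/admin')
#   @auth_basic(lambda user, pw: user == 'admin' and pw == 'secret')
#   def admin_page():
#       return 'restricted'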
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
""" Return a callable that relays calls to the current default app. """
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
patch = make_default_app_wrapper('patch')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **options):
self.options = options
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s' % (k, repr(v))
for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, app): # pragma: no cover
from wsgiref.simple_server import make_server
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
import socket
class FixedHandler(WSGIRequestHandler):
def address_string(self): # Prevent reverse DNS lookups please.
return self.client_address[0]
def log_request(*args, **kw):
if not self.quiet:
return WSGIRequestHandler.log_request(*args, **kw)
handler_cls = self.options.get('handler_class', FixedHandler)
server_cls = self.options.get('server_class', WSGIServer)
if ':' in self.host: # Fix wsgiref for IPv6 addresses.
if getattr(server_cls, 'address_family') == socket.AF_INET:
class server_cls(server_cls):
address_family = socket.AF_INET6
self.srv = make_server(self.host, self.port, app, server_cls,
handler_cls)
        self.port = self.srv.server_port  # update to the actual port (0 means random)
try:
self.srv.serve_forever()
except KeyboardInterrupt:
self.srv.server_close() # Prevent ResourceWarning: unclosed socket
raise
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.get('certfile')
if certfile:
del self.options['certfile']
keyfile = self.options.get('keyfile')
if keyfile:
del self.options['keyfile']
server = wsgiserver.CherryPyWSGIServer(**self.options)
if certfile:
server.ssl_certificate = certfile
if keyfile:
server.ssl_private_key = keyfile
try:
server.start()
finally:
server.stop()
class WaitressServer(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port, _quiet=self.quiet)
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
from paste.translogger import TransLogger
handler = TransLogger(handler, setup_console_handler=(not self.quiet))
httpserver.serve(handler,
host=self.host,
port=str(self.port), **self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
_stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
_stderr(" (Fapws3 breaks python thread support)\n")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port, address=self.host)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
        # Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
if not reactor.running:
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* `fast` (default: False) uses libevent's http server, but has some
issues: No streaming, no pipelining, no SSL.
* See gevent.wsgi.WSGIServer() documentation for more options.
"""
def run(self, handler):
from gevent import wsgi, pywsgi, local
if not isinstance(threading.local(), local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if not self.options.pop('fast', None): wsgi = pywsgi
self.options['log'] = None if self.quiet else 'default'
address = (self.host, self.port)
server = wsgi.WSGIServer(address, handler, **self.options)
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: server.stop())
server.serve_forever()
class GeventSocketIOServer(ServerAdapter):
def run(self, handler):
from socketio import server
address = (self.host, self.port)
server.SocketIOServer(address, handler, **self.options).serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested. Options:
* `backlog` adjust the eventlet backlog parameter which is the maximum
number of queued connections. Should be at least 1; the maximum
value is system-dependent.
* `family`: (default is 2) socket family, optional. See socket
documentation for available families.
"""
def run(self, handler):
from eventlet import wsgi, listen, patcher
if not patcher.is_monkey_patched(os):
msg = "Bottle requires eventlet.monkey_patch() (before import)"
raise RuntimeError(msg)
socket_args = {}
for arg in ('backlog', 'family'):
try:
socket_args[arg] = self.options.pop(arg)
except KeyError:
pass
address = (self.host, self.port)
try:
wsgi.server(listen(address, **socket_args), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have old version of eventlet
wsgi.server(listen(address), handler)
class RocketServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', {'wsgi_app': handler})
server.start()
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AiohttpServer(ServerAdapter):
""" Untested.
aiohttp
https://pypi.python.org/pypi/aiohttp/
"""
def run(self, handler):
import asyncio
from aiohttp.wsgi import WSGIServerHttpProtocol
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
protocol_factory = lambda: WSGIServerHttpProtocol(
handler,
readpayload=True,
debug=(not self.quiet))
self.loop.run_until_complete(self.loop.create_server(protocol_factory,
self.host,
self.port))
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: self.loop.stop())
try:
self.loop.run_forever()
except KeyboardInterrupt:
self.loop.stop()
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer,
WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'geventSocketIO': GeventSocketIOServer,
'rocket': RocketServer,
'bjoern': BjoernServer,
'aiohttp': AiohttpServer,
'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
local variables. Example: ``import_string('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN
NORUN, nr_old = True, NORUN
tmp = default_app.push() # Create a new "default application"
try:
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporary added default application
NORUN = nr_old
_debug = debug
def run(app=None,
server='wsgiref',
host='127.0.0.1',
port=8080,
interval=1,
reloader=False,
quiet=False,
plugins=None,
debug=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
import subprocess
lockfile = None
try:
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
if isinstance(plugin, basestring):
plugin = load(plugin)
app.install(plugin)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" %
(__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" %
(server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
class FileCheckerThread(threading.Thread):
""" Interrupt main-thread as soon as a changed module file is detected,
the lockfile gets deleted or gets to old. """
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.daemon = True
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda p: os.stat(p).st_mtime
files = dict()
for module in list(sys.modules.values()):
path = getattr(module, '__file__', '')
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in list(files.items()):
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, *_):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl', 'html', 'thtml', 'stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self,
source=None,
name=None,
lookup=None,
encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup] if lookup else []
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=None):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
depr('The template lookup path list should not be empty.',
True) #0.12
lookup = ['.']
if os.path.isabs(name) and os.path.isfile(name):
depr('Absolute template path names are deprecated.', True) #0.12
return os.path.abspath(name)
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
""" This reads or sets the global settings stored in class.settings. """
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding': self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name,
filename=self.filename,
lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, globals={}, **kwargs):
from jinja2 import Environment, FunctionLoader
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if globals: self.env.globals.update(globals)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return f.read().decode(self.encoding)
class SimpleTemplate(BaseTemplate):
def prepare(self,
escape_func=html_escape,
noescape=False,
syntax=None, **ka):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
self.syntax = syntax
if noescape:
self._str, self._escape = self._escape, self._str
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
source = self.source
if not source:
with open(self.filename, 'rb') as f:
source = f.read()
try:
source, encoding = touni(source), 'utf8'
except UnicodeError:
depr('Template encodings other than utf8 are not supported.') #0.11
source, encoding = touni(source, 'latin1'), 'latin1'
parser = StplParser(source, encoding=encoding, syntax=self.syntax)
code = parser.translate()
self.encoding = parser.encoding
return code
def _rebase(self, _env, _name=None, **kwargs):
_env['_rebase'] = (_name, kwargs)
def _include(self, _env, _name=None, **kwargs):
env = _env.copy()
env.update(kwargs)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(env['_stdout'], env)
def execute(self, _stdout, kwargs):
env = self.defaults.copy()
env.update(kwargs)
env.update({
'_stdout': _stdout,
'_printlist': _stdout.extend,
'include': functools.partial(self._include, env),
'rebase': functools.partial(self._rebase, env),
'_rebase': None,
'_str': self._str,
'_escape': self._escape,
'get': env.get,
'setdefault': env.setdefault,
'defined': env.__contains__
})
eval(self.co, env)
if env.get('_rebase'):
subtpl, rargs = env.pop('_rebase')
rargs['base'] = ''.join(_stdout) #copy stdout
del _stdout[:] # clear stdout
return self._include(env, subtpl, **rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}
stdout = []
for dictarg in args:
env.update(dictarg)
env.update(kwargs)
self.execute(stdout, env)
return ''.join(stdout)
class StplSyntaxError(TemplateError):
pass
class StplParser(object):
""" Parser for stpl templates. """
_re_cache = {} #: Cache for compiled re patterns
# This huge pile of voodoo magic splits python code into 8 different tokens.
# We use the verbose (?x) regex mode to make this more manageable
_re_tok = _re_inl = r'''((?mx) # verbose and dot-matches-newline mode
[urbURB]*
(?: ''(?!')
|""(?!")
|'{6}
|"{6}
|'(?:[^\\']|\\.)+?'
|"(?:[^\\"]|\\.)+?"
|'{3}(?:[^\\]|\\.|\n)+?'{3}
|"{3}(?:[^\\]|\\.|\n)+?"{3}
)
)'''
_re_inl = _re_tok.replace(r'|\n', '') # We re-use this string pattern later
_re_tok += r'''
# 2: Comments (until end of line, but not the newline itself)
|(\#.*)
# 3: Open and close (4) grouping tokens
|([\[\{\(])
|([\]\}\)])
# 5,6: Keywords that start or continue a python block (only start of line)
|^([\ \t]*(?:if|for|while|with|try|def|class)\b)
|^([\ \t]*(?:elif|else|except|finally)\b)
# 7: Our special 'end' keyword (but only if it stands alone)
|((?:^|;)[\ \t]*end[\ \t]*(?=(?:%(block_close)s[\ \t]*)?\r?$|;|\#))
# 8: A customizable end-of-code-block template token (only end of line)
|(%(block_close)s[\ \t]*(?=\r?$))
# 9: And finally, a single newline. The 10th token is 'everything else'
|(\r?\n)
'''
# Match the start tokens of code areas in a template
_re_split = r'''(?m)^[ \t]*(\\?)((%(line_start)s)|(%(block_start)s))'''
# Match inline statements (may contain python strings)
_re_inl = r'''%%(inline_start)s((?:%s|[^'"\n]+?)*?)%%(inline_end)s''' % _re_inl
default_syntax = '<% %> % {{ }}'
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
self.paren_depth = 0
def get_syntax(self):
""" Tokens as a space separated string (default: <% %> % {{ }}) """
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
        if syntax not in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p % pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source, pos=self.offset)
if m:
text = self.source[self.offset:m.start()]
self.text_buffer.append(text)
self.offset = m.end()
if m.group(1): # Escape syntax
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(self.source[m.start():m.start(1)] +
m.group(2) + line + sep)
self.offset += len(line + sep)
continue
self.flush_text()
self.offset += self.read_code(self.source[self.offset:],
multiline=bool(m.group(4)))
else:
break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, pysource, multiline):
code_line, comment = '', ''
offset = 0
while True:
m = self.re_tok.search(pysource, pos=offset)
if not m:
code_line += pysource[offset:]
offset = len(pysource)
self.write_code(code_line.strip(), comment)
break
code_line += pysource[offset:m.start()]
offset = m.end()
_str, _com, _po, _pc, _blk1, _blk2, _end, _cend, _nl = m.groups()
if self.paren_depth > 0 and (_blk1 or _blk2): # a if b else c
code_line += _blk1 or _blk2
continue
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _po: # open parenthesis
self.paren_depth += 1
code_line += _po
elif _pc: # close parenthesis
if self.paren_depth > 0:
# we could check for matching parentheses here, but it's
# easier to leave that to python - just check counts
self.paren_depth -= 1
code_line += _pc
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line, self.indent_mod = _blk1, -1
self.indent += 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line, self.indent_mod = _blk2, -1
elif _end: # The non-standard 'end'-keyword (ends a block)
self.indent -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
return offset
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n' + ' ' * self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n') + 1
self.write_code(code)
@staticmethod
def process_inline(chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
def write_code(self, line, comment=''):
code = ' ' * (self.indent + self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
def template(*args, **kwargs):
"""
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
"""
tpl = args[0] if args else None
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]:
kwargs.update(dictarg)
return TEMPLATES[tplid].render(kwargs)
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template,
template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
def view(tpl_name, **defaults):
""" Decorator: renders a template for a handler.
        The handler can control its behavior like this:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses.copy()
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s' % (k, v))
for (k, v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, request
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans-serif;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multithreaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else
__name__ + ".ext", 'bottle_%s').module
if __name__ == '__main__':
opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
if opt.version:
_stdout('Bottle %s\n' % __version__)
sys.exit(0)
if not args:
parser.print_help()
_stderr('\nError: No application entry point specified.\n')
sys.exit(1)
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (opt.bind or 'localhost'), 8080
if ':' in host and host.rfind(']') < host.rfind(':'):
host, port = host.rsplit(':', 1)
host = host.strip('[]')
run(args[0],
host=host,
port=int(port),
server=opt.server,
reloader=opt.reload,
plugins=opt.plugin,
debug=opt.debug)
# THE END
| {
"content_hash": "65e86d1675853918628661315e2d65b1",
"timestamp": "",
"source": "github",
"line_count": 3890,
"max_line_length": 103,
"avg_line_length": 38.139074550128534,
"alnum_prop": 0.5710665201771356,
"repo_name": "moonfruit/yyfeed",
"id": "dc0e47f8400e9cb1ceceddd1bd8b679f526d37d5",
"size": "148407",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/bottle.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "34"
},
{
"name": "HTML",
"bytes": "1691"
},
{
"name": "JavaScript",
"bytes": "900"
},
{
"name": "Python",
"bytes": "377109"
},
{
"name": "Shell",
"bytes": "410"
}
],
"symlink_target": ""
} |
import mock
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_serialization import jsonutils
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
import webob
from cinder import context
from cinder import exception
from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_service
CONF = cfg.CONF
def app():
# no auth, just let environ['cinder.context'] pass through
api = fakes.router.APIRouter()
mapper = fakes.urlmap.URLMap()
mapper['/v2'] = api
return mapper
def volume_get(self, context, volume_id, viewable_admin_meta=False):
if volume_id == fake.VOLUME_ID:
return objects.Volume(context, id=fake.VOLUME_ID,
_name_id=fake.VOLUME2_ID,
host='fake_host', cluster_name=None)
raise exception.VolumeNotFound(volume_id=volume_id)
def api_get_manageable_snapshots(*args, **kwargs):
"""Replacement for cinder.volume.api.API.get_manageable_snapshots."""
snap_id = 'ffffffff-0000-ffff-0000-ffffffffffff'
snaps = [
{'reference': {'source-name': 'snapshot-%s' % snap_id},
'size': 4,
'extra_info': 'qos_setting:high',
'safe_to_manage': False,
'reason_not_safe': 'snapshot in use',
'cinder_id': snap_id,
'source_reference': {'source-name':
'volume-00000000-ffff-0000-ffff-000000'}},
{'reference': {'source-name': 'mysnap'},
'size': 5,
'extra_info': 'qos_setting:low',
'safe_to_manage': True,
'reason_not_safe': None,
'cinder_id': None,
'source_reference': {'source-name': 'myvol'}}]
return snaps
@mock.patch('cinder.volume.api.API.get', volume_get)
class SnapshotManageTest(test.TestCase):
"""Test cases for cinder/api/contrib/snapshot_manage.py
The API extension adds a POST /os-snapshot-manage API that is passed a
cinder volume id, and a driver-specific reference parameter.
If everything is passed correctly,
then the cinder.volume.api.API.manage_existing_snapshot method
is invoked to manage an existing storage object on the host.
In this set of test cases, we are ensuring that the code correctly parses
the request structure and raises the correct exceptions when things are not
right, and calls down into cinder.volume.api.API.manage_existing_snapshot
with the correct arguments.
"""
def setUp(self):
super(SnapshotManageTest, self).setUp()
self._admin_ctxt = context.RequestContext(fake.USER_ID,
fake.PROJECT_ID,
is_admin=True)
self._non_admin_ctxt = context.RequestContext(fake.USER_ID,
fake.PROJECT_ID,
is_admin=False)
def _get_resp_post(self, body):
"""Helper to execute an os-snapshot-manage API call."""
req = webob.Request.blank('/v2/%s/os-snapshot-manage' %
fake.PROJECT_ID)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.environ['cinder.context'] = self._admin_ctxt
req.body = jsonutils.dump_as_bytes(body)
res = req.get_response(app())
return res
@mock.patch('cinder.volume.rpcapi.VolumeAPI.manage_existing_snapshot')
@mock.patch('cinder.volume.api.API.create_snapshot_in_db')
@mock.patch('cinder.db.sqlalchemy.api.service_get')
def test_manage_snapshot_ok(self, mock_db,
mock_create_snapshot, mock_rpcapi):
"""Test successful manage snapshot execution.
Tests for correct operation when valid arguments are passed in the
request body. We ensure that cinder.volume.api.API.manage_existing got
called with the correct arguments, and that we return the correct HTTP
code to the caller.
"""
mock_db.return_value = fake_service.fake_service_obj(
self._admin_ctxt,
binary='cinder-volume')
body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': 'fake_ref'}}
res = self._get_resp_post(body)
self.assertEqual(202, res.status_int, res)
# Check the db.service_get was called with correct arguments.
mock_db.assert_called_once_with(
mock.ANY, None, host='fake_host', binary='cinder-volume',
cluster_name=None)
# Check the create_snapshot_in_db was called with correct arguments.
self.assertEqual(1, mock_create_snapshot.call_count)
args = mock_create_snapshot.call_args[0]
named_args = mock_create_snapshot.call_args[1]
self.assertEqual(fake.VOLUME_ID, args[1].get('id'))
# We should commit quota in cinder-volume layer for this operation.
self.assertFalse(named_args['commit_quota'])
# Check the volume_rpcapi.manage_existing_snapshot was called with
# correct arguments.
self.assertEqual(1, mock_rpcapi.call_count)
args = mock_rpcapi.call_args[0]
self.assertEqual('fake_ref', args[2])
@mock.patch('cinder.objects.service.Service.is_up',
return_value=True,
new_callable=mock.PropertyMock)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.manage_existing_snapshot')
@mock.patch('cinder.volume.api.API.create_snapshot_in_db')
@mock.patch('cinder.db.sqlalchemy.api.service_get')
def test_manage_snapshot_disabled(self, mock_db, mock_create_snapshot,
mock_rpcapi, mock_is_up):
"""Test manage snapshot failure due to disabled service."""
mock_db.return_value = fake_service.fake_service_obj(self._admin_ctxt,
disabled=True)
body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': 'fake_ref'}}
res = self._get_resp_post(body)
self.assertEqual(400, res.status_int, res)
self.assertEqual(exception.ServiceUnavailable.message,
res.json['badRequest']['message'])
mock_create_snapshot.assert_not_called()
mock_rpcapi.assert_not_called()
mock_is_up.assert_not_called()
@mock.patch('cinder.objects.service.Service.is_up', return_value=False,
new_callable=mock.PropertyMock)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.manage_existing_snapshot')
@mock.patch('cinder.volume.api.API.create_snapshot_in_db')
@mock.patch('cinder.db.sqlalchemy.api.service_get')
def test_manage_snapshot_is_down(self, mock_db, mock_create_snapshot,
mock_rpcapi, mock_is_up):
"""Test manage snapshot failure due to down service."""
mock_db.return_value = fake_service.fake_service_obj(self._admin_ctxt)
body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': 'fake_ref'}}
res = self._get_resp_post(body)
self.assertEqual(400, res.status_int, res)
self.assertEqual(exception.ServiceUnavailable.message,
res.json['badRequest']['message'])
mock_create_snapshot.assert_not_called()
mock_rpcapi.assert_not_called()
self.assertTrue(mock_is_up.called)
def test_manage_snapshot_missing_volume_id(self):
"""Test correct failure when volume_id is not specified."""
body = {'snapshot': {'ref': 'fake_ref'}}
res = self._get_resp_post(body)
self.assertEqual(400, res.status_int)
def test_manage_snapshot_missing_ref(self):
"""Test correct failure when the ref is not specified."""
body = {'snapshot': {'volume_id': fake.VOLUME_ID}}
res = self._get_resp_post(body)
self.assertEqual(400, res.status_int)
def test_manage_snapshot_error_body(self):
"""Test correct failure when body is invaild."""
body = {'error_snapshot': {'volume_id': fake.VOLUME_ID}}
res = self._get_resp_post(body)
self.assertEqual(400, res.status_int)
def test_manage_snapshot_error_volume_id(self):
"""Test correct failure when volume can't be found."""
body = {'snapshot': {'volume_id': 'error_volume_id',
'ref': 'fake_ref'}}
res = self._get_resp_post(body)
self.assertEqual(404, res.status_int)
def _get_resp_get(self, host, detailed, paging, admin=True):
"""Helper to execute a GET os-snapshot-manage API call."""
params = {'host': host}
if paging:
params.update({'marker': '1234', 'limit': 10,
'offset': 4, 'sort': 'reference:asc'})
query_string = "?%s" % urlencode(params)
detail = ""
if detailed:
detail = "/detail"
url = "/v2/%s/os-snapshot-manage%s%s" % (fake.PROJECT_ID, detail,
query_string)
req = webob.Request.blank(url)
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
req.environ['cinder.context'] = (self._admin_ctxt if admin
else self._non_admin_ctxt)
res = req.get_response(app())
return res
@mock.patch('cinder.volume.api.API.get_manageable_snapshots',
wraps=api_get_manageable_snapshots)
def test_get_manageable_snapshots_non_admin(self, mock_api_manageable):
res = self._get_resp_get('fakehost', False, False, admin=False)
self.assertEqual(403, res.status_int)
self.assertEqual(False, mock_api_manageable.called)
res = self._get_resp_get('fakehost', True, False, admin=False)
self.assertEqual(403, res.status_int)
self.assertEqual(False, mock_api_manageable.called)
@mock.patch('cinder.volume.api.API.get_manageable_snapshots',
wraps=api_get_manageable_snapshots)
def test_get_manageable_snapshots_ok(self, mock_api_manageable):
res = self._get_resp_get('fakehost', False, False)
snap_name = 'snapshot-ffffffff-0000-ffff-0000-ffffffffffff'
exp = {'manageable-snapshots':
[{'reference': {'source-name': snap_name}, 'size': 4,
'safe_to_manage': False,
'source_reference':
{'source-name': 'volume-00000000-ffff-0000-ffff-000000'}},
{'reference': {'source-name': 'mysnap'}, 'size': 5,
'safe_to_manage': True,
'source_reference': {'source-name': 'myvol'}}]}
self.assertEqual(200, res.status_int)
self.assertEqual(jsonutils.loads(res.body), exp)
mock_api_manageable.assert_called_once_with(
self._admin_ctxt, 'fakehost', None, limit=CONF.osapi_max_limit,
marker=None, offset=0, sort_dirs=['desc'],
sort_keys=['reference'])
@mock.patch('cinder.volume.api.API.get_manageable_snapshots',
side_effect=messaging.RemoteError(
exc_type='InvalidInput', value='marker not found: 1234'))
def test_get_manageable_snapshots_non_existent_marker(
self, mock_api_manageable):
res = self._get_resp_get('fakehost', detailed=False, paging=True)
self.assertEqual(400, res.status_int)
self.assertTrue(mock_api_manageable.called)
@mock.patch('cinder.volume.api.API.get_manageable_snapshots',
wraps=api_get_manageable_snapshots)
def test_get_manageable_snapshots_detailed_ok(self, mock_api_manageable):
res = self._get_resp_get('fakehost', True, True)
snap_id = 'ffffffff-0000-ffff-0000-ffffffffffff'
exp = {'manageable-snapshots':
[{'reference': {'source-name': 'snapshot-%s' % snap_id},
'size': 4, 'safe_to_manage': False, 'cinder_id': snap_id,
'reason_not_safe': 'snapshot in use',
'extra_info': 'qos_setting:high',
'source_reference':
{'source-name': 'volume-00000000-ffff-0000-ffff-000000'}},
{'reference': {'source-name': 'mysnap'}, 'size': 5,
'cinder_id': None, 'safe_to_manage': True,
'reason_not_safe': None, 'extra_info': 'qos_setting:low',
'source_reference': {'source-name': 'myvol'}}]}
self.assertEqual(200, res.status_int)
self.assertEqual(jsonutils.loads(res.body), exp)
mock_api_manageable.assert_called_once_with(
self._admin_ctxt, 'fakehost', None, limit=10, marker='1234',
offset=4, sort_dirs=['asc'], sort_keys=['reference'])
@mock.patch('cinder.volume.api.API.get_manageable_snapshots',
side_effect=messaging.RemoteError(
exc_type='InvalidInput', value='marker not found: 1234'))
def test_get_manageable_snapshots_non_existent_marker_detailed(
self, mock_api_manageable):
res = self._get_resp_get('fakehost', detailed=True, paging=True)
self.assertEqual(400, res.status_int)
self.assertTrue(mock_api_manageable.called)
@mock.patch('cinder.objects.service.Service.is_up', return_value=True)
@mock.patch('cinder.db.sqlalchemy.api.service_get')
def test_get_manageable_snapshots_disabled(self, mock_db, mock_is_up):
mock_db.return_value = fake_service.fake_service_obj(self._admin_ctxt,
disabled=True)
res = self._get_resp_get('host_ok', False, True)
self.assertEqual(400, res.status_int, res)
self.assertEqual(exception.ServiceUnavailable.message,
res.json['badRequest']['message'])
mock_is_up.assert_not_called()
@mock.patch('cinder.objects.service.Service.is_up', return_value=False,
new_callable=mock.PropertyMock)
@mock.patch('cinder.db.sqlalchemy.api.service_get')
def test_get_manageable_snapshots_is_down(self, mock_db, mock_is_up):
mock_db.return_value = fake_service.fake_service_obj(self._admin_ctxt)
res = self._get_resp_get('host_ok', False, True)
self.assertEqual(400, res.status_int, res)
self.assertEqual(exception.ServiceUnavailable.message,
res.json['badRequest']['message'])
self.assertTrue(mock_is_up.called)
| {
"content_hash": "6fa48da71c624a5da67a1d97190ac741",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 79,
"avg_line_length": 47.39087947882736,
"alnum_prop": 0.607876830022682,
"repo_name": "ge0rgi/cinder",
"id": "17cc03b1edda327afa5115a67338c9c496dd0ff1",
"size": "15208",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/ocata",
"path": "cinder/tests/unit/api/contrib/test_snapshot_manage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19963591"
}
],
"symlink_target": ""
} |
import sys
from pkg_resources import parse_version
from httplib import HTTPConnection
from xml.etree import ElementTree
from util import cacheable, is_release
PYTHON_HG_HOST = 'hg.python.org'
PYTHON_HG_PATH = '/cpython/atom-tags'
PYTHON_DOWNLOAD_URL = 'http://www.python.org/ftp/python/%(v)s/Python-%(v)s.tgz'
ATOM_NS = 'http://www.w3.org/2005/Atom'
ATOM_XPATH = './/{%(ns)s}entry/{%(ns)s}content' % {'ns': ATOM_NS}
@cacheable
def all_versions():
versions = []
try:
conn = HTTPConnection(PYTHON_HG_HOST)
conn.request('GET', PYTHON_HG_PATH)
response = conn.getresponse()
if response.status != 200:
            raise RuntimeError('Cannot retrieve versions list at this time '
                               '(HTTP error %d)' % response.status)
body = response.read()
root = ElementTree.fromstring(body)
versions = sorted([v.text[1:] for v in root.iterfind(ATOM_XPATH)],
key=lambda x: parse_version(x),
reverse=True)
except Exception as ex:
sys.stdout.write(ex.message)
sys.exit(1)
return versions
def all_releases():
# TODO: Move _is_release logic into all_releases() to reduce duplication
# in all of the methods that follow
return [x for x in all_versions() if is_release(x)]
def series_versions(series=None):
if not series:
return all_versions()
series_prefix = series + '.'
return [r for r in all_versions()
if r == series or series_prefix == r[0:len(series_prefix)]]
def series_releases(series=None):
if not series:
return all_releases()
series_prefix = series + '.'
return [r for r in all_releases()
if r == series or series_prefix == r[0:len(series_prefix)]]
def last_version(series=None):
versions = series_versions(series)
return versions[0] if versions else None
def last_release(series=None):
versions = series_releases(series) if series else all_releases()
return versions[0] if versions else None
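# Hedged usage sketch (not part of the original module; the return values are
# illustrative and depend on live data fetched from hg.python.org):
#
#     last_release('2.7')      # e.g. '2.7.13' -- newest 2.7.x final release
#     series_versions('3.4')   # every 3.4 tag, alphas and rcs included
#     last_version()           # newest tag across all series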
| {
"content_hash": "074ce7da85275d982fdd7a97e3642e29",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 79,
"avg_line_length": 28.34246575342466,
"alnum_prop": 0.6283228612856452,
"repo_name": "briancline/pyruse",
"id": "1889c9ea5198f482ed7d7e6c49ba828362654fa2",
"size": "2069",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyruse/versions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6498"
}
],
"symlink_target": ""
} |
from rest_framework.parsers import JSONParser
from drf_hal_json import HAL_JSON_MEDIA_TYPE
from drf_hal_json.renderers import JsonHalRenderer
class JsonHalParser(JSONParser):
media_type = HAL_JSON_MEDIA_TYPE
renderer_class = JsonHalRenderer
| {
"content_hash": "ac6a2563c992b05eb942f50171eceadb",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 50,
"avg_line_length": 28,
"alnum_prop": 0.7976190476190477,
"repo_name": "seebass/drf-hal-json",
"id": "15f39f39c2656840b39c2ef5f0a4da90d5f18245",
"size": "252",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "drf_hal_json/parsers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17767"
}
],
"symlink_target": ""
} |
from google.cloud import aiplatform_v1
async def sample_import_feature_values():
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
avro_source = aiplatform_v1.AvroSource()
avro_source.gcs_source.uris = ['uris_value_1', 'uris_value_2']
feature_specs = aiplatform_v1.FeatureSpec()
feature_specs.id = "id_value"
request = aiplatform_v1.ImportFeatureValuesRequest(
avro_source=avro_source,
feature_time_field="feature_time_field_value",
entity_type="entity_type_value",
feature_specs=feature_specs,
)
# Make the request
    operation = await client.import_feature_values(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_ImportFeatureValues_async]
| {
"content_hash": "78e809638029e8b1d08d5cbe07937737",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 88,
"avg_line_length": 29.4375,
"alnum_prop": 0.7070063694267515,
"repo_name": "googleapis/python-aiplatform",
"id": "fb38d59f8bde2cb13b37f11cce310ce168156122",
"size": "1977",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_import_feature_values_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
} |
import os
from PIL import Image
# VGG16 input size...
compressionSize = 224, 224
# Delete a gif once it's been split into frames
removeProcessedGifs = False
def iter_frames(im):
try:
        i = 0
while 1:
im.seek(i)
imframe = im.copy()
if i == 0:
palette = imframe.getpalette()
else:
imframe.putpalette(palette)
yield imframe
i += 1
except EOFError:
pass
def gen_frames(im, name):
for i, frame in enumerate(iter_frames(im)):
x = (name + '_%d.png') % i
frame.thumbnail(compressionSize, Image.ANTIALIAS)
frame.save(x, optimize=True, quality=100, **frame.info)
frame.close()
for i in range(0, 102068):
ii = str(i)
if os.path.isfile('./gifs/' + ii + '.gif'):
print i
try:
os.mkdir('./gifs/' + ii)
im = Image.open('./gifs/' + ii + '.gif')
gen_frames(im, './gifs/' + ii + '/' + ii)
except Exception:
continue
im.close()
if removeProcessedGifs:
os.remove('./gifs/' + ii + '.gif')
| {
"content_hash": "36d47392f57188a2bdcbd4c4965209c0",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 63,
"avg_line_length": 26.15909090909091,
"alnum_prop": 0.5082536924413553,
"repo_name": "chcaru/gcnet",
"id": "e9375eb478e63c59e6de7a8067e3249e5055fe2c",
"size": "1152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prepareGifs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1671"
},
{
"name": "Python",
"bytes": "21020"
}
],
"symlink_target": ""
} |
import m5
from m5.objects import *
# This configuration shows a simple setup of a TrafficGen (CPU) and an
# external TLM port for SystemC co-simulation
#
# Base System Architecture:
# +-------------+ +-----+ ^
# | System Port | | CPU | |
# +-------+-----+ +--+--+ |
# | | | gem5 World
# | +----+ | (see this file)
# | | |
# +-------v------v-------+ |
# | Membus | v
# +----------------+-----+ External Port (see sc_slave_port.*)
# | ^
# +---v---+ | TLM World
# | TLM | | (see sc_target.*)
# +-------+ v
#
# Create a system with a Crossbar and a TrafficGenerator as CPU:
system = System()
system.membus = IOXBar(width = 16)
system.physmem = SimpleMemory() # This must be instantiated, even if not needed
system.cpu = TrafficGen(config_file = "conf/tgen.cfg")
system.clk_domain = SrcClockDomain(clock = '1.5GHz',
voltage_domain = VoltageDomain(voltage = '1V'))
# Create an external TLM port:
system.tlm = ExternalSlave()
system.tlm.addr_ranges = [AddrRange('512MB')]
system.tlm.port_type = "tlm_slave"
system.tlm.port_data = "transactor"
# Route the connections:
system.cpu.port = system.membus.slave
system.system_port = system.membus.slave
system.membus.master = system.tlm.port
# Start the simulation:
root = Root(full_system = False, system = system)
root.system.mem_mode = 'timing'
m5.instantiate()
m5.simulate() #Simulation time specified later on commandline
| {
"content_hash": "2549b22f922318cafc223d888d8085f5",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 79,
"avg_line_length": 33.82608695652174,
"alnum_prop": 0.5649100257069408,
"repo_name": "TUD-OS/gem5-dtu",
"id": "ebf403fcbd2a03bf3b1f4f0af941b1dbe9d0abbf",
"size": "3151",
"binary": false,
"copies": "9",
"ref": "refs/heads/dtu-mmu",
"path": "util/tlm/conf/tlm_slave.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "648342"
},
{
"name": "Awk",
"bytes": "3386"
},
{
"name": "C",
"bytes": "1717604"
},
{
"name": "C++",
"bytes": "35149040"
},
{
"name": "CMake",
"bytes": "79529"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "Forth",
"bytes": "15790"
},
{
"name": "HTML",
"bytes": "136898"
},
{
"name": "Java",
"bytes": "3179"
},
{
"name": "M4",
"bytes": "75007"
},
{
"name": "Makefile",
"bytes": "68265"
},
{
"name": "Objective-C",
"bytes": "24714"
},
{
"name": "Perl",
"bytes": "33696"
},
{
"name": "Python",
"bytes": "6073714"
},
{
"name": "Roff",
"bytes": "8783"
},
{
"name": "SWIG",
"bytes": "173"
},
{
"name": "Scala",
"bytes": "14236"
},
{
"name": "Shell",
"bytes": "101649"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "Vim Script",
"bytes": "4335"
},
{
"name": "sed",
"bytes": "3927"
}
],
"symlink_target": ""
} |
import math
import numpy as np
class Activation(object):
def __init__(self):
pass
@staticmethod
def get(name):
if name == "sigmoid":
return Activation.sigmoid
if name == "np_sigmoid":
return Activation.np_sigmoid
return None
@staticmethod
def get_d(name):
if name == "sigmoid":
return Activation.d_sigmoid
if name == "np_sigmoid":
return Activation.np_d_sigmoid
return None
@staticmethod
def sigmoid(x):
return 1.0 / (1.0 + math.exp(-x))
@staticmethod
def d_sigmoid(x):
return Activation.sigmoid(x) * (1.0 - Activation.sigmoid(x))
@staticmethod
def np_sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
@staticmethod
def np_d_sigmoid(x):
return Activation.np_sigmoid(x) * (1.0 - Activation.np_sigmoid(x))
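# Hedged usage sketch (not part of the original module): look an activation up
# by name, then apply it and its derivative.
#
#     f = Activation.get("np_sigmoid")
#     df = Activation.get_d("np_sigmoid")
#     f(np.zeros(3))   # -> array([0.5, 0.5, 0.5])
#     df(np.zeros(3))  # -> array([0.25, 0.25, 0.25])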
| {
"content_hash": "07ab80dea95efef796f32bc2f8b7bf5b",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 74,
"avg_line_length": 22.375,
"alnum_prop": 0.5631284916201117,
"repo_name": "awlange/brainsparks",
"id": "8bb04afaca74b22c564beaa04b9d543610e22a13",
"size": "895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sandbox/activation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1107074"
}
],
"symlink_target": ""
} |
from functools import wraps
'''
class debug() :
def __init__(self,*args,**kwargs) :
self.args = args
def __call__(self,func) :
@wraps(func)
def wrapper(*args,**kwargs) :
print('entering ' + func.__name__)
return(func(*args))
print('exiting ' + func.__name__)
return wrapper
class logs() :
def __init__(self,*Args,**kwargs) :
self.args = args
print(self.args)
def __call__(self,func) :
@wraps(func)
def wrapper(*Args,**kwargs) :
print('to be edited as of now')
return wrapper
'''
def debug(*arg, **kwarg):
    def wrapper_function(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            print('entering ' + func.__name__)
            result = func(*args, **kwargs)
            # print after the call so the exit message is actually reachable
            print('exiting ' + func.__name__)
            return result
        return wrapper
    return wrapper_function
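# Hedged usage sketch (not part of the original module): decorate a function to
# trace entry and exit.
#
#     @debug()
#     def add(a, b):
#         return a + b
#
#     add(1, 2)  # prints 'entering add', then 'exiting add', and returns 3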
"content_hash": "cc40daa450953c7f4d727d2d9e93c999",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 37,
"avg_line_length": 21.25,
"alnum_prop": 0.6091503267973856,
"repo_name": "projectscara2014/scara",
"id": "658b6fd3bdcefcf830c5b0ddee6fdcce3b383dfb",
"size": "765",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "working_directory/utils/debug.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "11964"
},
{
"name": "Python",
"bytes": "100055"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
import celery
import djcelery
import sys
from django.core.management.base import BaseCommand
from djcelery.compat import setenv
DB_SHARED_THREAD = """\
DatabaseWrapper objects created in a thread can only \
be used in that same thread. The object with alias '{0}' \
was created in thread id {1} and this is thread id {2}.\
"""
def patch_thread_ident():
    # monkey patch django.
    # This patch makes sure that we use real threads to get the ident, which
    # matters when we are using gevent or eventlet.
    # -- patch taken from gunicorn
if getattr(patch_thread_ident, 'called', False):
return
try:
from django.db.backends import BaseDatabaseWrapper, DatabaseError
if 'validate_thread_sharing' in BaseDatabaseWrapper.__dict__:
import thread
_get_ident = thread.get_ident
__old__init__ = BaseDatabaseWrapper.__init__
def _init(self, *args, **kwargs):
__old__init__(self, *args, **kwargs)
self._thread_ident = _get_ident()
def _validate_thread_sharing(self):
if (not self.allow_thread_sharing and
self._thread_ident != _get_ident()):
raise DatabaseError(
                        DB_SHARED_THREAD.format(
                            self.alias, self._thread_ident, _get_ident()),
)
BaseDatabaseWrapper.__init__ = _init
BaseDatabaseWrapper.validate_thread_sharing = \
_validate_thread_sharing
patch_thread_ident.called = True
except ImportError:
pass
patch_thread_ident()
class CeleryCommand(BaseCommand):
options = ()
if hasattr(BaseCommand, 'option_list'):
options = BaseCommand.option_list
else:
def add_arguments(self, parser):
option_typemap = {
"string": str,
"int": int,
"float": float
}
for opt in self.option_list:
option = {k: v
for k, v in opt.__dict__.items()
if v is not None}
flags = (option.get("_long_opts", []) +
option.get("_short_opts", []))
if option.get('default') == ('NO', 'DEFAULT'):
option['default'] = None
if option.get("nargs") == 1:
del option["nargs"]
del option["_long_opts"]
del option["_short_opts"]
if "type" in option:
opttype = option["type"]
option["type"] = option_typemap.get(opttype, opttype)
parser.add_argument(*flags, **option)
skip_opts = ['--app', '--loader', '--config', '--no-color']
requires_system_checks = False
keep_base_opts = False
stdout, stderr = sys.stdout, sys.stderr
def get_version(self):
return 'celery {c.__version__}\ndjango-celery {d.__version__}'.format(
c=celery, d=djcelery,
)
def execute(self, *args, **options):
broker = options.get('broker')
if broker:
self.set_broker(broker)
super(CeleryCommand, self).execute(*args, **options)
def set_broker(self, broker):
setenv('CELERY_BROKER_URL', broker)
def run_from_argv(self, argv):
self.handle_default_options(argv[2:])
return super(CeleryCommand, self).run_from_argv(argv)
def handle_default_options(self, argv):
acc = []
broker = None
for i, arg in enumerate(argv):
# --settings and --pythonpath are also handled
# by BaseCommand.handle_default_options, but that is
# called with the resulting options parsed by optparse.
if '--settings=' in arg:
_, settings_module = arg.split('=')
setenv('DJANGO_SETTINGS_MODULE', settings_module)
elif '--pythonpath=' in arg:
_, pythonpath = arg.split('=')
sys.path.insert(0, pythonpath)
elif '--broker=' in arg:
_, broker = arg.split('=')
elif arg == '-b':
broker = argv[i + 1]
else:
acc.append(arg)
if broker:
self.set_broker(broker)
return argv if self.keep_base_opts else acc
def die(self, msg):
sys.stderr.write(msg)
sys.stderr.write('\n')
sys.exit()
def _is_unwanted_option(self, option):
return option._long_opts and option._long_opts[0] in self.skip_opts
@property
def option_list(self):
return [x for x in self.options if not self._is_unwanted_option(x)]
| {
"content_hash": "abc27847b1ef617fa24701a1b791d8c5",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 78,
"avg_line_length": 34.30714285714286,
"alnum_prop": 0.5407037268373933,
"repo_name": "axiom-data-science/django-celery",
"id": "f8807af584855f692e69708f5e594e73859f38b7",
"size": "4803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djcelery/management/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82"
},
{
"name": "HTML",
"bytes": "1560"
},
{
"name": "Python",
"bytes": "166417"
},
{
"name": "Shell",
"bytes": "1905"
}
],
"symlink_target": ""
} |
import mock
from openstack.tests.unit import base
import uuid
from openstack.message.v2 import queue
FAKE1 = {
'name': 'test_queue',
'description': 'Queue used for test.',
'_default_message_ttl': 3600,
'_max_messages_post_size': 262144
}
FAKE2 = {
'name': 'test_queue',
'description': 'Queue used for test.',
'_default_message_ttl': 3600,
'_max_messages_post_size': 262144,
'client_id': 'OLD_CLIENT_ID',
'project_id': 'OLD_PROJECT_ID'
}
class TestQueue(base.TestCase):
def test_basic(self):
sot = queue.Queue()
self.assertEqual('queues', sot.resources_key)
self.assertEqual('/queues', sot.base_path)
self.assertTrue(sot.allow_create)
self.assertTrue(sot.allow_fetch)
self.assertTrue(sot.allow_delete)
self.assertTrue(sot.allow_list)
def test_make_it(self):
sot = queue.Queue.new(**FAKE2)
self.assertEqual(FAKE1['description'], sot.description)
self.assertEqual(FAKE1['name'], sot.name)
self.assertEqual(FAKE1['name'], sot.id)
self.assertEqual(FAKE1['_default_message_ttl'],
sot.default_message_ttl)
self.assertEqual(FAKE1['_max_messages_post_size'],
sot.max_messages_post_size)
self.assertEqual(FAKE2['client_id'], sot.client_id)
self.assertEqual(FAKE2['project_id'], sot.project_id)
@mock.patch.object(uuid, 'uuid4')
def test_create(self, mock_uuid):
sess = mock.Mock()
resp = mock.Mock()
sess.put.return_value = resp
sess.get_project_id.return_value = 'NEW_PROJECT_ID'
mock_uuid.return_value = 'NEW_CLIENT_ID'
sot = queue.Queue(**FAKE1)
sot._translate_response = mock.Mock()
res = sot.create(sess)
url = 'queues/%s' % FAKE1['name']
headers = {'Client-ID': 'NEW_CLIENT_ID',
'X-PROJECT-ID': 'NEW_PROJECT_ID'}
sess.put.assert_called_with(url,
headers=headers, json=FAKE1)
sess.get_project_id.assert_called_once_with()
sot._translate_response.assert_called_once_with(resp, has_body=False)
self.assertEqual(sot, res)
def test_create_client_id_project_id_exist(self):
sess = mock.Mock()
resp = mock.Mock()
sess.put.return_value = resp
sot = queue.Queue(**FAKE2)
sot._translate_response = mock.Mock()
res = sot.create(sess)
url = 'queues/%s' % FAKE2['name']
headers = {'Client-ID': 'OLD_CLIENT_ID',
'X-PROJECT-ID': 'OLD_PROJECT_ID'}
sess.put.assert_called_with(url,
headers=headers, json=FAKE1)
sot._translate_response.assert_called_once_with(resp, has_body=False)
self.assertEqual(sot, res)
@mock.patch.object(uuid, 'uuid4')
def test_get(self, mock_uuid):
sess = mock.Mock()
resp = mock.Mock()
sess.get.return_value = resp
sess.get_project_id.return_value = 'NEW_PROJECT_ID'
mock_uuid.return_value = 'NEW_CLIENT_ID'
sot = queue.Queue(**FAKE1)
sot._translate_response = mock.Mock()
res = sot.fetch(sess)
url = 'queues/%s' % FAKE1['name']
headers = {'Client-ID': 'NEW_CLIENT_ID',
'X-PROJECT-ID': 'NEW_PROJECT_ID'}
sess.get.assert_called_with(url,
headers=headers)
sess.get_project_id.assert_called_once_with()
sot._translate_response.assert_called_once_with(resp)
self.assertEqual(sot, res)
def test_get_client_id_project_id_exist(self):
sess = mock.Mock()
resp = mock.Mock()
sess.get.return_value = resp
sot = queue.Queue(**FAKE2)
sot._translate_response = mock.Mock()
res = sot.fetch(sess)
url = 'queues/%s' % FAKE2['name']
headers = {'Client-ID': 'OLD_CLIENT_ID',
'X-PROJECT-ID': 'OLD_PROJECT_ID'}
sess.get.assert_called_with(url,
headers=headers)
sot._translate_response.assert_called_once_with(resp)
self.assertEqual(sot, res)
@mock.patch.object(uuid, 'uuid4')
def test_delete(self, mock_uuid):
sess = mock.Mock()
resp = mock.Mock()
sess.delete.return_value = resp
sess.get_project_id.return_value = 'NEW_PROJECT_ID'
mock_uuid.return_value = 'NEW_CLIENT_ID'
sot = queue.Queue(**FAKE1)
sot._translate_response = mock.Mock()
sot.delete(sess)
url = 'queues/%s' % FAKE1['name']
headers = {'Client-ID': 'NEW_CLIENT_ID',
'X-PROJECT-ID': 'NEW_PROJECT_ID'}
sess.delete.assert_called_with(url,
headers=headers)
sess.get_project_id.assert_called_once_with()
sot._translate_response.assert_called_once_with(resp, has_body=False)
def test_delete_client_id_project_id_exist(self):
sess = mock.Mock()
resp = mock.Mock()
sess.delete.return_value = resp
sot = queue.Queue(**FAKE2)
sot._translate_response = mock.Mock()
sot.delete(sess)
url = 'queues/%s' % FAKE2['name']
headers = {'Client-ID': 'OLD_CLIENT_ID',
'X-PROJECT-ID': 'OLD_PROJECT_ID'}
sess.delete.assert_called_with(url,
headers=headers)
sot._translate_response.assert_called_once_with(resp, has_body=False)
| {
"content_hash": "649aaeb20995987bfc38463bb219073f",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 77,
"avg_line_length": 35.13291139240506,
"alnum_prop": 0.5694469464961268,
"repo_name": "dtroyer/python-openstacksdk",
"id": "1183076f12c5c36786aab6647d1fb270bc24b828",
"size": "6097",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "openstack/tests/unit/message/v2/test_queue.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3803161"
},
{
"name": "Shell",
"bytes": "9027"
}
],
"symlink_target": ""
} |
"""Test longpolling with getblocktemplate."""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import get_rpc_proxy, random_transaction
import threading
class LongpollThread(threading.Thread):
def __init__(self, node):
threading.Thread.__init__(self)
# query current longpollid
template = node.getblocktemplate()
self.longpollid = template['longpollid']
# create a new connection to the node, we can't use the same
# connection from two threads
self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
def run(self):
self.node.getblocktemplate({'longpollid':self.longpollid})
class GetBlockTemplateLPTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def run_test(self):
self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.")
self.nodes[0].generate(10)
template = self.nodes[0].getblocktemplate()
longpollid = template['longpollid']
# longpollid should not change between successive invocations if nothing else happens
template2 = self.nodes[0].getblocktemplate()
assert(template2['longpollid'] == longpollid)
# Test 1: test that the longpolling wait if we do nothing
thr = LongpollThread(self.nodes[0])
thr.start()
# check that thread still lives
thr.join(5) # wait 5 seconds or until thread exits
assert(thr.is_alive())
# Test 2: test that longpoll will terminate if another node generates a block
self.nodes[1].generate(1) # generate a block on another node
        # check that thread will exit now that a new block has been generated
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 3: test that longpoll will terminate if we generate a block ourselves
thr = LongpollThread(self.nodes[0])
thr.start()
        self.nodes[0].generate(1) # generate a block on our own node
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
thr = LongpollThread(self.nodes[0])
thr.start()
# generate a random transaction and submit it
min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
# min_relay_fee is fee per 1000 bytes, which should be more than enough.
(txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20)
# after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
thr.join(60 + 20)
assert(not thr.is_alive())
if __name__ == '__main__':
GetBlockTemplateLPTest().main()
| {
"content_hash": "b3c139e5d812f5d948e2e5f467f1266f",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 112,
"avg_line_length": 42.6231884057971,
"alnum_prop": 0.666780006800408,
"repo_name": "Flowdalic/bitcoin",
"id": "1259754c5af154b1f0a3888e04b0f96b97811a32",
"size": "3155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/mining_getblocktemplate_longpoll.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "695383"
},
{
"name": "C++",
"bytes": "5989645"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "195951"
},
{
"name": "Makefile",
"bytes": "117100"
},
{
"name": "Objective-C",
"bytes": "123749"
},
{
"name": "Objective-C++",
"bytes": "6594"
},
{
"name": "Python",
"bytes": "1449977"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "88254"
}
],
"symlink_target": ""
} |
"""The metrics module implements functions assessing prediction error for specific purposes."""
import numpy as np
def trapz(x, y):
"""Trapezoidal rule for integrating
the curve defined by x-y pairs.
Assume x and y are in the range [0,1]
"""
assert len(x) == len(y), 'x and y need to be of same length'
    x = np.concatenate([x, np.array([0.0, 1.0])])
    y = np.concatenate([y, np.array([0.0, 1.0])])
sort_idx = np.argsort(x)
sx = x[sort_idx]
sy = y[sort_idx]
area = 0.0
for ix in range(len(x)-1):
area += 0.5*(sx[ix+1]-sx[ix])*(sy[ix+1]+sy[ix])
return area
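# Hedged sanity check (not part of the original module): the points of y = x on
# [0, 1] lie on the same line as the appended (0, 0) and (1, 1) anchors, so the
# integral should come out to exactly 0.5.
#
#     x = np.linspace(0.0, 1.0, 11)
#     trapz(x, x)  # -> 0.5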
| {
"content_hash": "ec4725ad88977a25e891f2c38394f1fd",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 95,
"avg_line_length": 30.45,
"alnum_prop": 0.5960591133004927,
"repo_name": "likojack/isml15-thu",
"id": "29d76b234ac0091b6b890697c4199e84a8019777",
"size": "609",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metrics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "889"
}
],
"symlink_target": ""
} |
'''
Copyright 2015 Serendio Inc.
Author - kshitij soni
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
'''
from numpy import mean
import numpy as np
import pandas as pd
import csv
from collections import defaultdict
from pandas import DataFrame, Series
from StringIO import StringIO
import scipy
import matplotlib.pyplot
import matplotlib.pyplot as plt
import math as mt
import scipy.stats as stats
def mydeviate(filename, columns, Deviation=0, MeanAbsDeviation=1, MeanSqDev=0):
    # Read only the requested columns and compute their column means.
    s = DataFrame(pd.read_csv(filename, usecols=columns))
    t = s.mean()
    if Deviation == 1:
        # Signed deviation of every value from its column mean.
        print s - t
    if MeanAbsDeviation == 1:
        # Absolute deviation of every value from its column mean.
        print abs(s - t)
    if MeanSqDev == 1:
        # Squared deviation of every value from its column mean.
        print (s - t) ** 2
    return
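# Hedged usage sketch (not part of the original module; 'data.csv' and the
# column name 'x' are assumptions):
#
#     mydeviate('data.csv', ['x'], Deviation=1, MeanAbsDeviation=1, MeanSqDev=1)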
| {
"content_hash": "a18196989f4efc0082da02f66dd2ea0f",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 168,
"avg_line_length": 21.155172413793103,
"alnum_prop": 0.7180114099429503,
"repo_name": "serendio-labs-stage/diskoveror-data-preprocessing",
"id": "7a403a9b88a4ecf14af7c0e99d88a44bd9d71f85",
"size": "1227",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "premodelling routines/deviation/deviate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11754"
}
],
"symlink_target": ""
} |
on_time_dataframe = spark.read.parquet('data/on_time_performance.parquet')
# Register the data for Spark SQL
on_time_dataframe.registerTempTable("on_time_performance")
# Check out the columns
on_time_dataframe.columns
# Check out some data
on_time_dataframe\
.select("FlightDate", "TailNum", "Origin", "Dest", "Carrier", "DepDelay", "ArrDelay")\
.show()
# Trim the fields and keep the result
trimmed_on_time = on_time_dataframe\
.select(
"FlightDate",
"TailNum",
"Origin",
"Dest",
"Carrier",
"DepDelay",
"ArrDelay"
)
# Sample 0.01% of the data and show
trimmed_on_time.sample(False, 0.0001).show()
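# Hedged note (not part of the original script): 'spark' is the SparkSession
# that pyspark shells provide automatically; run standalone, you would create
# it first, for example:
#
#     from pyspark.sql import SparkSession
#     spark = SparkSession.builder.appName("on_time").getOrCreate()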
| {
"content_hash": "180b80699986e32f5e5258f232e0cefe",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 88,
"avg_line_length": 23.666666666666668,
"alnum_prop": 0.6823161189358372,
"repo_name": "rjurney/Agile_Data_Code_2",
"id": "e4b03cf40260fa7f45f802e3723286dc9c536bb8",
"size": "695",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ch02/load_on_time_performance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "51800"
},
{
"name": "Dockerfile",
"bytes": "11150"
},
{
"name": "HTML",
"bytes": "70450"
},
{
"name": "JavaScript",
"bytes": "62804"
},
{
"name": "Jupyter Notebook",
"bytes": "5468329"
},
{
"name": "Python",
"bytes": "270069"
},
{
"name": "Shell",
"bytes": "31732"
}
],
"symlink_target": ""
} |
def tuplify(a):
try:
return tuple(a)
except TypeError:
return (a,)
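# Hedged usage sketch (not part of the original module): iterables pass through
# tuple(); anything that raises TypeError is wrapped in a 1-tuple instead.
#
#     tuplify([1, 2])  # -> (1, 2)
#     tuplify(3)       # -> (3,)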
| {
"content_hash": "262dab360304831b1d99074ca39e6be2",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 23,
"avg_line_length": 13.285714285714286,
"alnum_prop": 0.5161290322580645,
"repo_name": "JohnVinyard/zounds",
"id": "1e26f0ed7ed6ceeaf4ad3336b4180d83485fc5aa",
"size": "94",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zounds/util/handy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1511"
},
{
"name": "HTML",
"bytes": "5376"
},
{
"name": "JavaScript",
"bytes": "11191"
},
{
"name": "Python",
"bytes": "743652"
},
{
"name": "Shell",
"bytes": "251"
}
],
"symlink_target": ""
} |
import os
import sys
import click
from . import server_backends
from .util import honcho_parse_env
try:
import importlib
except ImportError:
click.echo('You do not have importlib installed. Please install a '
'backport for versions < 2.7/3.1 first.')
sys.exit(1)
ENV_DEFAULT = '.env'
APP_ENVVAR = 'FLASK_APP'
@click.group(invoke_without_command=True)
@click.option('--app', '-a', 'app_name', envvar=APP_ENVVAR,
help='App to import')
@click.option('--configfile', '-c',
type=click.Path(exists=True, dir_okay=False),
help='Configuration file to pass as the first parameter to '
'create_app')
@click.option('--env', '-e', default=None,
type=click.Path(exists=True, dir_okay=False),
help='Load environment variables from file (default: "{}")'
.format(ENV_DEFAULT))
@click.pass_context
def cli(ctx, app_name, configfile, env):
extra_files = []
if configfile:
extra_files.append(configfile)
if env is None and os.path.exists(ENV_DEFAULT):
env = ENV_DEFAULT
if env:
extra_files.append(env)
buf = open(env).read()
os.environ.update(honcho_parse_env(buf))
    # disabled: this functionality will be hard if not impossible to
    # implement in flask 1.0. disable it for now
# if app_name is None and APP_ENVVAR in os.environ:
# app_name = os.environ[APP_ENVVAR]
if app_name is None:
click.echo('No --app parameter and FLASK_APP is not set.')
sys.exit(1)
mod = importlib.import_module(app_name)
app = mod.create_app(configfile)
obj = {}
obj['app'] = app
obj['extra_files'] = extra_files
obj['app_mod'] = mod
ctx.obj = obj
if ctx.invoked_subcommand is None:
ctx.invoke(dev)
@cli.command(
help='Imports a module passed on the commandline, instantiates an app by '
'calling imported_module.create_app() with an optional configuration '
'file and runs it in debug mode.'
)
@click.option('--debug/--no-debug', '-d/-D', default=True,
help='Enabled/disable debug (enabled by default)')
@click.option('--hostname', '-H', default='localhost',
help='Hostname to bind to. Defaults to localhost')
@click.option('--port', '-p', type=int, default=5000,
help='Port to listen on. Defaults to 5000')
@click.option('--ssl', '-S', flag_value='adhoc', default=None,
help='Enable SSL with a self-signed cert')
@click.option('--flask-debug/--no-flask-debug', '-e/-E', default=None,
help='Enable/disable Flask-Debug or Flask-DebugToolbar '
'extensions (default: same as --debug)')
@click.pass_obj
def dev(obj, debug, hostname, port, ssl, flask_debug):
app = obj['app']
msgs = []
if flask_debug is None:
flask_debug = debug
Debug = None
DebugToolbarExtension = None
if flask_debug:
try:
from flask_debug import Debug
except ImportError:
pass
try:
from flask_debugtoolbar import DebugToolbarExtension
except ImportError:
pass
if Debug:
Debug(app)
app.config['SERVER_NAME'] = '{}:{}'.format(hostname, port)
# taking off the safety wheels
app.config['FLASK_DEBUG_DISABLE_STRICT'] = True
if DebugToolbarExtension:
        # Flask-Debugtoolbar does not check for debugging settings at runtime.
        # This hack enables debugging, if desired, before initializing the
        # extension.
if debug:
app.debug = True
# set the SECRET_KEY, but only if we're in debug-mode
if not app.config.get('SECRET_KEY', None):
msgs.append('SECRET_KEY not set, using insecure "devkey"')
app.config['SECRET_KEY'] = 'devkey'
DebugToolbarExtension(app)
def on_off(ext):
return 'on' if ext is not None else 'off'
msgs.insert(0, 'Flask-Debug: {}'.format(on_off(Debug)))
msgs.insert(0, 'Flask-DebugToolbar: {}'.format(
on_off(DebugToolbarExtension))
)
if msgs:
click.echo(' * {}'.format(', '.join(msgs)))
app.run(hostname, port, ssl_context=ssl, debug=debug,
extra_files=obj['extra_files'])
@cli.command()
@click.option('--hostname', '-H', default='0.0.0.0',
help='Hostname to bind to. Defaults to 0.0.0.0')
@click.option('--port', '-p', type=int, default=80,
help='Port to listen on. Defaults to 80')
@click.option('--backends', '-b',
default=server_backends.DEFAULT,
help='Comma-separated list of backends to try')
@click.pass_obj
def serve(obj, hostname, port, backends):
app = obj['app']
for backend in backends.split(','):
func = getattr(server_backends, backend.replace('-', '_'), None)
if not callable(func):
click.echo('Not a valid backend: {}'.format(backend))
continue
click.echo('Trying backend {}'.format(backend))
try:
if func(app, hostname, port) is None:
continue
break
except RuntimeError as e:
click.echo(str(e), err=True)
sys.exit(1)
except ImportError:
continue
else:
click.echo('Exhausted list of possible backends', err=True)
sys.exit(1)
@cli.group()
@click.option('--model', '-m', default='.model',
help='Name of the module that contains the model')
@click.option('--db', '-d', default='db',
help='SQLAlchemy instance name')
@click.option('--echo/--no-echo', '-e/-E', default=True,
help='Overrides SQLALCHEMY_ECHO')
@click.pass_obj
def db(obj, model, db, echo):
model_mod = importlib.import_module(model, obj['app_mod'].__package__)
db_obj = getattr(model_mod, db)
obj['db'] = db_obj
    obj['app'].config['SQLALCHEMY_ECHO'] = echo
with obj['app'].app_context():
click.echo('Connected to database: {}'.format(obj['db']))
@db.command()
@click.pass_obj
def reset(obj):
db = obj['db']
with obj['app'].app_context():
click.echo('Resetting database')
db.drop_all()
db.create_all()
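# Hedged CLI sketch (not part of the original module): assuming this click
# group is installed as a console script named 'flask' and that 'myapp' is a
# package exposing create_app(), the groups above compose like this:
#
#     flask --app myapp dev --port 8080
#     flask --app myapp serve -b gunicorn,wsgiref
#     flask --app myapp db --model .model reset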
| {
"content_hash": "8203fb1d8cdcca481ece7480fcfc9cef",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 79,
"avg_line_length": 30.852941176470587,
"alnum_prop": 0.592310136638068,
"repo_name": "brettatoms/flask-appconfig",
"id": "87fe4b5eb3c0caf08177b20fe646659b4c970d23",
"size": "6294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_appconfig/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18273"
}
],
"symlink_target": ""
} |
"""Flattens a HTML file by inlining its external resources.
This is a small script that takes a HTML file, looks for src attributes
and inlines the specified file, producing one HTML file with no external
dependencies.
This does not inline anything referenced from an inlined file.
"""
import os
import re
import sys
import base64
import mimetypes
from grit.node import base
DIST_DEFAULT = 'chromium'
DIST_ENV_VAR = 'CHROMIUM_BUILD'
DIST_SUBSTR = '%DISTRIBUTION%'
# Matches beginning of an "if" block with trailing spaces.
_BEGIN_IF_BLOCK = re.compile('<if [^>]*?expr="(?P<expression>[^"]*)"[^>]*?>\s*')
# Matches ending of an "if" block with preceding spaces.
_END_IF_BLOCK = re.compile('\s*</if>')
def ReadFile(input_filename):
"""Helper function that returns input_filename as a string.
Args:
input_filename: name of file to be read
Returns:
string
"""
f = open(input_filename, 'rb')
file_contents = f.read()
f.close()
return file_contents
def SrcInlineAsDataURL(src_match, base_path, distribution, inlined_files):
"""regex replace function.
Takes a regex match for src="filename", attempts to read the file
at 'filename' and returns the src attribute with the file inlined
as a data URI. If it finds DIST_SUBSTR string in file name, replaces
it with distribution.
Args:
src_match: regex match object with 'filename' named capturing group
base_path: path that to look for files in
distribution: string that should replace DIST_SUBSTR
Returns:
string
"""
filename = src_match.group('filename')
if filename.find(':') != -1:
# filename is probably a URL, which we don't want to bother inlining
return src_match.group(0)
filename = filename.replace('%DISTRIBUTION%', distribution)
filepath = os.path.join(base_path, filename)
mimetype = mimetypes.guess_type(filename)[0] or 'text/plain'
inlined_files.add(filepath)
inline_data = base64.standard_b64encode(ReadFile(filepath))
prefix = src_match.string[src_match.start():src_match.start('filename')-1]
return "%s\"data:%s;base64,%s\"" % (prefix, mimetype, inline_data)
class InlinedData:
"""Helper class holding the results from DoInline().
Holds the inlined data and the set of filenames of all the inlined
files.
"""
def __init__(self, inlined_data, inlined_files):
self.inlined_data = inlined_data
self.inlined_files = inlined_files
def DoInline(input_filename, grd_node, allow_external_script=False):
"""Helper function that inlines the resources in a specified file.
Reads input_filename, finds all the src attributes and attempts to
inline the files they are referring to, then returns the result and
the set of inlined files.
Args:
input_filename: name of file to read in
grd_node: html node from the grd file for this include tag
Returns:
a tuple of the inlined data as a string and the set of filenames
of all the inlined files
"""
input_filepath = os.path.dirname(input_filename)
distribution = DIST_DEFAULT
if DIST_ENV_VAR in os.environ.keys():
distribution = os.environ[DIST_ENV_VAR]
if len(distribution) > 1 and distribution[0] == '_':
distribution = distribution[1:].lower()
# Keep track of all the files we inline.
inlined_files = set()
def SrcReplace(src_match, filepath=input_filepath,
inlined_files=inlined_files):
"""Helper function to provide SrcInlineAsDataURL with the base file path"""
return SrcInlineAsDataURL(src_match, filepath, distribution, inlined_files)
def GetFilepath(src_match):
filename = src_match.group('filename')
if filename.find(':') != -1:
# filename is probably a URL, which we don't want to bother inlining
return None
filename = filename.replace('%DISTRIBUTION%', distribution)
return os.path.join(input_filepath, filename)
def IsConditionSatisfied(src_match):
expression = src_match.group('expression')
return grd_node is None or grd_node.EvaluateCondition(expression)
def CheckConditionalElements(str):
"""Helper function to conditionally inline inner elements"""
while True:
begin_if = _BEGIN_IF_BLOCK.search(str)
if begin_if is None:
return str
condition_satisfied = IsConditionSatisfied(begin_if)
leading = str[0:begin_if.start()]
content_start = begin_if.end()
# Find matching "if" block end.
count = 1
pos = begin_if.end()
while True:
end_if = _END_IF_BLOCK.search(str, pos)
if end_if is None:
raise Exception('Unmatched <if>')
next_if = _BEGIN_IF_BLOCK.search(str, pos)
if next_if is None or next_if.start() >= end_if.end():
count = count - 1
if count == 0:
break
pos = end_if.end()
else:
count = count + 1
pos = next_if.end()
content = str[content_start:end_if.start()]
trailing = str[end_if.end():]
if condition_satisfied:
str = leading + CheckConditionalElements(content) + trailing
else:
str = leading + trailing
def InlineFileContents(src_match, pattern, inlined_files=inlined_files):
"""Helper function to inline external script and css files"""
filepath = GetFilepath(src_match)
if filepath is None:
return src_match.group(0)
inlined_files.add(filepath)
return pattern % ReadFile(filepath)
def InlineIncludeFiles(src_match):
"""Helper function to inline external script files"""
return InlineFileContents(src_match, '%s')
def InlineScript(src_match):
"""Helper function to inline external script files"""
return InlineFileContents(src_match, '<script>%s</script>')
def InlineCSSText(text, css_filepath):
"""Helper function that inlines external resources in CSS text"""
filepath = os.path.dirname(css_filepath)
return InlineCSSImages(text, filepath)
def InlineCSSFile(src_match, inlined_files=inlined_files):
"""Helper function to inline external css files.
Args:
src_match: A regular expression match with a named group named "filename".
Returns:
The text that should replace the reference to the CSS file.
"""
filepath = GetFilepath(src_match)
if filepath is None:
return src_match.group(0)
inlined_files.add(filepath)
# When resolving CSS files we need to pass in the path so that relative URLs
# can be resolved.
return '<style>%s</style>' % InlineCSSText(ReadFile(filepath), filepath)
def InlineCSSImages(text, filepath=input_filepath):
"""Helper function that inlines external images in CSS backgrounds."""
return re.sub('(?:content|background(?:-image)?|border-image):[ ]*' +
'url\((?:\'|\")(?P<filename>[^"\'\)\(]*)(?:\'|\")',
lambda m: SrcReplace(m, filepath),
text)
flat_text = ReadFile(input_filename)
if not allow_external_script:
# We need to inline css and js before we inline images so that image
# references gets inlined in the css and js
flat_text = re.sub('<script .*?src="(?P<filename>[^"\']*)".*?></script>',
InlineScript,
flat_text)
flat_text = re.sub(
'<link rel="stylesheet".+?href="(?P<filename>[^"]*)".*?>',
InlineCSSFile,
flat_text)
flat_text = re.sub(
'<include\s+src="(?P<filename>[^"\']*)".*>',
InlineIncludeFiles,
flat_text)
# Check conditional elements, remove unsatisfied ones from the file.
flat_text = CheckConditionalElements(flat_text)
flat_text = re.sub('<(?!script)[^>]+?src="(?P<filename>[^"\']*)"',
SrcReplace,
flat_text)
# TODO(arv): Only do this inside <style> tags.
flat_text = InlineCSSImages(flat_text)
flat_text = re.sub('<link rel="icon".+?href="(?P<filename>[^"\']*)"',
SrcReplace,
flat_text)
return InlinedData(flat_text, inlined_files)
def InlineToString(input_filename, grd_node, allow_external_script=False):
"""Inlines the resources in a specified file and returns it as a string.
Args:
input_filename: name of file to read in
grd_node: html node from the grd file for this include tag
Returns:
the inlined data as a string
"""
try:
return DoInline(input_filename,
grd_node,
allow_external_script=allow_external_script).inlined_data
except IOError, e:
raise Exception("Failed to open %s while trying to flatten %s. (%s)" %
(e.filename, input_filename, e.strerror))
def InlineToFile(input_filename, output_filename, grd_node):
"""Inlines the resources in a specified file and writes it.
Reads input_filename, finds all the src attributes and attempts to
inline the files they are referring to, then writes the result
to output_filename.
Args:
input_filename: name of file to read in
output_filename: name of file to be written to
grd_node: html node from the grd file for this include tag
Returns:
a set of filenames of all the inlined files
"""
inlined_data = InlineToString(input_filename, grd_node)
out_file = open(output_filename, 'wb')
out_file.writelines(inlined_data)
out_file.close()
def GetResourceFilenames(filename):
"""For a grd file, returns a set of all the files that would be inline."""
try:
return DoInline(filename, None).inlined_files
except IOError, e:
raise Exception("Failed to open %s while trying to flatten %s. (%s)" %
(e.filename, filename, e.strerror))
def main():
if len(sys.argv) <= 2:
print "Flattens a HTML file by inlining its external resources.\n"
print "html_inline.py inputfile outputfile"
else:
InlineToFile(sys.argv[1], sys.argv[2], None)
if __name__ == '__main__':
main()
| {
"content_hash": "6fb2de9970b3f9453d725b598ab00d5e",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 80,
"avg_line_length": 32.61258278145695,
"alnum_prop": 0.6649406031069144,
"repo_name": "meego-tablet-ux/meego-app-browser",
"id": "06805bb06a1497476a3d1c4b6b2e6e99dadeab1f",
"size": "10034",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/grit/grit/format/html_inline.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "5599"
},
{
"name": "AppleScript",
"bytes": "6772"
},
{
"name": "Assembly",
"bytes": "1871"
},
{
"name": "C",
"bytes": "1646303"
},
{
"name": "C++",
"bytes": "72324607"
},
{
"name": "CSS",
"bytes": "221604"
},
{
"name": "Diff",
"bytes": "11193"
},
{
"name": "Go",
"bytes": "3744"
},
{
"name": "HTML",
"bytes": "21930015"
},
{
"name": "Java",
"bytes": "11354"
},
{
"name": "JavaScript",
"bytes": "5339242"
},
{
"name": "Makefile",
"bytes": "2412"
},
{
"name": "Objective-C",
"bytes": "691329"
},
{
"name": "Objective-C++",
"bytes": "3786548"
},
{
"name": "PHP",
"bytes": "97796"
},
{
"name": "PLpgSQL",
"bytes": "70415"
},
{
"name": "Perl",
"bytes": "63704"
},
{
"name": "Protocol Buffer",
"bytes": "96399"
},
{
"name": "Python",
"bytes": "2296716"
},
{
"name": "QML",
"bytes": "452612"
},
{
"name": "QMake",
"bytes": "435"
},
{
"name": "Shell",
"bytes": "200146"
}
],
"symlink_target": ""
} |
import configparser
from vent.helpers.errors import ErrorHandler
class Template:
""" Handle parsing templates """
def __init__(self, template=None):
self.config = configparser.ConfigParser(interpolation=None)
self.config.optionxform = str
if template:
self.config.read(template)
self.template = template
@ErrorHandler
def sections(self):
""" Returns a list of sections """
return (True, self.config.sections())
@ErrorHandler
def section(self, section):
""" Returns a list of tuples of (option, value) for the section """
# check if the named section exists
if self.config.has_section(section):
return (True, self.config.items(section))
return (False, 'Section: ' + section + ' does not exist')
@ErrorHandler
def options(self, section):
""" Returns a list of options for a section """
if self.config.has_section(section):
return (True, self.config.options(section))
return (False, 'Section: ' + section + ' does not exist')
@ErrorHandler
def option(self, section, option):
""" Returns the value of the option """
if self.config.has_section(section):
if self.config.has_option(section, option):
return (True, self.config.get(section, option))
return (False, 'Option: ' + option + ' does not exist')
return (False, 'Section: ' + section + ' does not exist')
@ErrorHandler
def add_section(self, section):
"""
        If the section already exists, returns an error tuple;
        otherwise adds the section and returns the updated list of sections.
"""
# check if section already exists
if not self.config.has_section(section):
self.config.add_section(section)
# return updated sections
return (True, self.config.sections())
return (False, 'Section: ' + section + ' already exists')
@ErrorHandler
def add_option(self, section, option, value=None):
"""
Creates an option for a section. If the section does
not exist, it will create the section.
"""
# check if section exists; create if not
if not self.config.has_section(section):
message = self.add_section(section)
if not message[0]:
return message
        if not self.config.has_option(section, option):
            if value is not None:
                self.config.set(section, option, value)
            else:
                self.config.set(section, option)
            return (True, self.config.options(section))
        return (False, 'Option: {} already exists @ {}'.format(option, section))
@ErrorHandler
def del_section(self, section):
""" Deletes a section if it exists """
if self.config.has_section(section):
self.config.remove_section(section)
return (True, self.config.sections())
return (False, 'Section: ' + section + ' does not exist')
@ErrorHandler
def del_option(self, section, option):
""" Deletes an option if the section and option exist """
if self.config.has_section(section):
if self.config.has_option(section, option):
self.config.remove_option(section, option)
return (True, self.config.options(section))
return (False, 'Option: ' + option + ' does not exist')
return (False, 'Section: ' + section + ' does not exist')
@ErrorHandler
def set_option(self, section, option, value):
"""
Sets an option to a value in the given section. Option is created if it
does not already exist
"""
if self.config.has_section(section):
self.config.set(section, option, value)
return (True, self.config.options(section))
return (False, 'Section: ' + section + ' does not exist')
@ErrorHandler
def write_config(self):
with open(self.template, 'w') as configfile:
self.config.write(configfile)
return
@ErrorHandler
def constrained_sections(self, constraints=None, options=None):
"""
        Takes a dictionary of option/value pairs (constraints) that must be
        present in a section, and returns a dictionary of the matching
        sections, optionally populated with the values of the options named
        in the options list.
"""
sections = {}
if not constraints:
constraints = {}
if not options:
options = []
all_sections = self.sections()
for a_section in all_sections[1]:
include = True
for constraint in constraints:
result = self.option(a_section, constraint)
if not result[0] or result[1] != constraints[constraint]:
include = False
# handle group membership
if (result[0] and
constraint == 'groups' and
constraints[constraint] in result[1]):
include = True
if include:
sections[a_section] = {}
for option in options:
result = self.option(a_section, option)
if result[0]:
sections[a_section][option] = result[1]
return sections
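# A minimal usage sketch (hypothetical file name; assumes the ErrorHandler
# decorator passes successful return values through unchanged):
if __name__ == '__main__':
    t = Template()
    t.template = 'example.template'
    print(t.add_section('service'))        # (True, ['service'])
    print(t.set_option('service', 'port', '8080'))
    print(t.option('service', 'port'))     # (True, '8080')
    t.write_config()                       # persists to example.template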
| {
"content_hash": "d2b9b261a8bd173cea7866c797818f47",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 79,
"avg_line_length": 37.791666666666664,
"alnum_prop": 0.5790150679897097,
"repo_name": "Jeff-Wang93/vent",
"id": "0f255a95a2ee127f0c32d3127dcd50804a755d1c",
"size": "5442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vent/api/templates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "227"
},
{
"name": "Makefile",
"bytes": "4747"
},
{
"name": "Python",
"bytes": "433992"
},
{
"name": "Shell",
"bytes": "2103"
}
],
"symlink_target": ""
} |
from ipfs_connector import IPFSConnector, IPFSConfig
from nn_loader import NNListener, NNLoader
from typing import Tuple
class ProcessorCallback:
pass
class Processor(NNListener):
def __init__(self, callback: ProcessorCallback, ipfs_config: IPFSConfig):
print("Connecting to IPFS server %s:%d..." % (ipfs_config.server, ipfs_config.port))
try:
self.ipfs_connector = IPFSConnector(ipfs_config)
        except Exception:
raise IPFSError("Can't connect IPFS server")
print("IPFS server connected successfully")
self.nn_loader = NNLoader()
def cognition_completed(self, results):
pass
    def cognite_batch(self, arch: str, model: str, data: str) -> Tuple[str, int]:
try:
print("Downloading architecture file %s" % arch)
self.ipfs_connector.download_file(arch)
        except Exception:
raise IPFSError("Architecture file not found")
try:
print("Downloading model file %s" % model)
self.ipfs_connector.download_file(model)
        except Exception:
raise IPFSError("Model file not found")
try:
print("Downloading data file %s" % data)
self.ipfs_connector.download_file(data)
        except Exception:
raise IPFSError("Data file not found")
print("Running model and data..")
self.nn_loader.load_and_run(arch, model, data, self)
return 'task0', 0
def get_time_estimate(self):
# TODO: Implement
return 0
class IPFSError (Exception):
def __init__(self, message: str):
self.message = message
class ModelInconsistencyError (Exception):
pass
class DataInconsistencyError (Exception):
pass
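# A minimal usage sketch (hypothetical: the IPFSConfig constructor arguments
# and file names below are illustrative, and a reachable IPFS daemon is
# required, so this is shown as comments):
#
#   config = IPFSConfig(server='127.0.0.1', port=5001)
#   processor = Processor(ProcessorCallback(), config)
#   task_id, status = processor.cognite_batch('arch.json', 'model.h5', 'data.csv')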
| {
"content_hash": "3a158e3148cf4f019db88c372c3b6c70",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 92,
"avg_line_length": 27.81967213114754,
"alnum_prop": 0.6205067766647024,
"repo_name": "Neurochain/neurowrk",
"id": "fa41f630ad6ecf5ebacecc03d0167ea8afba836c",
"size": "1697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/processor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13826"
}
],
"symlink_target": ""
} |
__author__ = 'stamylew'
from os.path import expanduser
def assign_path(hostname):
if hostname == "birdofprey":
home1 = "/home/stamylew/"
home2 = "/mnt/CLAWS1/stamilev/"
ilp_folder = home2 + "ilastik_projects/"
volumes_folder = home2 + "volumes/"
ilastik_path = home1 + "software/ilastik-1.1.6-Linux/run_ilastik.sh"
autocontext_path = home1 + "src/autocontext/autocontext.py"
test_folder = home2 + "test_folder"
elif hostname == "fatchicken":
home1 = "/home/stamyalew/"
ilp_folder = home1 + "ilastik_projects/"
volumes_folder = home1 + "volumes/"
ilastik_path = home1 + "software/ilastik-1.1.8.post1-Linux/run_ilastik.sh"
autocontext_path = home1 + "src/autocontext/autocontext.py"
test_folder = home1 + "test_folder"
elif hostname == "sirherny":
home1 = "/mnt/homes/stamyalew/"
home2 = "/mnt/data/"
ilp_folder = home2 + "simon/ilastik_projects/"
volumes_folder = home2 + "simon/volumes/"
ilastik_path = home1 + "software/ilastik-1.1.8.post1-Linux/run_ilastik.sh"
autocontext_path = home1 + "software/autocontext/autocontext.py"
test_folder = home1 + "test_folder"
else:
raise Exception("No valid hostname given.")
return home1, ilp_folder, volumes_folder, ilastik_path, autocontext_path, test_folder
class host:
def __init__(self, hostname):
        self._hostname = hostname
@property
def hostname(self):
"""Returns hostname
"""
        return self._hostname
@property
def home_dir(self):
"""Returns home directory path
"""
home = expanduser("~")
return home
def get_ilp_folder(self, hostname, home_dir):
"""Return ilp folder path
"""
if hostname == "birdofprey" or "fatchicken":
ilp_folder = home_dir + "ilastik_projects/"
elif hostname == "sirherny":
ilp_folder = "/mnt/data/simon/ilastik_projects/"
return ilp_folder
def get_volumes_folder(self, hostname, home_dir):
"""Return volumes folder path
"""
if hostname == "birdofprey" or "fatchicken":
volumes_folder = home_dir + "volumes/"
elif hostname == "sirherny":
volumes_folder = "/mnt/data/simon/volumes/"
return volumes_folder
def get_ilastik_path(self, hostname, home_dir):
"""Return ilastik path
"""
if hostname == "birdofprey":
ilastik_path = home_dir + "software/ilastik-1.1.6-Linux/run_ilastik.sh"
elif hostname == "fatchicken" or "sirherny":
ilastik_path = home_dir + "software/ilastik-1.1.8.post1-Linux/run_ilastik.sh"
return ilastik_path
def get_autocontext_path(self, hostname, home_dir):
"""Return autocontext path
"""
if hostname == "birdofprey" or "fatchicken":
autocontext_path = home_dir + "src/autocontext/autocontext.py"#
elif hostname == "sirherny":
autocontext_path = home_dir + "software/autocontext/autocontext.py"
return autocontext_path
def get_test_folder_path(self, home_dir):
"""Return test folder path
"""
test_folder_path = home_dir + "test_folder"
        return test_folder_path
| {
"content_hash": "209d1edf0bba2cb7889b8b0b8d4251c4",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 89,
"avg_line_length": 37.57608695652174,
"alnum_prop": 0.5788255713045993,
"repo_name": "simonsgit/bachelor_stuff",
"id": "0a525f4f210e527da51194ce2c991f1f758f5799",
"size": "3457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "other/host_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "109641"
}
],
"symlink_target": ""
} |
from django.db import models
from pygments.lexers import get_all_lexers, get_lexer_by_name
from pygments.styles import get_all_styles
from pygments.formatters.html import HtmlFormatter
from pygments import highlight
LEXERS = [item for item in get_all_lexers() if item[1]]
LANGUAGE_CHOICES = sorted([(item[1][0], item[0]) for item in LEXERS])
STYLE_CHOICES = sorted((item, item) for item in get_all_styles())
class Snippet(models.Model):
created = models.DateTimeField(auto_now_add=True)
title = models.CharField(max_length=100, blank=True, default='')
code = models.TextField()
linenos = models.BooleanField(default=False)
language = models.CharField(choices=LANGUAGE_CHOICES, default='python', max_length=100)
style = models.CharField(choices=STYLE_CHOICES, default='friendly', max_length=100)
owner = models.ForeignKey('auth.User', related_name='snippets', default=1)
highlighted = models.TextField(default='cake')
def save(self, *args, **kwargs):
lexer = get_lexer_by_name(self.language)
        linenos = 'table' if self.linenos else False
        options = {'title': self.title} if self.title else {}
formatter = HtmlFormatter(style=self.style, linenos=linenos, full=True, **options)
self.highlighted = highlight(self.code, lexer, formatter)
super(Snippet, self).save(*args, **kwargs)
class Meta:
ordering = ('created', )
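# A minimal usage sketch (hypothetical; meant for a `manage.py shell` session,
# since saving a model needs a configured Django project and database):
#
#   s = Snippet(title='demo', code='print("hi")', language='python')
#   s.save()    # save() runs Pygments and fills s.highlighted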
| {
"content_hash": "40aa5bc45077f7af0c7af773d0b188be",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 91,
"avg_line_length": 45.483870967741936,
"alnum_prop": 0.7056737588652482,
"repo_name": "whoww/PlaylistServer",
"id": "6e5ee545958a20814579d2a3706f5f71e212f54e",
"size": "1410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snippets/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7038"
},
{
"name": "HTML",
"bytes": "28581"
},
{
"name": "JavaScript",
"bytes": "5057"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "Python",
"bytes": "29940"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.agent import agent
from lib.core.common import Backend
from lib.core.common import Format
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import DBMS
from lib.core.session import setDbms
from lib.core.settings import MAXDB_ALIASES
from lib.request import inject
from lib.request.connect import Connect as Request
from plugins.generic.fingerprint import Fingerprint as GenericFingerprint
class Fingerprint(GenericFingerprint):
def __init__(self):
GenericFingerprint.__init__(self, DBMS.MAXDB)
def _versionCheck(self):
infoMsg = "executing %s SYSINFO version check" % DBMS.MAXDB
logger.info(infoMsg)
query = agent.prefixQuery("/* NoValue */")
query = agent.suffixQuery(query)
payload = agent.payload(newValue=query)
result = Request.queryPage(payload)
if not result:
warnMsg = "unable to perform %s version check" % DBMS.MAXDB
logger.warn(warnMsg)
return None
minor, major = None, None
for version in (6, 7):
result = inject.checkBooleanExpression("%d=(SELECT MAJORVERSION FROM SYSINFO.VERSION)" % version)
if result:
major = version
for version in xrange(0, 10):
result = inject.checkBooleanExpression("%d=(SELECT MINORVERSION FROM SYSINFO.VERSION)" % version)
if result:
minor = version
if major and minor:
return "%s.%s" % (major, minor)
else:
return None
def getFingerprint(self):
value = ""
wsOsFp = Format.getOs("web server", kb.headersFp)
if wsOsFp:
value += "%s\n" % wsOsFp
if kb.data.banner:
dbmsOsFp = Format.getOs("back-end DBMS", kb.bannerFp)
if dbmsOsFp:
value += "%s\n" % dbmsOsFp
blank = " " * 15
value += "back-end DBMS: "
if not conf.extensiveFp:
value += DBMS.MAXDB
return value
actVer = Format.getDbms() + " (%s)" % self._versionCheck()
blank = " " * 15
value += "active fingerprint: %s" % actVer
if kb.bannerFp:
value += "\n%sbanner parsing fingerprint: -" % blank
htmlErrorFp = Format.getErrorParsedDBMSes()
if htmlErrorFp:
value += "\n%shtml error message fingerprint: %s" % (blank, htmlErrorFp)
return value
def checkDbms(self):
if not conf.extensiveFp and (Backend.isDbmsWithin(MAXDB_ALIASES) or conf.dbms in MAXDB_ALIASES):
setDbms(DBMS.MAXDB)
self.getBanner()
return True
infoMsg = "testing %s" % DBMS.MAXDB
logger.info(infoMsg)
result = inject.checkBooleanExpression("ALPHA(NULL) IS NULL")
if result:
infoMsg = "confirming %s" % DBMS.MAXDB
logger.info(infoMsg)
result = inject.checkBooleanExpression("MAPCHAR(NULL,1,DEFAULTMAP) IS NULL")
if not result:
warnMsg = "the back-end DBMS is not %s" % DBMS.MAXDB
logger.warn(warnMsg)
return False
setDbms(DBMS.MAXDB)
self.getBanner()
return True
else:
warnMsg = "the back-end DBMS is not %s" % DBMS.MAXDB
logger.warn(warnMsg)
return False
def forceDbmsEnum(self):
if conf.db:
conf.db = conf.db.upper()
else:
conf.db = "USER"
if conf.tbl:
conf.tbl = conf.tbl.upper()
| {
"content_hash": "d87c5ae3d938bafe1317662b9934bcee",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 109,
"avg_line_length": 27.816176470588236,
"alnum_prop": 0.5855141422151732,
"repo_name": "JeyZeta/Dangerous",
"id": "c808e6d6beecd1edbeb3ec22675c3faa4700ca67",
"size": "3783",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "Dangerous/Golismero/tools/sqlmap/plugins/dbms/maxdb/fingerprint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13260"
},
{
"name": "C",
"bytes": "12851"
},
{
"name": "C++",
"bytes": "3174"
},
{
"name": "CSS",
"bytes": "267451"
},
{
"name": "HTML",
"bytes": "2686153"
},
{
"name": "JavaScript",
"bytes": "1356956"
},
{
"name": "Lua",
"bytes": "14436"
},
{
"name": "Makefile",
"bytes": "11190"
},
{
"name": "Objective-C",
"bytes": "998"
},
{
"name": "PHP",
"bytes": "619"
},
{
"name": "PLpgSQL",
"bytes": "536"
},
{
"name": "Perl",
"bytes": "263365"
},
{
"name": "Python",
"bytes": "16669102"
},
{
"name": "Roff",
"bytes": "9828"
},
{
"name": "Ruby",
"bytes": "503"
},
{
"name": "Shell",
"bytes": "6691"
}
],
"symlink_target": ""
} |
import github3
from github3.issues.comment import IssueComment
from github3.issues.event import IssueEvent
from github3.issues.label import Label
from github3.issues.milestone import Milestone
from github3.issues import Issue
import datetime
from tests.utils import BaseCase, load, mock
class TestLabel(BaseCase):
def __init__(self, methodName='runTest'):
super(TestLabel, self).__init__(methodName)
self.l = Label(load('label'))
self.api = ("https://api.github.com/repos/sigmavirus24/github3.py/"
"labels/Bug")
def setUp(self):
super(TestLabel, self).setUp()
self.l = Label(self.l.to_json(), self.g)
def test_equality(self):
l = Label(load('label'))
assert self.l == l
l._uniq = ("https://api.github.com/repos/sigmavirus24/github3.py/"
"labels/wontfix")
assert self.l != l
def test_repr(self):
assert repr(self.l) == '<Label [{0}]>'.format(self.l.name)
def test_str(self):
assert str(self.l) == self.l.name
def test_delete(self):
self.response('', 204)
self.delete(self.api)
self.assertRaises(github3.GitHubError, self.l.delete)
self.not_called()
self.login()
assert self.l.delete()
def test_update(self):
self.response('label', 200)
self.patch(self.api)
self.conf = {'data': {'name': 'newname', 'color': 'afafaf'}}
self.assertRaises(github3.GitHubError, self.l.update, None, None)
self.login()
assert self.l.update(None, None) is False
self.not_called()
assert self.l.update('newname', 'afafaf')
self.mock_assertions()
assert self.l.update('newname', '#afafaf')
self.mock_assertions()
class TestMilestone(BaseCase):
def __init__(self, methodName='runTest'):
super(TestMilestone, self).__init__(methodName)
self.m = Milestone(load('milestone'))
self.api = ("https://api.github.com/repos/kennethreitz/requests/"
"milestones/18")
def setUp(self):
super(TestMilestone, self).setUp()
self.m = Milestone(self.m.to_json(), self.g)
def test_repr(self):
assert repr(self.m) == '<Milestone [v1.0.0]>'
def test_str(self):
assert str(self.m) == 'v1.0.0'
def test_delete(self):
self.response('', 204)
self.delete(self.api)
self.assertRaises(github3.GitHubError, self.m.delete)
self.not_called()
self.login()
assert self.m.delete()
self.mock_assertions()
def test_due_on(self):
json = self.m.to_json().copy()
json['due_on'] = '2012-12-31T23:59:59Z'
m = Milestone(json)
assert isinstance(m.due_on, datetime.datetime)
def test_iter_labels(self):
self.response('label', _iter=True)
self.get(self.api + '/labels')
i = self.m.iter_labels()
assert isinstance(i, github3.structs.GitHubIterator)
assert isinstance((next(i)), Label)
self.mock_assertions()
def test_update(self):
self.response('milestone', 200)
self.patch(self.api)
self.conf = {
'data': {
'title': 'foo',
'state': 'closed',
'description': ':sparkles:',
'due_on': '2013-12-31T23:59:59Z'
}
}
self.assertRaises(github3.GitHubError, self.m.update, None)
self.login()
assert self.m.update(None) is False
self.not_called()
assert self.m.update(state='closed')
assert self.m.update('foo', 'closed', ':sparkles:',
'2013-12-31T23:59:59Z')
self.mock_assertions()
class TestIssue(BaseCase):
def __init__(self, methodName='runTest'):
super(TestIssue, self).__init__(methodName)
self.i = Issue(load('issue'))
self.api = ("https://api.github.com/repos/sigmavirus24/github3.py/"
"issues/1")
def setUp(self):
super(TestIssue, self).setUp()
self.i = Issue(self.i.to_json(), self.g)
def test_equality(self):
i = Issue(load('issue'))
assert self.i == i
i._uniq = 1
assert self.i != i
def test_repr(self):
assert repr(self.i) == '<Issue [sigmavirus24/github3.py #1]>'
def test_add_labels(self):
self.response('label', 200, _iter=True)
self.post(self.api + '/labels')
self.conf = {'data': '["enhancement"]'}
self.assertRaises(github3.GitHubError, self.i.add_labels, 'foo')
self.not_called()
self.login()
labels = self.i.add_labels('enhancement')
assert labels != []
assert isinstance(labels[0], Label)
self.mock_assertions()
def test_assign(self):
self.assertRaises(github3.GitHubError, self.i.assign, 'foo')
self.login()
with mock.patch.object(Issue, 'edit') as ed:
ed.return_value = True
assert self.i.assign(None) is False
self.not_called()
assert self.i.assign('sigmavirus24')
n = self.i.milestone.number if self.i.milestone else None
labels = [str(l) for l in self.i.labels]
ed.assert_called_once_with(
self.i.title, self.i.body, 'sigmavirus24', self.i.state, n,
labels
)
def test_close(self):
self.assertRaises(github3.GitHubError, self.i.close)
self.not_called()
self.login()
with mock.patch.object(Issue, 'edit') as ed:
ed.return_value = True
assert self.i.close()
u = self.i.assignee.login if self.i.assignee else ''
n = self.i.milestone.number if self.i.milestone else None
l = [str(label) for label in self.i.labels]
ed.assert_called_once_with(
self.i.title, self.i.body, u, self.i.state, n, l
)
def test_comment(self):
self.response('issue_comment')
self.get(self.api[:-1] + 'comments/476476')
c = self.i.comment('476476')
assert isinstance(c, IssueComment)
assert repr(c).startswith('<Issue Comment')
self.mock_assertions()
def test_create_comment(self):
self.response('issue_comment', 201)
self.post(self.api + '/comments')
self.conf = {'data': {'body': 'comment body'}}
self.assertRaises(github3.GitHubError, self.i.create_comment, '')
self.login()
assert self.i.create_comment(None) is None
self.not_called()
assert isinstance(self.i.create_comment('comment body'), IssueComment)
self.mock_assertions()
def test_edit(self):
self.response('issue', 200)
self.patch(self.api)
self.conf = {'data': {'title': 'new title', 'milestone': None}}
self.assertRaises(github3.GitHubError, self.i.edit)
self.login()
assert self.i.edit() is False
self.not_called()
assert self.i.edit('new title', milestone=0)
self.mock_assertions()
def test_is_closed(self):
assert self.i.is_closed()
self.i.closed_at = None
assert self.i.is_closed()
self.i.state = 'open'
assert self.i.is_closed() is False
def test_iter_comments(self):
self.response('issue_comment', _iter=True)
self.get(self.api + '/comments')
assert isinstance((next(self.i.iter_comments())), IssueComment)
self.mock_assertions()
def test_iter_events(self):
self.response('issue_event', _iter=True)
self.get(self.api + '/events')
e = next(self.i.iter_events())
assert isinstance(e, IssueEvent)
assert repr(e).startswith('<Issue Event')
self.mock_assertions()
def test_remove_label(self):
self.response('', 204)
self.delete(self.api + '/labels/name')
self.assertRaises(github3.GitHubError, self.i.remove_label, 'name')
self.not_called()
self.login()
assert self.i.remove_label('name')
self.mock_assertions()
def test_remove_all_labels(self):
self.assertRaises(github3.GitHubError, self.i.remove_all_labels)
self.login()
with mock.patch.object(Issue, 'replace_labels') as rl:
rl.return_value = []
assert self.i.remove_all_labels() == []
rl.assert_called_once_with([])
def test_replace_labels(self):
self.response('label', _iter=True)
self.put(self.api + '/labels')
self.conf = {'data': '["foo", "bar"]'}
self.assertRaises(github3.GitHubError, self.i.replace_labels, [])
self.not_called()
self.login()
labels = self.i.replace_labels(['foo', 'bar'])
assert labels != []
assert isinstance(labels[0], Label)
def test_reopen(self):
self.assertRaises(github3.GitHubError, self.i.reopen)
self.login()
n = self.i.milestone.number if self.i.milestone else None
u = self.i.assignee.login if self.i.assignee else None
with mock.patch.object(Issue, 'edit') as ed:
ed.return_value = True
assert self.i.reopen()
labels = [str(l) for l in self.i.labels]
ed.assert_called_once_with(
self.i.title, self.i.body, u, 'open', n, labels
)
def test_enterprise(self):
Issue(load('issue_enterprise'))
def test_issue_137(self):
"""
        GitHub sometimes returns `pull` as part of the `html_url` for Issue
requests.
"""
i = Issue(load('issue_137'))
self.assertEqual(
i.html_url,
"https://github.com/sigmavirus24/github3.py/pull/1")
self.assertEqual(i.repository, ("sigmavirus24", "github3.py"))
class TestIssueEvent(BaseCase):
def setUp(self):
super(TestIssueEvent, self).setUp()
self.ev = IssueEvent(load('issue_event'))
def test_repr(self):
assert repr(self.ev) == '<Issue Event [{0} by {1}]>'.format(
'closed', 'sigmavirus24'
)
def test_equality(self):
e = IssueEvent(load('issue_event'))
assert self.ev == e
e._uniq = 'fake'
assert self.ev != e
| {
"content_hash": "58fd41a3c5fe4b331466b63065f42ce0",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 78,
"avg_line_length": 30.533923303834808,
"alnum_prop": 0.5731813351367018,
"repo_name": "msabramo/github3.py",
"id": "f5daeb1bd68ca7108e247f4a903ce67ae8441cee",
"size": "10351",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "tests/test_issues.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
Custom filters for use in openshift-ansible
"""
from ansible import errors
def osmrq_get_existing_namespaces(oc_obj_namespaces_list):
    '''Take the output of the oc_obj namespaces list and return a list of
       namespaces that are found.
    '''
valid_namespace_list = []
for namespace in oc_obj_namespaces_list:
if 'kind' in namespace['results']['results'][0]:
valid_namespace_list.append(namespace['item'])
return valid_namespace_list
class FilterModule(object):
""" Custom ansible filter mapping """
# pylint: disable=no-self-use, too-few-public-methods
def filters(self):
""" returns a mapping of filters to methods """
return {
"osmrq_get_existing_namespaces": osmrq_get_existing_namespaces,
}
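# A minimal usage sketch (input shaped like registered oc_obj results; only
# items whose first result carries a 'kind' key are kept; assumes ansible is
# importable):
if __name__ == '__main__':
    namespaces = [
        {'item': 'default', 'results': {'results': [{'kind': 'Namespace'}]}},
        {'item': 'missing', 'results': {'results': [{}]}},
    ]
    print(osmrq_get_existing_namespaces(namespaces))  # ['default']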
| {
"content_hash": "fc9f52ecf2cfaf70ff0479665ec934a8",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 98,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.6575,
"repo_name": "blrm/openshift-tools",
"id": "f6a553909d2508141d963a8b145d8b8ef29681f7",
"size": "875",
"binary": false,
"copies": "4",
"ref": "refs/heads/stg",
"path": "ansible/roles/openshift_master_resource_quota/filter_plugins/osmrq_filters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "588"
},
{
"name": "Dockerfile",
"bytes": "71369"
},
{
"name": "Go",
"bytes": "382164"
},
{
"name": "Groovy",
"bytes": "6322"
},
{
"name": "HTML",
"bytes": "146500"
},
{
"name": "JavaScript",
"bytes": "2380"
},
{
"name": "Makefile",
"bytes": "3324"
},
{
"name": "PHP",
"bytes": "35793"
},
{
"name": "Python",
"bytes": "37739550"
},
{
"name": "Shell",
"bytes": "1645744"
},
{
"name": "Vim script",
"bytes": "1836"
}
],
"symlink_target": ""
} |
__revision__ = "test/CPPDEFINES/pkg-config.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify merging with MergeFlags to CPPDEFINES with various data types.
"""
import TestSCons
test = TestSCons.TestSCons()
pkg_config_path = test.where_is('pkg-config')
if not pkg_config_path:
test.skip_test("Could not find 'pkg-config' in system PATH, skipping test.\n")
test.write('bug.pc', """\
prefix=/usr
exec_prefix=${prefix}
libdir=${exec_prefix}/lib
includedir=${prefix}/include
Name: bug
Description: A test case .pc file
Version: 1.2
Cflags: -DSOMETHING -DVARIABLE=2
""")
test.write('main.c', """\
int main(int argc, char *argv[])
{
return 0;
}
""")
test.write('SConstruct', """\
# http://scons.tigris.org/issues/show_bug.cgi?id=2671
# Passing test cases
env_1 = Environment(CPPDEFINES=[('DEBUG','1'), 'TEST'])
env_1.ParseConfig('PKG_CONFIG_PATH=. %(pkg_config_path)s --cflags bug')
print env_1.subst('$_CPPDEFFLAGS')
env_2 = Environment(CPPDEFINES=[('DEBUG','1'), 'TEST'])
env_2.MergeFlags('-DSOMETHING -DVARIABLE=2')
print env_2.subst('$_CPPDEFFLAGS')
# Failing test cases
env_3 = Environment(CPPDEFINES={'DEBUG':1, 'TEST':None})
env_3.ParseConfig('PKG_CONFIG_PATH=. %(pkg_config_path)s --cflags bug')
print env_3.subst('$_CPPDEFFLAGS')
env_4 = Environment(CPPDEFINES={'DEBUG':1, 'TEST':None})
env_4.MergeFlags('-DSOMETHING -DVARIABLE=2')
print env_4.subst('$_CPPDEFFLAGS')
# http://scons.tigris.org/issues/show_bug.cgi?id=1738
env_1738_1 = Environment(tools=['default'])
env_1738_1.ParseConfig('PKG_CONFIG_PATH=. %(pkg_config_path)s --cflags --libs bug')
env_1738_1.Append(CPPDEFINES={'value' : '1'})
print env_1738_1.subst('$_CPPDEFFLAGS')
"""%locals() )
expect_print_output="""\
-DDEBUG=1 -DTEST -DSOMETHING -DVARIABLE=2
-DDEBUG=1 -DTEST -DSOMETHING -DVARIABLE=2
-DDEBUG=1 -DTEST -DSOMETHING -DVARIABLE=2
-DDEBUG=1 -DTEST -DSOMETHING -DVARIABLE=2
-DSOMETHING -DVARIABLE=2 -Dvalue=1
"""
build_output="scons: `.' is up to date.\n"
expect = test.wrap_stdout(build_str=build_output,
read_str = expect_print_output)
test.run(arguments = '.', stdout=expect)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "71bf45840ed92c58751d6fff4d760138",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 103,
"avg_line_length": 28.0875,
"alnum_prop": 0.6933689363595905,
"repo_name": "EmanueleCannizzaro/scons",
"id": "a3a69af90bba9e3864d819dae5d0fc89aa8f238d",
"size": "3382",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/CPPDEFINES/pkg-config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2491"
},
{
"name": "C",
"bytes": "659"
},
{
"name": "C++",
"bytes": "598"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1997"
},
{
"name": "HTML",
"bytes": "817651"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "29978"
},
{
"name": "Python",
"bytes": "7510453"
},
{
"name": "Roff",
"bytes": "556545"
},
{
"name": "Ruby",
"bytes": "11074"
},
{
"name": "Shell",
"bytes": "52682"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from compose.config.environment import Environment
from compose.config.interpolation import interpolate_environment_variables
from compose.config.interpolation import Interpolator
from compose.config.interpolation import InvalidInterpolation
from compose.config.interpolation import TemplateWithDefaults
from compose.const import COMPOSEFILE_V2_0 as V2_0
from compose.const import COMPOSEFILE_V3_1 as V3_1
@pytest.fixture
def mock_env():
return Environment({'USER': 'jenny', 'FOO': 'bar'})
@pytest.fixture
def variable_mapping():
return Environment({'FOO': 'first', 'BAR': ''})
@pytest.fixture
def defaults_interpolator(variable_mapping):
return Interpolator(TemplateWithDefaults, variable_mapping).interpolate
def test_interpolate_environment_variables_in_services(mock_env):
services = {
'servicea': {
'image': 'example:${USER}',
'volumes': ['$FOO:/target'],
'logging': {
'driver': '${FOO}',
'options': {
'user': '$USER',
}
}
}
}
expected = {
'servicea': {
'image': 'example:jenny',
'volumes': ['bar:/target'],
'logging': {
'driver': 'bar',
'options': {
'user': 'jenny',
}
}
}
}
value = interpolate_environment_variables(V2_0, services, 'service', mock_env)
assert value == expected
def test_interpolate_environment_variables_in_volumes(mock_env):
volumes = {
'data': {
'driver': '$FOO',
'driver_opts': {
'max': 2,
'user': '${USER}'
}
},
'other': None,
}
expected = {
'data': {
'driver': 'bar',
'driver_opts': {
'max': 2,
'user': 'jenny'
}
},
'other': {},
}
value = interpolate_environment_variables(V2_0, volumes, 'volume', mock_env)
assert value == expected
def test_interpolate_environment_variables_in_secrets(mock_env):
secrets = {
'secretservice': {
'file': '$FOO',
'labels': {
'max': 2,
'user': '${USER}'
}
},
'other': None,
}
expected = {
'secretservice': {
'file': 'bar',
'labels': {
'max': 2,
'user': 'jenny'
}
},
'other': {},
}
value = interpolate_environment_variables(V3_1, secrets, 'volume', mock_env)
assert value == expected
def test_escaped_interpolation(defaults_interpolator):
assert defaults_interpolator('$${foo}') == '${foo}'
def test_invalid_interpolation(defaults_interpolator):
with pytest.raises(InvalidInterpolation):
defaults_interpolator('${')
with pytest.raises(InvalidInterpolation):
defaults_interpolator('$}')
with pytest.raises(InvalidInterpolation):
defaults_interpolator('${}')
with pytest.raises(InvalidInterpolation):
defaults_interpolator('${ }')
with pytest.raises(InvalidInterpolation):
defaults_interpolator('${ foo}')
with pytest.raises(InvalidInterpolation):
defaults_interpolator('${foo }')
with pytest.raises(InvalidInterpolation):
defaults_interpolator('${foo!}')
def test_interpolate_missing_no_default(defaults_interpolator):
assert defaults_interpolator("This ${missing} var") == "This var"
assert defaults_interpolator("This ${BAR} var") == "This var"
def test_interpolate_with_value(defaults_interpolator):
assert defaults_interpolator("This $FOO var") == "This first var"
assert defaults_interpolator("This ${FOO} var") == "This first var"
def test_interpolate_missing_with_default(defaults_interpolator):
assert defaults_interpolator("ok ${missing:-def}") == "ok def"
assert defaults_interpolator("ok ${missing-def}") == "ok def"
assert defaults_interpolator("ok ${BAR:-/non:-alphanumeric}") == "ok /non:-alphanumeric"
def test_interpolate_with_empty_and_default_value(defaults_interpolator):
assert defaults_interpolator("ok ${BAR:-def}") == "ok def"
assert defaults_interpolator("ok ${BAR-def}") == "ok "
| {
"content_hash": "03087ad3d1683b62faa1806abc5adb80",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 92,
"avg_line_length": 29.89189189189189,
"alnum_prop": 0.5825045207956601,
"repo_name": "hoogenm/compose",
"id": "018a5621a4cb1176c07ded48f314bb4f212b908b",
"size": "4424",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/unit/config/interpolation_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "2600"
},
{
"name": "Python",
"bytes": "817047"
},
{
"name": "Shell",
"bytes": "30370"
}
],
"symlink_target": ""
} |
import sys
import locomatix
import locomatix.lql as lql
from _utils import *
def query_location_history():
"""docstring for query_location_history"""
parser = locomatix.ArgsParser()
parser.add_description("Query the location history of an object")
parser.add_arg('query', 'LQL query to execute')
parser.add_option('start-time', 'b:', 'starttime=', 'Start time of the location history')
parser.add_option('end-time', 'e:', 'endtime=', 'End time of the location history')
args = parser.parse_args(sys.argv)
query = args['query']
try:
    start_time_present = len(args['start-time']) > 0
    end_time_present = len(args['end-time']) > 0
if start_time_present and end_time_present:
end_time = convert_time(args['end-time'])
start_time = convert_time(args['start-time'])
elif start_time_present:
start_time = convert_time(args['start-time'])
end_time = start_time + 3600
elif end_time_present:
end_time = convert_time(args['end-time'])
start_time = end_time - 3600
else:
end_time = time.time()
start_time = end_time - 3600
except ValueError:
print "start time or end time not in valid format"
sys.exit(1)
try:
lxclient = locomatix.Client(args['custid'], \
args['key'], \
args['secret-key'], \
args['host'], \
args['port'])
  except Exception:
print "Unable to connect to %s at port %d" % (args['host'],args['port'])
sys.exit(1)
try:
start_key = locomatix.DEFAULT_FETCH_STARTKEY
fetch_size = locomatix.DEFAULT_FETCH_SIZE
query = lql.Query(query)._query
while True:
batch = lxclient._request('get_location_history', query, start_time, end_time, start_key, fetch_size)
if len(batch.locations) > 0:
dprint(args, lxclient.response_body(), '\n'.join('%s' % loc for loc in batch.locations))
elif len(batch.aggrs) > 0:
dprint(args, lxclient.response_body(), '\n'.join('%s' % aggr for aggr in batch.aggrs))
if batch.next_key == None:
break # this is the last batch
start_key = batch.next_key
except locomatix.LxException, e:
dprint(args, lxclient.response_body(), \
"error: failed to query location history for %s - %s" % (query, str(e)))
sys.exit(1)
except KeyboardInterrupt:
sys.exit(1)
if __name__ == '__main__':
query_location_history()
| {
"content_hash": "f817e1ca20a910ba1b22a7d1e24f9af7",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 107,
"avg_line_length": 32.55844155844156,
"alnum_prop": 0.6098923015556442,
"repo_name": "locomatix/locomatix-python",
"id": "79e3fde327885ab79848e3d817314fa03c200ca0",
"size": "3275",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "locomatix/cli/query_location_history.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "238776"
}
],
"symlink_target": ""
} |
from psi.app.utils import get_name
from psi.app import const
from psi.app.models.data_security_mixin import DataSecurityMixin
from psi.app.service import Info
from psi.app.utils import date_util
from sqlalchemy import Column, Integer, ForeignKey, String, Date, select, func, \
event
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
db = Info.get_db()
class Customer(db.Model, DataSecurityMixin):
__tablename__ = 'customer'
id = Column(Integer, primary_key=True)
first_name = Column(String(32), unique=False, nullable=True)
last_name = Column(String(32), unique=False, nullable=True)
mobile_phone = Column(String(32), unique=True, nullable=True)
email = Column(String(64))
address = Column(String(64), unique=False, nullable=True)
birthday = Column(Date, nullable=True)
join_date = Column(Date, nullable=False)
points = Column(Integer, nullable=False)
join_channel_id = Column(Integer, ForeignKey('enum_values.id'), nullable=False)
join_channel = relationship('EnumValues', foreign_keys=[join_channel_id])
level_id = Column(Integer, ForeignKey('enum_values.id'), nullable=False)
level = relationship('EnumValues', foreign_keys=[level_id])
organization_id = db.Column(Integer, ForeignKey('organization.id'))
organization = relationship('Organization', foreign_keys=[organization_id])
mnemonic = Column(String(64), unique=False, nullable=True)
@hybrid_property
def member_age(self):
return int(date_util.num_years(self.join_date))
@member_age.setter
def member_age(self, val):
pass
@member_age.expression
def member_age(self):
"""
Being used in the UI sorting and filtering
:return:member age
"""
return func.date_part("year", func.age(self.join_date)).label("member_age")
@hybrid_property
def name(self):
return get_name(self.last_name, self.first_name)
@name.setter
def name(self, value):
pass
@hybrid_property
def total_spent(self):
orders = self.sales_orders
t = 0
for order in orders:
t += order.actual_amount
return t
@total_spent.setter
def total_spent(self, val):
pass
@total_spent.expression
def total_spent(self):
from psi.app.models.sales_order import SalesOrder, SalesOrderLine
return (select([func.sum(SalesOrderLine.quantity * SalesOrderLine.unit_price)])
.where(SalesOrder.id == SalesOrderLine.sales_order_id)
.where(self.id == SalesOrder.customer_id)
.label('total_spent'))
@staticmethod
def join_channel_filter():
from psi.app.models.enum_values import EnumValues
return EnumValues.type_filter(const.CUSTOMER_JOIN_CHANNEL_KEY)
@staticmethod
def level_filter():
from psi.app.models.enum_values import EnumValues
return EnumValues.type_filter(const.CUSTOMER_LEVEL_KEY)
def __repr__(self):
return self.name + ' - ' + self.level.display
def get_value_for_mnemonic(self):
return get_name(self.last_name, self.first_name)
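# A minimal query sketch (hypothetical; shown as comments because it needs an
# application context and a bound session):
#
#   from sqlalchemy import desc
#   top_spenders = (db.session.query(Customer)
#                     .order_by(desc(Customer.total_spent))
#                     .limit(10).all())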
| {
"content_hash": "fd3398cf633e90f648129b632bf13c59",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 87,
"avg_line_length": 33.88297872340426,
"alnum_prop": 0.6712715855572998,
"repo_name": "betterlife/psi",
"id": "905777999250345b6ab02db5d44855a74baefe5c",
"size": "3203",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "psi/app/models/customer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14410"
},
{
"name": "HTML",
"bytes": "52928"
},
{
"name": "JavaScript",
"bytes": "493605"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "528554"
}
],
"symlink_target": ""
} |
from flask import Flask, render_template, redirect, url_for
from flask_assets import Environment, Bundle
try:
from flask.ext.cors import CORS # The typical way to import flask-cors
except ImportError:
# Path hack allows examples to be run without installation.
import os
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0, parentdir)
from flask.ext.cors import CORS
from random import uniform
from core.configuration import init
import core.parser as parser
import core.data_manip as data_manip
import core.cache as cache
# Import blueprint modules
route_modules = ['data', 'citymap']
for module in route_modules:
exec('from routes.%s import *' % (module))
app = Flask(__name__)
cache.init_app(app, config={'CACHE_TYPE': 'simple'})
cors = CORS(app, allow_headers='Content-Type')
# Init the flask application by parameters
init(app)
# Route / will redirect users to the citymap_main function
# from the citymap blueprint
@app.route('/')
def main():
return redirect(url_for("citymap.citymap_main"))
# Attention! Run the app from the src directory to make it work
region_info = parser.parse_region_data('../data/polygon-info.csv')
parser.parse_average_price_data('../data/average-housing-price.csv', region_info)
park_data = parser.parse_park_data('../data/green-areas-and-parks.csv')
park_data = data_manip.add_postcode_for_places(region_info, park_data)
sport_data = parser.parse_sport_fields_data('../data/open-sport-fields.csv')
sport_data = data_manip.add_postcode_for_places(region_info, sport_data)
# function_data = parser.parse_building_function_data('../data/FUNCTIEKAART_region.dbf')
# postcode_data = parser.parse_postcode_data('../data/postcode_NH.csv')
__category_mapping, categories = parser.parse_category_mapping('../data/matching.csv')
# functional_dataset = data_manip.create_building_function_dataset(function_data, postcode_data, category_mapping)
functional_dataset = parser.parse_functional_building_data('../data/func_data.csv')
places_data = park_data + sport_data + functional_dataset
def calc_green_stat(index):
park_area = sum([place['area'] for place in park_data if place['region'] == region_info[index]['region']])
return [park_area / region_info[index]['area'], park_area]
def calc_sport_stat(index):
tmp = len([place for place in sport_data if place['region'] == region_info[index]['region']])
return [((10 ** 5) * tmp) / region_info[index]['area'], tmp]
def split_data(index):
all_places = [place for place in functional_dataset if place['region'] == region_info[index]['region']]
res = {}
for cat in categories:
tr = len([place for place in all_places if place['type'] == cat])
res[cat] = ((10 ** 5 * tr) / region_info[index]['area'], tr)
return res
categories.append('non-leisure')
categories.append('green')
maxcat = {}
index = 0
for region in region_info:
fd = split_data(index)
for cat in categories:
if cat in fd:
#print region, cat, fd[cat][0]
region_info[index][cat] = fd[cat][0]
region_info[index][cat + "_real"] = fd[cat][1]
if cat == 'sport':
sport = calc_sport_stat(index)
if 'sport' in region_info[index]:
region_info[index]['sport'] += sport[0]
region_info[index][cat + "_real"] += sport[1]
else:
region_info[index]['sport'] = sport[0]
region_info[index][cat + "_real"] = sport[1]
elif cat == 'green':
green = calc_green_stat(index)
region_info[index]['green'] = green[0]
region_info[index]['green_real'] = green[1]
if cat not in maxcat or region_info[index][cat] > maxcat[cat]:
maxcat[cat] = region_info[index][cat]
index += 1
for region in region_info:
for cat in categories:
region[cat] = (region[cat]/maxcat[cat])*100
#print region_info#, places_data, categories
construct_data(region_info = region_info, places_data = places_data, supported_types=categories)
construct_citymap()
# Registers flask modules (called Blueprints)
app.register_blueprint(data)
app.register_blueprint(citymap)
assets = Environment(app)
js_main = Bundle("js/main.js", filters="jsmin", output="gen/min.js")
assets.register("js_main", js_main)
if __name__ == '__main__':
# Run the app
app.run(host='0.0.0.0', port=10052)
| {
"content_hash": "e2c8e9c6b49f78cfefcb402401e3ea1a",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 114,
"avg_line_length": 35.330708661417326,
"alnum_prop": 0.662357922888344,
"repo_name": "Ignotus/infoviz-project",
"id": "08fa3934ce10ad7aae185456b5216b866664e85c",
"size": "4487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2266"
},
{
"name": "HTML",
"bytes": "14229"
},
{
"name": "JavaScript",
"bytes": "21978"
},
{
"name": "Python",
"bytes": "17505"
},
{
"name": "Shell",
"bytes": "177"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from wagtail.search.models import Query
class SearchPromotion(models.Model):
query = models.ForeignKey(Query, db_index=True, related_name='editors_picks', on_delete=models.CASCADE)
page = models.ForeignKey('wagtailcore.Page', verbose_name=_('page'), on_delete=models.CASCADE)
sort_order = models.IntegerField(null=True, blank=True, editable=False)
description = models.TextField(verbose_name=_('description'), blank=True)
def __repr__(self):
return 'SearchPromotion(query="' + self.query.query_string + '", page="' + self.page.title + '")'
class Meta:
ordering = ('sort_order', )
verbose_name = _("search promotion")
| {
"content_hash": "99f268a56c1194d7a191348294365f0c",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 107,
"avg_line_length": 41.94444444444444,
"alnum_prop": 0.6980132450331126,
"repo_name": "mikedingjan/wagtail",
"id": "a0d3c6c93287236942b0c9ec49992314102695b9",
"size": "755",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "wagtail/contrib/search_promotions/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "183841"
},
{
"name": "Dockerfile",
"bytes": "703"
},
{
"name": "HTML",
"bytes": "373400"
},
{
"name": "JavaScript",
"bytes": "266257"
},
{
"name": "Makefile",
"bytes": "992"
},
{
"name": "Python",
"bytes": "3607707"
},
{
"name": "Shell",
"bytes": "8289"
}
],
"symlink_target": ""
} |
from abc import (
ABCMeta,
abstractmethod,
)
from errno import ENOENT
from os import remove
from os.path import exists
import sqlite3
from bcolz import (
carray,
ctable,
open as open_ctable,
)
from click import progressbar
from numpy import (
array,
int64,
float64,
floating,
full,
iinfo,
integer,
issubdtype,
nan,
uint32,
)
from pandas import (
DataFrame,
DatetimeIndex,
read_csv,
Timestamp,
)
from six import (
iteritems,
with_metaclass,
)
from zipline.utils.input_validation import coerce_string, preprocess
from ._equities import _compute_row_slices, _read_bcolz_data
from ._adjustments import load_adjustments_from_sqlite
import logbook
logger = logbook.Logger('UsEquityPricing')
OHLC = frozenset(['open', 'high', 'low', 'close'])
US_EQUITY_PRICING_BCOLZ_COLUMNS = [
'open', 'high', 'low', 'close', 'volume', 'day', 'id'
]
SQLITE_ADJUSTMENT_COLUMNS = frozenset(['effective_date', 'ratio', 'sid'])
SQLITE_ADJUSTMENT_COLUMN_DTYPES = {
'effective_date': integer,
'ratio': floating,
'sid': integer,
}
SQLITE_ADJUSTMENT_TABLENAMES = frozenset(['splits', 'dividends', 'mergers'])
SQLITE_DIVIDEND_PAYOUT_COLUMNS = frozenset(
['sid',
'ex_date',
'declared_date',
'pay_date',
'record_date',
'amount'])
SQLITE_DIVIDEND_PAYOUT_COLUMN_DTYPES = {
'sid': integer,
'ex_date': integer,
'declared_date': integer,
'record_date': integer,
'pay_date': integer,
'amount': float,
}
SQLITE_STOCK_DIVIDEND_PAYOUT_COLUMNS = frozenset(
['sid',
'ex_date',
'declared_date',
'record_date',
'pay_date',
'payment_sid',
'ratio'])
SQLITE_STOCK_DIVIDEND_PAYOUT_COLUMN_DTYPES = {
'sid': integer,
'ex_date': integer,
'declared_date': integer,
'record_date': integer,
'pay_date': integer,
'payment_sid': integer,
'ratio': float,
}
UINT32_MAX = iinfo(uint32).max
class NoDataOnDate(Exception):
"""
    Raised when a spot price cannot be found for the sid and date.
"""
pass
class BcolzDailyBarWriter(with_metaclass(ABCMeta)):
"""
Class capable of writing daily OHLCV data to disk in a format that can be
    read efficiently by BcolzDailyBarReader.
See Also
--------
BcolzDailyBarReader : Consumer of the data written by this class.
"""
@abstractmethod
def gen_tables(self, assets):
"""
Return an iterator of pairs of (asset_id, bcolz.ctable).
"""
raise NotImplementedError()
@abstractmethod
def to_uint32(self, array, colname):
"""
Convert raw column values produced by gen_tables into uint32 values.
Parameters
----------
array : np.array
An array of raw values.
colname : str, {'open', 'high', 'low', 'close', 'volume', 'day'}
The name of the column being loaded.
        For output being read by the default BcolzDailyBarReader, data should be
stored in the following manner:
- Pricing columns (Open, High, Low, Close) should be stored as 1000 *
as-traded dollar value.
- Volume should be the as-traded volume.
- Dates should be stored as seconds since midnight UTC, Jan 1, 1970.
"""
raise NotImplementedError()
def write(self, filename, calendar, assets, show_progress=False):
"""
Parameters
----------
filename : str
The location at which we should write our output.
calendar : pandas.DatetimeIndex
Calendar to use to compute asset calendar offsets.
assets : pandas.Int64Index
The assets for which to write data.
show_progress : bool
Whether or not to show a progress bar while writing.
Returns
-------
table : bcolz.ctable
The newly-written table.
"""
_iterator = self.gen_tables(assets)
if show_progress:
pbar = progressbar(
_iterator,
length=len(assets),
item_show_func=lambda i: i if i is None else str(i[0]),
label="Merging asset files:",
)
with pbar as pbar_iterator:
return self._write_internal(filename, calendar, pbar_iterator)
return self._write_internal(filename, calendar, _iterator)
def _write_internal(self, filename, calendar, iterator):
"""
Internal implementation of write.
`iterator` should be an iterator yielding pairs of (asset, ctable).
"""
total_rows = 0
first_row = {}
last_row = {}
calendar_offset = {}
# Maps column name -> output carray.
columns = {
k: carray(array([], dtype=uint32))
for k in US_EQUITY_PRICING_BCOLZ_COLUMNS
}
for asset_id, table in iterator:
nrows = len(table)
for column_name in columns:
if column_name == 'id':
# We know what the content of this column is, so don't
# bother reading it.
columns['id'].append(full((nrows,), asset_id))
continue
columns[column_name].append(
self.to_uint32(table[column_name][:], column_name)
)
# Bcolz doesn't support ints as keys in `attrs`, so convert
# assets to strings for use as attr keys.
asset_key = str(asset_id)
# Calculate the index into the array of the first and last row
# for this asset. This allows us to efficiently load single
# assets when querying the data back out of the table.
first_row[asset_key] = total_rows
last_row[asset_key] = total_rows + nrows - 1
total_rows += nrows
# Calculate the number of trading days between the first date
# in the stored data and the first date of **this** asset. This
# offset used for output alignment by the reader.
# HACK: Index with a list so that we get back an array we can pass
# to self.to_uint32. We could try to extract this in the loop
# above, but that makes the logic a lot messier.
asset_first_day = self.to_uint32(table['day'][[0]], 'day')[0]
calendar_offset[asset_key] = calendar.get_loc(
Timestamp(asset_first_day, unit='s', tz='UTC'),
)
# This writes the table to disk.
full_table = ctable(
columns=[
columns[colname]
for colname in US_EQUITY_PRICING_BCOLZ_COLUMNS
],
names=US_EQUITY_PRICING_BCOLZ_COLUMNS,
rootdir=filename,
mode='w',
)
full_table.attrs['first_row'] = first_row
full_table.attrs['last_row'] = last_row
full_table.attrs['calendar_offset'] = calendar_offset
full_table.attrs['calendar'] = calendar.asi8.tolist()
return full_table
class DailyBarWriterFromCSVs(BcolzDailyBarWriter):
"""
BcolzDailyBarWriter constructed from a map from csvs to assets.
Parameters
----------
asset_map : dict
A map from asset_id -> path to csv with data for that asset.
CSVs should have the following columns:
day : datetime64
open : float64
high : float64
low : float64
close : float64
volume : int64
"""
_csv_dtypes = {
'open': float64,
'high': float64,
'low': float64,
'close': float64,
'volume': float64,
}
def __init__(self, asset_map):
self._asset_map = asset_map
def gen_tables(self, assets):
"""
Read CSVs as DataFrames from our asset map.
"""
dtypes = self._csv_dtypes
for asset in assets:
path = self._asset_map.get(asset)
if path is None:
raise KeyError("No path supplied for asset %s" % asset)
data = read_csv(path, parse_dates=['day'], dtype=dtypes)
yield asset, ctable.fromdataframe(data)
def to_uint32(self, array, colname):
arrmax = array.max()
if colname in OHLC:
self.check_uint_safe(arrmax * 1000, colname)
return (array * 1000).astype(uint32)
elif colname == 'volume':
self.check_uint_safe(arrmax, colname)
return array.astype(uint32)
elif colname == 'day':
nanos_per_second = (1000 * 1000 * 1000)
self.check_uint_safe(arrmax.view(int) / nanos_per_second, colname)
return (array.view(int) / nanos_per_second).astype(uint32)
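    # Worked example of the scaling above: an as-traded close of 10.25 is
    # stored as the uint32 value 10250; BcolzDailyBarReader.spot_price later
    # recovers it as 10250 * 0.001 == 10.25.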
@staticmethod
def check_uint_safe(value, colname):
if value >= UINT32_MAX:
raise ValueError(
"Value %s from column '%s' is too large" % (value, colname)
)
class BcolzDailyBarReader(object):
"""
    Reader for raw pricing data written by BcolzDailyBarWriter.
A Bcolz CTable is comprised of Columns and Attributes.
Columns
-------
The table with which this loader interacts contains the following columns:
['open', 'high', 'low', 'close', 'volume', 'day', 'id'].
The data in these columns is interpreted as follows:
- Price columns ('open', 'high', 'low', 'close') are interpreted as 1000 *
as-traded dollar value.
- Volume is interpreted as as-traded volume.
- Day is interpreted as seconds since midnight UTC, Jan 1, 1970.
- Id is the asset id of the row.
The data in each column is grouped by asset and then sorted by day within
each asset block.
The table is built to represent a long time range of data, e.g. ten years
    of equity data, so the asset blocks are generally of unequal length. The
    blocks are clipped to the known start and end date of each asset
to cut down on the number of empty values that would need to be included to
make a regular/cubic dataset.
    When read across, the open, high, low, close, and volume values at the
    same index represent the same asset and day.
Attributes
----------
The table with which this loader interacts contains the following
attributes:
first_row : dict
Map from asset_id -> index of first row in the dataset with that id.
last_row : dict
Map from asset_id -> index of last row in the dataset with that id.
calendar_offset : dict
Map from asset_id -> calendar index of first row.
calendar : list[int64]
Calendar used to compute offsets, in asi8 format (ns since EPOCH).
We use first_row and last_row together to quickly find ranges of rows to
load when reading an asset's data into memory.
We use calendar_offset and calendar to orient loaded blocks within a
range of queried dates.
"""
@preprocess(table=coerce_string(open_ctable, mode='r'))
def __init__(self, table):
self._table = table
self._calendar = DatetimeIndex(table.attrs['calendar'], tz='UTC')
self._first_rows = {
int(asset_id): start_index
for asset_id, start_index in iteritems(table.attrs['first_row'])
}
self._last_rows = {
int(asset_id): end_index
for asset_id, end_index in iteritems(table.attrs['last_row'])
}
self._calendar_offsets = {
int(id_): offset
for id_, offset in iteritems(table.attrs['calendar_offset'])
}
# Cache of fully read np.array for the carrays in the daily bar table.
# raw_array does not use the same cache, but it could.
# Need to test keeping the entire array in memory for the course of a
# process first.
self._spot_cols = {}
def _compute_slices(self, start_idx, end_idx, assets):
"""
Compute the raw row indices to load for each asset on a query for the
given dates after applying a shift.
Parameters
----------
start_idx : int
Index of first date for which we want data.
end_idx : int
Index of last date for which we want data.
assets : pandas.Int64Index
Assets for which we want to compute row indices
Returns
-------
A 3-tuple of (first_rows, last_rows, offsets):
first_rows : np.array[intp]
Array with length == len(assets) containing the index of the first
row to load for each asset in `assets`.
last_rows : np.array[intp]
Array with length == len(assets) containing the index of the last
row to load for each asset in `assets`.
offset : np.array[intp]
            Array with length == len(assets) containing the index in a buffer
of length `dates` corresponding to the first row of each asset.
The value of offset[i] will be 0 if asset[i] existed at the start
of a query. Otherwise, offset[i] will be equal to the number of
entries in `dates` for which the asset did not yet exist.
"""
# The core implementation of the logic here is implemented in Cython
# for efficiency.
return _compute_row_slices(
self._first_rows,
self._last_rows,
self._calendar_offsets,
start_idx,
end_idx,
assets,
)
def load_raw_arrays(self, columns, start_date, end_date, assets):
# Assumes that the given dates are actually in calendar.
start_idx = self._calendar.get_loc(start_date)
end_idx = self._calendar.get_loc(end_date)
first_rows, last_rows, offsets = self._compute_slices(
start_idx,
end_idx,
assets,
)
return _read_bcolz_data(
self._table,
(end_idx - start_idx + 1, len(assets)),
[column.name for column in columns],
first_rows,
last_rows,
offsets,
)
def _spot_col(self, colname):
"""
Get the colname from daily_bar_table and read all of it into memory,
caching the result.
Parameters
----------
colname : string
A name of a OHLCV carray in the daily_bar_table
Returns
-------
array (uint32)
Full read array of the carray in the daily_bar_table with the
given colname.
"""
try:
col = self._spot_cols[colname]
except KeyError:
col = self._spot_cols[colname] = self._table[colname][:]
return col
def sid_day_index(self, sid, day):
"""
Parameters
----------
sid : int
The asset identifier.
day : datetime64-like
Midnight of the day for which data is requested.
Returns
-------
int
Index into the data tape for the given sid and day.
            Raises a NoDataOnDate exception if the given day and sid are
            before or after the date range of the equity.
"""
day_loc = self._calendar.get_loc(day)
offset = day_loc - self._calendar_offsets[sid]
if offset < 0:
raise NoDataOnDate(
"No data on or before day={0} for sid={1}".format(
day, sid))
ix = self._first_rows[sid] + offset
if ix > self._last_rows[sid]:
raise NoDataOnDate(
"No data on or after day={0} for sid={1}".format(
day, sid))
return ix
def spot_price(self, sid, day, colname):
"""
Parameters
----------
sid : int
The asset identifier.
day : datetime64-like
Midnight of the day for which data is requested.
colname : string
The price field. e.g. ('open', 'high', 'low', 'close', 'volume')
Returns
-------
float
The spot price for colname of the given sid on the given day.
            Raises a NoDataOnDate exception if the given day and sid are
            before or after the date range of the equity.
Returns -1 if the day is within the date range, but the price is
0.
"""
ix = self.sid_day_index(sid, day)
price = self._spot_col(colname)[ix]
if price == 0:
return -1
if colname != 'volume':
return price * 0.001
else:
return price
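# spot_price round-trips the uint32 storage format described above: a stored
# close of 12340 is returned as 12.34, while a stored 0 (no trade that day)
# is reported as -1. A sketch with hypothetical sid and date:
#
#     price = reader.spot_price(24, Timestamp('2015-06-01', tz='UTC'), 'close')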
class SQLiteAdjustmentWriter(object):
"""
Writer for data to be read by SQLiteAdjustmentReader
Parameters
----------
conn_or_path : str or sqlite3.Connection
A handle to the target sqlite database.
overwrite : bool, optional, default=False
If True and conn_or_path is a string, remove any existing files at the
given path before connecting.
See Also
--------
SQLiteAdjustmentReader
"""
def __init__(self, conn_or_path, calendar, daily_bar_reader,
overwrite=False):
if isinstance(conn_or_path, sqlite3.Connection):
self.conn = conn_or_path
elif isinstance(conn_or_path, str):
if overwrite and exists(conn_or_path):
try:
remove(conn_or_path)
except OSError as e:
if e.errno != ENOENT:
raise
self.conn = sqlite3.connect(conn_or_path)
else:
raise TypeError("Unknown connection type %s" % type(conn_or_path))
self._daily_bar_reader = daily_bar_reader
self._calendar = calendar
def write_frame(self, tablename, frame):
if frozenset(frame.columns) != SQLITE_ADJUSTMENT_COLUMNS:
raise ValueError(
"Unexpected frame columns:\n"
"Expected Columns: %s\n"
"Received Columns: %s" % (
SQLITE_ADJUSTMENT_COLUMNS,
frame.columns.tolist(),
)
)
elif tablename not in SQLITE_ADJUSTMENT_TABLENAMES:
raise ValueError(
"Adjustment table %s not in %s" % (
tablename, SQLITE_ADJUSTMENT_TABLENAMES
)
)
expected_dtypes = SQLITE_ADJUSTMENT_COLUMN_DTYPES
actual_dtypes = frame.dtypes
for colname, expected in iteritems(expected_dtypes):
actual = actual_dtypes[colname]
if not issubdtype(actual, expected):
raise TypeError(
"Expected data of type {expected} for column '{colname}', "
"but got {actual}.".format(
expected=expected,
colname=colname,
actual=actual,
)
)
return frame.to_sql(tablename, self.conn)
def write_dividend_payouts(self, frame):
"""
Write dividend payout data to SQLite table `dividend_payouts`.
"""
if frozenset(frame.columns) != SQLITE_DIVIDEND_PAYOUT_COLUMNS:
raise ValueError(
"Unexpected frame columns:\n"
"Expected Columns: %s\n"
"Received Columns: %s" % (
sorted(SQLITE_DIVIDEND_PAYOUT_COLUMNS),
sorted(frame.columns.tolist()),
)
)
expected_dtypes = SQLITE_DIVIDEND_PAYOUT_COLUMN_DTYPES
actual_dtypes = frame.dtypes
for colname, expected in iteritems(expected_dtypes):
actual = actual_dtypes[colname]
if not issubdtype(actual, expected):
raise TypeError(
"Expected data of type {expected} for column '{colname}', "
"but got {actual}.".format(
expected=expected,
colname=colname,
actual=actual,
)
)
return frame.to_sql('dividend_payouts', self.conn)
def write_stock_dividend_payouts(self, frame):
if frozenset(frame.columns) != SQLITE_STOCK_DIVIDEND_PAYOUT_COLUMNS:
raise ValueError(
"Unexpected frame columns:\n"
"Expected Columns: %s\n"
"Received Columns: %s" % (
sorted(SQLITE_STOCK_DIVIDEND_PAYOUT_COLUMNS),
sorted(frame.columns.tolist()),
)
)
expected_dtypes = SQLITE_STOCK_DIVIDEND_PAYOUT_COLUMN_DTYPES
actual_dtypes = frame.dtypes
for colname, expected in iteritems(expected_dtypes):
actual = actual_dtypes[colname]
if not issubdtype(actual, expected):
raise TypeError(
"Expected data of type {expected} for column '{colname}', "
"but got {actual}.".format(
expected=expected,
colname=colname,
actual=actual,
)
)
return frame.to_sql('stock_dividend_payouts', self.conn)
def calc_dividend_ratios(self, dividends):
"""
Calculate the ratios to apply to equities when looking back at pricing
history so that the price is smoothed over the ex_date, when the market
        adjusts to the change in equity value due to the upcoming dividend.
Returns
-------
DataFrame
A frame in the same format as splits and mergers, with keys
- sid, the id of the equity
- effective_date, the date in seconds on which to apply the ratio.
- ratio, the ratio to apply to backwards looking pricing data.
"""
ex_dates = dividends.ex_date.values
sids = dividends.sid.values
amounts = dividends.amount.values
ratios = full(len(amounts), nan)
daily_bar_reader = self._daily_bar_reader
calendar = self._calendar
effective_dates = full(len(amounts), -1, dtype=int64)
for i, amount in enumerate(amounts):
sid = sids[i]
ex_date = ex_dates[i]
day_loc = calendar.get_loc(ex_date)
prev_close_date = calendar[day_loc - 1]
try:
prev_close = daily_bar_reader.spot_price(
sid, prev_close_date, 'close')
if prev_close != 0.0:
ratio = 1.0 - amount / prev_close
ratios[i] = ratio
# only assign effective_date when data is found
effective_dates[i] = ex_date
except NoDataOnDate:
logger.warn("Couldn't compute ratio for dividend %s" % {
'sid': sid,
'ex_date': ex_date,
'amount': amount,
})
continue
# Create a mask to filter out indices in the effective_date, sid, and
# ratio vectors for which a ratio was not calculable.
effective_mask = effective_dates != -1
effective_dates = effective_dates[effective_mask]
effective_dates = effective_dates.astype('datetime64[ns]').\
astype('datetime64[s]').astype(uint32)
sids = sids[effective_mask]
ratios = ratios[effective_mask]
return DataFrame({
'sid': sids,
'effective_date': effective_dates,
'ratio': ratios,
})
def write_dividend_data(self, dividends, stock_dividends=None):
"""
Write both dividend payouts and the derived price adjustment ratios.
"""
# First write the dividend payouts.
dividend_payouts = dividends.copy()
dividend_payouts['ex_date'] = dividend_payouts['ex_date'].values.\
astype('datetime64[s]').astype(integer)
dividend_payouts['record_date'] = \
dividend_payouts['record_date'].values.astype('datetime64[s]').\
astype(integer)
dividend_payouts['declared_date'] = \
dividend_payouts['declared_date'].values.astype('datetime64[s]').\
astype(integer)
dividend_payouts['pay_date'] = \
dividend_payouts['pay_date'].values.astype('datetime64[s]').\
astype(integer)
self.write_dividend_payouts(dividend_payouts)
if stock_dividends is not None:
stock_dividend_payouts = stock_dividends.copy()
stock_dividend_payouts['ex_date'] = \
stock_dividend_payouts['ex_date'].values.\
astype('datetime64[s]').astype(integer)
stock_dividend_payouts['record_date'] = \
stock_dividend_payouts['record_date'].values.\
astype('datetime64[s]').astype(integer)
stock_dividend_payouts['declared_date'] = \
stock_dividend_payouts['declared_date'].\
values.astype('datetime64[s]').astype(integer)
stock_dividend_payouts['pay_date'] = \
stock_dividend_payouts['pay_date'].\
values.astype('datetime64[s]').astype(integer)
else:
stock_dividend_payouts = DataFrame({
'sid': array([], dtype=uint32),
'record_date': array([], dtype=uint32),
'ex_date': array([], dtype=uint32),
'declared_date': array([], dtype=uint32),
'pay_date': array([], dtype=uint32),
'payment_sid': array([], dtype=uint32),
'ratio': array([], dtype=float),
})
self.write_stock_dividend_payouts(stock_dividend_payouts)
# Second from the dividend payouts, calculate ratios.
dividend_ratios = self.calc_dividend_ratios(dividends)
self.write_frame('dividends', dividend_ratios)
def write(self, splits, mergers, dividends, stock_dividends=None):
"""
Writes data to a SQLite file to be read by SQLiteAdjustmentReader.
Parameters
----------
splits : pandas.DataFrame
Dataframe containing split data.
mergers : pandas.DataFrame
DataFrame containing merger data.
dividends : pandas.DataFrame
DataFrame containing dividend data.
Notes
-----
DataFrame input (`splits`, `mergers`) should all have
the following columns:
effective_date : int
The date, represented as seconds since Unix epoch, on which the
adjustment should be applied.
ratio : float
A value to apply to all data earlier than the effective date.
sid : int
The asset id associated with this adjustment.
The ratio column is interpreted as follows:
- For all adjustment types, multiply price fields ('open', 'high',
'low', and 'close') by the ratio.
- For **splits only**, **divide** volume by the adjustment ratio.
DataFrame input, 'dividends' should have the following columns:
sid : int
The asset id associated with this adjustment.
ex_date : datetime64
The date on which an equity must be held to be eligible to receive
payment.
declared_date : datetime64
The date on which the dividend is announced to the public.
pay_date : datetime64
The date on which the dividend is distributed.
record_date : datetime64
The date on which the stock ownership is checked to determine
distribution of dividends.
amount : float
The cash amount paid for each share.
Dividend ratios are calculated as
1.0 - (dividend_value / "close on day prior to dividend ex_date").
DataFrame input, 'stock_dividends' should have the following columns:
sid : int
The asset id associated with this adjustment.
ex_date : datetime64
The date on which an equity must be held to be eligible to receive
payment.
declared_date : datetime64
The date on which the dividend is announced to the public.
pay_date : datetime64
The date on which the dividend is distributed.
record_date : datetime64
The date on which the stock ownership is checked to determine
distribution of dividends.
payment_sid : int
The asset id of the shares that should be paid instead of cash.
ratio: float
The ratio of currently held shares in the held sid that should
be paid with new shares of the payment_sid.
stock_dividends is optional.
Returns
-------
None
See Also
--------
SQLiteAdjustmentReader : Consumer for the data written by this class
"""
self.write_frame('splits', splits)
self.write_frame('mergers', mergers)
self.write_dividend_data(dividends, stock_dividends)
self.conn.execute(
"CREATE INDEX splits_sids "
"ON splits(sid)"
)
self.conn.execute(
"CREATE INDEX splits_effective_date "
"ON splits(effective_date)"
)
self.conn.execute(
"CREATE INDEX mergers_sids "
"ON mergers(sid)"
)
self.conn.execute(
"CREATE INDEX mergers_effective_date "
"ON mergers(effective_date)"
)
self.conn.execute(
"CREATE INDEX dividends_sid "
"ON dividends(sid)"
)
self.conn.execute(
"CREATE INDEX dividends_effective_date "
"ON dividends(effective_date)"
)
self.conn.execute(
"CREATE INDEX dividend_payouts_sid "
"ON dividend_payouts(sid)"
)
self.conn.execute(
"CREATE INDEX dividends_payouts_ex_date "
"ON dividend_payouts(ex_date)"
)
self.conn.execute(
"CREATE INDEX stock_dividend_payouts_sid "
"ON stock_dividend_payouts(sid)"
)
self.conn.execute(
"CREATE INDEX stock_dividends_payouts_ex_date "
"ON stock_dividend_payouts(ex_date)"
)
def close(self):
self.conn.close()
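# A minimal end-to-end sketch (hypothetical path and frames; the frames must
# carry exactly the columns validated by the write_* methods above):
#
#     writer = SQLiteAdjustmentWriter('adjustments.db', trading_calendar,
#                                     daily_bar_reader, overwrite=True)
#     writer.write(splits_frame, mergers_frame, dividends_frame)
#     writer.close()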
class SQLiteAdjustmentReader(object):
"""
Loads adjustments based on corporate actions from a SQLite database.
Expects data written in the format output by `SQLiteAdjustmentWriter`.
Parameters
----------
conn : str or sqlite3.Connection
Connection from which to load data.
"""
@preprocess(conn=coerce_string(sqlite3.connect))
def __init__(self, conn):
self.conn = conn
def load_adjustments(self, columns, dates, assets):
return load_adjustments_from_sqlite(
self.conn,
[column.name for column in columns],
dates,
assets,
)
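# Reading the adjustments back is symmetric (sketch; `cols`, `dates`, and
# `sids` are assumed to come from the caller's query):
#
#     reader = SQLiteAdjustmentReader('adjustments.db')
#     adjustments = reader.load_adjustments(cols, dates, sids)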
| {
"content_hash": "ed7e9726f7c6177fb9ad9aceababa48e",
"timestamp": "",
"source": "github",
"line_count": 911,
"max_line_length": 79,
"avg_line_length": 34.18770581778266,
"alnum_prop": 0.5666720179804142,
"repo_name": "grundgruen/zipline",
"id": "dcd9b1352ff462400bd1a3d640c3223cf9fbb27a",
"size": "31725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zipline/data/us_equity_pricing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "800"
},
{
"name": "Emacs Lisp",
"bytes": "138"
},
{
"name": "Jupyter Notebook",
"bytes": "168399"
},
{
"name": "Python",
"bytes": "1809750"
},
{
"name": "Shell",
"bytes": "4284"
}
],
"symlink_target": ""
} |
from courtreader import readers
from courtutils.logger import get_logger
from datetime import datetime, timedelta
import csv
import os
import sys
import time
MONGO = False
POSTGRES = True
if MONGO:
import pymongo
from courtutils.databases.mongo import MongoDatabase
if POSTGRES:
from courtutils.databases.postgres import PostgresDatabase
# get command line args
start_date = datetime.strptime(sys.argv[1], '%m/%d/%Y')
end_date = datetime.strptime(sys.argv[2], '%m/%d/%Y')
if start_date < end_date:
    raise ValueError('Start Date must be after End Date so they descend')
court_type = sys.argv[3]
if court_type != 'circuit' and court_type != 'district':
raise ValueError('Unknown court type')
case_type = sys.argv[4]
if case_type != 'criminal' and case_type != 'civil':
raise ValueError('Unknown case type')
# connect to database
db = None
if MONGO: db = MongoDatabase('va_court_search', court_type)
if POSTGRES: db = PostgresDatabase(court_type)
# get the courts to create tasks for
# check command line args for a specific court
courts = list(db.get_courts())
if len(sys.argv) > 5:
courts = [court for court in courts if court['fips'] == sys.argv[5]]
# create the tasks
tasks = []
for court in courts:
tasks.append({
'fips': court['fips'],
'start_date': start_date,
'end_date': end_date,
'case_type': case_type
})
# add the tasks to the database
db.add_date_tasks(tasks)
print 'Created', len(tasks), 'tasks'
| {
"content_hash": "d90ec6a9b339f132db87eb092b0f5662",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 72,
"avg_line_length": 26.945454545454545,
"alnum_prop": 0.699055330634278,
"repo_name": "bschoenfeld/va-court-scraper",
"id": "a6414e09db0945bef8c897dc339977902c5f5f29",
"size": "1482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "court_bulk_task_creator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "43814"
},
{
"name": "Python",
"bytes": "189578"
}
],
"symlink_target": ""
} |
"""
Test suite for SocketServer.py.
"""
import contextlib
import imp
import os
import select
import signal
import socket
import errno
import tempfile
import unittest
import SocketServer
import test.test_support
from test.test_support import reap_children, reap_threads, verbose
try:
import threading
except ImportError:
threading = None
test.test_support.requires("network")
TEST_STR = "hello world\n"
HOST = test.test_support.HOST
HAVE_UNIX_SOCKETS = hasattr(socket, "AF_UNIX")
requires_unix_sockets = unittest.skipUnless(HAVE_UNIX_SOCKETS,
'requires Unix sockets')
HAVE_FORKING = hasattr(os, "fork") and os.name != "os2"
requires_forking = unittest.skipUnless(HAVE_FORKING, 'requires forking')
def signal_alarm(n):
"""Call signal.alarm when it exists (i.e. not on Windows)."""
if hasattr(signal, 'alarm'):
signal.alarm(n)
# Remember real select() to avoid interferences with mocking
_real_select = select.select
def receive(sock, n, timeout=20):
r, w, x = _real_select([sock], [], [], timeout)
if sock in r:
return sock.recv(n)
else:
raise RuntimeError, "timed out on %r" % (sock,)
if HAVE_UNIX_SOCKETS:
class ForkingUnixStreamServer(SocketServer.ForkingMixIn,
SocketServer.UnixStreamServer):
pass
class ForkingUnixDatagramServer(SocketServer.ForkingMixIn,
SocketServer.UnixDatagramServer):
pass
@contextlib.contextmanager
def simple_subprocess(testcase):
pid = os.fork()
if pid == 0:
# Don't raise an exception; it would be caught by the test harness.
os._exit(72)
yield None
pid2, status = os.waitpid(pid, 0)
testcase.assertEqual(pid2, pid)
testcase.assertEqual(72 << 8, status)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketServerTest(unittest.TestCase):
"""Test all socket servers."""
def setUp(self):
signal_alarm(60) # Kill deadlocks after 60 seconds.
self.port_seed = 0
self.test_files = []
def tearDown(self):
signal_alarm(0) # Didn't deadlock.
reap_children()
for fn in self.test_files:
try:
os.remove(fn)
except os.error:
pass
self.test_files[:] = []
def pickaddr(self, proto):
if proto == socket.AF_INET:
return (HOST, 0)
else:
# XXX: We need a way to tell AF_UNIX to pick its own name
# like AF_INET provides port==0.
dir = None
if os.name == 'os2':
dir = '\socket'
fn = tempfile.mktemp(prefix='unix_socket.', dir=dir)
if os.name == 'os2':
# AF_UNIX socket names on OS/2 require a specific prefix
# which can't include a drive letter and must also use
# backslashes as directory separators
if fn[1] == ':':
fn = fn[2:]
if fn[0] in (os.sep, os.altsep):
fn = fn[1:]
if os.sep == '/':
fn = fn.replace(os.sep, os.altsep)
else:
fn = fn.replace(os.altsep, os.sep)
self.test_files.append(fn)
return fn
def make_server(self, addr, svrcls, hdlrbase):
class MyServer(svrcls):
def handle_error(self, request, client_address):
self.close_request(request)
self.server_close()
raise
class MyHandler(hdlrbase):
def handle(self):
line = self.rfile.readline()
self.wfile.write(line)
if verbose: print "creating server"
server = MyServer(addr, MyHandler)
self.assertEqual(server.server_address, server.socket.getsockname())
return server
@reap_threads
def run_server(self, svrcls, hdlrbase, testfunc):
server = self.make_server(self.pickaddr(svrcls.address_family),
svrcls, hdlrbase)
# We had the OS pick a port, so pull the real address out of
# the server.
addr = server.server_address
if verbose:
print "server created"
print "ADDR =", addr
print "CLASS =", svrcls
t = threading.Thread(
name='%s serving' % svrcls,
target=server.serve_forever,
# Short poll interval to make the test finish quickly.
# Time between requests is short enough that we won't wake
# up spuriously too many times.
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
t.start()
if verbose: print "server running"
for i in range(3):
if verbose: print "test client", i
testfunc(svrcls.address_family, addr)
if verbose: print "waiting for server"
server.shutdown()
t.join()
server.server_close()
self.assertRaises(socket.error, server.socket.fileno)
if verbose: print "done"
def stream_examine(self, proto, addr):
s = socket.socket(proto, socket.SOCK_STREAM)
s.connect(addr)
s.sendall(TEST_STR)
buf = data = receive(s, 100)
while data and '\n' not in buf:
data = receive(s, 100)
buf += data
self.assertEqual(buf, TEST_STR)
s.close()
def dgram_examine(self, proto, addr):
s = socket.socket(proto, socket.SOCK_DGRAM)
s.sendto(TEST_STR, addr)
buf = data = receive(s, 100)
while data and '\n' not in buf:
data = receive(s, 100)
buf += data
self.assertEqual(buf, TEST_STR)
s.close()
def test_TCPServer(self):
self.run_server(SocketServer.TCPServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
def test_ThreadingTCPServer(self):
self.run_server(SocketServer.ThreadingTCPServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
@requires_forking
def test_ForkingTCPServer(self):
with simple_subprocess(self):
self.run_server(SocketServer.ForkingTCPServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
@requires_unix_sockets
def test_UnixStreamServer(self):
self.run_server(SocketServer.UnixStreamServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
@requires_unix_sockets
def test_ThreadingUnixStreamServer(self):
self.run_server(SocketServer.ThreadingUnixStreamServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
@requires_unix_sockets
@requires_forking
def test_ForkingUnixStreamServer(self):
with simple_subprocess(self):
self.run_server(ForkingUnixStreamServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
def test_UDPServer(self):
self.run_server(SocketServer.UDPServer,
SocketServer.DatagramRequestHandler,
self.dgram_examine)
def test_ThreadingUDPServer(self):
self.run_server(SocketServer.ThreadingUDPServer,
SocketServer.DatagramRequestHandler,
self.dgram_examine)
@requires_forking
def test_ForkingUDPServer(self):
with simple_subprocess(self):
self.run_server(SocketServer.ForkingUDPServer,
SocketServer.DatagramRequestHandler,
self.dgram_examine)
@contextlib.contextmanager
def mocked_select_module(self):
"""Mocks the select.select() call to raise EINTR for first call"""
old_select = select.select
class MockSelect:
def __init__(self):
self.called = 0
def __call__(self, *args):
self.called += 1
if self.called == 1:
# raise the exception on first call
raise select.error(errno.EINTR, os.strerror(errno.EINTR))
else:
# Return real select value for consecutive calls
return old_select(*args)
select.select = MockSelect()
try:
yield select.select
finally:
select.select = old_select
def test_InterruptServerSelectCall(self):
with self.mocked_select_module() as mock_select:
pid = self.run_server(SocketServer.TCPServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
# Make sure select was called again:
self.assertGreater(mock_select.called, 1)
# Alas, on Linux (at least) recvfrom() doesn't return a meaningful
# client address so this cannot work:
# @requires_unix_sockets
# def test_UnixDatagramServer(self):
# self.run_server(SocketServer.UnixDatagramServer,
# SocketServer.DatagramRequestHandler,
# self.dgram_examine)
#
# @requires_unix_sockets
# def test_ThreadingUnixDatagramServer(self):
# self.run_server(SocketServer.ThreadingUnixDatagramServer,
# SocketServer.DatagramRequestHandler,
# self.dgram_examine)
#
# @requires_unix_sockets
# @requires_forking
# def test_ForkingUnixDatagramServer(self):
# self.run_server(SocketServer.ForkingUnixDatagramServer,
# SocketServer.DatagramRequestHandler,
# self.dgram_examine)
@reap_threads
def test_shutdown(self):
# Issue #2302: shutdown() should always succeed in making an
# other thread leave serve_forever().
class MyServer(SocketServer.TCPServer):
pass
class MyHandler(SocketServer.StreamRequestHandler):
pass
threads = []
for i in range(20):
s = MyServer((HOST, 0), MyHandler)
t = threading.Thread(
name='MyServer serving',
target=s.serve_forever,
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
threads.append((t, s))
for t, s in threads:
t.start()
s.shutdown()
for t, s in threads:
t.join()
def test_tcpserver_bind_leak(self):
# Issue #22435: the server socket wouldn't be closed if bind()/listen()
# failed.
# Create many servers for which bind() will fail, to see if this result
# in FD exhaustion.
for i in range(1024):
with self.assertRaises(OverflowError):
SocketServer.TCPServer((HOST, -1),
SocketServer.StreamRequestHandler)
def test_main():
if imp.lock_held():
# If the import lock is held, the threads will hang
raise unittest.SkipTest("can't run when import lock is held")
test.test_support.run_unittest(SocketServerTest)
if __name__ == "__main__":
test_main()
| {
"content_hash": "90da80eb2418ec181ae5c66a39b57434",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 79,
"avg_line_length": 34.071005917159766,
"alnum_prop": 0.5726814866273011,
"repo_name": "mcking49/apache-flask",
"id": "714ca4afb0a8f998d56e86d9e85651d30a9724da",
"size": "11516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/Lib/test/test_socketserver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2501"
},
{
"name": "C",
"bytes": "479174"
},
{
"name": "C++",
"bytes": "21416"
},
{
"name": "CSS",
"bytes": "170391"
},
{
"name": "Groff",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "1003190"
},
{
"name": "JavaScript",
"bytes": "1559701"
},
{
"name": "PHP",
"bytes": "3338"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "30714489"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
import frappe, unittest
from frappe.defaults import *
class TestDefaults(unittest.TestCase):
def test_global(self):
set_global_default("key1", "value1")
self.assertEquals(get_global_default("key1"), "value1")
set_global_default("key1", "value2")
self.assertEquals(get_global_default("key1"), "value2")
add_global_default("key1", "value3")
self.assertEquals(get_global_default("key1"), "value2")
self.assertEquals(get_defaults()["key1"], ["value2", "value3"])
self.assertEquals(get_user_default_as_list("key1"), ["value2", "value3"])
def test_user(self):
set_user_default("key1", "2value1")
self.assertEquals(get_user_default_as_list("key1"), ["2value1"])
set_user_default("key1", "2value2")
self.assertEquals(get_user_default("key1"), "2value2")
add_user_default("key1", "3value3")
self.assertEquals(get_user_default("key1"), "2value2")
self.assertEquals(get_user_default_as_list("key1"), ["2value2", "3value3"])
def test_global_if_not_user(self):
set_global_default("key4", "value4")
self.assertEquals(get_user_default("key4"), "value4")
def test_clear(self):
set_user_default("key5", "value5")
self.assertEquals(get_user_default("key5"), "value5")
clear_user_default("key5")
self.assertEquals(get_user_default("key5"), None)
def test_clear_global(self):
set_global_default("key6", "value6")
self.assertEquals(get_user_default("key6"), "value6")
clear_default("key6", value="value6")
self.assertEquals(get_user_default("key6"), None)
| {
"content_hash": "a47aa962e918f1bb6b1571a449998e15",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 77,
"avg_line_length": 34.20454545454545,
"alnum_prop": 0.6970099667774087,
"repo_name": "gangadhar-kadam/hrfrappe",
"id": "2c0fbb4c615384a3dc9a04ed9246c42caa246857",
"size": "1609",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/tests/test_defaults.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "105798"
},
{
"name": "JavaScript",
"bytes": "1458963"
},
{
"name": "Python",
"bytes": "714974"
}
],
"symlink_target": ""
} |
from collections import deque
import random
class ReplayBuffer(object):
def __init__(self, buffer_size):
self.buffer_size = buffer_size
self.num_experiences = 0
self.buffer = deque()
def get_batch(self, batch_size):
# Randomly sample batch_size examples
return random.sample(self.buffer, batch_size)
def size(self):
return self.buffer_size
    def add(self, state, program_order, action, reward, new_state, done):
        experience = (state, program_order, action, reward, new_state, done)
if self.num_experiences < self.buffer_size:
self.buffer.append(experience)
self.num_experiences += 1
else:
self.buffer.popleft()
self.buffer.append(experience)
def count(self):
# if buffer is full, return buffer size
# otherwise, return experience counter
return self.num_experiences
def erase(self):
self.buffer = deque()
self.num_experiences = 0
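# Minimal usage sketch (toy transition values; real shapes depend on the
# environment and are not dictated by this module):
if __name__ == "__main__":
    buf = ReplayBuffer(buffer_size=2)
    buf.add([0.0], [1], [0], 0.0, [0.1], False)
    buf.add([0.1], [1], [1], 1.0, [0.2], False)
    buf.add([0.2], [1], [0], 0.0, [0.3], True)  # oldest experience is evicted
    print(buf.count())            # 2 -- capped at buffer_size
    print(len(buf.get_batch(2)))  # 2 randomly sampled experiences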
| {
"content_hash": "bed6585e5351dbfd28e790119f7de5d7",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 75,
"avg_line_length": 29.88235294117647,
"alnum_prop": 0.625,
"repo_name": "jaesik817/programmable-agents_tensorflow",
"id": "c3520abcc3c1050683acc109ba93ad97dcdbae1e",
"size": "1016",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "replay_buffer.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "67800"
},
{
"name": "Shell",
"bytes": "837"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from api import views
urlpatterns = [
url(r'stations/$', views.get_stations, name='api_stations'),
url(r'entry/(?P<station_id>\d+)/$', views.make_entry, name='api_entry'),
url(r'new/$', views.add_station, name='api_add_station'),
# Booking api
url(r'booking/(?P<resident_id>\d+)/$', views.booking, name='api_booking'),
url(r'book_profile/$', views.book_profile, name='api_book_profile'),
url(r'book_phone/$', views.book_phone, name='api_book_phone'),
url(r'book_code/$', views.book_code, name='api_book_code'),
# Insure api
url(r'insure/$', views.insure, name='api_insure'),
# Drugshare api
url(r'register_pharm/$', views.register_pharm, name='api_register_pharm'),
url(r'make_token/(?P<device_id>\d+)/$',
views.make_token, name='api_make_token'),
url(r'add_device/$', views.add_device, name='api_add_device'),
url(r'get_profile/$', views.get_profile, name='api_get_profile'),
url(r'update_pharm/(?P<device_id>\d+)/$',
views.update_pharm, name='api_update_pharm'),
url(r'add_outlet/(?P<device_id>\d+)/$',
views.add_outlet, name='api_add_outlet'),
url(r'delete_outlet/(?P<id>\d+)/$',
views.delete_outlet, name='api_delete_outlet'),
url(r'add_drug/$', views.add_drug, name='api_add_drug'),
url(r'edit_drug/(?P<id>\d+)/$', views.edit_drug, name='api_edit_drug'),
url(r'search_drug/(?P<device_id>\d+)/$',
views.search_drug, name='api_search_drug'),
url(r'wish_drug/(?P<device_id>\d+)/$',
views.wishlist_drug, name='api_wishlist_drug'),
url(r'stock_drug/(?P<device_id>\d+)/$',
views.stock_drug, name='api_stock_drug'),
url(r'remove_drug/(?P<id>\d+)/$',
views.remove_drug, name='api_remove_drug'),
url(r'recent_drugs/(?P<count>\d+)/$',
views.recent_drugs, name='api_recent_drugs'),
url(r'request_drug/(?P<drug_id>\d+)/$',
views.request_drug, name='api_request_drug'),
url(r'pending/(?P<device_id>\d+)/$',
views.pending_requests, name='api_pending_requests'),
url(r'accept/(?P<request_id>\d+)/$', views.accept, name='api_accept'),
url(r'reject/(?P<request_id>\d+)/$', views.reject, name='api_reject'),
url(r'drug_list/$', views.list_generic_drugs, name='api_drugs_list'),
url(r'feedback/(?P<id>\d+)/$', views.feedback, name='api_feedback'),
]
| {
"content_hash": "7608e400e7857e9d0a0295974b945761",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 78,
"avg_line_length": 49.645833333333336,
"alnum_prop": 0.6093159882501049,
"repo_name": "boyombo/django-stations",
"id": "318aeb5e61ddee206e058b93fe6b240d0db2332a",
"size": "2383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stations/api/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13209"
},
{
"name": "HTML",
"bytes": "182453"
},
{
"name": "JavaScript",
"bytes": "1181"
},
{
"name": "Python",
"bytes": "157368"
}
],
"symlink_target": ""
} |
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.5.1-pre.0
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import swaggyjenkins
from swaggyjenkins.model.extension_class_impllinks import ExtensionClassImpllinks
globals()['ExtensionClassImpllinks'] = ExtensionClassImpllinks
from swaggyjenkins.model.extension_class_impl import ExtensionClassImpl
class TestExtensionClassImpl(unittest.TestCase):
"""ExtensionClassImpl unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testExtensionClassImpl(self):
"""Test ExtensionClassImpl"""
# FIXME: construct object with mandatory attributes with example values
# model = ExtensionClassImpl() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "04da989dc17934e8819f0e95190c2b0f",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 85,
"avg_line_length": 25.18421052631579,
"alnum_prop": 0.7136886102403344,
"repo_name": "cliffano/swaggy-jenkins",
"id": "0c876d88d201c7662992fe3c4cd1884b693403c0",
"size": "957",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "clients/python/generated/test/test_extension_class_impl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ada",
"bytes": "569823"
},
{
"name": "Apex",
"bytes": "741346"
},
{
"name": "Batchfile",
"bytes": "14792"
},
{
"name": "C",
"bytes": "971274"
},
{
"name": "C#",
"bytes": "5131336"
},
{
"name": "C++",
"bytes": "7799032"
},
{
"name": "CMake",
"bytes": "20609"
},
{
"name": "CSS",
"bytes": "4873"
},
{
"name": "Clojure",
"bytes": "129018"
},
{
"name": "Crystal",
"bytes": "864941"
},
{
"name": "Dart",
"bytes": "876777"
},
{
"name": "Dockerfile",
"bytes": "7385"
},
{
"name": "Eiffel",
"bytes": "424642"
},
{
"name": "Elixir",
"bytes": "139252"
},
{
"name": "Elm",
"bytes": "187067"
},
{
"name": "Emacs Lisp",
"bytes": "191"
},
{
"name": "Erlang",
"bytes": "373074"
},
{
"name": "F#",
"bytes": "556012"
},
{
"name": "Gherkin",
"bytes": "951"
},
{
"name": "Go",
"bytes": "345227"
},
{
"name": "Groovy",
"bytes": "89524"
},
{
"name": "HTML",
"bytes": "2367424"
},
{
"name": "Haskell",
"bytes": "680841"
},
{
"name": "Java",
"bytes": "12164874"
},
{
"name": "JavaScript",
"bytes": "1959006"
},
{
"name": "Kotlin",
"bytes": "1280953"
},
{
"name": "Lua",
"bytes": "322316"
},
{
"name": "Makefile",
"bytes": "11882"
},
{
"name": "Nim",
"bytes": "65818"
},
{
"name": "OCaml",
"bytes": "94665"
},
{
"name": "Objective-C",
"bytes": "464903"
},
{
"name": "PHP",
"bytes": "4383673"
},
{
"name": "Perl",
"bytes": "743304"
},
{
"name": "PowerShell",
"bytes": "678274"
},
{
"name": "Python",
"bytes": "5529523"
},
{
"name": "QMake",
"bytes": "6915"
},
{
"name": "R",
"bytes": "840841"
},
{
"name": "Raku",
"bytes": "10945"
},
{
"name": "Ruby",
"bytes": "328360"
},
{
"name": "Rust",
"bytes": "1735375"
},
{
"name": "Scala",
"bytes": "1387368"
},
{
"name": "Shell",
"bytes": "407167"
},
{
"name": "Swift",
"bytes": "342562"
},
{
"name": "TypeScript",
"bytes": "3060093"
}
],
"symlink_target": ""
} |
import os
import logging
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
from mediafire.client import MediaFireClient
APP_ID = '42511'
MEDIAFIRE_EMAIL = os.environ['MEDIAFIRE_EMAIL']
MEDIAFIRE_PASSWORD = os.environ['MEDIAFIRE_PASSWORD']
client = MediaFireClient()
client.login(app_id=APP_ID, email=MEDIAFIRE_EMAIL, password=MEDIAFIRE_PASSWORD)
print("Use 'client' object to interact with MediaFireClient")
| {
"content_hash": "6df68fabc7c7e6fcf256a51effbfdbd2",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 79,
"avg_line_length": 26.8125,
"alnum_prop": 0.7925407925407926,
"repo_name": "MediaFire/mediafire-python-open-sdk",
"id": "5f59b7cf644bb10e0a5cd7a62ebc28f6373a3149",
"size": "449",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/ipython-client-session.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "122947"
}
],
"symlink_target": ""
} |
from .base import ObjectBase
class Onboarding(ObjectBase):
@classmethod
def get_resource_class(cls, client):
from ..resources import Onboarding as OnboardingResource
return OnboardingResource(client)
STATUS_NEEDS_DATA = "needs-data"
STATUS_IN_REVIEW = "in-review" # Waiting for a valid mandate.
STATUS_COMPLETED = "completed"
@property
def resource(self):
return self._get_property("resource")
@property
def name(self):
return self._get_property("name")
@property
def signed_up_at(self):
return self._get_property("signedUpAt")
@property
def status(self):
return self._get_property("status")
@property
def can_receive_payments(self):
return self._get_property("canReceivePayments")
@property
def can_receive_settlements(self):
return self._get_property("canReceiveSettlements")
def is_needs_data(self):
return self.status == self.STATUS_NEEDS_DATA
def is_in_review(self):
return self.status == self.STATUS_IN_REVIEW
def is_completed(self):
return self.status == self.STATUS_COMPLETED
def get_organization(self):
"""Retrieve the related organization."""
url = self._get_link("organization")
if url:
return self.client.organizations.from_url(url)
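# Usage sketch (assumes an authenticated API ``client``; the "me" identifier
# is an assumption about the caller, not part of this class):
#
#     onboarding = client.onboarding.get("me")
#     if onboarding.is_completed() and onboarding.can_receive_payments:
#         organization = onboarding.get_organization()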
| {
"content_hash": "7ec0c80761feee301a76896a65960c22",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 66,
"avg_line_length": 26.307692307692307,
"alnum_prop": 0.6498538011695907,
"repo_name": "mollie/mollie-api-python",
"id": "da9f5646b1f340e81f39bc2e6ee08fa08e67eb9f",
"size": "1368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mollie/api/objects/onboarding.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1357"
},
{
"name": "Python",
"bytes": "231732"
}
],
"symlink_target": ""
} |
import logging
# Import Third-Party
# Import Homebrew
from bitex.formatters.base import Formatter
# Init Logging Facilities
log = logging.getLogger(__name__)
class GdaxFormatter(Formatter):
@staticmethod
def ticker(data, *args, **kwargs):
return (data['bid'], data['ask'], None, None, None, None, data['price'],
                data['volume'], data['time'])
| {
"content_hash": "f6650b5196133be11d120987a74a17fc",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 80,
"avg_line_length": 22.294117647058822,
"alnum_prop": 0.6596306068601583,
"repo_name": "nlsdfnbch/bitex-crawler",
"id": "1f51f5e3f3a3301f0c7ea7fb5cda587dce8f5945",
"size": "398",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bitex/formatters/gdax.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33916"
}
],
"symlink_target": ""
} |
from django.urls import include, re_path
from django.views.i18n import JavaScriptCatalog
urlpatterns = [
re_path(r'^', include('grade_conversion_calculator.urls')),
re_path(
r'^jsi18n/$',
JavaScriptCatalog.as_view(packages=['grade_conversion_calculator']),
name='javascript-catalog')
]
| {
"content_hash": "c1caf6fe646e21aa29577e8306a34ef4",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 76,
"avg_line_length": 29.09090909090909,
"alnum_prop": 0.684375,
"repo_name": "uw-it-aca/grade-conversion-calculator",
"id": "cd10c36f9a5c22012a7adc52616a2d5392e7f7ef",
"size": "320",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "conf/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4898"
},
{
"name": "HTML",
"bytes": "9365"
},
{
"name": "JavaScript",
"bytes": "17088"
},
{
"name": "Python",
"bytes": "3407"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import re
from collections import namedtuple
from ..exceptions import LocationParseError
from ..packages import six
url_attrs = ["scheme", "auth", "host", "port", "path", "query", "fragment"]
# We only want to normalize urls with an HTTP(S) scheme.
# urllib3 infers URLs without a scheme (None) to be http.
NORMALIZABLE_SCHEMES = ("http", "https", None)
# Almost all of these patterns were derived from the
# 'rfc3986' module: https://github.com/python-hyper/rfc3986
PERCENT_RE = re.compile(r"%[a-fA-F0-9]{2}")
SCHEME_RE = re.compile(r"^(?:[a-zA-Z][a-zA-Z0-9+-]*:|/)")
URI_RE = re.compile(
r"^(?:([a-zA-Z][a-zA-Z0-9+.-]*):)?"
r"(?://([^\\/?#]*))?"
r"([^?#]*)"
r"(?:\?([^#]*))?"
r"(?:#(.*))?$",
re.UNICODE | re.DOTALL,
)
IPV4_PAT = r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}"
HEX_PAT = "[0-9A-Fa-f]{1,4}"
LS32_PAT = "(?:{hex}:{hex}|{ipv4})".format(hex=HEX_PAT, ipv4=IPV4_PAT)
_subs = {"hex": HEX_PAT, "ls32": LS32_PAT}
_variations = [
# 6( h16 ":" ) ls32
"(?:%(hex)s:){6}%(ls32)s",
# "::" 5( h16 ":" ) ls32
"::(?:%(hex)s:){5}%(ls32)s",
# [ h16 ] "::" 4( h16 ":" ) ls32
"(?:%(hex)s)?::(?:%(hex)s:){4}%(ls32)s",
# [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
"(?:(?:%(hex)s:)?%(hex)s)?::(?:%(hex)s:){3}%(ls32)s",
# [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
"(?:(?:%(hex)s:){0,2}%(hex)s)?::(?:%(hex)s:){2}%(ls32)s",
# [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32
"(?:(?:%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s",
# [ *4( h16 ":" ) h16 ] "::" ls32
"(?:(?:%(hex)s:){0,4}%(hex)s)?::%(ls32)s",
# [ *5( h16 ":" ) h16 ] "::" h16
"(?:(?:%(hex)s:){0,5}%(hex)s)?::%(hex)s",
# [ *6( h16 ":" ) h16 ] "::"
"(?:(?:%(hex)s:){0,6}%(hex)s)?::",
]
UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~"
IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")"
ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+"
IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]"
REG_NAME_PAT = r"(?:[^\[\]%:/?#]|%[a-fA-F0-9]{2})*"
TARGET_RE = re.compile(r"^(/[^?#]*)(?:\?([^#]*))?(?:#.*)?$")
IPV4_RE = re.compile("^" + IPV4_PAT + "$")
IPV6_RE = re.compile("^" + IPV6_PAT + "$")
IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT + "$")
BRACELESS_IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT[2:-2] + "$")
ZONE_ID_RE = re.compile("(" + ZONE_ID_PAT + r")\]$")
SUBAUTHORITY_PAT = (u"^(?:(.*)@)?(%s|%s|%s)(?::([0-9]{0,5}))?$") % (
REG_NAME_PAT,
IPV4_PAT,
IPV6_ADDRZ_PAT,
)
SUBAUTHORITY_RE = re.compile(SUBAUTHORITY_PAT, re.UNICODE | re.DOTALL)
UNRESERVED_CHARS = set(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._-~"
)
SUB_DELIM_CHARS = set("!$&'()*+,;=")
USERINFO_CHARS = UNRESERVED_CHARS | SUB_DELIM_CHARS | {":"}
PATH_CHARS = USERINFO_CHARS | {"@", "/"}
QUERY_CHARS = FRAGMENT_CHARS = PATH_CHARS | {"?"}
class Url(namedtuple("Url", url_attrs)):
"""
Data structure for representing an HTTP URL. Used as a return value for
:func:`parse_url`. Both the scheme and host are normalized as they are
both case-insensitive according to RFC 3986.
"""
__slots__ = ()
def __new__(
cls,
scheme=None,
auth=None,
host=None,
port=None,
path=None,
query=None,
fragment=None,
):
if path and not path.startswith("/"):
path = "/" + path
if scheme is not None:
scheme = scheme.lower()
return super(Url, cls).__new__(
cls, scheme, auth, host, port, path, query, fragment
)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or "/"
if self.query is not None:
uri += "?" + self.query
return uri
@property
def netloc(self):
"""Network location including host and port"""
if self.port:
return "%s:%d" % (self.host, self.port)
return self.host
@property
def url(self):
"""
Convert self into a url
This function should more or less round-trip with :func:`.parse_url`. The
returned url may not be exactly the same as the url inputted to
:func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
with a blank port will have : removed).
Example: ::
>>> U = parse_url('http://google.com/mail/')
>>> U.url
'http://google.com/mail/'
>>> Url('http', 'username:password', 'host.com', 80,
... '/path', 'query', 'fragment').url
'http://username:password@host.com:80/path?query#fragment'
"""
scheme, auth, host, port, path, query, fragment = self
url = u""
        # We use "is not None" because we want things to happen with empty strings (or 0 port)
if scheme is not None:
url += scheme + u"://"
if auth is not None:
url += auth + u"@"
if host is not None:
url += host
if port is not None:
url += u":" + str(port)
if path is not None:
url += path
if query is not None:
url += u"?" + query
if fragment is not None:
url += u"#" + fragment
return url
def __str__(self):
return self.url
def split_first(s, delims):
"""
.. deprecated:: 1.25
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, "", None
return s[:min_idx], s[min_idx + 1 :], min_delim
def _encode_invalid_chars(component, allowed_chars, encoding="utf-8"):
"""Percent-encodes a URI component without reapplying
onto an already percent-encoded component.
"""
if component is None:
return component
component = six.ensure_text(component)
# Normalize existing percent-encoded bytes.
# Try to see if the component we're encoding is already percent-encoded
# so we can skip all '%' characters but still encode all others.
component, percent_encodings = PERCENT_RE.subn(
lambda match: match.group(0).upper(), component
)
uri_bytes = component.encode("utf-8", "surrogatepass")
is_percent_encoded = percent_encodings == uri_bytes.count(b"%")
encoded_component = bytearray()
for i in range(0, len(uri_bytes)):
# Will return a single character bytestring on both Python 2 & 3
byte = uri_bytes[i : i + 1]
byte_ord = ord(byte)
if (is_percent_encoded and byte == b"%") or (
byte_ord < 128 and byte.decode() in allowed_chars
):
encoded_component += byte
continue
encoded_component.extend(b"%" + (hex(byte_ord)[2:].encode().zfill(2).upper()))
return encoded_component.decode(encoding)
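# For instance, a path containing a space and an existing escape is encoded
# without double-encoding the escape:
#
#     _encode_invalid_chars("/a b/%7e", PATH_CHARS)  # -> "/a%20b/%7E"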
def _remove_path_dot_segments(path):
# See http://tools.ietf.org/html/rfc3986#section-5.2.4 for pseudo-code
segments = path.split("/") # Turn the path into a list of segments
output = [] # Initialize the variable to use to store output
for segment in segments:
# '.' is the current directory, so ignore it, it is superfluous
if segment == ".":
continue
# Anything other than '..', should be appended to the output
elif segment != "..":
output.append(segment)
# In this case segment == '..', if we can, we should pop the last
# element
elif output:
output.pop()
# If the path starts with '/' and the output is empty or the first string
# is non-empty
if path.startswith("/") and (not output or output[0]):
output.insert(0, "")
# If the path starts with '/.' or '/..' ensure we add one more empty
# string to add a trailing '/'
if path.endswith(("/.", "/..")):
output.append("")
return "/".join(output)
def _normalize_host(host, scheme):
if host:
if isinstance(host, six.binary_type):
host = six.ensure_str(host)
if scheme in NORMALIZABLE_SCHEMES:
is_ipv6 = IPV6_ADDRZ_RE.match(host)
if is_ipv6:
match = ZONE_ID_RE.search(host)
if match:
start, end = match.span(1)
zone_id = host[start:end]
if zone_id.startswith("%25") and zone_id != "%25":
zone_id = zone_id[3:]
else:
zone_id = zone_id[1:]
zone_id = "%" + _encode_invalid_chars(zone_id, UNRESERVED_CHARS)
return host[:start].lower() + zone_id + host[end:]
else:
return host.lower()
elif not IPV4_RE.match(host):
return six.ensure_str(
b".".join([_idna_encode(label) for label in host.split(".")])
)
return host
def _idna_encode(name):
if name and any([ord(x) > 128 for x in name]):
try:
import idna
except ImportError:
six.raise_from(
LocationParseError("Unable to parse URL without the 'idna' module"),
None,
)
try:
return idna.encode(name.lower(), strict=True, std3_rules=True)
except idna.IDNAError:
six.raise_from(
LocationParseError(u"Name '%s' is not a valid IDNA label" % name), None
)
return name.lower().encode("ascii")
def _encode_target(target):
"""Percent-encodes a request target so that there are no invalid characters"""
path, query = TARGET_RE.match(target).groups()
target = _encode_invalid_chars(path, PATH_CHARS)
query = _encode_invalid_chars(query, QUERY_CHARS)
if query is not None:
target += "?" + query
return target
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
This parser is RFC 3986 compliant.
The parser logic and helper functions are based heavily on
work done in the ``rfc3986`` module.
:param str url: URL to parse into a :class:`.Url` namedtuple.
Partly backwards-compatible with :mod:`urlparse`.
Example::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
if not url:
# Empty
return Url()
source_url = url
if not SCHEME_RE.search(url):
url = "//" + url
try:
scheme, authority, path, query, fragment = URI_RE.match(url).groups()
normalize_uri = scheme is None or scheme.lower() in NORMALIZABLE_SCHEMES
if scheme:
scheme = scheme.lower()
if authority:
auth, host, port = SUBAUTHORITY_RE.match(authority).groups()
if auth and normalize_uri:
auth = _encode_invalid_chars(auth, USERINFO_CHARS)
if port == "":
port = None
else:
auth, host, port = None, None, None
if port is not None:
port = int(port)
if not (0 <= port <= 65535):
raise LocationParseError(url)
host = _normalize_host(host, scheme)
if normalize_uri and path:
path = _remove_path_dot_segments(path)
path = _encode_invalid_chars(path, PATH_CHARS)
if normalize_uri and query:
query = _encode_invalid_chars(query, QUERY_CHARS)
if normalize_uri and fragment:
fragment = _encode_invalid_chars(fragment, FRAGMENT_CHARS)
except (ValueError, AttributeError):
return six.raise_from(LocationParseError(source_url), None)
# For the sake of backwards compatibility we put empty
# string values for path if there are any defined values
# beyond the path in the URL.
# TODO: Remove this when we break backwards compatibility.
if not path:
if query is not None or fragment is not None:
path = ""
else:
path = None
# Ensure that each part of the URL is a `str` for
# backwards compatibility.
if isinstance(url, six.text_type):
ensure_func = six.ensure_text
else:
ensure_func = six.ensure_str
def ensure_type(x):
return x if x is None else ensure_func(x)
return Url(
scheme=ensure_type(scheme),
auth=ensure_type(auth),
host=ensure_type(host),
port=port,
path=ensure_type(path),
query=ensure_type(query),
fragment=ensure_type(fragment),
)
def get_host(url):
"""
Deprecated. Use :func:`parse_url` instead.
"""
p = parse_url(url)
return p.scheme or "http", p.hostname, p.port
| {
"content_hash": "aa0f32a3b0875a42bfddfa041478151d",
"timestamp": "",
"source": "github",
"line_count": 430,
"max_line_length": 88,
"avg_line_length": 32.47441860465116,
"alnum_prop": 0.5443282727012317,
"repo_name": "javier-ruiz-b/docker-rasppi-images",
"id": "6ff238fe3cbd04f5a63862e8b6729e155e03360c",
"size": "13964",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "raspberry-google-home/env/lib/python3.7/site-packages/urllib3/util/url.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "15254"
},
{
"name": "PHP",
"bytes": "1132"
},
{
"name": "Shell",
"bytes": "17522"
}
],
"symlink_target": ""
} |
import os
import socks
import socket
import stem.process
import stem.control
# Version check for STEM >= 1.3
assert(int(stem.__version__[0]) > 1 or
(int(stem.__version__[0]) == 1 and int(stem.__version__[2]) >= 3))
SOCKS_PORT = 33419
CONTROL_PORT = 33418
class Tor:
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(Tor, cls).__new__(cls, *args, **kwargs)
return cls._instance
def __init__(self, config=None):
self.config = config
self.tor_process = None
self.original_socket = None
self.original_getaddrinfo = None
def start_tor(self):
print "Starting Tor"
self.tor_process = stem.process.launch_tor_with_config(
config = {
'SocksPort': str(SOCKS_PORT),
'ControlPort': str(CONTROL_PORT),
},
init_msg_handler = self._print_bootstrap_lines,
)
def _print_bootstrap_lines(self, line):
print '%s' % line
pass
    def _create_path(self, name):
        if self.config:
            return os.path.join(self.config.workdir, "tor", name)
        else:
            return name
def stop_tor(self):
print "Stopping Tor"
self.tor_process.kill()
def create_hidden_service(self, name, port, target):
controller = stem.control.Controller.from_port(port=CONTROL_PORT)
controller.authenticate()
path = self._create_path(name)
return controller.create_hidden_service(path, port, target)
def destroy_hidden_service(self, name):
controller = stem.control.Controller.from_port(port=CONTROL_PORT)
controller.authenticate()
        path = self._create_path(name)
        controller.remove_hidden_service(path)
def get_hidden_service_conf(self):
controller = stem.control.Controller.from_port(port=CONTROL_PORT)
controller.authenticate()
return controller.get_hidden_service_conf()
def start_proxying_through_tor(self):
if self.original_socket:
print "We're (probably) already proxying through Tor"
return
self.original_socket = socket.socket
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, '127.0.0.1', SOCKS_PORT)
socket.socket = socks.socksocket
def stop_proxying_through_tor(self):
if self.original_socket:
socket.socket = self.original_socket
self.original_socket = None
def start_proxying_dns_through_tor(self):
if self.original_getaddrinfo:
print "We're already proxying DNS throug Tor"
return
self.original_getaddrinfo = socket.getaddrinfo
def getaddrinfo(*args):
return [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
socket.getaddrinfo = getaddrinfo
def stop_proxying_dns_through_tor(self):
if self.original_getaddrinfo:
socket.getaddrinfo = self.original_getaddrinfo
self.original_getaddrinfo = None
if __name__ == "__main__":
import urllib
url = "http://wtfismyip.com/text"
print "BEWARE: This test script will probably leak your actual IP to wtfismyip.com."
print " But you shouldn't have run a test script that could fail if you didn't want this to happen."
print "Your IP (not torified): %s" % (urllib.urlopen(url).read().strip())
t = Tor()
t.start_tor()
t.start_proxying_through_tor()
print "Your IP (torified): %s" % (urllib.urlopen(url).read().strip())
t.stop_proxying_through_tor()
print "Creating hidden service..."
t.create_hidden_service("test", 80, "localhost:3000")
print t.get_hidden_service_conf()
input("Hit enter to disable service")
t.destroy_hidden_service("meteor")
t.stop_tor()
| {
"content_hash": "60298dbdd45b525471112e02a6f35590",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 111,
"avg_line_length": 30.20472440944882,
"alnum_prop": 0.6173096976016684,
"repo_name": "jparyani/Mailpile",
"id": "5b9061f984a3940d9b0ba2d8b78d0c44380ceffc",
"size": "3836",
"binary": false,
"copies": "1",
"ref": "refs/heads/sandstorm",
"path": "mailpile/crypto/tor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "130573"
},
{
"name": "Cap'n Proto",
"bytes": "1254"
},
{
"name": "JavaScript",
"bytes": "577920"
},
{
"name": "Makefile",
"bytes": "6768"
},
{
"name": "Python",
"bytes": "1310077"
},
{
"name": "Shell",
"bytes": "19360"
}
],
"symlink_target": ""
} |
"""SocksiPy - Python SOCKS module.
Version 1.00
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
"""
"""
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
for use in PyLoris (http://pyloris.sourceforge.net/)
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge
"""
import base64
import socket
import struct
import sys
if getattr(socket, 'socket', None) is None:
raise ImportError('socket.socket missing, proxy support unusable')
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3
PROXY_TYPE_HTTP_NO_TUNNEL = 4
_defaultproxy = None
_orgsocket = socket.socket
class ProxyError(Exception): pass
class GeneralProxyError(ProxyError): pass
class Socks5AuthError(ProxyError): pass
class Socks5Error(ProxyError): pass
class Socks4Error(ProxyError): pass
class HTTPError(ProxyError): pass
_generalerrors = ("success",
"invalid data",
"not connected",
"not available",
"bad proxy type",
"bad input")
_socks5errors = ("succeeded",
"general SOCKS server failure",
"connection not allowed by ruleset",
"Network unreachable",
"Host unreachable",
"Connection refused",
"TTL expired",
"Command not supported",
"Address type not supported",
"Unknown error")
_socks5autherrors = ("succeeded",
"authentication is required",
"all offered authentication methods were rejected",
"unknown username or invalid password",
"unknown error")
_socks4errors = ("request granted",
"request rejected or failed",
"request rejected because SOCKS server cannot connect to identd on the client",
"request rejected because the client program and identd report different user-ids",
"unknown error")
def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
"""setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets a default proxy which all further socksocket objects will use,
unless explicitly changed.
"""
global _defaultproxy
_defaultproxy = (proxytype, addr, port, rdns, username, password)
def wrapmodule(module):
"""wrapmodule(module)
Attempts to replace a module's socket library with a SOCKS socket. Must set
a default proxy using setdefaultproxy(...) first.
This will only work on modules that import socket directly into the namespace;
most of the Python Standard Library falls into this category.
"""
if _defaultproxy != None:
module.socket.socket = socksocket
else:
raise GeneralProxyError((4, "no proxy specified"))
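# Illustrative module-wrapping sketch (assumes a local SOCKS5 proxy on
# 127.0.0.1:1080; host and port are placeholders):
#
#   import socks, urllib2
#   socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 1080)
#   socks.wrapmodule(urllib2)
#   urllib2.urlopen("http://example.com/")  # now tunneled through the proxy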
class socksocket(socket.socket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
"""
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None):
_orgsocket.__init__(self, family, type, proto, _sock)
if _defaultproxy != None:
self.__proxy = _defaultproxy
else:
self.__proxy = (None, None, None, None, None, None)
self.__proxysockname = None
self.__proxypeername = None
self.__httptunnel = True
def __recvall(self, count):
"""__recvall(count) -> data
Receive EXACTLY the number of bytes requested from the socket.
Blocks until the required number of bytes have been received.
"""
data = self.recv(count)
while len(data) < count:
d = self.recv(count-len(data))
if not d: raise GeneralProxyError((0, "connection closed unexpectedly"))
data = data + d
return data
def sendall(self, content, *args):
""" override socket.socket.sendall method to rewrite the header
for non-tunneling proxies if needed
"""
if not self.__httptunnel:
content = self.__rewriteproxy(content)
return super(socksocket, self).sendall(content, *args)
def __rewriteproxy(self, header):
""" rewrite HTTP request headers to support non-tunneling proxies
(i.e. those which do not support the CONNECT method).
This only works for HTTP (not HTTPS) since HTTPS requires tunneling.
"""
host, endpt = None, None
hdrs = header.split("\r\n")
for hdr in hdrs:
if hdr.lower().startswith("host:"):
host = hdr
elif hdr.lower().startswith("get") or hdr.lower().startswith("post"):
endpt = hdr
if host and endpt:
hdrs.remove(host)
hdrs.remove(endpt)
host = host.split(" ")[1]
endpt = endpt.split(" ")
if (self.__proxy[4] != None and self.__proxy[5] != None):
hdrs.insert(0, self.__getauthheader())
hdrs.insert(0, "Host: %s" % host)
hdrs.insert(0, "%s http://%s%s %s" % (endpt[0], host, endpt[1], endpt[2]))
return "\r\n".join(hdrs)
def __getauthheader(self):
auth = self.__proxy[4] + ":" + self.__proxy[5]
return "Proxy-Authorization: Basic " + base64.b64encode(auth)
def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
"""setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets the proxy to be used.
proxytype - The type of the proxy to be used. Three types
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
rdns - Should DNS queries be performed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided.
"""
self.__proxy = (proxytype, addr, port, rdns, username, password)
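# Minimal per-socket usage sketch (assumes a SOCKS5 server at
# 127.0.0.1:1080; addresses are placeholders):
#
#   s = socksocket()
#   s.setproxy(PROXY_TYPE_SOCKS5, "127.0.0.1", 1080, rdns=True)
#   s.connect(("example.com", 80))  # DNS resolved by the proxy (rdns=True)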
def __negotiatesocks5(self, destaddr, destport):
"""__negotiatesocks5(self,destaddr,destport)
Negotiates a connection through a SOCKS5 server.
"""
# First we'll send the authentication packages we support.
if (self.__proxy[4]!=None) and (self.__proxy[5]!=None):
# The username/password details were supplied to the
# setproxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
self.sendall(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02))
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
self.sendall(struct.pack('BBB', 0x05, 0x01, 0x00))
# We'll receive the server's response to determine which
# method was selected
chosenauth = self.__recvall(2)
if chosenauth[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
# Check the chosen authentication method
if chosenauth[1:2] == chr(0x00).encode():
# No authentication is required
pass
elif chosenauth[1:2] == chr(0x02).encode():
# Okay, we need to perform a basic username/password
# authentication.
self.sendall(chr(0x01).encode() + chr(len(self.__proxy[4])) + self.__proxy[4] + chr(len(self.__proxy[5])) + self.__proxy[5])
authstat = self.__recvall(2)
if authstat[0:1] != chr(0x01).encode():
# Bad response
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if authstat[1:2] != chr(0x00).encode():
# Authentication failed
self.close()
raise Socks5AuthError((3, _socks5autherrors[3]))
# Authentication succeeded
else:
# Reaching here is always bad
self.close()
if chosenauth[1:2] == chr(0xFF).encode():
raise Socks5AuthError((2, _socks5autherrors[2]))
else:
raise GeneralProxyError((1, _generalerrors[1]))
# Now we can request the actual connection
req = struct.pack('BBB', 0x05, 0x01, 0x00)
# If the given destination address is an IP address, we'll
# use the IPv4 address request even if remote resolving was specified.
try:
ipaddr = socket.inet_aton(destaddr)
req = req + chr(0x01).encode() + ipaddr
except socket.error:
# Well it's not an IP number, so it's probably a DNS name.
if self.__proxy[3]:
# Resolve remotely
ipaddr = None
req = req + chr(0x03).encode() + chr(len(destaddr)).encode() + destaddr
else:
# Resolve locally
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
req = req + chr(0x01).encode() + ipaddr
req = req + struct.pack(">H", destport)
self.sendall(req)
# Get the response
resp = self.__recvall(4)
if resp[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
elif resp[1:2] != chr(0x00).encode():
# Connection failed
self.close()
if ord(resp[1:2])<=8:
raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])]))
else:
raise Socks5Error((9, _socks5errors[9]))
# Get the bound address/port
elif resp[3:4] == chr(0x01).encode():
boundaddr = self.__recvall(4)
elif resp[3:4] == chr(0x03).encode():
resp = resp + self.recv(1)
boundaddr = self.__recvall(ord(resp[4:5]))
else:
self.close()
raise GeneralProxyError((1,_generalerrors[1]))
boundport = struct.unpack(">H", self.__recvall(2))[0]
self.__proxysockname = (boundaddr, boundport)
if ipaddr != None:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
def getproxysockname(self):
"""getsockname() -> address info
Returns the bound IP address and port number at the proxy.
"""
return self.__proxysockname
def getproxypeername(self):
"""getproxypeername() -> address info
Returns the IP and port number of the proxy.
"""
return _orgsocket.getpeername(self)
def getpeername(self):
"""getpeername() -> address info
Returns the IP address and port number of the destination
machine (note: getproxypeername returns the proxy)
"""
return self.__proxypeername
def __negotiatesocks4(self,destaddr,destport):
"""__negotiatesocks4(self,destaddr,destport)
Negotiates a connection through a SOCKS4 server.
"""
# Check if the destination address provided is an IP address
rmtrslv = False
try:
ipaddr = socket.inet_aton(destaddr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if self.__proxy[3]:
ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
rmtrslv = True
else:
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
# Construct the request packet
req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
# The username parameter is considered userid for SOCKS4
if self.__proxy[4] != None:
req = req + self.__proxy[4]
req = req + chr(0x00).encode()
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if rmtrslv:
req = req + destaddr + chr(0x00).encode()
self.sendall(req)
# Get the response from the server
resp = self.__recvall(8)
if resp[0:1] != chr(0x00).encode():
# Bad data
self.close()
raise GeneralProxyError((1,_generalerrors[1]))
if resp[1:2] != chr(0x5A).encode():
# Server returned an error
self.close()
if ord(resp[1:2]) in (91, 92, 93):
raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
else:
raise Socks4Error((94, _socks4errors[4]))
# Get the bound address/port
self.__proxysockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
if not rmtrslv:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
def __negotiatehttp(self, destaddr, destport):
"""__negotiatehttp(self,destaddr,destport)
Negotiates a connection through an HTTP server.
"""
# If we need to resolve locally, we do this now
if not self.__proxy[3]:
addr = socket.gethostbyname(destaddr)
else:
addr = destaddr
headers = ["CONNECT ", addr, ":", str(destport), " HTTP/1.1\r\n"]
headers += ["Host: ", destaddr, "\r\n"]
if (self.__proxy[4] != None and self.__proxy[5] != None):
headers += [self.__getauthheader(), "\r\n"]
headers.append("\r\n")
self.sendall("".join(headers).encode())
# We read the response until we get the string "\r\n\r\n"
resp = self.recv(1)
while resp.find("\r\n\r\n".encode()) == -1:
resp = resp + self.recv(1)
# We just need the first line to check if the connection
# was successful
statusline = resp.splitlines()[0].split(" ".encode(), 2)
if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
try:
statuscode = int(statusline[1])
except ValueError:
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if statuscode != 200:
self.close()
raise HTTPError((statuscode, statusline[2]))
self.__proxysockname = ("0.0.0.0", 0)
self.__proxypeername = (addr, destport)
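# For a hypothetical destination ("example.com", 443) with remote resolving
# (rdns) enabled, the request sent above is roughly:
#   CONNECT example.com:443 HTTP/1.1
#   Host: example.com
#   (blank line)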
def connect(self, destpair):
"""connect(self, despair)
Connects to the specified destination through a proxy.
destpar - A tuple of the IP/DNS address and the port number.
(identical to socket's connect).
To select the proxy server use setproxy().
"""
# Do a minimal input check first
if (not type(destpair) in (list,tuple)) or (len(destpair) < 2) or (not isinstance(destpair[0], basestring)) or (type(destpair[1]) != int):
raise GeneralProxyError((5, _generalerrors[5]))
if self.__proxy[0] == PROXY_TYPE_SOCKS5:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self, (self.__proxy[1], portnum))
self.__negotiatesocks5(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self,(self.__proxy[1], portnum))
self.__negotiatesocks4(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_HTTP:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 8080
_orgsocket.connect(self,(self.__proxy[1], portnum))
self.__negotiatehttp(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_HTTP_NO_TUNNEL:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 8080
_orgsocket.connect(self,(self.__proxy[1],portnum))
if destpair[1] == 443:
self.__negotiatehttp(destpair[0],destpair[1])
else:
self.__httptunnel = False
elif self.__proxy[0] == None:
_orgsocket.connect(self, (destpair[0], destpair[1]))
else:
raise GeneralProxyError((4, _generalerrors[4]))
| {
"content_hash": "8bbc67d393dc1a4036a82f3a8f5dc764",
"timestamp": "",
"source": "github",
"line_count": 438,
"max_line_length": 146,
"avg_line_length": 43.14383561643836,
"alnum_prop": 0.5874477430279939,
"repo_name": "gquirozbogner/contentbox-master",
"id": "7d6fdc999ea58bdb8b2542131766972f2aec521b",
"size": "18897",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "third_party/httplib2/socks.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "420520"
},
{
"name": "HTML",
"bytes": "54100"
},
{
"name": "JavaScript",
"bytes": "1778"
},
{
"name": "Python",
"bytes": "72821"
},
{
"name": "Ruby",
"bytes": "413"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import unicode_literals
import io
import os
import unittest
import tempfile
try:
from unittest import mock
except ImportError:
import mock
from fs.archive import _utils
class TestUtils(unittest.TestCase):
@unittest.skipUnless(os.name == 'posix', 'POSIX platform needed')
def test_writable_path(self):
self.assertFalse(_utils.writable_path('/'))
self.assertFalse(_utils.writable_path('/root_location'))
self.assertTrue(_utils.writable_path(__file__))
def test_writable_stream(self):
with tempfile.NamedTemporaryFile(mode='wb+') as tmp:
self.assertTrue(_utils.writable_stream(tmp))
with open(tmp.name, 'rb') as tmp2:
self.assertFalse(_utils.writable_stream(tmp2))
buff = io.BytesIO()
self.assertTrue(_utils.writable_stream(buff))
buff = io.BufferedReader(buff)
self.assertFalse(_utils.writable_stream(buff))
buff = mock.MagicMock()
buff.write = mock.MagicMock(side_effect=IOError("not writable"))
self.assertFalse(_utils.writable_stream(buff))
def test_import_from_names(self):
imp = _utils.import_from_names
self.assertIs(imp('os'), os)
self.assertIs(imp('akjhkjhsk', 'os'), os)
self.assertIs(imp('akeskjhk'), None)
def test_unique(self):
self.assertEqual(
list(_utils.unique(iter('aaabbbccdef'))),
list('abcdef')
)
self.assertEqual(
list(_utils.unique(['a', 'aa', 'bb', 'ccc', 'ddd'], key=len)),
['a', 'aa', 'ccc',]
)
def test_universal_container(self):
c = _utils.UniversalContainer()
self.assertIn(1, c)
self.assertIn(None, c)
| {
"content_hash": "d1077e4130ac7e00d24231611ab0ce36",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 74,
"avg_line_length": 30.372881355932204,
"alnum_prop": 0.6166294642857143,
"repo_name": "althonos/fs.archive",
"id": "2e1774302b70917a57f5ee920e21f8b76b4ac707",
"size": "1808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "125809"
}
],
"symlink_target": ""
} |
"""Functions to construct sparse matrices
"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['spdiags', 'eye', 'identity', 'kron', 'kronsum',
'hstack', 'vstack', 'bmat', 'rand', 'random', 'diags', 'block_diag']
import numpy as np
from scipy._lib.six import xrange
from .sputils import upcast, get_index_dtype
from .csr import csr_matrix
from .csc import csc_matrix
from .bsr import bsr_matrix
from .coo import coo_matrix
from .dia import dia_matrix
from .base import issparse
def spdiags(data, diags, m, n, format=None):
"""
Return a sparse matrix from diagonals.
Parameters
----------
data : array_like
matrix diagonals stored row-wise
diags : diagonals to set
- k = 0 the main diagonal
- k > 0 the k-th upper diagonal
- k < 0 the k-th lower diagonal
m, n : int
shape of the result
format : str, optional
Format of the result. By default (format=None) an appropriate sparse
matrix format is returned. This choice is subject to change.
See Also
--------
diags : more convenient form of this function
dia_matrix : the sparse DIAgonal format.
Examples
--------
>>> from scipy.sparse import spdiags
>>> data = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
>>> diags = np.array([0, -1, 2])
>>> spdiags(data, diags, 4, 4).toarray()
array([[1, 0, 3, 0],
[1, 2, 0, 4],
[0, 2, 3, 0],
[0, 0, 3, 4]])
"""
return dia_matrix((data, diags), shape=(m,n)).asformat(format)
def diags(diagonals, offsets=0, shape=None, format=None, dtype=None):
"""
Construct a sparse matrix from diagonals.
Parameters
----------
diagonals : sequence of array_like
Sequence of arrays containing the matrix diagonals,
corresponding to `offsets`.
offsets : sequence of int or an int, optional
Diagonals to set:
- k = 0 the main diagonal (default)
- k > 0 the k-th upper diagonal
- k < 0 the k-th lower diagonal
shape : tuple of int, optional
Shape of the result. If omitted, a square matrix large enough
to contain the diagonals is returned.
format : {"dia", "csr", "csc", "lil", ...}, optional
Matrix format of the result. By default (format=None) an
appropriate sparse matrix format is returned. This choice is
subject to change.
dtype : dtype, optional
Data type of the matrix.
See Also
--------
spdiags : construct matrix from diagonals
Notes
-----
This function differs from `spdiags` in the way it handles
off-diagonals.
The result from `diags` is the sparse equivalent of::
np.diag(diagonals[0], offsets[0])
+ ...
+ np.diag(diagonals[k], offsets[k])
Repeated diagonal offsets are disallowed.
.. versionadded:: 0.11
Examples
--------
>>> from scipy.sparse import diags
>>> diagonals = [[1, 2, 3, 4], [1, 2, 3], [1, 2]]
>>> diags(diagonals, [0, -1, 2]).toarray()
array([[1, 0, 1, 0],
[1, 2, 0, 2],
[0, 2, 3, 0],
[0, 0, 3, 4]])
Broadcasting of scalars is supported (but shape needs to be
specified):
>>> diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)).toarray()
array([[-2., 1., 0., 0.],
[ 1., -2., 1., 0.],
[ 0., 1., -2., 1.],
[ 0., 0., 1., -2.]])
If only one diagonal is wanted (as in `numpy.diag`), the following
works as well:
>>> diags([1, 2, 3], 1).toarray()
array([[ 0., 1., 0., 0.],
[ 0., 0., 2., 0.],
[ 0., 0., 0., 3.],
[ 0., 0., 0., 0.]])
"""
# if offsets is not a sequence, assume that there's only one diagonal
try:
iter(offsets)
except TypeError:
# now check that there's actually only one diagonal
try:
iter(diagonals[0])
except TypeError:
diagonals = [np.atleast_1d(diagonals)]
else:
raise ValueError("Different number of diagonals and offsets.")
else:
diagonals = list(map(np.atleast_1d, diagonals))
offsets = np.atleast_1d(offsets)
# Basic check
if len(diagonals) != len(offsets):
raise ValueError("Different number of diagonals and offsets.")
# Determine shape, if omitted
if shape is None:
m = len(diagonals[0]) + abs(int(offsets[0]))
shape = (m, m)
# Determine data type, if omitted
if dtype is None:
dtype = np.common_type(*diagonals)
# Construct data array
m, n = shape
M = max([min(m + offset, n - offset) + max(0, offset)
for offset in offsets])
M = max(0, M)
data_arr = np.zeros((len(offsets), M), dtype=dtype)
K = min(m, n)
for j, diagonal in enumerate(diagonals):
offset = offsets[j]
k = max(0, offset)
length = min(m + offset, n - offset, K)
if length <= 0:
raise ValueError("Offset %d (index %d) out of bounds" % (offset, j))
diagonal = np.asarray(diagonal)
if diagonal.ndim == 0:
diagonal = diagonal[None]
try:
data_arr[j, k:k+length] = diagonal[...,:length]
except ValueError:
if len(diagonal) != length and len(diagonal) != 1:
raise ValueError(
"Diagonal length (index %d: %d at offset %d) does not "
"agree with matrix size (%d, %d)." % (
j, len(diagonal), offset, m, n))
raise
return dia_matrix((data_arr, offsets), shape=(m, n)).asformat(format)
def identity(n, dtype='d', format=None):
"""Identity matrix in sparse format
Returns an identity matrix with shape (n,n) using a given
sparse format and dtype.
Parameters
----------
n : int
Shape of the identity matrix.
dtype : dtype, optional
Data type of the matrix
format : str, optional
Sparse format of the result, e.g. format="csr", etc.
Examples
--------
>>> from scipy.sparse import identity
>>> identity(3).toarray()
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> identity(3, dtype='int8', format='dia')
<3x3 sparse matrix of type '<type 'numpy.int8'>'
with 3 stored elements (1 diagonals) in DIAgonal format>
"""
return eye(n, n, dtype=dtype, format=format)
def eye(m, n=None, k=0, dtype=float, format=None):
"""Sparse matrix with ones on diagonal
Returns a sparse (m x n) matrix where the k-th diagonal
is all ones and everything else is zeros.
Parameters
----------
m : int
Number of rows in the matrix.
n : int, optional
Number of columns. Default: `m`.
k : int, optional
Diagonal to place ones on. Default: 0 (main diagonal).
dtype : dtype, optional
Data type of the matrix.
format : str, optional
Sparse format of the result, e.g. format="csr", etc.
Examples
--------
>>> from scipy import sparse
>>> sparse.eye(3).toarray()
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> sparse.eye(3, dtype=np.int8)
<3x3 sparse matrix of type '<type 'numpy.int8'>'
with 3 stored elements (1 diagonals) in DIAgonal format>
"""
if n is None:
n = m
m,n = int(m),int(n)
if m == n and k == 0:
# fast branch for special formats
if format in ['csr', 'csc']:
idx_dtype = get_index_dtype(maxval=n)
indptr = np.arange(n+1, dtype=idx_dtype)
indices = np.arange(n, dtype=idx_dtype)
data = np.ones(n, dtype=dtype)
cls = {'csr': csr_matrix, 'csc': csc_matrix}[format]
return cls((data,indices,indptr),(n,n))
elif format == 'coo':
idx_dtype = get_index_dtype(maxval=n)
row = np.arange(n, dtype=idx_dtype)
col = np.arange(n, dtype=idx_dtype)
data = np.ones(n, dtype=dtype)
return coo_matrix((data,(row,col)),(n,n))
diags = np.ones((1, max(0, min(m + k, n))), dtype=dtype)
return spdiags(diags, k, m, n).asformat(format)
def kron(A, B, format=None):
"""kronecker product of sparse matrices A and B
Parameters
----------
A : sparse or dense matrix
first matrix of the product
B : sparse or dense matrix
second matrix of the product
format : str, optional
format of the result (e.g. "csr")
Returns
-------
kronecker product in a sparse matrix format
Examples
--------
>>> from scipy import sparse
>>> A = sparse.csr_matrix(np.array([[0, 2], [5, 0]]))
>>> B = sparse.csr_matrix(np.array([[1, 2], [3, 4]]))
>>> sparse.kron(A, B).toarray()
array([[ 0, 0, 2, 4],
[ 0, 0, 6, 8],
[ 5, 10, 0, 0],
[15, 20, 0, 0]])
>>> sparse.kron(A, [[1, 2], [3, 4]]).toarray()
array([[ 0, 0, 2, 4],
[ 0, 0, 6, 8],
[ 5, 10, 0, 0],
[15, 20, 0, 0]])
"""
B = coo_matrix(B)
if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]:
# B is fairly dense, use BSR
A = csr_matrix(A,copy=True)
output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
if A.nnz == 0 or B.nnz == 0:
# kronecker product is the zero matrix
return coo_matrix(output_shape)
B = B.toarray()
data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1])
data = data * B
return bsr_matrix((data,A.indices,A.indptr), shape=output_shape)
else:
# use COO
A = coo_matrix(A)
output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
if A.nnz == 0 or B.nnz == 0:
# kronecker product is the zero matrix
return coo_matrix(output_shape)
# expand entries of a into blocks
row = A.row.repeat(B.nnz)
col = A.col.repeat(B.nnz)
data = A.data.repeat(B.nnz)
row *= B.shape[0]
col *= B.shape[1]
# increment block indices
row,col = row.reshape(-1,B.nnz),col.reshape(-1,B.nnz)
row += B.row
col += B.col
row,col = row.reshape(-1),col.reshape(-1)
# compute block entries
data = data.reshape(-1,B.nnz) * B.data
data = data.reshape(-1)
return coo_matrix((data,(row,col)), shape=output_shape).asformat(format)
def kronsum(A, B, format=None):
"""kronecker sum of sparse matrices A and B
Kronecker sum of two sparse matrices is a sum of two Kronecker
products kron(I_n,A) + kron(B,I_m) where A has shape (m,m)
and B has shape (n,n) and I_m and I_n are identity matrices
of shape (m,m) and (n,n) respectively.
Parameters
----------
A
square matrix
B
square matrix
format : str
format of the result (e.g. "csr")
Returns
-------
kronecker sum in a sparse matrix format
Examples
--------
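A small example, following the definition above:
>>> from scipy import sparse
>>> A = sparse.csr_matrix([[0, 2], [5, 0]])
>>> B = sparse.csr_matrix([[1, 2], [3, 4]])
>>> sparse.kronsum(A, B).toarray()
array([[1, 2, 2, 0],
[5, 1, 0, 2],
[3, 0, 4, 2],
[0, 3, 5, 4]])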
"""
A = coo_matrix(A)
B = coo_matrix(B)
if A.shape[0] != A.shape[1]:
raise ValueError('A is not square')
if B.shape[0] != B.shape[1]:
raise ValueError('B is not square')
dtype = upcast(A.dtype, B.dtype)
L = kron(eye(B.shape[0],dtype=dtype), A, format=format)
R = kron(B, eye(A.shape[0],dtype=dtype), format=format)
return (L+R).asformat(format) # since L + R is not always same format
def _compressed_sparse_stack(blocks, axis):
"""
Stacking fast path for CSR/CSC matrices
(i) vstack for CSR, (ii) hstack for CSC.
"""
other_axis = 1 if axis == 0 else 0
data = np.concatenate([b.data for b in blocks])
indices = np.concatenate([b.indices for b in blocks])
indptr = []
last_indptr = 0
constant_dim = blocks[0].shape[other_axis]
sum_dim = 0
for b in blocks:
if b.shape[other_axis] != constant_dim:
raise ValueError('incompatible dimensions for axis %d' % other_axis)
sum_dim += b.shape[axis]
indptr.append(b.indptr[:-1] + last_indptr)
last_indptr += b.indptr[-1]
indptr.append([last_indptr])
indptr = np.concatenate(indptr)
if axis == 0:
return csr_matrix((data, indices, indptr),
shape=(sum_dim, constant_dim))
else:
return csc_matrix((data, indices, indptr),
shape=(constant_dim, sum_dim))
def hstack(blocks, format=None, dtype=None):
"""
Stack sparse matrices horizontally (column wise)
Parameters
----------
blocks
sequence of sparse matrices with compatible shapes
format : str
sparse format of the result (e.g. "csr")
by default an appropriate sparse matrix format is returned.
This choice is subject to change.
dtype : dtype, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of `blocks`.
See Also
--------
vstack : stack sparse matrices vertically (row wise)
Examples
--------
>>> from scipy.sparse import coo_matrix, hstack
>>> A = coo_matrix([[1, 2], [3, 4]])
>>> B = coo_matrix([[5], [6]])
>>> hstack([A,B]).toarray()
array([[1, 2, 5],
[3, 4, 6]])
"""
return bmat([blocks], format=format, dtype=dtype)
def vstack(blocks, format=None, dtype=None):
"""
Stack sparse matrices vertically (row wise)
Parameters
----------
blocks
sequence of sparse matrices with compatible shapes
format : str, optional
sparse format of the result (e.g. "csr")
by default an appropriate sparse matrix format is returned.
This choice is subject to change.
dtype : dtype, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of `blocks`.
See Also
--------
hstack : stack sparse matrices horizontally (column wise)
Examples
--------
>>> from scipy.sparse import coo_matrix, vstack
>>> A = coo_matrix([[1, 2], [3, 4]])
>>> B = coo_matrix([[5, 6]])
>>> vstack([A, B]).toarray()
array([[1, 2],
[3, 4],
[5, 6]])
"""
return bmat([[b] for b in blocks], format=format, dtype=dtype)
def bmat(blocks, format=None, dtype=None):
"""
Build a sparse matrix from sparse sub-blocks
Parameters
----------
blocks : array_like
Grid of sparse matrices with compatible shapes.
An entry of None implies an all-zero matrix.
format : {'bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}, optional
The sparse format of the result (e.g. "csr"). By default an
appropriate sparse matrix format is returned.
This choice is subject to change.
dtype : dtype, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of `blocks`.
Returns
-------
bmat : sparse matrix
See Also
--------
block_diag, diags
Examples
--------
>>> from scipy.sparse import coo_matrix, bmat
>>> A = coo_matrix([[1, 2], [3, 4]])
>>> B = coo_matrix([[5], [6]])
>>> C = coo_matrix([[7]])
>>> bmat([[A, B], [None, C]]).toarray()
array([[1, 2, 5],
[3, 4, 6],
[0, 0, 7]])
>>> bmat([[A, None], [None, C]]).toarray()
array([[1, 2, 0],
[3, 4, 0],
[0, 0, 7]])
"""
blocks = np.asarray(blocks, dtype='object')
if blocks.ndim != 2:
raise ValueError('blocks must be 2-D')
M,N = blocks.shape
# check for fast path cases
if (N == 1 and format in (None, 'csr') and all(isinstance(b, csr_matrix)
for b in blocks.flat)):
A = _compressed_sparse_stack(blocks[:,0], 0)
if dtype is not None:
A = A.astype(dtype)
return A
elif (M == 1 and format in (None, 'csc')
and all(isinstance(b, csc_matrix) for b in blocks.flat)):
A = _compressed_sparse_stack(blocks[0,:], 1)
if dtype is not None:
A = A.astype(dtype)
return A
block_mask = np.zeros(blocks.shape, dtype=bool)
brow_lengths = np.zeros(M, dtype=np.int64)
bcol_lengths = np.zeros(N, dtype=np.int64)
# convert everything to COO format
for i in range(M):
for j in range(N):
if blocks[i,j] is not None:
A = coo_matrix(blocks[i,j])
blocks[i,j] = A
block_mask[i,j] = True
if brow_lengths[i] == 0:
brow_lengths[i] = A.shape[0]
else:
if brow_lengths[i] != A.shape[0]:
raise ValueError('blocks[%d,:] has incompatible row dimensions' % i)
if bcol_lengths[j] == 0:
bcol_lengths[j] = A.shape[1]
else:
if bcol_lengths[j] != A.shape[1]:
raise ValueError('blocks[:,%d] has incompatible column dimensions' % j)
# ensure that at least one value in each row and col is not None
if brow_lengths.min() == 0:
raise ValueError('blocks[%d,:] is all None' % brow_lengths.argmin())
if bcol_lengths.min() == 0:
raise ValueError('blocks[:,%d] is all None' % bcol_lengths.argmin())
nnz = sum([block.nnz for block in blocks[block_mask]])
if dtype is None:
dtype = upcast(*tuple([blk.dtype for blk in blocks[block_mask]]))
row_offsets = np.concatenate(([0], np.cumsum(brow_lengths)))
col_offsets = np.concatenate(([0], np.cumsum(bcol_lengths)))
shape = (np.sum(brow_lengths), np.sum(bcol_lengths))
data = np.empty(nnz, dtype=dtype)
idx_dtype = get_index_dtype(maxval=max(shape))
row = np.empty(nnz, dtype=idx_dtype)
col = np.empty(nnz, dtype=idx_dtype)
nnz = 0
for i in range(M):
for j in range(N):
if blocks[i,j] is not None:
B = blocks[i,j]
data[nnz:nnz + B.nnz] = B.data
row[nnz:nnz + B.nnz] = B.row
col[nnz:nnz + B.nnz] = B.col
row[nnz:nnz + B.nnz] += row_offsets[i]
col[nnz:nnz + B.nnz] += col_offsets[j]
nnz += B.nnz
return coo_matrix((data, (row, col)), shape=shape).asformat(format)
def block_diag(mats, format=None, dtype=None):
"""
Build a block diagonal sparse matrix from provided matrices.
Parameters
----------
mats : sequence of matrices
Input matrices.
format : str, optional
The sparse format of the result (e.g. "csr"). If not given, the matrix
is returned in "coo" format.
dtype : dtype specifier, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of `blocks`.
Returns
-------
res : sparse matrix
Notes
-----
.. versionadded:: 0.11.0
See Also
--------
bmat, diags
Examples
--------
>>> from scipy.sparse import coo_matrix, block_diag
>>> A = coo_matrix([[1, 2], [3, 4]])
>>> B = coo_matrix([[5], [6]])
>>> C = coo_matrix([[7]])
>>> block_diag((A, B, C)).toarray()
array([[1, 2, 0, 0],
[3, 4, 0, 0],
[0, 0, 5, 0],
[0, 0, 6, 0],
[0, 0, 0, 7]])
"""
nmat = len(mats)
rows = []
for ia, a in enumerate(mats):
row = [None]*nmat
if issparse(a):
row[ia] = a
else:
row[ia] = coo_matrix(a)
rows.append(row)
return bmat(rows, format=format, dtype=dtype)
def random(m, n, density=0.01, format='coo', dtype=None,
random_state=None, data_rvs=None):
"""Generate a sparse matrix of the given shape and density with randomly
distributed values.
Parameters
----------
m, n : int
shape of the matrix
density : real, optional
density of the generated matrix: density equal to one means a full
matrix, density of 0 means a matrix with no non-zero items.
format : str, optional
sparse matrix format.
dtype : dtype, optional
type of the returned matrix values.
random_state : {numpy.random.RandomState, int}, optional
Random number generator or random seed. If not given, the singleton
numpy.random will be used. This random state will be used
for sampling the sparsity structure, but not necessarily for sampling
the values of the structurally nonzero entries of the matrix.
data_rvs : callable, optional
Samples a requested number of random values.
This function should take a single argument specifying the length
of the ndarray that it will return. The structurally nonzero entries
of the sparse random matrix will be taken from the array sampled
by this function. By default, uniform [0, 1) random values will be
sampled using the same random state as is used for sampling
the sparsity structure.
Examples
--------
>>> from scipy.sparse import random
>>> from scipy import stats
>>> class CustomRandomState(object):
... def randint(self, k):
... i = np.random.randint(k)
... return i - i % 2
>>> rs = CustomRandomState()
>>> rvs = stats.poisson(25, loc=10).rvs
>>> S = random(3, 4, density=0.25, random_state=rs, data_rvs=rvs)
>>> S.A
array([[ 36., 0., 33., 0.], # random
[ 0., 0., 0., 0.],
[ 0., 0., 36., 0.]])
Notes
-----
Only float types are supported for now.
"""
if density < 0 or density > 1:
raise ValueError("density expected to be 0 <= density <= 1")
if dtype and (dtype not in [np.float32, np.float64, np.longdouble]):
raise NotImplementedError("type %s not supported" % dtype)
mn = m * n
tp = np.intc
if mn > np.iinfo(tp).max:
tp = np.int64
if mn > np.iinfo(tp).max:
msg = """\
Trying to generate a random sparse matrix such that the product of dimensions is
greater than %d - this is not supported on this machine
"""
raise ValueError(msg % np.iinfo(tp).max)
# Number of non zero values
k = int(density * m * n)
if random_state is None:
random_state = np.random
elif isinstance(random_state, (int, np.integer)):
random_state = np.random.RandomState(random_state)
if data_rvs is None:
data_rvs = random_state.rand
# Use the algorithm from python's random.sample for k < mn/3.
if mn < 3*k:
# We should use this line, but choice is only available in numpy >= 1.7
# ind = random_state.choice(mn, size=k, replace=False)
ind = random_state.permutation(mn)[:k]
else:
ind = np.empty(k, dtype=tp)
selected = set()
for i in xrange(k):
j = random_state.randint(mn)
while j in selected:
j = random_state.randint(mn)
selected.add(j)
ind[i] = j
j = np.floor(ind * 1. / m).astype(tp)
i = (ind - j * m).astype(tp)
vals = data_rvs(k).astype(dtype)
return coo_matrix((vals, (i, j)), shape=(m, n)).asformat(format)
def rand(m, n, density=0.01, format="coo", dtype=None, random_state=None):
"""Generate a sparse matrix of the given shape and density with uniformly
distributed values.
Parameters
----------
m, n : int
shape of the matrix
density : real, optional
density of the generated matrix: density equal to one means a full
matrix, density of 0 means a matrix with no non-zero items.
format : str, optional
sparse matrix format.
dtype : dtype, optional
type of the returned matrix values.
random_state : {numpy.random.RandomState, int}, optional
Random number generator or random seed. If not given, the singleton
numpy.random will be used.
Notes
-----
Only float types are supported for now.
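Examples
--------
The sampled values are random, but the number of stored entries is
fixed by `density` (here int(0.25 * 3 * 4) == 3):
>>> from scipy.sparse import rand
>>> matrix = rand(3, 4, density=0.25, format="csr", random_state=42)
>>> matrix.nnz
3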
"""
return random(m, n, density, format, dtype, random_state)
| {
"content_hash": "347027e3970846d420843b911e40e3ba",
"timestamp": "",
"source": "github",
"line_count": 803,
"max_line_length": 95,
"avg_line_length": 30.29389788293898,
"alnum_prop": 0.5539751705993587,
"repo_name": "Shaswat27/scipy",
"id": "cdff960d2fa488f25579aa5c5f3dee2551523584",
"size": "24326",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "scipy/sparse/construct.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4240488"
},
{
"name": "C++",
"bytes": "3691569"
},
{
"name": "FORTRAN",
"bytes": "5661284"
},
{
"name": "HTML",
"bytes": "124328"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "9827641"
},
{
"name": "Shell",
"bytes": "2218"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
"""Tool for mounting AFF4 datastore over FUSE."""
import datetime
import errno
import getpass
import stat
import sys
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import,g-bad-import-order
import logging
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import flags
from grr.lib import flow_utils
from grr.lib import rdfvalue
from grr.lib import startup
from grr.lib import type_info
from grr.lib import utils
from grr.lib.aff4_objects import security
from grr.lib.aff4_objects import standard
from grr.lib.rdfvalues import client
# Check if fuse is installed. If it's not, set it to None so we know to mock it
# out later.
try:
# pylint: disable=g-import-not-at-top
import fuse
# pylint: enable=g-import-not-at-top
except (EnvironmentError, ImportError):
# We check for ImportErrors and EnvironmentErrors since submit checks throw an
# EnvironmentError when fuse isn't installed.
fuse = None
flags.DEFINE_string("aff4path", "/",
"Path in AFF4 to use as the root of the filesystem.")
flags.DEFINE_string("mountpoint", None,
"Path to point at which the system should be mounted.")
flags.DEFINE_bool("background", False,
"Whether or not to run the filesystem in the background,"
" not viewing debug information.")
flags.DEFINE_float("timeout", 30,
"How long to poll a flow for before giving up.")
flags.DEFINE_integer("max_age_before_refresh", 60*5,
"Measured in seconds. Do a client-side update if it's"
" been this long since we last did one.")
flags.DEFINE_bool("ignore_cache", False,
"Disables cache completely. Takes priority over"
" refresh_policy.")
flags.DEFINE_enum("refresh_policy", "if_older_than_max_age",
["if_older_than_max_age", "always", "never"],
"How to refresh the cache. Options are: always (on every"
" client-side access), never, or, by default,"
" if_older_than_max_age (if last accessed > max_age seconds"
" ago).", type=str)
flags.DEFINE_bool("force_sparse_image", False,
"Whether to convert existing files bigger than the"
" size threshold to new, empty AFF4SparseImages.")
flags.DEFINE_integer("sparse_image_threshold", 1024*1024*1024,
"If a client side file that's not in the datastore yet"
" is >= than this size, then store it as a sparse image.")
flags.DEFINE_string("username", None,
"Username to use for client authorization check.")
flags.DEFINE_string("reason", None,
"Reason to use for client authorization check. This "
"needs to match the string in your approval request.")
# The modes we'll use for aff4 objects that aren't really files.
# Taken from /etc
_DEFAULT_MODE_FILE = 33188
# Taken from /etc/passwd
_DEFAULT_MODE_DIRECTORY = 16877
class GRRFuseDatastoreOnly(object):
"""We implement the FUSE methods in this class."""
# Directories to hide. Readdir will not return them.
ignored_dirs = [
# We don't want to show AFF4Index objects.
"/index/client"
]
def __init__(self, root="/", token=None):
self.root = rdfvalue.RDFURN(root)
self.token = token
self.default_file_mode = _DEFAULT_MODE_FILE
self.default_dir_mode = _DEFAULT_MODE_DIRECTORY
try:
logging.info("Making sure supplied aff4path actually exists....")
self.getattr(root)
logging.info("OK")
except fuse.FuseOSError:
logging.info("Supplied aff4path didn't exist!")
raise IOError("Supplied aff4 path '%s' does not exist." % self.root)
def MakePartialStat(self, fd):
"""Try and give a 'stat' for something not in the data store.
Args:
fd: The object with no stat.
Returns:
A dictionary corresponding to what we'll say the 'stat' is
for objects which are not actually files, so have no OS level stat.
"""
is_dir = "Container" in fd.behaviours
return {
"pathspec": fd.Get(fd.Schema.PATHSPEC, ""),
"st_atime": fd.Get(fd.Schema.LAST, 0),
"st_blksize": 0,
"st_blocks": 0,
"st_ctime": 0,
"st_dev": 0,
"st_gid": 0,
"st_ino": 0,
"st_mode": self.default_dir_mode if is_dir else self.default_file_mode,
"st_mtime": 0,
"st_nlink": 0,
"st_rdev": 0,
"st_size": fd.Get(fd.Schema.SIZE, 0),
"st_uid": 0
}
def _IsDir(self, path):
"""True if and only if the path has the directory bit set in its mode."""
return stat.S_ISDIR(int(self.getattr(path)["st_mode"]))
# pylint: disable=unused-argument
def Readdir(self, path, fh=None):
"""Reads a directory given by path.
Args:
path: The path to list children of.
fh: A file handler. Not used.
Yields:
A generator of filenames.
Raises:
FuseOSError: If we try and list a file.
"""
# We can't read a path if it's a file.
if not self._IsDir(path):
raise fuse.FuseOSError(errno.ENOTDIR)
fd = aff4.FACTORY.Open(self.root.Add(path), token=self.token)
children = fd.ListChildren()
# Make these special directories unicode to be consistent with the rest of
# aff4.
for directory in [u".", u".."]:
yield directory
# ListChildren returns a generator, so we do the same.
for child in children:
# Filter out any directories we've chosen to ignore.
if child.Path() not in self.ignored_dirs:
yield child.Basename()
def Getattr(self, path, fh=None):
"""Performs a stat on a file or directory.
Args:
path: The path to stat.
fh: A file handler. Not used.
Returns:
A dictionary mapping st_ names to their values.
Raises:
FuseOSError: When a path is supplied that grr doesn't know about, ie an
invalid file path.
ValueError: If an empty path is passed. (The empty string, when passed to
self.root.Add, returns a path for aff4:/, the root directory, which is not
the behaviour we want.)
"""
if not path:
raise fuse.FuseOSError(errno.ENOENT)
if path != self.root:
full_path = self.root.Add(path)
else:
full_path = path
fd = aff4.FACTORY.Open(full_path, token=self.token)
# The root aff4 path technically doesn't exist in the data store, so
# it is a special case.
if full_path == "/":
return self.MakePartialStat(fd)
# Grab the stat according to aff4.
aff4_stat = fd.Get(fd.Schema.STAT)
# If the Schema for the object has a STAT attribute, go ahead and return
# it as a dictionary.
if aff4_stat:
return aff4_stat.AsDict()
# If the object didn't have a stored stat, we figure out if it is a special
# grr object, or just doesn't exist.
# We now check if the aff4 object actually has a row in the data store.
# This prevents us from being able to cd to directories that don't exist,
# since such directories have a newly-created empty AFF4Object,
# but no row in the data store. Anything that is a
# row in the data store will have a LAST attribute, so we check that.
elif fd.Get(fd.Schema.LAST) is None:
# We raise the "no such file or directory" error.
raise fuse.FuseOSError(errno.ENOENT)
else:
# This is an object that exists in the datastore, but has no STAT, so we
# don't know how to handle it.
pass
# If the object was in the data store, but didn't have a stat, we just
# try and guess some sensible values.
return self.MakePartialStat(fd)
def Read(self, path, length=None, offset=0, fh=None):
"""Reads data from a file.
Args:
path: The path to the file to read.
length: How many bytes to read.
offset: Offset in bytes from which reading should start.
fh: A file handler. Not used.
Returns:
A string containing the file contents requested.
Raises:
FuseOSError: If we try and read a directory or if we try and read an
object that doesn't support reading.
"""
if self._IsDir(path):
raise fuse.FuseOSError(errno.EISDIR)
fd = aff4.FACTORY.Open(self.root.Add(path), token=self.token,
ignore_cache=True)
# If the object has Read() and Seek() methods, let's use them.
if all((hasattr(fd, "Read"),
hasattr(fd, "Seek"),
callable(fd.Read),
callable(fd.Seek))):
# By default, read the whole file.
if length is None:
length = fd.Get(fd.Schema.SIZE)
fd.Seek(offset)
return fd.Read(length)
else:
# If we don't have Read/Seek methods, we probably can't read this object.
raise fuse.FuseOSError(errno.EIO)
def RaiseReadOnlyError(self):
"""Raise an error complaining that the file system is read-only."""
raise fuse.FuseOSError(errno.EROFS)
# pylint: disable=invalid-name
def mkdir(self, *unused_args, **unused_kwargs):
"""Unimplemented on purpose. File system is read-only."""
self.RaiseReadOnlyError()
def symlink(self, *unused_args, **unused_kwargs):
"""Unimplemented on purpose. File system is read-only."""
self.RaiseReadOnlyError()
def rename(self, *unused_args, **unused_kwargs):
"""Unimplemented on purpose. File system is read-only."""
self.RaiseReadOnlyError()
def link(self, *unused_args, **unused_kwargs):
"""Unimplemented on purpose. File system is read-only."""
self.RaiseReadOnlyError()
def write(self, *unused_args, **unused_kwargs):
"""Unimplemented on purpose. File system is read-only."""
self.RaiseReadOnlyError()
def truncate(self, *unused_args, **unused_kwargs):
"""Unimplemented on purpose. File system is read-only."""
self.RaiseReadOnlyError()
def create(self, *unused_args, **unused_kwargs):
"""Unimplemented on purpose. File system is read-only."""
self.RaiseReadOnlyError()
# pylint: enable=unused-argument,invalid-name
# FUSE expects the names of the functions to be standard
# filesystem function style (all lower case), so we set them so here.
read = utils.Proxy("Read")
readdir = utils.Proxy("Readdir")
getattr = utils.Proxy("Getattr")
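# Illustrative read-only mount sketch (assumes fusepy is installed and
# `token` is a valid ACL token; the aff4 path and mountpoint are placeholders):
#
#   class FuseOperation(GRRFuseDatastoreOnly, fuse.Operations):
#     pass
#   fuse.FUSE(FuseOperation(root="aff4:/C.0000000000000001", token=token),
#             "/mnt/grr", foreground=True)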
class GRRFuse(GRRFuseDatastoreOnly):
"""Interacts with the GRR clients to refresh data in the datastore."""
def __init__(self, root="/", token=None, max_age_before_refresh=None,
ignore_cache=False, force_sparse_image=False,
sparse_image_threshold=1024**3,
timeout=flow_utils.DEFAULT_TIMEOUT):
"""Create a new FUSE layer at the specified aff4 path.
Args:
root: String aff4 path for where we'd like to mount the FUSE layer.
token: Datastore access token.
max_age_before_refresh: How out of date our cache is. Specifically, if the
time since we last did a client-side update of an aff4 object is greater
than this value, we'll run a flow on the client and update that object.
ignore_cache: If true, always refresh data from the client. Overrides
max_age_before_refresh.
force_sparse_image: Whether to try and store every file bigger than the
size threshold as a sparse image, regardless of whether we've already got
data for it.
sparse_image_threshold: If a new file is >= this size, store it
as an empty AFF4SparseImage.
timeout: How long to wait for a client to finish running a flow, maximum.
"""
self.size_threshold = sparse_image_threshold
self.force_sparse_image = force_sparse_image
self.timeout = timeout
if ignore_cache:
max_age_before_refresh = datetime.timedelta(0)
# Cache expiry can be given as a datetime.timedelta object, but if
# it is not we'll use the seconds specified as a flag.
if max_age_before_refresh is None:
self.max_age_before_refresh = datetime.timedelta(
seconds=flags.FLAGS.max_age_before_refresh)
else:
self.max_age_before_refresh = max_age_before_refresh
super(GRRFuse, self).__init__(root, token)
def DataRefreshRequired(self, path=None, last=None):
"""True if we need to update this path from the client.
Args:
path: The path relative to the root to check freshness of.
last: An aff4:last attribute to check freshness of.
At least one of path or last must be supplied.
Returns:
True if the path hasn't been updated in the last
self.max_age_before_refresh seconds, else False.
Raises:
type_info.TypeValueError: If no arguments are supplied.
"""
# If we didn't get given a last attribute, use the path to get one from the
# object.
if last is None:
if path is None:
# If we didn't get a path either, we can't do anything.
raise type_info.TypeValueError("Either 'path' or 'last' must"
" be supplied as an argument.")
else:
fd = aff4.FACTORY.Open(self.root.Add(path), token=self.token,
ignore_cache=True)
# We really care about the last time the stat was updated, so we use
# this instead of the LAST attribute, which is the last time anything
# was updated about the object.
stat_obj = fd.Get(fd.Schema.STAT)
if stat_obj:
last = stat_obj.age
else:
last = rdfvalue.RDFDatetime(0)
# If the object doesn't even have a LAST attribute by this point,
# we say it hasn't been accessed within the cache expiry time.
if last is None:
return True
last = last.AsDatetime()
# Remember to use UTC time, since that's what the datastore uses.
if datetime.datetime.utcnow() - last > self.max_age_before_refresh:
return True
return False
def _RunAndWaitForVFSFileUpdate(self, path):
"""Runs a flow on the client, and waits for it to finish."""
client_id = client.GetClientURNFromPath(path)
# If we're not actually in a directory on a client, no need to run a flow.
if client_id is None:
return
flow_utils.UpdateVFSFileAndWait(
client_id,
token=self.token,
vfs_file_urn=self.root.Add(path),
timeout=self.timeout)
def Readdir(self, path, fh=None):
"""Updates the directory listing from the client.
Args:
path: The path to the directory to update. Client is inferred from this.
fh: A file handler. Not used.
Returns:
A list of filenames.
"""
if self.DataRefreshRequired(path):
self._RunAndWaitForVFSFileUpdate(path)
return super(GRRFuse, self).Readdir(path, fh=None)
def GetMissingChunks(self, fd, length, offset):
"""Return which chunks a file doesn't have.
Specifically, we return a list of the chunks specified by a
length-offset range which are not in the datastore.
Args:
fd: The database object to read chunks from.
length: Length to read.
offset: File offset to read from.
Returns:
A list of chunk numbers.
"""
# Seek in the index for where the hashes of the specified length/offset
# should be.
start_chunk = offset / fd.chunksize
end_chunk = (offset + length) / fd.chunksize
missing_chunks = []
for chunk in xrange(start_chunk, end_chunk + 1):
try:
# pylint: disable=protected-access
chunk_name = fd._GetChunkForReading(chunk)
# pylint: enable=protected-access
except aff4.ChunkNotFoundError:
missing_chunks.append(chunk)
else:
chunk_last = chunk_name.Get(chunk_name.Schema.LAST)
if self.DataRefreshRequired(last=chunk_last):
missing_chunks.append(chunk)
return missing_chunks
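# Worked example of the chunk arithmetic above (a chunksize of 4096 assumed):
# a read of length 10000 at offset 5000 gives start_chunk == 1 and
# end_chunk == 3 (5000 // 4096 and 15000 // 4096), so chunks 1 through 3
# are the candidates checked for freshness.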
def UpdateSparseImageIfNeeded(self, fd, length, offset):
missing_chunks = self.GetMissingChunks(fd, length, offset)
if not missing_chunks:
return
client_id = client.GetClientURNFromPath(fd.urn.Path())
flow_utils.StartFlowAndWait(client_id, token=self.token,
flow_name="UpdateSparseImageChunks",
file_urn=fd.urn,
chunks_to_fetch=missing_chunks)
def Read(self, path, length=None, offset=0, fh=None):
fd = aff4.FACTORY.Open(self.root.Add(path), token=self.token,
ignore_cache=True)
last = fd.Get(fd.Schema.CONTENT_LAST)
client_id = client.GetClientURNFromPath(path)
if isinstance(fd, standard.AFF4SparseImage):
# If we have a sparse image, update just a part of it.
self.UpdateSparseImageIfNeeded(fd, length, offset)
else:
# If it's the first time we've seen this path (or we're asking
# explicitly), try and make it an AFF4SparseImage.
if last is None or self.force_sparse_image:
pathspec = fd.Get(fd.Schema.PATHSPEC)
# Either makes a new AFF4SparseImage or gets the file fully,
# depending on size.
flow_utils.StartFlowAndWait(client_id, token=self.token,
flow_name="MakeNewAFF4SparseImage",
pathspec=pathspec,
size_threshold=self.size_threshold)
# Reopen the fd in case it's changed to be an AFF4SparseImage
fd = aff4.FACTORY.Open(self.root.Add(path), token=self.token)
# If we are now a sparse image, just download the part we requested
# from the client.
if isinstance(fd, standard.AFF4SparseImage):
flow_utils.StartFlowAndWait(client_id, token=self.token,
flow_name="FetchBufferForSparseImage",
file_urn=self.root.Add(path),
length=length, offset=offset)
else:
# This was a file we'd seen before that wasn't a sparse image, so update
# it the usual way.
if self.DataRefreshRequired(last=last):
self._RunAndWaitForVFSFileUpdate(path)
# Read the file from the datastore as usual.
return super(GRRFuse, self).Read(path, length, offset, fh)
def Usage():
print "Needs at least --mountpoint"
print ("e.g. \n python grr/tools/fuse_mount.py "
"--config=grr/config/grr-server.yaml "
"--mountpoint=/home/%s/mntpoint"
% getpass.getuser())
def main(unused_argv):
config_lib.CONFIG.AddContext(
"Commandline Context",
"Context applied for all command line tools")
startup.Init()
if fuse is None:
logging.critical("""Could not start!
fusepy must be installed to run fuse_mount.py!
Try:
sudo pip install fusepy""")
sys.exit(1)
if not flags.FLAGS.mountpoint:
Usage()
sys.exit(1)
# We multiple inherit from GRRFuse and fuse.Operations. In the
# case that fuse is present, we run the actual FUSE layer, since we have
# fuse.Operations. In the case that fuse is not present, we have already
# exited by now if we were run from the command line, and if we were not run
# from the command line, we've been imported, and we run the tests using a
# mock fuse.
class FuseOperation(GRRFuse, fuse.Operations):
pass
root = flags.FLAGS.aff4path
username = flags.FLAGS.username or getpass.getuser()
token = rdfvalue.ACLToken(username=username, reason=flags.FLAGS.reason or
"fusemount")
# If we're exporting a path inside a client, check to see if we have access to
# that client and get the appropriate token.
client_id = client.GetClientURNFromPath(root)
if client_id is not None:
token = security.Approval.GetApprovalForObject(
client_id,
token=token,
username=username)
data_store.default_token = token
logging.info("fuse_mount.py is mounting %s at %s....", root,
flags.FLAGS.mountpoint)
refresh_policy = flags.FLAGS.refresh_policy
if refresh_policy == "always":
max_age_before_refresh = datetime.timedelta(0)
elif refresh_policy == "never":
# Set the max age to be the maximum possible time difference.
max_age_before_refresh = datetime.timedelta(datetime.timedelta.max)
elif refresh_policy == "if_older_than_max_age":
max_age_before_refresh = datetime.timedelta(
seconds=flags.FLAGS.max_age_before_refresh)
else:
# Otherwise, a flag outside the enum was given and the flag validator threw
# an exception.
pass
fuse_operation = FuseOperation(
root=root,
token=token,
max_age_before_refresh=max_age_before_refresh,
ignore_cache=flags.FLAGS.ignore_cache,
force_sparse_image=flags.FLAGS.force_sparse_image,
sparse_image_threshold=flags.FLAGS.sparse_image_threshold,
timeout=flags.FLAGS.timeout)
fuse.FUSE(fuse_operation, flags.FLAGS.mountpoint,
foreground=not flags.FLAGS.background)
if __name__ == "__main__":
flags.StartMain(main)
| {
"content_hash": "5b8b0da7abe870a5ff3ca0d4b39d7059",
"timestamp": "",
"source": "github",
"line_count": 624,
"max_line_length": 80,
"avg_line_length": 33.88621794871795,
"alnum_prop": 0.6480018917001655,
"repo_name": "defaultnamehere/grr",
"id": "0f5234c1ed9777aed3ed6fc2262a23d8f5eaa0c2",
"size": "21167",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/fuse_mount.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "C++",
"bytes": "55149"
},
{
"name": "CSS",
"bytes": "36345"
},
{
"name": "JavaScript",
"bytes": "831633"
},
{
"name": "Makefile",
"bytes": "5939"
},
{
"name": "Python",
"bytes": "4541648"
},
{
"name": "Shell",
"bytes": "31077"
}
],
"symlink_target": ""
} |
from pymongo import ASCENDING, DESCENDING
ENTRIES = 20
def validate(revision, revisions_count):
if revision < 0 or revision >= revisions_count:
raise ValueError("revision index out of bound! " + str(revision))
return revision
class ArticlesParams(object):
def __init__(self, from_revision, to_revision):
self.from_revision = int(from_revision) if from_revision else None
self.to_revision = int(to_revision) if to_revision else None
def from_version(self, revisions_count):
from_revision = max(0, revisions_count - 2) if self.from_revision is None else self.from_revision
return validate(from_revision, revisions_count)
def to_version(self, revisions_count):
to_revision = max(0, revisions_count - 1) if self.to_revision is None else self.to_revision
return validate(to_revision, revisions_count)
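    # Illustrative note (not part of the original docs): with
    # revisions_count == 5 and no explicit params, from_version() returns 3
    # and to_version() returns 4, i.e. the diff of the two latest revisions.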
@classmethod
def from_req(cls, req):
from_revision = req.params.get('from_revision', None)
to_revision = req.params.get('to_revision', None)
return cls(from_revision, to_revision)
class NewsParams(object):
def __init__(self, page, sort_by, order, lang, publisher):
self.page = page
self.sort_by = sort_by
self.order = order
self.lang = lang
self.publisher = publisher
def query(self):
query_string = {'$where': 'this.created_at<this.updated_at'}
if self.lang != 'all':
query_string['lang'] = self.lang
if self.publisher:
query_string['publisher'] = self.publisher
return query_string
def skipped_pages(self):
return ENTRIES * (self.page - 1)
def by_order(self):
order = ASCENDING if self.order == 'asc' else DESCENDING
sort_by_field = 'comments_no' if self.sort_by == 'popular' \
else 'updated_at' if self.sort_by == 'time' else 'changes'
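        # e.g. sort_by='popular', order='desc' yields [('comments_no', DESCENDING)]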
return [(sort_by_field, order)]
def get_from(self, db):
return db.find(self.query()).sort(self.by_order()).skip(self.skipped_pages()).limit(ENTRIES)
def get_meta(self, cursor):
count = cursor.count()
prefix = ''.join(['/api/publisher/', self.publisher, '/news']) if self.publisher else '/api/news'
next_url = ''.join([prefix, '?page=', str(self.page + 1), '&sort_by=', self.sort_by, '&order=',
self.order, '&lang=', self.lang]) if count > self.page * ENTRIES else None
return {"count": ENTRIES, "total_count": count, "next": next_url}
@classmethod
def from_req(cls, req, publisher_code):
page = int(req.params.get('page', '1'))
sort_by = req.params.get('sort_by', 'changes')
if sort_by not in ['popular', 'time', 'changes']:
raise ValueError()
order = req.params.get('order', 'desc')
if order not in ['asc', 'desc']:
raise ValueError()
lang = req.params.get('lang', 'all')
return cls(page, sort_by, order, lang, publisher_code)
class SearchParams(object):
def __init__(self, page, sort_by, order, lang, keyword, publisher=None):
self.keyword = keyword
self.page = page
self.sort_by = sort_by
self.order = order
self.lang = lang
self.publisher = publisher
def query(self):
query_string = {'$where': 'this.created_at<this.updated_at'}
if self.lang != 'all':
query_string['lang'] = self.lang
if self.publisher:
query_string['publisher'] = self.publisher
query_string['title'] = {'$regex': '.*'+self.keyword+'.*'}
return query_string
def skipped_pages(self):
return ENTRIES * (self.page - 1)
def by_order(self):
order = ASCENDING if self.order == 'asc' else DESCENDING
sort_by_field = 'comments_no' if self.sort_by == 'popular' \
else 'updated_at' if self.sort_by == 'time' else 'changes'
return [(sort_by_field, order)]
def get_from(self, db):
return db.find(self.query()).sort(self.by_order()).skip(self.skipped_pages()).limit(ENTRIES)
def get_meta(self, cursor):
count = cursor.count()
        next_url = ''.join(['/api/search/news?keyword=', self.keyword, '&page=', str(self.page + 1), '&sort_by=',
                            self.sort_by, '&order=', self.order, '&lang=', self.lang
                            ]) if count > self.page * ENTRIES else None
return {"count": ENTRIES, "total_count": count, "next": next_url}
@classmethod
def from_req(cls, req):
page = int(req.params.get('page', '1'))
sort_by = req.params.get('sort_by', 'changes')
if sort_by not in ['popular', 'time', 'changes']:
raise ValueError()
order = req.params.get('order', 'desc')
if order not in ['asc', 'desc']:
raise ValueError()
lang = req.params.get('lang', 'all')
keyword = req.params.get('keyword')
return cls(page, sort_by, order, lang, keyword) | {
"content_hash": "d89cab105f2edaa657f1b4f2eadf3657",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 110,
"avg_line_length": 38.85271317829457,
"alnum_prop": 0.5905826017557861,
"repo_name": "code4hk/NewsdiffHK-Backend",
"id": "3ee9d62e254bcabd65b0b96a7466cd4402ca2390",
"size": "5012",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model/params.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9670"
}
],
"symlink_target": ""
} |
"""Test hassbian config."""
from unittest.mock import patch
import pytest
from homeassistant.bootstrap import async_setup_component
from homeassistant.components import config
from homeassistant.components.websocket_api.const import TYPE_RESULT
from homeassistant.const import CONF_UNIT_SYSTEM, CONF_UNIT_SYSTEM_IMPERIAL
from homeassistant.util import dt as dt_util, location
from tests.common import mock_coro
ORIG_TIME_ZONE = dt_util.DEFAULT_TIME_ZONE
@pytest.fixture
async def client(hass, hass_ws_client):
"""Fixture that can interact with the config manager API."""
with patch.object(config, "SECTIONS", ["core"]):
assert await async_setup_component(hass, "config", {})
return await hass_ws_client(hass)
async def test_validate_config_ok(hass, hass_client):
"""Test checking config."""
with patch.object(config, "SECTIONS", ["core"]):
await async_setup_component(hass, "config", {})
client = await hass_client()
with patch(
"homeassistant.components.config.core.async_check_ha_config_file",
return_value=mock_coro(),
):
resp = await client.post("/api/config/core/check_config")
assert resp.status == 200
result = await resp.json()
assert result["result"] == "valid"
assert result["errors"] is None
with patch(
"homeassistant.components.config.core.async_check_ha_config_file",
return_value=mock_coro("beer"),
):
resp = await client.post("/api/config/core/check_config")
assert resp.status == 200
result = await resp.json()
assert result["result"] == "invalid"
assert result["errors"] == "beer"
async def test_websocket_core_update(hass, client):
"""Test core config update websocket command."""
assert hass.config.latitude != 60
assert hass.config.longitude != 50
assert hass.config.elevation != 25
assert hass.config.location_name != "Huis"
assert hass.config.units.name != CONF_UNIT_SYSTEM_IMPERIAL
assert hass.config.time_zone.zone != "America/New_York"
await client.send_json(
{
"id": 5,
"type": "config/core/update",
"latitude": 60,
"longitude": 50,
"elevation": 25,
"location_name": "Huis",
CONF_UNIT_SYSTEM: CONF_UNIT_SYSTEM_IMPERIAL,
"time_zone": "America/New_York",
}
)
msg = await client.receive_json()
assert msg["id"] == 5
assert msg["type"] == TYPE_RESULT
assert msg["success"]
assert hass.config.latitude == 60
assert hass.config.longitude == 50
assert hass.config.elevation == 25
assert hass.config.location_name == "Huis"
assert hass.config.units.name == CONF_UNIT_SYSTEM_IMPERIAL
assert hass.config.time_zone.zone == "America/New_York"
dt_util.set_default_time_zone(ORIG_TIME_ZONE)
async def test_websocket_core_update_not_admin(hass, hass_ws_client, hass_admin_user):
"""Test core config fails for non admin."""
hass_admin_user.groups = []
with patch.object(config, "SECTIONS", ["core"]):
await async_setup_component(hass, "config", {})
client = await hass_ws_client(hass)
await client.send_json({"id": 6, "type": "config/core/update", "latitude": 23})
msg = await client.receive_json()
assert msg["id"] == 6
assert msg["type"] == TYPE_RESULT
assert not msg["success"]
assert msg["error"]["code"] == "unauthorized"
async def test_websocket_bad_core_update(hass, client):
"""Test core config update fails with bad parameters."""
await client.send_json({"id": 7, "type": "config/core/update", "latituude": 23})
msg = await client.receive_json()
assert msg["id"] == 7
assert msg["type"] == TYPE_RESULT
assert not msg["success"]
assert msg["error"]["code"] == "invalid_format"
async def test_detect_config(hass, client):
"""Test detect config."""
with patch(
"homeassistant.util.location.async_detect_location_info",
return_value=mock_coro(None),
):
await client.send_json({"id": 1, "type": "config/core/detect"})
msg = await client.receive_json()
assert msg["success"] is True
assert msg["result"] == {}
async def test_detect_config_fail(hass, client):
"""Test detect config."""
with patch(
"homeassistant.util.location.async_detect_location_info",
return_value=mock_coro(
location.LocationInfo(
ip=None,
country_code=None,
country_name=None,
region_code=None,
region_name=None,
city=None,
zip_code=None,
latitude=None,
longitude=None,
use_metric=True,
time_zone="Europe/Amsterdam",
)
),
):
await client.send_json({"id": 1, "type": "config/core/detect"})
msg = await client.receive_json()
assert msg["success"] is True
assert msg["result"] == {"unit_system": "metric", "time_zone": "Europe/Amsterdam"}
| {
"content_hash": "740aa5a5fb961e133dc5a5c2e79344c2",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 86,
"avg_line_length": 31.7125,
"alnum_prop": 0.6249507292077257,
"repo_name": "postlund/home-assistant",
"id": "8caa0f3e6fbe8033e803f850eb4c202b7970a3dc",
"size": "5074",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/config/test_core.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20215859"
},
{
"name": "Shell",
"bytes": "6663"
}
],
"symlink_target": ""
} |
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields,osv
from openerp import tools
class report_project_task_user(osv.Model):
_inherit = "report.project.task.user"
_columns = {
'hours_planned': fields.float('Planned Hours', readonly=True),
'hours_effective': fields.float('Effective Hours', readonly=True),
'hours_delay': fields.float('Avg. Plan.-Eff.', readonly=True),
'remaining_hours': fields.float('Remaining Hours', readonly=True),
'progress': fields.float('Progress', readonly=True, group_operator='avg'),
'total_hours': fields.float('Total Hours', readonly=True),
}
def _select(self):
return super(report_project_task_user, self)._select() + ", progress as progress, t.effective_hours as hours_effective, remaining_hours as remaining_hours, total_hours as total_hours, t.delay_hours as hours_delay, planned_hours as hours_planned"
def _group_by(self):
return super(report_project_task_user, self)._group_by() + ", remaining_hours, t.effective_hours, progress, total_hours, planned_hours, hours_delay"
| {
"content_hash": "aed488eac5a46477ae80ec189ecee6f4",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 254,
"avg_line_length": 52.45454545454545,
"alnum_prop": 0.7027729636048526,
"repo_name": "vileopratama/vitech",
"id": "34f3e5eeef3ada1a4ea92cef772a0cc043b76968",
"size": "1178",
"binary": false,
"copies": "42",
"ref": "refs/heads/master",
"path": "src/addons/project_timesheet/report/project_report.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
} |
import os
import sys
from fileparser import FileParser
from common import File
from easysub import EasySub
class EasySubConsole(object):
def __init__(self):
super(EasySubConsole, self).__init__()
self._file_parser = FileParser()
self._easysub = EasySub()
def _usage(self):
return u"""Usage:
main.py path [path, ...]
path: File or directory's absolute path
"""
def _exit(self, code=0):
sys.exit(code)
def _validate_paths(self, paths):
for path in paths:
if not os.path.exists(path):
return False
return True
def _get_files_from_paths(self, paths):
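        """Collect media files under the given paths that have no subtitle yet."""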
files = list()
for path in paths:
if os.path.isfile(path) and self._file_parser.is_media_file(path):
file = File(path=path)
if not os.path.exists(file.sub_absolute_path):
files.append(file)
else:
for item in os.listdir(path):
item_path = os.path.join(path, item)
if os.path.isfile(item_path) and self._file_parser.is_media_file(item_path):
file = File(path=item_path)
if not os.path.exists(file.sub_absolute_path):
files.append(file)
return files
def run(self, args):
if not args:
print self._usage()
self._exit(1)
if not self._validate_paths(args):
print u'Some of the input paths are not valid.'
self._exit(2)
files = self._get_files_from_paths(args)
files_n = len(files)
print unicode(files_n) + u' media files without subtitles found!'
for file in files:
if self._easysub.process_file(file):
print os.path.basename(file.sub_absolute_path) + u' is available.'
else:
print os.path.basename(file.sub_absolute_path) + u' could not be downloaded.'
self._exit(0)
def main():
EasySubConsole().run(sys.argv[1:])
if __name__ == u'__main__':
main()
| {
"content_hash": "a7a02b0311612a3a26983daa67ea4c57",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 81,
"avg_line_length": 24.52112676056338,
"alnum_prop": 0.6645605973578403,
"repo_name": "scorpiontahir02/easysub",
"id": "166abd48194f6aae660a0b6c51c8f12bc91e76f6",
"size": "1741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/easysub/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42367"
}
],
"symlink_target": ""
} |
"""Test the included examples.
"""
import pytest
from conftest import get_example
def test_nodeid(ctestdir):
"""Node ids
"""
with get_example("nodeid.py").open("rt") as f:
ctestdir.makepyfile(f.read())
result = ctestdir.runpytest("--verbose")
try:
result.assert_outcomes(passed=6, skipped=0, failed=0, xfailed=1)
except TypeError:
result.assert_outcomes(passed=6, skipped=0, failed=0)
result.stdout.re_match_lines(r"""
.*::test_a PASSED
.*::test_b\[7-True\] PASSED
.*::test_b\[0-False\] PASSED
.*::test_b\[-1-False\] (?:XFAIL(?:\s+\(.*\))?|xfail)
.*::TestClass::test_c PASSED
.*::TestClass::test_d\[order\] PASSED
.*::TestClass::test_d\[disorder\] PASSED
""")
| {
"content_hash": "8a98229e6e7eed5cfa8bd99ff8cca151",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 72,
"avg_line_length": 29.807692307692307,
"alnum_prop": 0.5793548387096774,
"repo_name": "RKrahl/pytest-dependency",
"id": "7d45ba7f369baaef31e99b495117d47e7eb7e025",
"size": "775",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_09_examples_names.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "577"
},
{
"name": "Python",
"bytes": "61407"
}
],
"symlink_target": ""
} |
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_SpeciesPrompt(object):
def setupUi(self, SpeciesPrompt):
SpeciesPrompt.setObjectName("SpeciesPrompt")
SpeciesPrompt.resize(325, 132)
SpeciesPrompt.setFocusPolicy(QtCore.Qt.StrongFocus)
self.centralwidget = QtWidgets.QWidget(SpeciesPrompt)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 321, 122))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setObjectName("verticalLayout")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.label = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 3, 0, 1, 1)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.species_label = QtWidgets.QLabel(self.verticalLayoutWidget)
self.species_label.setObjectName("species_label")
self.horizontalLayout.addWidget(self.species_label)
self.gridLayout.addLayout(self.horizontalLayout, 2, 0, 1, 1)
self.dataset_name = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.dataset_name.setObjectName("dataset_name")
self.gridLayout.addWidget(self.dataset_name, 3, 1, 1, 1, QtCore.Qt.AlignHCenter)
self.species_selection = QtWidgets.QComboBox(self.verticalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.species_selection.sizePolicy().hasHeightForWidth())
self.species_selection.setSizePolicy(sizePolicy)
self.species_selection.setObjectName("species_selection")
self.gridLayout.addWidget(self.species_selection, 2, 1, 1, 1, QtCore.Qt.AlignHCenter)
self.verticalLayout.addLayout(self.gridLayout)
self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
spacerItem = QtWidgets.QSpacerItem(40, 10, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_9.addItem(spacerItem)
self.apply_button = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.apply_button.setDefault(True)
self.apply_button.setObjectName("apply_button")
self.horizontalLayout_9.addWidget(self.apply_button)
self.verticalLayout.addLayout(self.horizontalLayout_9)
SpeciesPrompt.setCentralWidget(self.centralwidget)
self.retranslateUi(SpeciesPrompt)
QtCore.QMetaObject.connectSlotsByName(SpeciesPrompt)
def retranslateUi(self, SpeciesPrompt):
_translate = QtCore.QCoreApplication.translate
SpeciesPrompt.setWindowTitle(_translate("SpeciesPrompt", "Species Prompt"))
self.label.setText(_translate("SpeciesPrompt", "Dataset Name:"))
self.species_label.setText(_translate("SpeciesPrompt", "Ion Species:"))
self.apply_button.setText(_translate("SpeciesPrompt", "Apply"))
| {
"content_hash": "39b253963aa53c7b26cd963197a29fe8",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 114,
"avg_line_length": 58.87931034482759,
"alnum_prop": 0.7352855051244509,
"repo_name": "DanielWinklehner/py_particle_processor",
"id": "8603086d88a9c472189d0fb579a436f6935a87b8",
"size": "3621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py_particle_processor_qt/gui/species_prompt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "358145"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.elasticsan import ElasticSanManagement
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-elasticsan
# USAGE
python volume_groups_get_minimum_set_gen.py
Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = ElasticSanManagement(
credential=DefaultAzureCredential(),
subscription_id="aaaaaaaaaaaaaaaaaa",
)
response = client.volume_groups.get(
resource_group_name="rgelasticsan",
elastic_san_name="ti7q-k952-1qB3J_5",
volume_group_name="u_5I_1j4t3",
)
print(response)
# x-ms-original-file: specification/elasticsan/resource-manager/Microsoft.ElasticSan/preview/2021-11-20-preview/examples/VolumeGroups_Get_MinimumSet_Gen.json
if __name__ == "__main__":
main()
| {
"content_hash": "06a4b4e1f421082723ded84aa233bf88",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 157,
"avg_line_length": 34.11764705882353,
"alnum_prop": 0.728448275862069,
"repo_name": "Azure/azure-sdk-for-python",
"id": "f05e5ff0bb068b8483bad802789b32eae640e4d7",
"size": "1628",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/elasticsan/azure-mgmt-elasticsan/generated_samples/volume_groups_get_minimum_set_gen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('assessment', '0003_remove_surveytranslation_slug'),
]
operations = [
migrations.AlterField(
model_name='question',
name='of_type',
field=models.CharField(choices=[('tf', 'true or false'), ('mc', 'multiple choice'), ('txt', 'text')], default='tf', max_length=3, verbose_name='type'),
),
migrations.AlterField(
model_name='questiontranslation',
name='question',
field=models.TextField(verbose_name='question'),
),
]
| {
"content_hash": "57cf7801d6275e90946d4ba428de49e9",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 163,
"avg_line_length": 29.82608695652174,
"alnum_prop": 0.5889212827988338,
"repo_name": "vandorjw/django-assessment",
"id": "8dcec75275b90e0c2c4657ed9f94b637f7fe05aa",
"size": "759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "assessment/migrations/0004_auto_20171019_1503.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "649"
},
{
"name": "Makefile",
"bytes": "1547"
},
{
"name": "Python",
"bytes": "50642"
},
{
"name": "Shell",
"bytes": "265"
}
],
"symlink_target": ""
} |
import utils
from copy import copy
def cache_transform(transformation):
cache = {}
def fn(domain):
cached = cache.get(domain)
if cached is None:
cached = transformation(domain)
cache[domain] = cached
return cached
return fn
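# Usage sketch (illustrative, not from the original sources): memoize an
# expensive materialization so a repeated subdomain is converted only once.
#   to_small_values = cache_transform(lambda d: d.to_values(100))
#   v1 = to_small_values(domain)  # computed and cached
#   v2 = to_small_values(domain)  # served from the cache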
class Domain(object):
"""
Base class for domains
"""
filtered = False
step_jumps = False
strict = False
def __init__(self, name=None):
self._name = name
self._size = -1
@property
def name(self):
""" Name of domain. Serves only for debugging purpose. """
if self._name:
return self._name
else:
return self.__class__.__name__
@property
def size(self):
"""Number of elements in domain.
        If the size is ``None`` then the number of elements cannot be determined.
If the domain is non-filtered and the size is int then it is the exact
number of elements. If the domain is filtered and the size is int then
it is an upper bound for the number of elements.
"""
s = self._size
if s == -1:
s = self._compute_size()
self._size = s
return s
def set_name(self, name):
"""Creates a copy of domain with changed name.
"""
domain = copy(self)
domain._name = name
return domain
def _compute_size(self):
"""Computes the size of domain
Each domain where size can be determined should overload this method.
"""
return None
def __mul__(self, other):
"""Creates a cartesian product of domains.
It is equivalent to ``Product((self, other))``, see
:class:`haydi.Product`
"""
return Product((self, other))
def __add__(self, other):
"""Join two domains
It is equivalent to ``Join((self, other))``, see :class:`haydi.Join`
"""
return Join((self, other))
def __pow__(self, exponent):
return Sequences(self, exponent)
def iterate_steps(self, start, end):
"""Create iterator over a given range of steps
It internally calls ``create_skip_iter()`` and ignores StepSkips.
Args:
start (int): Index of first step
end (int): Index of the last step
Examples:
>>> list(hd.Range(10).iterate_steps(1, 7))
[1, 2, 3, 4, 5, 6]
>>> list(hd.Range(10).filter(lambda x: x % 3 == 0)
... .iterate_steps(1, 7))
[3, 6]
"""
i = start
it = self.create_skip_iter(start)
while i < end:
v = next(it)
if isinstance(v, StepSkip):
i += v.value
else:
yield v
i += 1
# Internal
def _set_flags_from_domain(self, domain):
self.filtered = domain.filtered
self.step_jumps = domain.step_jumps
self.strict = domain.strict
def _set_flags_from_domains(self, domains):
self.filtered = any(d.filtered for d in domains)
self.step_jumps = all(d.step_jumps for d in domains)
self.strict = all(d.strict for d in domains)
# Actions
def max(self, value_fn=None, size=None):
"""
Shortcut for ``.iterate().max(...)``
"""
return self.iterate().max(value_fn, size)
def groups(self, key_fn, max_items_per_group=None):
"""
Shortcut for ``.iterate().groups(...)``
"""
return self.iterate().groups(key_fn, max_items_per_group)
def groups_counts(self, key_fn, max_items_per_group):
"""
Shortcut for ``.iterate().group_counts(...)``
"""
return self.iterate().groups_counts(key_fn, max_items_per_group)
def collect(self):
"""
Shortcut for ``.iterate().collect()``
Example:
>>> hd.Range(4).collect().run()
[0, 1, 2, 3]
"""
return self.iterate().collect()
def reduce(self, reduce_fn, init_value=0, associative=True):
"""
Shortcut for ``.iterate().reduce(...)``
"""
return self.iterate().reduce(reduce_fn, init_value, associative)
def take(self, count):
"""
Shortcut for ``.iterate().take(count)``
"""
return self.iterate().take(count)
def first(self, default=None):
"""
Shortcut for ``.iterate().first(default)``
"""
return self._make_pipeline("iterate").first(default)
# Transformations
def map(self, fn):
"""
Transformation: Map a function `fn` over elements of domain
Example:
>>> list(hd.Range(4).map(lambda x: x + 10))
[10, 11, 12, 13]
"""
return TransformedDomain(self, transform.MapTransformation(fn))
def filter(self, fn, strict=False):
"""
Transformation: Filters elements from domain
It calls function `fn` on each element, if the function
returns ``False`` then the element is filtered out.
        Note that the resulting domain has ``filtered`` set to ``True``.
Example:
>>> list(hd.Range(4).filter(lambda x: x % 2 == 1))
[1, 3]
"""
return TransformedDomain(
self, transform.FilterTransformation(fn, strict))
# Others
def run(self, ctx=None, timeout=None):
"""A shortcut for ``self.collect().run(ctx, timeout)``"""
return self.collect().run(ctx, timeout)
# Shortcuts
# def take(self, count):
# return transform.iterate().take(count)
def _make_iter(self, step):
"""Creates an interator over elements of the domain
Each domain implementation should override this method.
"""
raise NotImplementedError()
def _make_skip_iter(self, step):
"""Create a skip iterator over elements of the domain
Each (potentially) filtered domain implementation should override this
method.
"""
raise NotImplementedError()
def create_iter(self, step=0):
return self._make_iter(step)
def create_skip_iter(self, step=0):
if self.filtered:
return self._make_skip_iter(step)
else:
return self._make_iter(step)
def __iter__(self):
return self.create_iter()
def _make_pipeline(self, method):
return Pipeline(self, method)
def generate_one(self):
"""Generate a random element from the domain"""
raise Exception("Domain {} do not support generation"
.format(type(self).__name__))
def iterate(self):
"""Create a pipeline that iterates over all elements in the domain
The method returns instance of :class:`haydi.Pipeline` with "iterate"
method.
"""
return self._make_pipeline("iterate")
def generate(self, count=None):
"""Create a pipeline that generates random element from the domain
The method returns instance of :class:`haydi.Pipeline` with "generate"
method.
Args:
count (int or None): The number of generated elements in the
pipeline, if ``count`` is ``None`` then the
pipeline generates an infinite number of
elements.
"""
pipeline = self._make_pipeline("generate")
if count is None:
return pipeline
else:
return pipeline.take(count)
def cnfs(self):
"""Create a pipeline iterating over canonical elements in the domain
The method returns instance of :class:`haydi.Pipeline` with "cnfs"
method.
This works only for *strict* domains. If called on a non-strict domain,
then an exception is thrown.
"""
return self._make_pipeline("cnfs")
def to_values(self, max_size=None):
"""Materialize the domain (or its subdomains)
This method serves as an optimization for caching elements of
heavily-used small domains.
If the argument ``max_size`` is ``None`` or ``self.size`` is at most
``max_size`` then the call is equivalent to ``hd.Values(tuple(self))``.
Otherwise the method is applied on subdomains recursively.
Example:
>>> p = hd.Range(3) * hd.Range(5) * hd.Range(6)
>>> q = p.to_values(5)
``q`` is the same as writing::
>>> hd.Values((0, 1, 2)) * hd.Values((0, 1, 2, 3, 4)) * hd.Range(6)
Args:
max_size (int or None): The size limit for materialization
"""
if max_size is None:
max_size = self.size
if self.size <= max_size:
return Values(tuple(self.create_iter()))
else:
return self._remap_domains(
cache_transform(lambda d: d.to_values(max_size)))
def to_cnf_values(self, max_size=None):
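        """Materialize canonical elements; the ``cnfs()`` analogue of ``to_values``."""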
if max_size is None:
max_size = self.size
if self.size <= max_size:
return CnfValues(tuple(self.cnfs()), _check=False)
else:
return self._remap_domains(
cache_transform(lambda d: d.to_cnf_values(max_size)))
def _remap_domains(self, fn):
"""
Returns an instance of the current domain with subdomains
transformed with the given transformation.
Domains with subdomains should override this method.
Args:
fn (callable): A function called on each subdomain
Returns:
An instance of :class:`haydi.Domain`
"""
return self
def __repr__(self):
ITEMS_LIMIT = 5
ITEM_CHAR_LIMIT = 24
CHAR_LIMIT = 48
size = self.size
if size == 0:
extra = " {}"
elif not self.filtered and size is not None:
it = self.create_iter()
remaining = CHAR_LIMIT
tokens = [" {"]
last = size - 1
for i in xrange(ITEMS_LIMIT):
try:
item_repr = repr(it.next())
except StopIteration:
tokens = ["size of domain is not consistent with iterator"]
break
item_repr = utils.limit_string_length(
item_repr, min(remaining, ITEM_CHAR_LIMIT))
remaining -= len(item_repr)
tokens.append(item_repr)
if i == last:
tokens.append("}")
break
else:
tokens.append(", ")
remaining -= 2
if remaining <= 6:
tokens.append("...}")
break
else:
tokens.append("...}")
extra = "".join(tokens)
else:
if self.filtered:
extra = " filtered"
else:
extra = ""
return "<{} size={}{}>".format(self.name, self.size, extra)
class StepSkip(object):
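    """Marker yielded by skip iterators to report a number of skipped steps."""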
def __init__(self, value=1):
self.value = value
def __repr__(self):
return "<StepSkip {}>".format(self.value)
def __eq__(self, other):
return isinstance(other, StepSkip) and other.value == self.value
def __ne__(self, other):
return not self.__eq__(other)
class TransformedDomain(Domain):
def __init__(self, parent, transformation):
name = type(transformation).__name__
super(TransformedDomain, self).__init__(name)
self.parent = parent
self.transformation = transformation
transformation.init_transformed_domain(self, parent)
def _compute_size(self):
return self.transformation.size_of_transformed_domain(self.parent.size)
def _make_iter(self, step):
return self.transformation.transform_iter(
self.parent.create_iter(step))
def _make_skip_iter(self, step):
return self.transformation.transform_skip_iter(
self.parent.create_skip_iter(step))
def _make_pipeline(self, method):
pipeline = self.parent._make_pipeline(method)
return pipeline._add_transformation(self.transformation)
def generate_one(self):
pipeline = self.parent.generate()._add_transformation(
self.transformation)
return pipeline.first().run()
skip1 = StepSkip(1)
from .product import Product # noqa
from .join import Join # noqa
from . import transform # noqa
from .pipeline import Pipeline # noqa
from .values import Values, CnfValues # noqa
from .sequence import Sequences # noqa
| {
"content_hash": "8ff1939c9dea272be942b84d337538cd",
"timestamp": "",
"source": "github",
"line_count": 440,
"max_line_length": 79,
"avg_line_length": 28.904545454545456,
"alnum_prop": 0.5496933480106935,
"repo_name": "Kobzol/haydi",
"id": "619bee6dcdd2f96dbe3e9a03fcca8728e1979851",
"size": "12718",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/haydi/base/domain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "186796"
},
{
"name": "Shell",
"bytes": "614"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: 5e737138aae2
Revises: b26756ac7225
Create Date: 2017-08-08 16:53:04.953458
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '5e737138aae2'
down_revision = 'b26756ac7225'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('posters', sa.Column('id_admin', postgresql.UUID(as_uuid=True), nullable=False))
op.create_unique_constraint(None, 'posters', ['id_admin'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'posters', type_='unique')
op.drop_column('posters', 'id_admin')
# ### end Alembic commands ###
| {
"content_hash": "c9825775dfecc1bea42c256406bb8025",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 98,
"avg_line_length": 27.9,
"alnum_prop": 0.6953405017921147,
"repo_name": "TailorDev/pauling",
"id": "8501570f799011e06da223396f52ec7b7924f5a1",
"size": "837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/migrations/versions/5e737138aae2_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3238"
},
{
"name": "HTML",
"bytes": "122789"
},
{
"name": "Java",
"bytes": "2017"
},
{
"name": "JavaScript",
"bytes": "52407"
},
{
"name": "Makefile",
"bytes": "2893"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Objective-C",
"bytes": "4524"
},
{
"name": "Python",
"bytes": "25209"
},
{
"name": "Ruby",
"bytes": "10058"
},
{
"name": "Shell",
"bytes": "1193"
}
],
"symlink_target": ""
} |
"""
Krakrobot Python Simulator
Simulator which runs the simulation and renders SVG frames.
"""
from Queue import Queue
import time
import numpy as np
import datetime
from math import (
pi
)
import traceback
from map import load_map
from misc.defines import *
from robot import Robot
from robot_controller import PythonTimedRobotController
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(funcName)s - %(asctime)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.propagate = False
class KrakrobotSimulator(object):
def __init__(self,
map,
robot_controller,
init_position=None,
steering_noise=0.01,
color_noise=10,
sonar_noise=0.1,
distance_noise=0.001,
forward_steering_drift=0,
measurement_noise=0.2,
speed=5.0,
turning_speed=0.4 * pi,
execution_cpu_time_limit=10.0,
simulation_time_limit=10.0,
simulation_dt=0.0,
frame_dt=0.1,
gps_delay=2.0,
collision_threshold=50,
iteration_write_frequency=1000,
command_line=True,
print_robot=True,
seed=777,
print_logger=False,
accepted_commands=[TURN, MOVE, BEEP, FINISH, SENSE_COLOR]
):
"""
        Construct a KrakrobotSimulator instance
        :param steering_noise - variance of steering in move
        :param distance_noise - variance of distance in move
        :param measurement_noise - variance of measurement (e.g. GPS)
        :param map - map for the robot simulator representing the maze, or a path to a map file
        :param init_position - starting position of the Robot (can be moved to map class) [x, y, heading]
        :param speed - distance travelled by one move action (cannot be bigger than 0.5, or it could traverse the walls)
        :param simulation_time_limit - limit in ms for the whole robot execution (including init)
        :param collision_threshold - maximum number of collisions after which the robot is destroyed
        :param simulation_dt - controls how finely the simulation is computed
        :param frame_dt - save a frame every dt
        :param robot_controller - RobotController class that will be simulated in the run procedure
"""
        if type(map) is str:
self.map = load_map(map)
for row in self.map['board']:
logger.info(row)
else:
self.map = map
self.iteration_write_frequency = iteration_write_frequency
self.collision_threshold = collision_threshold
if init_position is not None:
self.init_position = tuple(init_position)
else:
for i in xrange(self.map['N']):
for j in xrange(self.map['M']):
if self.map['board'][i][j] == MAP_START_POSITION:
self.init_position = (i + 0.5, j + 0.5, 0)
self.speed = speed
self.seed = seed
self.turning_speed = turning_speed
self.simulation_dt = simulation_dt
self.frame_dt = frame_dt
self.robot_controller = robot_controller
self.print_robot = print_robot
self.print_logger = print_logger
self.accepted_commands = accepted_commands
self.command_line = command_line
self.sonar_time = SONAR_TIME
self.gps_delay = gps_delay
self.light_sensor_time = LIGHT_SENSOR_TIME
self.simulation_time_limit = simulation_time_limit
self.execution_cpu_time_limit = execution_cpu_time_limit
        self.goal_threshold = 0.5  # When to declare the goal reached
self.color_noise = color_noise
self.sonar_noise = sonar_noise
self.distance_noise = distance_noise
self.forward_steering_drift = forward_steering_drift
self.measurement_noise = measurement_noise
self.steering_noise = steering_noise
self.reset()
# TODO: Disable logger printing when needed
if self.print_logger:
logger.propagate = True
else:
logger.propagate = False
for i in xrange(self.map['N']):
for j in xrange(self.map['M']):
if self.map['board'][i][j] == MAP_GOAL:
self.goal = (i, j)
def get_next_frame(self):
"""
@returns next frame of simulation data
        @note the queue is thread-safe and works in a consumer-producer
        fashion; the frames should be consumed by the rendering thread
"""
# if len(self.sim_frames) == 0: return None
return self.sim_frames.get()
def get_next_frame_nowait(self):
"""
@returns next frame of simulation data
@note Only get an item if one is immediately available. Otherwise
raise the Empty exception.
"""
return self.sim_frames.get_nowait()
def reset(self):
""" Reset state of the KrakrobotSimulator """
self.robot_path = []
self.collisions = []
self.results = None
self.goal_achieved = False
self.robot_timer = 0.0
self.sim_frames = Queue(100000)
self.finished = False
self.error = None
self.error_traceback = None
self.terminate_flag = False
self.logs = []
def run(self):
""" Runs simulations by quering the robot """
self.reset()
# Initialize robot object
robot = Robot(self.speed, self.turning_speed, self.gps_delay, self.sonar_time, TICK_MOVE, TICK_ROTATE, seed=self.seed)
robot.set(self.init_position[0], self.init_position[1], self.init_position[2])
robot.set_noise(new_s_noise=self.steering_noise,
new_d_noise=self.distance_noise,
new_m_noise=self.measurement_noise,
new_fs_drift=self.forward_steering_drift,
new_sonar_noise=self.sonar_noise,
new_c_noise=self.color_noise)
# Initialize robot controller object given by contestant
robot_controller = PythonTimedRobotController(self.robot_controller.clone())
robot_controller.init(x=self.init_position[0],
y=self.init_position[1],
angle=self.init_position[2],
steering_noise=robot.steering_noise,
distance_noise=robot.distance_noise,
forward_steering_drift=robot.forward_steering_drift,
speed=robot.speed,
turning_speed=robot.turning_speed,
execution_cpu_time_limit=self.execution_cpu_time_limit,
N=self.map['N'],
M=self.map['M'])
maximum_timedelta = datetime.timedelta(seconds=self.execution_cpu_time_limit)
self.robot_path.append((robot.x, robot.y))
        collision_counter = 0  # Only a limited number of collisions is allowed
frame_time_left = self.simulation_dt
frame_count = 0
current_command = None
iteration = 0
beeps = []
communicated_finished = False
try:
while not communicated_finished \
and not robot.time_elapsed >= self.simulation_time_limit \
and not self.terminate_flag:
if maximum_timedelta <= robot_controller.time_consumed:
raise KrakrobotException("Robot has exceeded CPU time limit")
if iteration % self.iteration_write_frequency == 0:
logger.info("Iteration {0}, produced {1} frames".format(iteration,
frame_count))
logger.info("Elapsed {0}".format(robot.time_elapsed))
logger.info("Current command: {}".format(current_command))
iteration += 1
if frame_time_left > self.frame_dt and not self.command_line:
### Save frame <=> last command took long ###
if len(self.robot_path) == 0 or \
robot.x != self.robot_path[-1][0] or robot.y != self.robot_path[-1][1]:
self.robot_path.append((robot.x, robot.y))
self.sim_frames.put(self._create_sim_data(robot, beeps))
frame_count += 1
frame_time_left -= self.frame_dt
if current_command is not None:
### Process current command ###
if current_command[0] == TURN:
robot = robot.turn(np.sign(current_command[1]))
frame_time_left += TICK_ROTATE / self.turning_speed
elif current_command[0] == MOVE:
robot_proposed = robot.move(np.sign(current_command[1]))
if not robot_proposed.check_collision(self.map['board']):
collision_counter += 1
self.collisions.append((robot_proposed.x, robot_proposed.y))
logger.error("Collision")
if collision_counter >= COLLISION_THRESHOLD:
raise KrakrobotException \
("The robot has been destroyed by a wall.")
else:
robot = robot_proposed
frame_time_left += TICK_MOVE / self.speed
else:
raise KrakrobotException("The robot hasn't supplied any command")
if current_command[1] == 0:
current_command = None
else:
current_command = [current_command[0], current_command[1] - np.sign(current_command[1])]
else:
### Get current command ###
command = None
try:
r, g, b = robot.sense_color(self.map)
robot_controller.on_sense_color(r, g, b)
command = robot_controller.act(robot.time_elapsed)
except Exception, e:
logger.error("Robot controller failed with exception " + str(e))
logger.error(traceback.format_exc())
raise KrakrobotException("Robot controller failed with exception " + str(e))
# logger.info("Robot timer "+str(robot.time_elapsed))
                    if not command:
raise KrakrobotException("No command returned from the robot controller")
command = list(command)
if len(command) == 0:
raise KrakrobotException("Zero length command returned from the robot controller")
if command[0] not in self.accepted_commands:
raise KrakrobotException("Not allowed command " + str(command[0]))
# Dispatch command
if command[0] == SENSE_GPS:
robot_controller.on_sense_gps(*robot.sense_gps())
frame_time_left += self.gps_delay
elif command[0] == WRITE_CONSOLE:
new_line = "{'frame': " + str(frame_count) + \
", 'time': " + str(robot.time_elapsed) + \
'}:\n' + command[1]
self.logs.append(new_line)
if self.print_robot:
print new_line
elif command[0] == SENSE_SONAR:
w = robot.sense_sonar(self.map['board'])
robot_controller.on_sense_sonar(w)
frame_time_left += self.sonar_time
elif command[0] == SENSE_COLOR:
r, g, b = robot.sense_color(self.map)
robot_controller.on_sense_color(r, g, b)
frame_time_left += self.light_sensor_time
elif command[0] == TURN:
if len(command) <= 1 or len(command) > 2:
raise KrakrobotException("Incorrect command length")
current_command = command
try:
current_command[1] = int(current_command[1])
except ValueError:
raise KrakrobotException("TURN: Incorrect argument type: expected int, got '{}'".format(current_command[1]))
elif command[0] == MOVE:
if len(command) <= 1 or len(command) > 2:
raise KrakrobotException("Incorrect command length")
current_command = command
try:
current_command[1] = int(current_command[1])
except ValueError:
raise KrakrobotException("MOVE: Incorrect argument type: expected int, got '{}'".format(current_command[1]))
elif command[0] == BEEP:
beeps.append((robot.x, robot.y, robot.time_elapsed))
elif command[0] == FINISH:
logger.info("Communicated finishing")
communicated_finished = True
else:
raise KrakrobotException("Not received command from act(), or command was incorrect")
except Exception, e:
logger.error("Simulation failed with exception " + str(e) + " after " + str(robot.time_elapsed) + " time")
self.error = str(e)
self.error_traceback = str(traceback.format_exc())
self.sim_frames.put(self._create_sim_data(robot, beeps))
while frame_time_left >= self.frame_dt and not self.command_line and not self.terminate_flag:
### Save frame <=> last command took long ###
self.sim_frames.put(self._create_sim_data(robot, beeps))
frame_time_left -= self.frame_dt
# Simulation process finished
self.finished = True
logger.info("Exiting")
self.results = None
try:
# Return simulation results
map_to_save = dict(self.map)
del map_to_save['color_bitmap']
self.results = {
"final_position": (robot.x, robot.y),
"sim_time": robot.time_elapsed,
"cpu_time": robot_controller.time_consumed.total_seconds(),
"error": self.error or False,
"error_traceback": self.error_traceback or False,
"finished": communicated_finished,
"beeps": beeps,
"map": map_to_save,
"parameters": {
"distance_noise": self.distance_noise,
"steering_noise": self.steering_noise,
"forward_steering_drift": self.forward_steering_drift,
"seed": self.seed,
}
}
# calculate points for this year's task
# if there was any error in the simulation
if self.error:
points = 0
task_time = self.simulation_time_limit
# if the robot didn't finish the task in the time limit
elif not communicated_finished:
points = 0
task_time = self.simulation_time_limit
# if the robot finished before the time limit
else:
                n = len(beeps)
# if the robot made more than 3 beeps
                if n > 3:
points = 0
else:
points = 0
beeps_sequence = ['red', 'green', 'blue']
for i, color in enumerate(beeps_sequence):
# if there were enough beeps and the i-th beep was at a field of the right color
if i < n and (int(beeps[i][0]), int(beeps[i][1])) in self.map[color]:
points += 1
task_time = robot.time_elapsed - beeps[0][2]
self.results['points'] = points
self.results['task_time'] = task_time
logger.info("Simulation ended after " + str(robot.time_elapsed) + " seconds, communicated_finish=" + str(
communicated_finished))
return self.results
except Exception, e:
self.results = None
logger.error("Failed constructing result " + str(e))
return {"error": str(e)}
def get_results(self):
return self.results
def get_logs(self):
return self.logs
def _create_sim_data(self, robot, beeps):
"""
@returns Descriptor that is sufficient to visualize current frame
"""
data = {}
        data['Sparks'] = list(beeps)  # omitting errors list(self.collisions)
data['ActualPath'] = list(self.robot_path)
data['ActualPosition'] = [robot.x, robot.y]
data['ActualOrientation'] = robot.orientation
data['Map'] = self.map
data['StartPos'] = self.init_position
return data
def terminate(self):
self.terminate_flag = True
self.robot_controller.terminate()
| {
"content_hash": "f949e7f094146694f085f5054baf517a",
"timestamp": "",
"source": "github",
"line_count": 426,
"max_line_length": 136,
"avg_line_length": 42.04694835680751,
"alnum_prop": 0.5227221974095578,
"repo_name": "uj-robotics/krakrobot2016-online",
"id": "4ecbc7f7482b8c0cc5e1df08cbc992d212952fbe",
"size": "17937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simulator/simulator.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "202647"
}
],
"symlink_target": ""
} |
"""
Set of functions that streamline controller initialization process
"""
import json
import os
from copy import deepcopy
import numpy as np
from .interpolators.linear_interpolator import LinearInterpolator
from .joint_pos import JointPositionController
from .joint_tor import JointTorqueController
from .joint_vel import JointVelocityController
from .osc import OperationalSpaceController
# Global var for linking pybullet server to multiple ik controller instances if necessary
pybullet_server = None
def reset_controllers():
"""
Global function for doing one-time clears and restarting of any global controller-related
specifics before re-initializing each individual controller again
"""
global pybullet_server
# Disconnect and reconnect to pybullet server if it exists
if pybullet_server is not None:
pybullet_server.disconnect()
pybullet_server.connect()
def get_pybullet_server():
"""
Getter to return reference to pybullet server module variable
Returns:
PyBulletServer: Server instance running PyBullet
"""
global pybullet_server
return pybullet_server
def load_controller_config(custom_fpath=None, default_controller=None):
"""
Utility function that loads the desired controller and returns the loaded configuration as a dict
If @default_controller is specified, any value inputted to @custom_fpath is overridden and the default controller
configuration is automatically loaded. See specific arg description below for available default controllers.
Args:
custom_fpath (str): Absolute filepath to the custom controller configuration .json file to be loaded
default_controller (str): If specified, overrides @custom_fpath and loads a default configuration file for the
specified controller.
Choices are: {"JOINT_POSITION", "JOINT_TORQUE", "JOINT_VELOCITY", "OSC_POSITION", "OSC_POSE", "IK_POSE"}
Returns:
dict: Controller configuration
Raises:
AssertionError: [Unknown default controller name]
AssertionError: [No controller specified]
"""
# First check if default controller is not None; if it is not, load the appropriate controller
if default_controller is not None:
# Assert that requested default controller is in the available default controllers
from robosuite.controllers import ALL_CONTROLLERS
assert (
default_controller in ALL_CONTROLLERS
), "Error: Unknown default controller specified. Requested {}, " "available controllers: {}".format(
default_controller, list(ALL_CONTROLLERS)
)
# Store the default controller config fpath associated with the requested controller
custom_fpath = os.path.join(
os.path.dirname(__file__), "..", "controllers/config/{}.json".format(default_controller.lower())
)
# Assert that the fpath to load the controller is not empty
assert custom_fpath is not None, "Error: Either custom_fpath or default_controller must be specified!"
# Attempt to load the controller
try:
with open(custom_fpath) as f:
controller_config = json.load(f)
except FileNotFoundError:
print("Error opening controller filepath at: {}. " "Please check filepath and try again.".format(custom_fpath))
# Return the loaded controller
return controller_config
def controller_factory(name, params):
"""
Generator for controllers
Creates a Controller instance with the provided @name and relevant @params.
Args:
name (str): the name of the controller. Must be one of: {JOINT_POSITION, JOINT_TORQUE, JOINT_VELOCITY,
OSC_POSITION, OSC_POSE, IK_POSE}
params (dict): dict containing the relevant params to pass to the controller
sim (MjSim): Mujoco sim reference to pass to the controller
Returns:
Controller: Controller instance
Raises:
ValueError: [unknown controller]
"""
interpolator = None
if params["interpolation"] == "linear":
interpolator = LinearInterpolator(
ndim=params["ndim"],
controller_freq=(1 / params["sim"].model.opt.timestep),
policy_freq=params["policy_freq"],
ramp_ratio=params["ramp_ratio"],
)
if name == "OSC_POSE":
ori_interpolator = None
if interpolator is not None:
interpolator.set_states(dim=3) # EE control uses dim 3 for pos and ori each
ori_interpolator = deepcopy(interpolator)
ori_interpolator.set_states(ori="euler")
params["control_ori"] = True
return OperationalSpaceController(interpolator_pos=interpolator, interpolator_ori=ori_interpolator, **params)
if name == "OSC_POSITION":
if interpolator is not None:
interpolator.set_states(dim=3) # EE control uses dim 3 for pos
params["control_ori"] = False
return OperationalSpaceController(interpolator_pos=interpolator, **params)
if name == "IK_POSE":
ori_interpolator = None
if interpolator is not None:
interpolator.set_states(dim=3) # EE IK control uses dim 3 for pos and dim 4 for ori
ori_interpolator = deepcopy(interpolator)
ori_interpolator.set_states(dim=4, ori="quat")
# Import pybullet server if necessary
global pybullet_server
from .ik import InverseKinematicsController
if pybullet_server is None:
from robosuite.controllers.ik import PyBulletServer
pybullet_server = PyBulletServer()
return InverseKinematicsController(
interpolator_pos=interpolator,
interpolator_ori=ori_interpolator,
bullet_server_id=pybullet_server.server_id,
**params,
)
if name == "JOINT_VELOCITY":
return JointVelocityController(interpolator=interpolator, **params)
if name == "JOINT_POSITION":
return JointPositionController(interpolator=interpolator, **params)
if name == "JOINT_TORQUE":
return JointTorqueController(interpolator=interpolator, **params)
raise ValueError("Unknown controller name: {}".format(name))
| {
"content_hash": "15ba187f819665f16c9df8c8c1a59166",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 119,
"avg_line_length": 37.226190476190474,
"alnum_prop": 0.6874000639590662,
"repo_name": "ARISE-Initiative/robosuite",
"id": "12eac96736b0dd4d181bf690b460cbf302b52162",
"size": "6254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "robosuite/controllers/controller_factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CMake",
"bytes": "552"
},
{
"name": "Python",
"bytes": "1197777"
}
],
"symlink_target": ""
} |
import re
import os
import sys
from inspect import ismethod
from time import sleep
from OSEncryptionState import *
class EncryptBlockDeviceState(OSEncryptionState):
def __init__(self, context):
super(EncryptBlockDeviceState, self).__init__('EncryptBlockDeviceState', context)
def should_enter(self):
self.context.logger.log("Verifying if machine should enter encrypt_block_device state")
if not super(EncryptBlockDeviceState, self).should_enter():
return False
self.context.logger.log("Performing enter checks for encrypt_block_device state")
return True
def enter(self):
if not self.should_enter():
return
self.context.logger.log("Entering encrypt_block_device state")
self.context.logger.log("Resizing " + self.rootfs_block_device)
current_rootfs_size = self._get_root_fs_size_in_sectors(sector_size=512)
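        # Leave 8192 sectors (4 MiB with 512-byte sectors) of headroom for the
        # LUKS header that cryptsetup-reencrypt will place before the data.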
desired_rootfs_size = current_rootfs_size - 8192
self.command_executor.Execute('resize2fs {0} {1}s'.format(self.rootfs_block_device, desired_rootfs_size), True)
self.command_executor.Execute('mount /boot', False)
# self._find_bek_and_execute_action('_dump_passphrase')
self.context.hutil.do_status_report(operation='EnableEncryptionDataVolumes',
status=CommonVariables.extension_success_status,
status_code=str(CommonVariables.success),
message='OS disk encryption started')
self._find_bek_and_execute_action('_luks_reencrypt')
def should_exit(self):
self.context.logger.log("Verifying if machine should exit encrypt_block_device state")
if not os.path.exists('/dev/mapper/osencrypt'):
self._find_bek_and_execute_action('_luks_open')
self.command_executor.Execute('mount /dev/mapper/osencrypt /oldroot', True)
self.command_executor.Execute('umount /oldroot', True)
return super(EncryptBlockDeviceState, self).should_exit()
def _luks_open(self, bek_path):
self.command_executor.Execute('cryptsetup luksOpen {0} osencrypt -d {1}'.format(self.rootfs_block_device, bek_path),
raise_exception_on_failure=True)
def _luks_reencrypt(self, bek_path):
self.command_executor.ExecuteInBash('cat {0} | cryptsetup-reencrypt -N --reduce-device-size 8192s {1} -v'.format(bek_path,
self.rootfs_block_device),
raise_exception_on_failure=True)
def _dump_passphrase(self, bek_path):
proc_comm = ProcessCommunicator()
self.command_executor.Execute(command_to_execute="od -c {0}".format(bek_path),
raise_exception_on_failure=True,
communicator=proc_comm)
self.context.logger.log("Passphrase:")
self.context.logger.log(proc_comm.stdout)
def _find_bek_and_execute_action(self, callback_method_name):
callback_method = getattr(self, callback_method_name)
if not ismethod(callback_method):
raise Exception("{0} is not a method".format(callback_method_name))
bek_path = self.bek_util.get_bek_passphrase_file(self.encryption_config)
callback_method(bek_path)
def _get_root_fs_size_in_sectors(self, sector_size):
proc_comm = ProcessCommunicator()
self.command_executor.Execute(command_to_execute="dumpe2fs -h {0}".format(self.rootfs_block_device),
raise_exception_on_failure=True,
communicator=proc_comm)
root_fs_block_count = re.findall(r'Block count:\s*(\d+)', proc_comm.stdout)
root_fs_block_size = re.findall(r'Block size:\s*(\d+)', proc_comm.stdout)
if not root_fs_block_count or not root_fs_block_size:
raise Exception("Error parsing dumpe2fs output, count={0}, size={1}".format(root_fs_block_count,
root_fs_block_size))
root_fs_block_count = int(root_fs_block_count[0])
root_fs_block_size = int(root_fs_block_size[0])
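        # Convert the size from filesystem blocks to disk sectors.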
return (root_fs_block_count * root_fs_block_size) / sector_size
| {
"content_hash": "3b63aa120ee6b1cca2c9b021a3ee6266",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 147,
"avg_line_length": 45.525252525252526,
"alnum_prop": 0.597293099622809,
"repo_name": "soumyanishan/azure-linux-extensions",
"id": "87cf31d7e04023da732cc625e0bf2cb0baaab6d0",
"size": "5168",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "VMEncryption/main/oscrypto/rhel_68/encryptstates/EncryptBlockDeviceState.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "75094"
},
{
"name": "C++",
"bytes": "1038084"
},
{
"name": "CMake",
"bytes": "11642"
},
{
"name": "JavaScript",
"bytes": "22883"
},
{
"name": "Makefile",
"bytes": "7385"
},
{
"name": "PowerShell",
"bytes": "24124"
},
{
"name": "Python",
"bytes": "4380432"
},
{
"name": "Roff",
"bytes": "3827"
},
{
"name": "Shell",
"bytes": "30126"
}
],
"symlink_target": ""
} |
import asyncio
import datetime
import json
import logging
import os
import socket
import sys
import traceback
import warnings
import psutil
from typing import List, Optional, Tuple
import ray
import ray._private.services
import ray._private.utils
from ray.dashboard.consts import (
GCS_RPC_TIMEOUT_SECONDS,
COMPONENT_METRICS_TAG_KEYS,
AVAILABLE_COMPONENT_NAMES_FOR_METRICS,
)
from ray.dashboard.modules.reporter.profile_manager import CpuProfilingManager
import ray.dashboard.modules.reporter.reporter_consts as reporter_consts
import ray.dashboard.utils as dashboard_utils
from opencensus.stats import stats as stats_module
import ray._private.prometheus_exporter as prometheus_exporter
from prometheus_client.core import REGISTRY
from ray._private.metrics_agent import Gauge, MetricsAgent, Record
from ray._private.ray_constants import DEBUG_AUTOSCALING_STATUS
from ray.core.generated import reporter_pb2, reporter_pb2_grpc
from ray.util.debug import log_once
from ray.dashboard import k8s_utils
from ray._raylet import WorkerID
logger = logging.getLogger(__name__)
enable_gpu_usage_check = True
# Are we in a K8s pod?
IN_KUBERNETES_POD = "KUBERNETES_SERVICE_HOST" in os.environ
# Flag to enable showing disk usage when running in a K8s pod,
# disk usage defined as the result of running psutil.disk_usage("/")
# in the Ray container.
ENABLE_K8S_DISK_USAGE = os.environ.get("RAY_DASHBOARD_ENABLE_K8S_DISK_USAGE") == "1"
# Try to determine if we're in a container.
IN_CONTAINER = os.path.exists("/sys/fs/cgroup")
# Using existence of /sys/fs/cgroup as the criterion is consistent with
# Ray's existing resource logic, see e.g. ray._private.utils.get_num_cpus().
try:
import gpustat.core as gpustat
except ModuleNotFoundError:
gpustat = None
if log_once("gpustat_import_warning"):
warnings.warn(
"`gpustat` package is not installed. GPU monitoring is "
"not available. To have full functionality of the "
"dashboard please install `pip install ray["
"default]`.)"
)
except ImportError as e:
gpustat = None
if log_once("gpustat_import_warning"):
warnings.warn(
"Importing gpustat failed, fix this to have full "
"functionality of the dashboard. The original error was:\n\n" + e.msg
)
def recursive_asdict(o):
if isinstance(o, tuple) and hasattr(o, "_asdict"):
return recursive_asdict(o._asdict())
    if isinstance(o, (tuple, list)):
        return [recursive_asdict(item) for item in o]
    if isinstance(o, dict):
        return {k: recursive_asdict(v) for k, v in o.items()}
return o
def jsonify_asdict(o) -> str:
return json.dumps(dashboard_utils.to_google_style(recursive_asdict(o)))
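# Illustrative sketch of what the two helpers above produce; the namedtuple
# here is hypothetical and only stands in for psutil's result types:
#
#   from collections import namedtuple
#   MemInfo = namedtuple("MemInfo", ["rss", "vms"])
#   recursive_asdict({"mem": MemInfo(rss=1024, vms=2048), "pids": [(1, 2)]})
#   # -> {"mem": {"rss": 1024, "vms": 2048}, "pids": [[1, 2]]}
#
# jsonify_asdict then serializes that dict with Google-style keys.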
# A list of gauges to record and export metrics.
METRICS_GAUGES = {
"node_cpu_utilization": Gauge(
"node_cpu_utilization",
"Total CPU usage on a ray node",
"percentage",
["ip", "SessionName"],
),
"node_cpu_count": Gauge(
"node_cpu_count",
"Total CPUs available on a ray node",
"cores",
["ip", "SessionName"],
),
"node_mem_used": Gauge(
"node_mem_used", "Memory usage on a ray node", "bytes", ["ip", "SessionName"]
),
"node_mem_available": Gauge(
"node_mem_available",
"Memory available on a ray node",
"bytes",
["ip", "SessionName"],
),
"node_mem_total": Gauge(
"node_mem_total", "Total memory on a ray node", "bytes", ["ip", "SessionName"]
),
"node_gpus_available": Gauge(
"node_gpus_available",
"Total GPUs available on a ray node",
"percentage",
["ip", "SessionName"],
),
"node_gpus_utilization": Gauge(
"node_gpus_utilization",
"Total GPUs usage on a ray node",
"percentage",
["ip", "SessionName"],
),
"node_gram_used": Gauge(
"node_gram_used",
"Total GPU RAM usage on a ray node",
"bytes",
["ip", "SessionName"],
),
"node_gram_available": Gauge(
"node_gram_available",
"Total GPU RAM available on a ray node",
"bytes",
["ip", "SessionName"],
),
"node_disk_io_read": Gauge(
"node_disk_io_read", "Total read from disk", "bytes", ["ip", "SessionName"]
),
"node_disk_io_write": Gauge(
"node_disk_io_write", "Total written to disk", "bytes", ["ip", "SessionName"]
),
"node_disk_io_read_count": Gauge(
"node_disk_io_read_count",
"Total read ops from disk",
"io",
["ip", "SessionName"],
),
"node_disk_io_write_count": Gauge(
"node_disk_io_write_count",
"Total write ops to disk",
"io",
["ip", "SessionName"],
),
"node_disk_io_read_speed": Gauge(
"node_disk_io_read_speed", "Disk read speed", "bytes/sec", ["ip", "SessionName"]
),
"node_disk_io_write_speed": Gauge(
"node_disk_io_write_speed",
"Disk write speed",
"bytes/sec",
["ip", "SessionName"],
),
"node_disk_read_iops": Gauge(
"node_disk_read_iops", "Disk read iops", "iops", ["ip", "SessionName"]
),
"node_disk_write_iops": Gauge(
"node_disk_write_iops", "Disk write iops", "iops", ["ip", "SessionName"]
),
"node_disk_usage": Gauge(
"node_disk_usage",
"Total disk usage (bytes) on a ray node",
"bytes",
["ip", "SessionName"],
),
"node_disk_free": Gauge(
"node_disk_free",
"Total disk free (bytes) on a ray node",
"bytes",
["ip", "SessionName"],
),
"node_disk_utilization_percentage": Gauge(
"node_disk_utilization_percentage",
"Total disk utilization (percentage) on a ray node",
"percentage",
["ip", "SessionName"],
),
"node_network_sent": Gauge(
"node_network_sent", "Total network sent", "bytes", ["ip", "SessionName"]
),
"node_network_received": Gauge(
"node_network_received",
"Total network received",
"bytes",
["ip", "SessionName"],
),
"node_network_send_speed": Gauge(
"node_network_send_speed",
"Network send speed",
"bytes/sec",
["ip", "SessionName"],
),
"node_network_receive_speed": Gauge(
"node_network_receive_speed",
"Network receive speed",
"bytes/sec",
["ip", "SessionName"],
),
"component_cpu_percentage": Gauge(
"component_cpu_percentage",
"Total CPU usage of the components on a node.",
"percentage",
COMPONENT_METRICS_TAG_KEYS,
),
"component_rss_mb": Gauge(
"component_rss_mb",
"RSS usage of all components on the node.",
"MB",
COMPONENT_METRICS_TAG_KEYS,
),
"component_uss_mb": Gauge(
"component_uss_mb",
"USS usage of all components on the node.",
"MB",
COMPONENT_METRICS_TAG_KEYS,
),
"cluster_active_nodes": Gauge(
"cluster_active_nodes",
"Active nodes on the cluster",
"count",
["node_type", "SessionName"],
),
"cluster_failed_nodes": Gauge(
"cluster_failed_nodes",
"Failed nodes on the cluster",
"count",
["node_type", "SessionName"],
),
"cluster_pending_nodes": Gauge(
"cluster_pending_nodes",
"Pending nodes on the cluster",
"count",
["node_type", "SessionName"],
),
}
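# Sketch of how these gauges are consumed (assumes only the Gauge/Record API
# imported above): each sample is wrapped in a Record pointing at one gauge,
# e.g. with an illustrative IP
#   Record(gauge=METRICS_GAUGES["node_cpu_utilization"],
#          value=12.5, tags={"ip": "10.0.0.1"})
# and batches of Records are later pushed through
# MetricsAgent.record_and_export(...) in _perform_iteration below.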
class ReporterAgent(
dashboard_utils.DashboardAgentModule, reporter_pb2_grpc.ReporterServiceServicer
):
"""A monitor process for monitoring Ray nodes.
Attributes:
dashboard_agent: The DashboardAgent object contains global config
"""
def __init__(self, dashboard_agent):
"""Initialize the reporter object."""
super().__init__(dashboard_agent)
if IN_KUBERNETES_POD or IN_CONTAINER:
# psutil does not give a meaningful logical cpu count when in a K8s pod, or
# in a container in general.
# Use ray._private.utils for this instead.
logical_cpu_count = ray._private.utils.get_num_cpus(
override_docker_cpu_warning=True
)
# (Override the docker warning to avoid dashboard log spam.)
# The dashboard expects a physical CPU count as well.
# This is not always meaningful in a container, but we will go ahead
# and give the dashboard what it wants using psutil.
physical_cpu_count = psutil.cpu_count(logical=False)
else:
logical_cpu_count = psutil.cpu_count()
physical_cpu_count = psutil.cpu_count(logical=False)
self._cpu_counts = (logical_cpu_count, physical_cpu_count)
self._gcs_aio_client = dashboard_agent.gcs_aio_client
self._ip = dashboard_agent.ip
self._log_dir = dashboard_agent.log_dir
self._is_head_node = self._ip == dashboard_agent.gcs_address.split(":")[0]
self._hostname = socket.gethostname()
# (pid, created_time) -> psutil.Process
self._workers = {}
self._network_stats_hist = [(0, (0.0, 0.0))] # time, (sent, recv)
self._disk_io_stats_hist = [
(0, (0.0, 0.0, 0, 0))
] # time, (bytes read, bytes written, read ops, write ops)
self._metrics_collection_disabled = dashboard_agent.metrics_collection_disabled
self._metrics_agent = None
self._session_name = dashboard_agent.session_name
if not self._metrics_collection_disabled:
try:
stats_exporter = prometheus_exporter.new_stats_exporter(
prometheus_exporter.Options(
namespace="ray",
port=dashboard_agent.metrics_export_port,
address="127.0.0.1" if self._ip == "127.0.0.1" else "",
)
)
except Exception:
# TODO(SongGuyang): Catch the exception here because there is
# port conflict issue which brought from static port. We should
# remove this after we find better port resolution.
logger.exception(
"Failed to start prometheus stats exporter. Agent will stay "
"alive but disable the stats."
)
stats_exporter = None
self._metrics_agent = MetricsAgent(
stats_module.stats.view_manager,
stats_module.stats.stats_recorder,
stats_exporter,
)
if self._metrics_agent.proxy_exporter_collector:
# proxy_exporter_collector is None
# if Prometheus server is not started.
REGISTRY.register(self._metrics_agent.proxy_exporter_collector)
self._key = (
f"{reporter_consts.REPORTER_PREFIX}" f"{self._dashboard_agent.node_id}"
)
async def GetTraceback(self, request, context):
pid = request.pid
p = CpuProfilingManager(self._log_dir)
success, output = await p.trace_dump(pid)
return reporter_pb2.GetTracebackReply(output=output, success=success)
async def CpuProfiling(self, request, context):
pid = request.pid
duration = request.duration
format = request.format
p = CpuProfilingManager(self._log_dir)
success, output = await p.cpu_profile(pid, format=format, duration=duration)
return reporter_pb2.CpuProfilingReply(output=output, success=success)
async def ReportOCMetrics(self, request, context):
# Do nothing if metrics collection is disabled.
if self._metrics_collection_disabled:
return reporter_pb2.ReportOCMetricsReply()
# This function receives a GRPC containing OpenCensus (OC) metrics
# from a Ray process, then exposes those metrics to Prometheus.
try:
worker_id = WorkerID(request.worker_id)
worker_id = None if worker_id.is_nil() else worker_id.hex()
self._metrics_agent.proxy_export_metrics(request.metrics, worker_id)
except Exception:
logger.error(traceback.format_exc())
return reporter_pb2.ReportOCMetricsReply()
@staticmethod
def _get_cpu_percent(in_k8s: bool):
if in_k8s:
return k8s_utils.cpu_percent()
else:
return psutil.cpu_percent()
@staticmethod
def _get_gpu_usage():
global enable_gpu_usage_check
if gpustat is None or not enable_gpu_usage_check:
return []
gpu_utilizations = []
gpus = []
try:
gpus = gpustat.new_query().gpus
except Exception as e:
logger.debug(f"gpustat failed to retrieve GPU information: {e}")
# gpustat calls pynvml.nvmlInit()
# On machines without GPUs, this can run subprocesses that spew to
# stderr. Then with log_to_driver=True, we get log spew from every
# single raylet. To avoid this, disable the GPU usage check on
# certain errors.
# https://github.com/ray-project/ray/issues/14305
# https://github.com/ray-project/ray/pull/21686
if type(e).__name__ == "NVMLError_DriverNotLoaded":
enable_gpu_usage_check = False
for gpu in gpus:
# Note the keys in this dict have periods which throws
# off javascript so we change .s to _s
gpu_data = {"_".join(key.split(".")): val for key, val in gpu.entry.items()}
gpu_utilizations.append(gpu_data)
return gpu_utilizations
@staticmethod
def _get_boot_time():
if IN_KUBERNETES_POD:
# Return start time of container entrypoint
return psutil.Process(pid=1).create_time()
else:
return psutil.boot_time()
@staticmethod
def _get_network_stats():
ifaces = [
v for k, v in psutil.net_io_counters(pernic=True).items() if k[0] == "e"
]
sent = sum((iface.bytes_sent for iface in ifaces))
recv = sum((iface.bytes_recv for iface in ifaces))
return sent, recv
@staticmethod
def _get_mem_usage():
total = ray._private.utils.get_system_memory()
used = ray._private.utils.get_used_memory()
available = total - used
percent = round(used / total, 3) * 100
return total, available, percent, used
@staticmethod
def _get_disk_usage():
if IN_KUBERNETES_POD and not ENABLE_K8S_DISK_USAGE:
# If in a K8s pod, disable disk display by passing in dummy values.
return {
"/": psutil._common.sdiskusage(total=1, used=0, free=1, percent=0.0)
}
if sys.platform == "win32":
root = psutil.disk_partitions()[0].mountpoint
else:
root = os.sep
tmp = ray._private.utils.get_user_temp_dir()
return {
"/": psutil.disk_usage(root),
tmp: psutil.disk_usage(tmp),
}
@staticmethod
def _get_disk_io_stats():
stats = psutil.disk_io_counters()
# stats can be None or {} if the machine is diskless.
# https://psutil.readthedocs.io/en/latest/#psutil.disk_io_counters
if not stats:
return (0, 0, 0, 0)
else:
return (
stats.read_bytes,
stats.write_bytes,
stats.read_count,
stats.write_count,
)
def _get_agent_proc(self) -> psutil.Process:
# Agent is the current process.
# This method is not necessary, but we have it for mock testing.
return psutil.Process()
def _generate_worker_key(self, proc: psutil.Process) -> Tuple[int, float]:
return (proc.pid, proc.create_time())
def _get_workers(self):
raylet_proc = self._get_raylet_proc()
if raylet_proc is None:
return []
else:
workers = {
self._generate_worker_key(proc): proc for proc in raylet_proc.children()
}
            # Keep the psutil.Process objects from `raylet_proc.children()`
            # cached on `self`, because the first `cpu_percent` call on a
            # fresh Process object returns a meaningless 0.
            # See more: https://github.com/ray-project/ray/issues/29848
keys_to_pop = []
# Add all new workers.
for key, worker in workers.items():
if key not in self._workers:
self._workers[key] = worker
# Pop out stale workers.
for key in self._workers:
if key not in workers:
keys_to_pop.append(key)
for k in keys_to_pop:
self._workers.pop(k)
# Remove the current process (reporter agent), which is also a child of
# the Raylet.
self._workers.pop(self._generate_worker_key(self._get_agent_proc()))
return [
w.as_dict(
attrs=[
"pid",
"create_time",
"cpu_percent",
"cpu_times",
"cmdline",
"memory_info",
"memory_full_info",
]
)
for w in self._workers.values()
if w.status() != psutil.STATUS_ZOMBIE
]
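    # Illustrative note on the caching above (psutil semantics only): the
    # first cpu_percent() call on a fresh Process has no reference interval,
    # e.g.
    #   p = psutil.Process()
    #   p.cpu_percent()   # -> 0.0 on the first call
    #   p.cpu_percent()   # -> meaningful value on later calls
    # which is why worker Process objects are kept across iterations.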
@staticmethod
def _get_raylet_proc():
try:
curr_proc = psutil.Process()
# Here, parent is always raylet because the
# dashboard agent is a child of the raylet process.
parent = curr_proc.parent()
if parent is not None:
if parent.pid == 1:
return None
if parent.status() == psutil.STATUS_ZOMBIE:
return None
return parent
except (psutil.AccessDenied, ProcessLookupError):
pass
return None
def _get_raylet(self):
raylet_proc = self._get_raylet_proc()
if raylet_proc is None:
return {}
else:
return raylet_proc.as_dict(
attrs=[
"pid",
"create_time",
"cpu_percent",
"cpu_times",
"cmdline",
"memory_info",
"memory_full_info",
]
)
def _get_agent(self):
# Current proc == agent proc
agent_proc = psutil.Process()
return agent_proc.as_dict(
attrs=[
"pid",
"create_time",
"cpu_percent",
"cpu_times",
"cmdline",
"memory_info",
"memory_full_info",
]
)
def _get_load_avg(self):
if sys.platform == "win32":
cpu_percent = psutil.cpu_percent()
load = (cpu_percent, cpu_percent, cpu_percent)
else:
load = os.getloadavg()
per_cpu_load = tuple((round(x / self._cpu_counts[0], 2) for x in load))
return load, per_cpu_load
@staticmethod
def _compute_speed_from_hist(hist):
while len(hist) > 7:
hist.pop(0)
then, prev_stats = hist[0]
now, now_stats = hist[-1]
time_delta = now - then
return tuple((y - x) / time_delta for x, y in zip(prev_stats, now_stats))
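    # Worked example for _compute_speed_from_hist (illustrative numbers): with
    # hist = [(100.0, (1000.0, 2000.0)), (106.0, (7000.0, 5000.0))], the
    # cumulative counters grew by (6000.0, 3000.0) over 6 seconds, so the
    # returned speeds are (1000.0, 500.0) bytes/sec. The history is capped at
    # 7 samples, so speeds are smoothed over the span of the last 7 readings.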
def _get_all_stats(self):
now = dashboard_utils.to_posix_time(datetime.datetime.utcnow())
network_stats = self._get_network_stats()
self._network_stats_hist.append((now, network_stats))
network_speed_stats = self._compute_speed_from_hist(self._network_stats_hist)
disk_stats = self._get_disk_io_stats()
self._disk_io_stats_hist.append((now, disk_stats))
disk_speed_stats = self._compute_speed_from_hist(self._disk_io_stats_hist)
return {
"now": now,
"hostname": self._hostname,
"ip": self._ip,
"cpu": self._get_cpu_percent(IN_KUBERNETES_POD),
"cpus": self._cpu_counts,
"mem": self._get_mem_usage(),
"workers": self._get_workers(),
"raylet": self._get_raylet(),
"agent": self._get_agent(),
"bootTime": self._get_boot_time(),
"loadAvg": self._get_load_avg(),
"disk": self._get_disk_usage(),
"disk_io": disk_stats,
"disk_io_speed": disk_speed_stats,
"gpus": self._get_gpu_usage(),
"network": network_stats,
"network_speed": network_speed_stats,
# Deprecated field, should be removed with frontend.
"cmdline": self._get_raylet().get("cmdline", []),
}
def _record_stats(self, stats, cluster_stats):
records_reported = []
ip = stats["ip"]
# -- Instance count of cluster --
# Only report cluster stats on head node
if "autoscaler_report" in cluster_stats and self._is_head_node:
active_nodes = cluster_stats["autoscaler_report"]["active_nodes"]
for node_type, active_node_count in active_nodes.items():
records_reported.append(
Record(
gauge=METRICS_GAUGES["cluster_active_nodes"],
value=active_node_count,
tags={"node_type": node_type},
)
)
failed_nodes = cluster_stats["autoscaler_report"]["failed_nodes"]
failed_nodes_dict = {}
for node_ip, node_type in failed_nodes:
if node_type in failed_nodes_dict:
failed_nodes_dict[node_type] += 1
else:
failed_nodes_dict[node_type] = 1
for node_type, failed_node_count in failed_nodes_dict.items():
records_reported.append(
Record(
gauge=METRICS_GAUGES["cluster_failed_nodes"],
value=failed_node_count,
tags={"node_type": node_type},
)
)
pending_nodes = cluster_stats["autoscaler_report"]["pending_nodes"]
pending_nodes_dict = {}
for node_ip, node_type, status_message in pending_nodes:
if node_type in pending_nodes_dict:
pending_nodes_dict[node_type] += 1
else:
pending_nodes_dict[node_type] = 1
for node_type, pending_node_count in pending_nodes_dict.items():
records_reported.append(
Record(
gauge=METRICS_GAUGES["cluster_pending_nodes"],
value=pending_node_count,
tags={"node_type": node_type},
)
)
# -- CPU per node --
cpu_usage = float(stats["cpu"])
cpu_record = Record(
gauge=METRICS_GAUGES["node_cpu_utilization"],
value=cpu_usage,
tags={"ip": ip},
)
cpu_count, _ = stats["cpus"]
cpu_count_record = Record(
gauge=METRICS_GAUGES["node_cpu_count"], value=cpu_count, tags={"ip": ip}
)
# -- Mem per node --
mem_total, mem_available, _, mem_used = stats["mem"]
mem_used_record = Record(
gauge=METRICS_GAUGES["node_mem_used"], value=mem_used, tags={"ip": ip}
)
mem_available_record = Record(
gauge=METRICS_GAUGES["node_mem_available"],
value=mem_available,
tags={"ip": ip},
)
mem_total_record = Record(
gauge=METRICS_GAUGES["node_mem_total"], value=mem_total, tags={"ip": ip}
)
# -- GPU per node --
gpus = stats["gpus"]
gpus_available = len(gpus)
if gpus_available:
gpus_utilization, gram_used, gram_total = 0, 0, 0
for gpu in gpus:
# Consume GPU may not report its utilization.
if gpu["utilization_gpu"] is not None:
gpus_utilization += gpu["utilization_gpu"]
gram_used += gpu["memory_used"]
gram_total += gpu["memory_total"]
gram_available = gram_total - gram_used
gpus_available_record = Record(
gauge=METRICS_GAUGES["node_gpus_available"],
value=gpus_available,
tags={"ip": ip},
)
gpus_utilization_record = Record(
gauge=METRICS_GAUGES["node_gpus_utilization"],
value=gpus_utilization,
tags={"ip": ip},
)
gram_used_record = Record(
gauge=METRICS_GAUGES["node_gram_used"], value=gram_used, tags={"ip": ip}
)
gram_available_record = Record(
gauge=METRICS_GAUGES["node_gram_available"],
value=gram_available,
tags={"ip": ip},
)
records_reported.extend(
[
gpus_available_record,
gpus_utilization_record,
gram_used_record,
gram_available_record,
]
)
# -- Disk per node --
disk_io_stats = stats["disk_io"]
disk_read_record = Record(
gauge=METRICS_GAUGES["node_disk_io_read"],
value=disk_io_stats[0],
tags={"ip": ip},
)
disk_write_record = Record(
gauge=METRICS_GAUGES["node_disk_io_write"],
value=disk_io_stats[1],
tags={"ip": ip},
)
disk_read_count_record = Record(
gauge=METRICS_GAUGES["node_disk_io_read_count"],
value=disk_io_stats[2],
tags={"ip": ip},
)
disk_write_count_record = Record(
gauge=METRICS_GAUGES["node_disk_io_write_count"],
value=disk_io_stats[3],
tags={"ip": ip},
)
disk_io_speed_stats = stats["disk_io_speed"]
disk_read_speed_record = Record(
gauge=METRICS_GAUGES["node_disk_io_read_speed"],
value=disk_io_speed_stats[0],
tags={"ip": ip},
)
disk_write_speed_record = Record(
gauge=METRICS_GAUGES["node_disk_io_write_speed"],
value=disk_io_speed_stats[1],
tags={"ip": ip},
)
disk_read_iops_record = Record(
gauge=METRICS_GAUGES["node_disk_read_iops"],
value=disk_io_speed_stats[2],
tags={"ip": ip},
)
disk_write_iops_record = Record(
gauge=METRICS_GAUGES["node_disk_write_iops"],
value=disk_io_speed_stats[3],
tags={"ip": ip},
)
used, free = 0, 0
for entry in stats["disk"].values():
used += entry.used
free += entry.free
disk_utilization = float(used / (used + free)) * 100
disk_usage_record = Record(
gauge=METRICS_GAUGES["node_disk_usage"], value=used, tags={"ip": ip}
)
disk_free_record = Record(
gauge=METRICS_GAUGES["node_disk_free"], value=free, tags={"ip": ip}
)
disk_utilization_percentage_record = Record(
gauge=METRICS_GAUGES["node_disk_utilization_percentage"],
value=disk_utilization,
tags={"ip": ip},
)
# -- Network speed (send/receive) stats per node --
network_stats = stats["network"]
network_sent_record = Record(
gauge=METRICS_GAUGES["node_network_sent"],
value=network_stats[0],
tags={"ip": ip},
)
network_received_record = Record(
gauge=METRICS_GAUGES["node_network_received"],
value=network_stats[1],
tags={"ip": ip},
)
# -- Network speed (send/receive) per node --
network_speed_stats = stats["network_speed"]
network_send_speed_record = Record(
gauge=METRICS_GAUGES["node_network_send_speed"],
value=network_speed_stats[0],
tags={"ip": ip},
)
network_receive_speed_record = Record(
gauge=METRICS_GAUGES["node_network_receive_speed"],
value=network_speed_stats[1],
tags={"ip": ip},
)
"""
Record system stats.
"""
def record_system_stats(
stats: List[dict], component_name: str, pid: Optional[str] = None
) -> List[Record]:
assert component_name in AVAILABLE_COMPONENT_NAMES_FOR_METRICS
records = []
total_cpu_percentage = 0.0
total_rss = 0.0
total_uss = 0.0
for stat in stats:
total_cpu_percentage += float(stat["cpu_percent"]) * 100.0
total_rss += float(stat["memory_info"].rss) / 1.0e6
mem_full_info = stat.get("memory_full_info")
if mem_full_info is not None:
total_uss += float(mem_full_info.uss) / 1.0e6
tags = {"ip": ip, "Component": component_name}
if pid:
tags["pid"] = pid
records.append(
Record(
gauge=METRICS_GAUGES["component_cpu_percentage"],
value=total_cpu_percentage,
tags=tags,
)
)
records.append(
Record(
gauge=METRICS_GAUGES["component_rss_mb"],
value=total_rss,
tags=tags,
)
)
if total_uss > 0.0:
records.append(
Record(
gauge=METRICS_GAUGES["component_uss_mb"],
value=total_uss,
tags=tags,
)
)
return records
# Record component metrics.
raylet_stats = stats["raylet"]
if raylet_stats:
raylet_pid = str(raylet_stats["pid"])
records_reported.extend(
record_system_stats([raylet_stats], "raylet", pid=raylet_pid)
)
workers_stats = stats["workers"]
if workers_stats:
# TODO(sang): Maybe we can report per worker memory usage.
records_reported.extend(record_system_stats(workers_stats, "workers"))
agent_stats = stats["agent"]
if agent_stats:
agent_pid = str(agent_stats["pid"])
records_reported.extend(
record_system_stats([agent_stats], "agent", pid=agent_pid)
)
# TODO(sang): Record GCS metrics.
# NOTE: Dashboard metrics is recorded within the dashboard because
# it can be deployed as a standalone instance. It shouldn't
# depend on the agent.
records_reported.extend(
[
cpu_record,
cpu_count_record,
mem_used_record,
mem_available_record,
mem_total_record,
disk_read_record,
disk_write_record,
disk_read_count_record,
disk_write_count_record,
disk_read_speed_record,
disk_write_speed_record,
disk_read_iops_record,
disk_write_iops_record,
disk_usage_record,
disk_free_record,
disk_utilization_percentage_record,
network_sent_record,
network_received_record,
network_send_speed_record,
network_receive_speed_record,
]
)
return records_reported
async def _perform_iteration(self, publisher):
"""Get any changes to the log files and push updates to kv."""
while True:
try:
formatted_status_string = await self._gcs_aio_client.internal_kv_get(
DEBUG_AUTOSCALING_STATUS.encode(),
None,
timeout=GCS_RPC_TIMEOUT_SECONDS,
)
stats = self._get_all_stats()
# Report stats only when metrics collection is enabled.
if not self._metrics_collection_disabled:
cluster_stats = (
json.loads(formatted_status_string.decode())
if formatted_status_string
else {}
)
records_reported = self._record_stats(stats, cluster_stats)
self._metrics_agent.record_and_export(
records_reported,
global_tags={"SessionName": self._session_name},
)
self._metrics_agent.clean_all_dead_worker_metrics()
await publisher.publish_resource_usage(self._key, jsonify_asdict(stats))
except Exception:
logger.exception("Error publishing node physical stats.")
await asyncio.sleep(reporter_consts.REPORTER_UPDATE_INTERVAL_MS / 1000)
async def run(self, server):
if server:
reporter_pb2_grpc.add_ReporterServiceServicer_to_server(self, server)
await self._perform_iteration(self._dashboard_agent.publisher)
@staticmethod
def is_minimal_module():
return False
| {
"content_hash": "62e08fe33767d4cfc409e523aeb78750",
"timestamp": "",
"source": "github",
"line_count": 945,
"max_line_length": 88,
"avg_line_length": 35.81269841269841,
"alnum_prop": 0.5368613893567356,
"repo_name": "ray-project/ray",
"id": "38826aa41be45130e0bf45ce3b223dda334c5193",
"size": "33843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dashboard/modules/reporter/reporter_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "37490"
},
{
"name": "C++",
"bytes": "5972422"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Cython",
"bytes": "227477"
},
{
"name": "Dockerfile",
"bytes": "20210"
},
{
"name": "HTML",
"bytes": "30382"
},
{
"name": "Java",
"bytes": "1160849"
},
{
"name": "JavaScript",
"bytes": "1128"
},
{
"name": "Jinja",
"bytes": "6371"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "PowerShell",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "19539109"
},
{
"name": "Shell",
"bytes": "134583"
},
{
"name": "Starlark",
"bytes": "334862"
},
{
"name": "TypeScript",
"bytes": "190599"
}
],
"symlink_target": ""
} |
"""
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
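# A minimal sketch of the out-of-core pattern this example builds on (names
# here are hypothetical; the full pipeline follows below):
#
#   vec = HashingVectorizer(n_features=2 ** 18)  # stateless, fixed space
#   clf = SGDClassifier()
#   for X_text, y in iter_minibatches(stream, 1000):
#       clf.partial_fit(vec.transform(X_text), y, classes=np.array([0, 1]))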
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals import six
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
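# Usage sketch (illustrative): iterating over mini-batches keeps at most
# `minibatch_size` raw documents in memory at any time:
#
#   for X_text, y in iter_minibatches(stream_reuters_documents(), 500):
#       pass  # each X_text holds at most 500 raw document strings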
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
# Discard an additional batch of documents from the stream (the held-out
# test documents were already consumed from `data_stream` above).
get_minibatch(data_stream, n_test_documents)
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the iterator over mini-batches of parsed Reuters documents, drawn
# lazily from data_stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop: iterate over mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
#fig = plt.gcf()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| {
"content_hash": "75aced925337c4f03947f330e63925e8",
"timestamp": "",
"source": "github",
"line_count": 416,
"max_line_length": 79,
"avg_line_length": 33.58894230769231,
"alnum_prop": 0.6186216274243184,
"repo_name": "rahul-c1/scikit-learn",
"id": "61e899053a6a3980015e3ceb5935b481e4b4aa53",
"size": "13973",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "examples/applications/plot_out_of_core_classification.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "C",
"bytes": "384858"
},
{
"name": "C++",
"bytes": "137519"
},
{
"name": "Makefile",
"bytes": "1364"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "4692671"
},
{
"name": "Shell",
"bytes": "3861"
}
],
"symlink_target": ""
} |
"""The tests for the sun automation."""
from datetime import datetime
from unittest.mock import patch
import pytest
from homeassistant.components import sun
import homeassistant.components.automation as automation
from homeassistant.const import (
ATTR_ENTITY_ID,
ENTITY_MATCH_ALL,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SUN_EVENT_SUNRISE,
SUN_EVENT_SUNSET,
)
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed, async_mock_service, mock_component
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
ORIG_TIME_ZONE = dt_util.DEFAULT_TIME_ZONE
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
@pytest.fixture(autouse=True)
def setup_comp(hass):
    """Initialize components and restore the default time zone on teardown."""
    mock_component(hass, "group")
    hass.config.set_time_zone(hass.config.time_zone)
    hass.loop.run_until_complete(
        async_setup_component(hass, sun.DOMAIN, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}})
    )
    yield
    dt_util.set_default_time_zone(ORIG_TIME_ZONE)
async def test_sunset_trigger(hass, calls, legacy_patchable_time):
"""Test the sunset trigger."""
now = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 2, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "sun", "event": SUN_EVENT_SUNSET},
"action": {
"service": "test.automation",
"data_template": {"id": "{{ trigger.id}}"},
},
}
},
)
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
blocking=True,
)
async_fire_time_changed(hass, trigger_time)
await hass.async_block_till_done()
assert len(calls) == 0
with patch("homeassistant.util.dt.utcnow", return_value=now):
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
blocking=True,
)
async_fire_time_changed(hass, trigger_time)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["id"] == 0
async def test_sunrise_trigger(hass, calls, legacy_patchable_time):
"""Test the sunrise trigger."""
now = datetime(2015, 9, 13, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 14, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "sun", "event": SUN_EVENT_SUNRISE},
"action": {"service": "test.automation"},
}
},
)
async_fire_time_changed(hass, trigger_time)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_sunset_trigger_with_offset(hass, calls, legacy_patchable_time):
"""Test the sunset trigger with offset."""
now = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 2, 30, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "sun",
"event": SUN_EVENT_SUNSET,
"offset": "0:30:00",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event", "offset"))
},
},
}
},
)
async_fire_time_changed(hass, trigger_time)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "sun - sunset - 0:30:00"
async def test_sunrise_trigger_with_offset(hass, calls, legacy_patchable_time):
"""Test the sunrise trigger with offset."""
now = datetime(2015, 9, 13, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 13, 30, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "sun",
"event": SUN_EVENT_SUNRISE,
"offset": "-0:30:00",
},
"action": {"service": "test.automation"},
}
},
)
async_fire_time_changed(hass, trigger_time)
await hass.async_block_till_done()
assert len(calls) == 1
| {
"content_hash": "1cacbf32374192ce8a00056efa8bd4ff",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 86,
"avg_line_length": 31.88235294117647,
"alnum_prop": 0.553690036900369,
"repo_name": "lukas-hetzenecker/home-assistant",
"id": "f100fb53dc8a80ab41dc4fb921efcc11904d880b",
"size": "5420",
"binary": false,
"copies": "8",
"ref": "refs/heads/dev",
"path": "tests/components/sun/test_trigger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38023745"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
"""Project action implementations"""
import six
from keystoneauth1 import exceptions as ks_exc
from eclcli.common import command
from eclcli.common import parseractions
from eclcli.common import utils
from eclcli.i18n import _ # noqa
from eclcli.identity import common
class CreateProject(command.ShowOne):
"""Create new project"""
def get_parser(self, prog_name):
parser = super(CreateProject, self).get_parser(prog_name)
parser.add_argument(
'name',
metavar='<project-name>',
help='New project name',
)
parser.add_argument(
'--domain',
metavar='<domain>',
help='Domain owning the project (name or ID)',
)
parser.add_argument(
'--parent',
metavar='<project>',
help='Parent of the project (name or ID)',
)
parser.add_argument(
'--description',
metavar='<description>',
help='Project description',
)
enable_group = parser.add_mutually_exclusive_group()
enable_group.add_argument(
'--enable',
action='store_true',
help='Enable project',
)
enable_group.add_argument(
'--disable',
action='store_true',
help='Disable project',
)
parser.add_argument(
'--property',
metavar='<key=value>',
action=parseractions.KeyValueAction,
help='Add a property to <name> '
'(repeat option to set multiple properties)',
)
parser.add_argument(
'--or-show',
action='store_true',
help=_('Return existing project'),
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
domain = None
if parsed_args.domain:
domain = common.find_domain(identity_client,
parsed_args.domain).id
parent = None
if parsed_args.parent:
parent = utils.find_resource(
identity_client.projects,
parsed_args.parent,
).id
enabled = True
if parsed_args.disable:
enabled = False
kwargs = {}
if parsed_args.property:
kwargs = parsed_args.property.copy()
try:
project = identity_client.projects.create(
name=parsed_args.name,
domain=domain,
parent=parent,
description=parsed_args.description,
enabled=enabled,
**kwargs
)
except ks_exc.Conflict as e:
if parsed_args.or_show:
project = utils.find_resource(identity_client.projects,
parsed_args.name,
domain_id=domain)
self.log.info('Returning existing project %s', project.name)
else:
raise e
project._info.pop('links')
return zip(*sorted(six.iteritems(project._info)))
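    # Note: ShowOne consumes (column_names, values) pairs; the zip(*sorted(...))
    # idiom above transposes the info dict, e.g. (illustrative values):
    #   sorted({'id': 'abc', 'name': 'demo'}.items())
    #       -> [('id', 'abc'), ('name', 'demo')]
    #   zip(*that) -> ('id', 'name'), ('abc', 'demo')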
class DeleteProject(command.Command):
"""Delete project(s)"""
def get_parser(self, prog_name):
parser = super(DeleteProject, self).get_parser(prog_name)
parser.add_argument(
'projects',
metavar='<project>',
nargs="+",
help='Project(s) to delete (name or ID)',
)
parser.add_argument(
'--domain',
metavar='<domain>',
help='Domain owning <project> (name or ID)',
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
domain = None
if parsed_args.domain:
domain = common.find_domain(identity_client, parsed_args.domain)
for project in parsed_args.projects:
if domain is not None:
project_obj = utils.find_resource(identity_client.projects,
project,
domain_id=domain.id)
else:
project_obj = utils.find_resource(identity_client.projects,
project)
identity_client.projects.delete(project_obj.id)
class ListProject(command.Lister):
"""List projects"""
def get_parser(self, prog_name):
parser = super(ListProject, self).get_parser(prog_name)
parser.add_argument(
'--domain',
metavar='<domain>',
help='Filter projects by <domain> (name or ID)',
)
parser.add_argument(
'--user',
metavar='<user>',
help='Filter projects by <user> (name or ID)',
)
parser.add_argument(
'--long',
action='store_true',
default=False,
help='List additional fields in output',
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
if parsed_args.long:
columns = ('ID', 'Name', 'Domain ID', 'Description', 'Enabled')
else:
columns = ('ID', 'Name')
kwargs = {}
domain_id = None
if parsed_args.domain:
domain_id = common.find_domain(identity_client,
parsed_args.domain).id
kwargs['domain'] = domain_id
if parsed_args.user:
if parsed_args.domain:
user_id = utils.find_resource(identity_client.users,
parsed_args.user,
domain_id=domain_id).id
else:
user_id = utils.find_resource(identity_client.users,
parsed_args.user).id
kwargs['user'] = user_id
data = identity_client.projects.list(**kwargs)
return (columns,
(utils.get_item_properties(
s, columns,
formatters={},
) for s in data))
class SetProject(command.Command):
"""Set project properties"""
def get_parser(self, prog_name):
parser = super(SetProject, self).get_parser(prog_name)
parser.add_argument(
'project',
metavar='<project>',
help='Project to modify (name or ID)',
)
parser.add_argument(
'--name',
metavar='<name>',
help='Set project name',
)
parser.add_argument(
'--domain',
metavar='<domain>',
help='Domain owning <project> (name or ID)',
)
parser.add_argument(
'--description',
metavar='<description>',
help='Set project description',
)
enable_group = parser.add_mutually_exclusive_group()
enable_group.add_argument(
'--enable',
action='store_true',
help='Enable project',
)
enable_group.add_argument(
'--disable',
action='store_true',
help='Disable project',
)
parser.add_argument(
'--property',
metavar='<key=value>',
action=parseractions.KeyValueAction,
help='Set a property on <project> '
'(repeat option to set multiple properties)',
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
if (not parsed_args.name
and not parsed_args.domain
and not parsed_args.description
and not parsed_args.enable
and not parsed_args.property
and not parsed_args.disable):
return
project = common.find_project(identity_client,
parsed_args.project,
parsed_args.domain)
kwargs = {}
if parsed_args.name:
kwargs['name'] = parsed_args.name
if parsed_args.description:
kwargs['description'] = parsed_args.description
if parsed_args.enable:
kwargs['enabled'] = True
if parsed_args.disable:
kwargs['enabled'] = False
if parsed_args.property:
kwargs.update(parsed_args.property)
identity_client.projects.update(project.id, **kwargs)
class ShowProject(command.ShowOne):
"""Display project details"""
def get_parser(self, prog_name):
parser = super(ShowProject, self).get_parser(prog_name)
parser.add_argument(
'project',
metavar='<project>',
help='Project to display (name or ID)',
)
parser.add_argument(
'--domain',
metavar='<domain>',
help='Domain owning <project> (name or ID)',
)
parser.add_argument(
'--parents',
action='store_true',
default=False,
help='Show the project\'s parents as a list',
)
parser.add_argument(
'--children',
action='store_true',
default=False,
help='Show project\'s subtree (children) as a list',
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
if parsed_args.domain:
domain = common.find_domain(identity_client, parsed_args.domain)
project = utils.find_resource(
identity_client.projects,
parsed_args.project,
domain_id=domain.id,
parents_as_list=parsed_args.parents,
subtree_as_list=parsed_args.children)
else:
project = utils.find_resource(
identity_client.projects,
parsed_args.project,
parents_as_list=parsed_args.parents,
subtree_as_list=parsed_args.children)
if project._info.get('parents'):
project._info['parents'] = [str(p['project']['id'])
for p in project._info['parents']]
if project._info.get('subtree'):
project._info['subtree'] = [str(p['project']['id'])
for p in project._info['subtree']]
project._info.pop('links')
return zip(*sorted(six.iteritems(project._info)))
| {
"content_hash": "3cc8dc7b4ecae6d5dd16ad7b9991844d",
"timestamp": "",
"source": "github",
"line_count": 328,
"max_line_length": 76,
"avg_line_length": 32.77743902439025,
"alnum_prop": 0.5081387777881128,
"repo_name": "anythingrandom/eclcli",
"id": "25c995d302f24e4607e928b3d074cb985d5e0c32",
"size": "11364",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "eclcli/identity/v3/project.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1647657"
}
],
"symlink_target": ""
} |
import json
import logging
from django.utils.functional import wraps
from django.utils.translation import ugettext as _
from desktop.conf import USE_NEW_EDITOR
from desktop.lib.exceptions_renderable import PopupException
from desktop.models import Document, Document2
from oozie.models import Job, Node, Dataset
LOG = logging.getLogger(__name__)
def check_document_access_permission():
def inner(view_func):
def decorate(request, *args, **kwargs):
doc_id = uuid = doc2 = None
try:
if request.REQUEST.get('workflow'):
workflow_id = request.REQUEST.get('workflow')
if workflow_id.isdigit():
doc_id = workflow_id
else:
uuid = workflow_id
elif request.GET.get('uuid'):
uuid = request.GET.get('uuid')
elif request.GET.get('coordinator'):
doc_id = request.GET.get('coordinator')
elif request.GET.get('bundle'):
doc_id = request.GET.get('bundle')
elif 'doc_id' in kwargs:
doc_id = kwargs['doc_id']
if doc_id and not doc_id.isdigit():
uuid = doc_id
doc_id = None
if doc_id is not None:
doc2 = Document2.objects.get(id=doc_id)
elif uuid is not None:
# TODO: The commented line should be used once we fully transition to doc2
# doc2 = Document2.objects.get_by_uuid(user=request.user, uuid=uuid, perm_type=None)
doc2 = Document2.objects.filter(uuid=uuid).order_by('-last_modified').first()
if doc2:
if USE_NEW_EDITOR.get():
doc2.can_read_or_exception(request.user)
else:
doc2.doc.get().can_read_or_exception(request.user)
except Document2.DoesNotExist:
raise PopupException(_('Job with %(key)s=%(value)s does not exist') %
{'key': 'id' if doc_id else 'uuid', 'value': doc_id or uuid})
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
return inner
def check_document_modify_permission():
def inner(view_func):
def decorate(request, *args, **kwargs):
doc_id = None
job = json.loads(request.POST.get('workflow', '{}'))
if not job:
job = json.loads(request.POST.get('coordinator', '{}'))
      if not job:
job = json.loads(request.POST.get('bundle', '{}'))
if job and job.get('id'):
doc_id = job.get('id')
try:
doc2 = Document2.objects.get(id=job['id'])
if USE_NEW_EDITOR.get():
doc2.can_write_or_exception(request.user)
else:
doc2.doc.get().can_write_or_exception(request.user)
      except (Document.DoesNotExist, Document2.DoesNotExist):
raise PopupException(_('Job %(id)s does not exist') % {'id': doc_id})
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
return inner
def check_editor_access_permission(view_func):
def decorate(request, *args, **kwargs):
if not request.user.is_superuser and request.user.has_hue_permission(action="disable_editor_access", app="oozie"):
raise PopupException(_('Missing permission to access the Oozie Editor'), error_code=401)
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
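# Hypothetical usage sketch (the view name is illustrative, not from this
# module): the decorators are designed to stack, with the editor access check
# applied outermost:
#
#   @check_editor_access_permission
#   @check_document_access_permission()
#   def edit_workflow(request, doc_id=None):
#       ...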
## Oozie v1 below
def check_job_access_permission(exception_class=PopupException):
"""
Decorator ensuring that the user has access to the workflow or coordinator.
Arg: 'workflow' or 'coordinator' id.
Return: the workflow of coordinator or raise an exception
Notice: its gets an id in input and returns the full object in output (not an id).
"""
def inner(view_func):
def decorate(request, *args, **kwargs):
if 'workflow' in kwargs:
job_type = 'workflow'
elif 'coordinator' in kwargs:
job_type = 'coordinator'
else:
job_type = 'bundle'
job = kwargs.get(job_type)
if job is not None:
job = Job.objects.can_read_or_exception(request, job, exception_class=exception_class)
kwargs[job_type] = job
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
return inner
def check_job_edition_permission(authorize_get=False, exception_class=PopupException):
"""
Decorator ensuring that the user has the permissions to modify a workflow or coordinator.
Need to appear below @check_job_access_permission
"""
def inner(view_func):
def decorate(request, *args, **kwargs):
if 'workflow' in kwargs:
job_type = 'workflow'
elif 'coordinator' in kwargs:
job_type = 'coordinator'
else:
job_type = 'bundle'
job = kwargs.get(job_type)
if job is not None and not (authorize_get and request.method == 'GET'):
Job.objects.can_edit_or_exception(request, job, exception_class=exception_class)
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
return inner
def check_action_access_permission(view_func):
"""
Decorator ensuring that the user has access to the workflow action.
Arg: 'workflow action' id.
Return: the workflow action or raise an exception
Notice: its gets an id in input and returns the full object in output (not an id).
"""
def decorate(request, *args, **kwargs):
action_id = kwargs.get('action')
action = Node.objects.get(id=action_id).get_full_node()
Job.objects.can_read_or_exception(request, action.workflow.id)
kwargs['action'] = action
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
def check_action_edition_permission(view_func):
"""
Decorator ensuring that the user has the permissions to modify a workflow action.
  Must appear below @check_action_access_permission.
"""
def decorate(request, *args, **kwargs):
action = kwargs.get('action')
Job.objects.can_edit_or_exception(request, action.workflow)
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
def check_dataset_access_permission(view_func):
"""
Decorator ensuring that the user has access to dataset.
Arg: 'dataset'.
Return: the dataset or raise an exception
  Note: it gets an id as input and returns the full object (not an id) as output.
"""
def decorate(request, *args, **kwargs):
dataset = kwargs.get('dataset')
if dataset is not None:
dataset = Dataset.objects.can_read_or_exception(request, dataset)
kwargs['dataset'] = dataset
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
def check_dataset_edition_permission(authorize_get=False):
"""
Decorator ensuring that the user has the permissions to modify a dataset.
A dataset can be edited if the coordinator that owns the dataset can be edited.
  Must appear below @check_dataset_access_permission.
"""
def inner(view_func):
def decorate(request, *args, **kwargs):
dataset = kwargs.get('dataset')
if dataset is not None and not (authorize_get and request.method == 'GET'):
Job.objects.can_edit_or_exception(request, dataset.coordinator)
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
return inner
| {
"content_hash": "167176df8dd6000a0056209b0fde3458",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 118,
"avg_line_length": 32.51801801801802,
"alnum_prop": 0.6566006372073694,
"repo_name": "todaychi/hue",
"id": "04e106cb5e530c309373c7c81f851cec263c85ed",
"size": "8011",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "apps/oozie/src/oozie/decorators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3096"
},
{
"name": "Batchfile",
"bytes": "41710"
},
{
"name": "C",
"bytes": "2717013"
},
{
"name": "C++",
"bytes": "199945"
},
{
"name": "CSS",
"bytes": "691188"
},
{
"name": "Emacs Lisp",
"bytes": "11704"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Go",
"bytes": "6671"
},
{
"name": "HTML",
"bytes": "23983570"
},
{
"name": "Java",
"bytes": "575404"
},
{
"name": "JavaScript",
"bytes": "5432201"
},
{
"name": "Lex",
"bytes": "39802"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "Makefile",
"bytes": "146585"
},
{
"name": "Mako",
"bytes": "3525679"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "3646"
},
{
"name": "Perl",
"bytes": "3499"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "45877726"
},
{
"name": "Roff",
"bytes": "16669"
},
{
"name": "Shell",
"bytes": "46975"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "Thrift",
"bytes": "278712"
},
{
"name": "Visual Basic",
"bytes": "2884"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "353353"
}
],
"symlink_target": ""
} |
import logging
from binascii import hexlify
from typing import Sequence, List
from ledger import error
from ledger.tree_hasher import TreeHasher
from ledger.util import STH
class MerkleVerifier(object):
"""A utility class for doing Merkle path computations."""
def __init__(self, hasher=TreeHasher()):
self.hasher = hasher
def __repr__(self):
return "%r(hasher: %r)" % (self.__class__.__name__, self.hasher)
def __str__(self):
return "%s(hasher: %s)" % (self.__class__.__name__, self.hasher)
@error.returns_true_or_raises
def verify_tree_consistency(self, old_tree_size: int, new_tree_size: int,
old_root: bytes, new_root: bytes,
proof: Sequence[bytes]):
"""Verify the consistency between two root hashes.
old_tree_size must be <= new_tree_size.
Args:
old_tree_size: size of the older tree.
          new_tree_size: size of the newer tree.
old_root: the root hash of the older tree.
new_root: the root hash of the newer tree.
proof: the consistency proof.
Returns:
True. The return value is enforced by a decorator and need not be
checked by the caller.
Raises:
ConsistencyError: the proof indicates an inconsistency
(this is usually really serious!).
ProofError: the proof is invalid.
ValueError: supplied tree sizes are invalid.
"""
old_size = old_tree_size
new_size = new_tree_size
if old_size < 0 or new_size < 0:
raise ValueError("Negative tree size")
if old_size > new_size:
raise ValueError("Older tree has bigger size (%d vs %d), did "
"you supply inputs in the wrong order?" %
(old_size, new_size))
if old_size == new_size:
if old_root == new_root:
if proof:
logging.warning("Trees are identical, ignoring proof")
return True
else:
raise error.ConsistencyError("Inconsistency: different root "
"hashes for the same tree size")
if old_size == 0:
if proof:
# A consistency proof with an empty tree is an empty proof.
# Anything is consistent with an empty tree, so ignore whatever
# bogus proof was supplied. Note we do not verify here that the
# root hash is a valid hash for an empty tree.
logging.warning("Ignoring non-empty consistency proof for "
"empty tree.")
return True
# Now 0 < old_size < new_size
# A consistency proof is essentially an audit proof for the node with
# index old_size - 1 in the newer tree. The sole difference is that
# the path is already hashed together into a single hash up until the
# first audit node that occurs in the newer tree only.
node = old_size - 1
last_node = new_size - 1
# While we are the right child, everything is in both trees, so move one
# level up.
while node % 2:
node //= 2
last_node //= 2
p = iter(proof)
try:
if node:
# Compute the two root hashes in parallel.
new_hash = old_hash = next(p)
else:
# The old tree was balanced (2**k nodes), so we already have
# the first root hash.
new_hash = old_hash = old_root
while node:
if node % 2:
# node is a right child: left sibling exists in both trees.
next_node = next(p)
old_hash = self.hasher.hash_children(next_node, old_hash)
new_hash = self.hasher.hash_children(next_node, new_hash)
elif node < last_node:
# node is a left child: right sibling only exists in the
# newer tree.
new_hash = self.hasher.hash_children(new_hash, next(p))
# else node == last_node: node is a left child with no sibling
# in either tree.
node //= 2
last_node //= 2
# Now old_hash is the hash of the first subtree. If the two trees
# have different height, continue the path until the new root.
while last_node:
n = next(p)
new_hash = self.hasher.hash_children(new_hash, n)
last_node //= 2
# If the second hash does not match, the proof is invalid for the
# given pair. If, on the other hand, the newer hash matches but the
# older one doesn't, then the proof (together with the signatures
# on the hashes) is proof of inconsistency.
# Continue to find out.
if new_hash != new_root:
raise error.ProofError("Bad Merkle proof: second root hash "
"does not match. Expected hash: %s "
", computed hash: %s" %
(hexlify(new_root).strip(),
hexlify(new_hash).strip()))
elif old_hash != old_root:
raise error.ConsistencyError("Inconsistency: first root hash "
"does not match. Expected hash: "
"%s, computed hash: %s" %
(hexlify(old_root).strip(),
hexlify(old_hash).strip())
)
except StopIteration:
raise error.ProofError("Merkle proof is too short")
# We've already verified consistency, so accept the proof even if
# there's garbage left over (but log a warning).
try:
next(p)
except StopIteration:
pass
else:
logging.warning("Proof has extra nodes")
return True
def _calculate_root_hash_from_audit_path(self, leaf_hash: bytes,
node_index: int,
audit_path: List[bytes],
tree_size: int):
calculated_hash = leaf_hash
last_node = tree_size - 1
while last_node > 0:
if not audit_path:
raise error.ProofError('Proof too short: left with node index '
'%d' % node_index)
if node_index % 2:
audit_hash = audit_path.pop(0)
calculated_hash = self.hasher.hash_children(
audit_hash, calculated_hash)
elif node_index < last_node:
audit_hash = audit_path.pop(0)
calculated_hash = self.hasher.hash_children(
calculated_hash, audit_hash)
# node_index == last_node and node_index is even: A sibling does
# not exist. Go further up the tree until node_index is odd so
# calculated_hash will be used as the right-hand operand.
node_index //= 2
last_node //= 2
if audit_path:
raise error.ProofError('Proof too long: Left with %d hashes.' %
len(audit_path))
return calculated_hash
@classmethod
def audit_path_length(cls, index: int, tree_size: int):
length = 0
last_node = tree_size - 1
while last_node > 0:
if index % 2 or index < last_node:
length += 1
index //= 2
last_node //= 2
return length
@error.returns_true_or_raises
def verify_leaf_hash_inclusion(self, leaf_hash: bytes, leaf_index: int,
proof: List[bytes], sth: STH):
"""Verify a Merkle Audit Path.
See section 2.1.1 of RFC6962 for the exact path description.
Args:
leaf_hash: The hash of the leaf for which the proof was provided.
leaf_index: Index of the leaf in the tree.
proof: A list of SHA-256 hashes representing the Merkle audit path.
sth: STH with the same tree size as the one used to fetch the proof.
The sha256_root_hash from this STH will be compared against the
root hash produced from the proof.
Returns:
True. The return value is enforced by a decorator and need not be
checked by the caller.
Raises:
ProofError: the proof is invalid.
"""
leaf_index = int(leaf_index)
tree_size = int(sth.tree_size)
        # TODO(eranm): Verify signature over STH
if tree_size <= leaf_index:
raise ValueError("Provided STH is for a tree that is smaller "
"than the leaf index. Tree size: %d Leaf "
"index: %d" % (tree_size, leaf_index))
if tree_size < 0 or leaf_index < 0:
raise ValueError("Negative tree size or leaf index: "
"Tree size: %d Leaf index: %d" %
(tree_size, leaf_index))
calculated_root_hash = self._calculate_root_hash_from_audit_path(
leaf_hash, leaf_index, proof[:], tree_size)
if calculated_root_hash == sth.sha256_root_hash:
return True
raise error.ProofError("Constructed root hash differs from provided "
"root hash. Constructed: %s Expected: %s" %
(hexlify(calculated_root_hash).strip(),
hexlify(sth.sha256_root_hash).strip()))
@error.returns_true_or_raises
def verify_leaf_inclusion(self, leaf: bytes, leaf_index: int,
proof: List[bytes], sth: STH):
"""Verify a Merkle Audit Path.
See section 2.1.1 of RFC6962 for the exact path description.
Args:
leaf: The leaf for which the proof was provided.
leaf_index: Index of the leaf in the tree.
proof: A list of SHA-256 hashes representing the Merkle audit path.
sth: STH with the same tree size as the one used to fetch the proof.
The sha256_root_hash from this STH will be compared against the
root hash produced from the proof.
Returns:
True. The return value is enforced by a decorator and need not be
checked by the caller.
Raises:
ProofError: the proof is invalid.
"""
leaf_hash = self.hasher.hash_leaf(leaf)
return self.verify_leaf_hash_inclusion(leaf_hash, leaf_index, proof,
sth)
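# Usage sketch (illustrative only): `leaf_index`, `proof` and the STH objects
# are assumed to have been fetched from a log for the matching tree sizes.
#
#     verifier = MerkleVerifier()
#     leaf_hash = verifier.hasher.hash_leaf(b"some log entry")
#     # Raises error.ProofError on a bad path, returns True otherwise.
#     verifier.verify_leaf_hash_inclusion(leaf_hash, leaf_index, proof, sth)
#
#     # Consistency between two signed tree heads works analogously:
#     verifier.verify_tree_consistency(old_sth.tree_size, new_sth.tree_size,
#                                      old_sth.sha256_root_hash,
#                                      new_sth.sha256_root_hash,
#                                      consistency_proof)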
| {
"content_hash": "20ca103fce80fb238a26e91a87a1a31a",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 80,
"avg_line_length": 42.52671755725191,
"alnum_prop": 0.5201938610662359,
"repo_name": "evernym/ledger",
"id": "48c0d77e4c18105d350a5d8df66a83f1b978d687",
"size": "11142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ledger/merkle_verifier.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "153084"
}
],
"symlink_target": ""
} |
"""Module implementing RNN Cells.
This module provides a number of basic commonly used RNN cells, such as LSTM
(Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number of
operators that allow adding dropouts, projections, or embeddings for inputs.
Constructing multi-layer cells is supported by the class `MultiRNNCell`, or by
calling the `rnn` ops several times.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import hashlib
import numbers
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
_BIAS_VARIABLE_NAME = "bias"
_WEIGHTS_VARIABLE_NAME = "kernel"
def _like_rnncell(cell):
"""Checks that a given object is an RNNCell by using duck typing."""
conditions = [hasattr(cell, "output_size"), hasattr(cell, "state_size"),
hasattr(cell, "zero_state"), callable(cell)]
return all(conditions)
def _concat(prefix, suffix, static=False):
"""Concat that enables int, Tensor, or TensorShape values.
This function takes a size specification, which can be an integer, a
TensorShape, or a Tensor, and converts it into a concatenated Tensor
(if static = False) or a list of integers (if static = True).
Args:
prefix: The prefix; usually the batch size (and/or time step size).
(TensorShape, int, or Tensor.)
suffix: TensorShape, int, or Tensor.
static: If `True`, return a python list with possibly unknown dimensions.
Otherwise return a `Tensor`.
Returns:
shape: the concatenation of prefix and suffix.
Raises:
ValueError: if `suffix` is not a scalar or vector (or TensorShape).
ValueError: if prefix or suffix was `None` and asked for dynamic
Tensors out.
"""
if isinstance(prefix, ops.Tensor):
p = prefix
p_static = tensor_util.constant_value(prefix)
if p.shape.ndims == 0:
p = array_ops.expand_dims(p, 0)
elif p.shape.ndims != 1:
raise ValueError("prefix tensor must be either a scalar or vector, "
"but saw tensor: %s" % p)
else:
p = tensor_shape.as_shape(prefix)
p_static = p.as_list() if p.ndims is not None else None
p = (constant_op.constant(p.as_list(), dtype=dtypes.int32)
if p.is_fully_defined() else None)
if isinstance(suffix, ops.Tensor):
s = suffix
s_static = tensor_util.constant_value(suffix)
if s.shape.ndims == 0:
s = array_ops.expand_dims(s, 0)
elif s.shape.ndims != 1:
raise ValueError("suffix tensor must be either a scalar or vector, "
"but saw tensor: %s" % s)
else:
s = tensor_shape.as_shape(suffix)
s_static = s.as_list() if s.ndims is not None else None
s = (constant_op.constant(s.as_list(), dtype=dtypes.int32)
if s.is_fully_defined() else None)
if static:
shape = tensor_shape.as_shape(p_static).concatenate(s_static)
shape = shape.as_list() if shape.ndims is not None else None
else:
if p is None or s is None:
raise ValueError("Provided a prefix or suffix of None: %s and %s"
% (prefix, suffix))
shape = array_ops.concat((p, s), 0)
return shape
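# Illustration (values assumed): with an integer prefix the two modes behave
# as follows --
#
#   _concat(32, 7, static=True)    # -> the python list [32, 7]
#   _concat(batch_size_tensor, 7)  # -> a rank-1 int32 Tensor holding [batch, 7]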
def _zero_state_tensors(state_size, batch_size, dtype):
"""Create tensors of zeros based on state_size, batch_size, and dtype."""
def get_state_shape(s):
"""Combine s with batch_size to get a proper tensor shape."""
c = _concat(batch_size, s)
size = array_ops.zeros(c, dtype=dtype)
if context.in_graph_mode():
c_static = _concat(batch_size, s, static=True)
size.set_shape(c_static)
return size
return nest.map_structure(get_state_shape, state_size)
class RNNCell(base_layer.Layer):
"""Abstract object representing an RNN cell.
Every `RNNCell` must have the properties below and implement `call` with
the signature `(output, next_state) = call(input, state)`. The optional
third input argument, `scope`, is allowed for backwards compatibility
purposes; but should be left off for new subclasses.
This definition of cell differs from the definition used in the literature.
In the literature, 'cell' refers to an object with a single scalar output.
This definition refers to a horizontal array of such units.
An RNN cell, in the most abstract setting, is anything that has
a state and performs some operation that takes a matrix of inputs.
This operation results in an output matrix with `self.output_size` columns.
If `self.state_size` is an integer, this operation also results in a new
state matrix with `self.state_size` columns. If `self.state_size` is a
(possibly nested tuple of) TensorShape object(s), then it should return a
matching structure of Tensors having shape `[batch_size].concatenate(s)`
  for each `s` in `self.state_size`.
"""
def __call__(self, inputs, state, scope=None):
"""Run this RNN cell on inputs, starting from the given state.
Args:
inputs: `2-D` tensor with shape `[batch_size x input_size]`.
state: if `self.state_size` is an integer, this should be a `2-D Tensor`
with shape `[batch_size x self.state_size]`. Otherwise, if
`self.state_size` is a tuple of integers, this should be a tuple
with shapes `[batch_size x s] for s in self.state_size`.
scope: VariableScope for the created subgraph; defaults to class name.
Returns:
A pair containing:
- Output: A `2-D` tensor with shape `[batch_size x self.output_size]`.
- New state: Either a single `2-D` tensor, or a tuple of tensors matching
the arity and shapes of `state`.
"""
if scope is not None:
with vs.variable_scope(scope,
custom_getter=self._rnn_get_variable) as scope:
return super(RNNCell, self).__call__(inputs, state, scope=scope)
else:
with vs.variable_scope(vs.get_variable_scope(),
custom_getter=self._rnn_get_variable):
return super(RNNCell, self).__call__(inputs, state)
def _rnn_get_variable(self, getter, *args, **kwargs):
variable = getter(*args, **kwargs)
trainable = (variable in tf_variables.trainable_variables() or
(isinstance(variable, tf_variables.PartitionedVariable) and
list(variable)[0] in tf_variables.trainable_variables()))
if trainable and variable not in self._trainable_weights:
self._trainable_weights.append(variable)
elif not trainable and variable not in self._non_trainable_weights:
self._non_trainable_weights.append(variable)
return variable
@property
def state_size(self):
"""size(s) of state(s) used by this cell.
It can be represented by an Integer, a TensorShape or a tuple of Integers
or TensorShapes.
"""
raise NotImplementedError("Abstract method")
@property
def output_size(self):
"""Integer or TensorShape: size of outputs produced by this cell."""
raise NotImplementedError("Abstract method")
def build(self, _):
# This tells the parent Layer object that it's OK to call
# self.add_variable() inside the call() method.
pass
def zero_state(self, batch_size, dtype):
"""Return zero-filled state tensor(s).
Args:
batch_size: int, float, or unit Tensor representing the batch size.
dtype: the data type to use for the state.
Returns:
If `state_size` is an int or TensorShape, then the return value is a
`N-D` tensor of shape `[batch_size x state_size]` filled with zeros.
If `state_size` is a nested list or tuple, then the return value is
a nested list or tuple (of the same structure) of `2-D` tensors with
the shapes `[batch_size x s]` for each s in `state_size`.
"""
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
state_size = self.state_size
return _zero_state_tensors(state_size, batch_size, dtype)
class BasicRNNCell(RNNCell):
"""The most basic RNN cell.
Args:
num_units: int, The number of units in the RNN cell.
activation: Nonlinearity to use. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
def __init__(self, num_units, activation=None, reuse=None):
super(BasicRNNCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._activation = activation or math_ops.tanh
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
"""Most basic RNN: output = new_state = act(W * input + U * state + B)."""
output = self._activation(_linear([inputs, state], self._num_units, True))
return output, output
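# Usage sketch (illustrative only; shapes are hypothetical): a cell is
# typically unrolled with tf.nn.dynamic_rnn rather than called directly.
#
#   cell = tf.nn.rnn_cell.BasicRNNCell(num_units=64)
#   inputs = tf.placeholder(tf.float32, [None, 10, 32])  # batch x time x depth
#   outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
#   # outputs: [batch, 10, 64]; final_state: [batch, 64]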
class GRUCell(RNNCell):
"""Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).
Args:
num_units: int, The number of units in the GRU cell.
activation: Nonlinearity to use. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
kernel_initializer: (optional) The initializer to use for the weight and
projection matrices.
bias_initializer: (optional) The initializer to use for the bias.
"""
def __init__(self,
num_units,
activation=None,
reuse=None,
kernel_initializer=None,
bias_initializer=None):
super(GRUCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._activation = activation or math_ops.tanh
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
"""Gated recurrent unit (GRU) with nunits cells."""
with vs.variable_scope("gates"): # Reset gate and update gate.
# We start with bias of 1.0 to not reset and not update.
bias_ones = self._bias_initializer
if self._bias_initializer is None:
dtype = [a.dtype for a in [inputs, state]][0]
bias_ones = init_ops.constant_initializer(1.0, dtype=dtype)
value = math_ops.sigmoid(
_linear([inputs, state], 2 * self._num_units, True, bias_ones,
self._kernel_initializer))
r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
with vs.variable_scope("candidate"):
c = self._activation(
_linear([inputs, r * state], self._num_units, True,
self._bias_initializer, self._kernel_initializer))
new_h = u * state + (1 - u) * c
return new_h, new_h
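# The update computed above, in equation form (illustrative):
#
#   r, u  = sigmoid(W_ru [x, h] + b_ru)   # reset and update gates
#   c     = tanh(W_c [x, r * h] + b_c)    # candidate activation
#   h_new = u * h + (1 - u) * c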
_LSTMStateTuple = collections.namedtuple("LSTMStateTuple", ("c", "h"))
class LSTMStateTuple(_LSTMStateTuple):
"""Tuple used by LSTM Cells for `state_size`, `zero_state`, and output state.
Stores two elements: `(c, h)`, in that order. Where `c` is the hidden state
and `h` is the output.
Only used when `state_is_tuple=True`.
"""
__slots__ = ()
@property
def dtype(self):
(c, h) = self
if c.dtype != h.dtype:
raise TypeError("Inconsistent internal state: %s vs %s" %
(str(c.dtype), str(h.dtype)))
return c.dtype
class BasicLSTMCell(RNNCell):
"""Basic LSTM recurrent network cell.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
  It does not support cell clipping, a projection layer, or peep-hole
  connections: it is the basic baseline.
For advanced models, please use the full @{tf.nn.rnn_cell.LSTMCell}
that follows.
"""
def __init__(self, num_units, forget_bias=1.0,
state_is_tuple=True, activation=None, reuse=None):
"""Initialize the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
Must set to `0.0` manually when restoring from CudnnLSTM-trained
checkpoints.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. If False, they are concatenated
along the column axis. The latter behavior will soon be deprecated.
activation: Activation function of the inner states. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
When restoring from CudnnLSTM-trained checkpoints, must use
CudnnCompatibleLSTMCell instead.
"""
super(BasicLSTMCell, self).__init__(_reuse=reuse)
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
self._num_units = num_units
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation or math_ops.tanh
@property
def state_size(self):
return (LSTMStateTuple(self._num_units, self._num_units)
if self._state_is_tuple else 2 * self._num_units)
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
"""Long short-term memory cell (LSTM).
Args:
inputs: `2-D` tensor with shape `[batch_size x input_size]`.
state: An `LSTMStateTuple` of state tensors, each shaped
`[batch_size x self.state_size]`, if `state_is_tuple` has been set to
`True`. Otherwise, a `Tensor` shaped
`[batch_size x 2 * self.state_size]`.
Returns:
A pair containing the new hidden state, and the new state (either a
`LSTMStateTuple` or a concatenated state, depending on
`state_is_tuple`).
"""
sigmoid = math_ops.sigmoid
# Parameters of gates are concatenated into one multiply for efficiency.
if self._state_is_tuple:
c, h = state
else:
c, h = array_ops.split(value=state, num_or_size_splits=2, axis=1)
concat = _linear([inputs, h], 4 * self._num_units, True)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)
new_c = (
c * sigmoid(f + self._forget_bias) + sigmoid(i) * self._activation(j))
new_h = self._activation(new_c) * sigmoid(o)
if self._state_is_tuple:
new_state = LSTMStateTuple(new_c, new_h)
else:
new_state = array_ops.concat([new_c, new_h], 1)
return new_h, new_state
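# Usage sketch (illustrative only): with the default state_is_tuple=True the
# returned state is an LSTMStateTuple; shapes are hypothetical.
#
#   cell = tf.nn.rnn_cell.BasicLSTMCell(128)
#   outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
#   # state.c and state.h are each [batch, 128]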
class LSTMCell(RNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
The default non-peephole implementation is based on:
http://www.bioinf.jku.at/publications/older/2604.pdf
S. Hochreiter and J. Schmidhuber.
"Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
The class uses optional peep-hole connections, optional cell clipping, and
an optional projection layer.
"""
def __init__(self, num_units,
use_peepholes=False, cell_clip=None,
initializer=None, num_proj=None, proj_clip=None,
num_unit_shards=None, num_proj_shards=None,
forget_bias=1.0, state_is_tuple=True,
activation=None, reuse=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
num_unit_shards: Deprecated, will be removed by Jan. 2017.
Use a variable_scope partitioner instead.
num_proj_shards: Deprecated, will be removed by Jan. 2017.
Use a variable_scope partitioner instead.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training. Must set it manually to `0.0` when restoring from
CudnnLSTM trained checkpoints.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. If False, they are concatenated
along the column axis. This latter behavior will soon be deprecated.
activation: Activation function of the inner states. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
When restoring from CudnnLSTM-trained checkpoints, must use
CudnnCompatibleLSTMCell instead.
"""
super(LSTMCell, self).__init__(_reuse=reuse)
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
if num_unit_shards is not None or num_proj_shards is not None:
      logging.warn(
          "%s: The num_unit_shards and num_proj_shards parameters are "
          "deprecated and will be removed in Jan 2017. "
          "Use a variable scope with a partitioner instead.", self)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializer
self._num_proj = num_proj
self._proj_clip = proj_clip
self._num_unit_shards = num_unit_shards
self._num_proj_shards = num_proj_shards
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation or math_ops.tanh
if num_proj:
self._state_size = (
LSTMStateTuple(num_units, num_proj)
if state_is_tuple else num_units + num_proj)
self._output_size = num_proj
else:
self._state_size = (
LSTMStateTuple(num_units, num_units)
if state_is_tuple else 2 * num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
      inputs: input Tensor, 2D, batch x input_size.
state: if `state_is_tuple` is False, this must be a state Tensor,
`2-D, batch x state_size`. If `state_is_tuple` is True, this must be a
tuple of state Tensors, both `2-D`, with column sizes `c_state` and
`m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
num_proj = self._num_units if self._num_proj is None else self._num_proj
sigmoid = math_ops.sigmoid
if self._state_is_tuple:
(c_prev, m_prev) = state
else:
c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
dtype = inputs.dtype
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
scope = vs.get_variable_scope()
with vs.variable_scope(scope, initializer=self._initializer) as unit_scope:
if self._num_unit_shards is not None:
unit_scope.set_partitioner(
partitioned_variables.fixed_size_partitioner(
self._num_unit_shards))
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
lstm_matrix = _linear([inputs, m_prev], 4 * self._num_units, bias=True)
i, j, f, o = array_ops.split(
value=lstm_matrix, num_or_size_splits=4, axis=1)
# Diagonal connections
if self._use_peepholes:
with vs.variable_scope(unit_scope) as projection_scope:
if self._num_unit_shards is not None:
projection_scope.set_partitioner(None)
w_f_diag = vs.get_variable(
"w_f_diag", shape=[self._num_units], dtype=dtype)
w_i_diag = vs.get_variable(
"w_i_diag", shape=[self._num_units], dtype=dtype)
w_o_diag = vs.get_variable(
"w_o_diag", shape=[self._num_units], dtype=dtype)
if self._use_peepholes:
c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
sigmoid(i + w_i_diag * c_prev) * self._activation(j))
else:
c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) *
self._activation(j))
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
# pylint: enable=invalid-unary-operand-type
if self._use_peepholes:
m = sigmoid(o + w_o_diag * c) * self._activation(c)
else:
m = sigmoid(o) * self._activation(c)
if self._num_proj is not None:
with vs.variable_scope("projection") as proj_scope:
if self._num_proj_shards is not None:
proj_scope.set_partitioner(
partitioned_variables.fixed_size_partitioner(
self._num_proj_shards))
m = _linear(m, self._num_proj, bias=False)
if self._proj_clip is not None:
# pylint: disable=invalid-unary-operand-type
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
# pylint: enable=invalid-unary-operand-type
new_state = (LSTMStateTuple(c, m) if self._state_is_tuple else
array_ops.concat([c, m], 1))
return m, new_state
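# Usage sketch (illustrative only): with a projection layer the emitted
# output (the `m`/`h` part of the state) is num_proj wide.
#
#   cell = tf.nn.rnn_cell.LSTMCell(256, use_peepholes=True, num_proj=128)
#   # cell.output_size == 128
#   # cell.state_size == LSTMStateTuple(c=256, h=128)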
def _enumerated_map_structure_up_to(shallow_structure, map_fn, *args, **kwargs):
ix = [0]
def enumerated_fn(*inner_args, **inner_kwargs):
r = map_fn(ix[0], *inner_args, **inner_kwargs)
ix[0] += 1
return r
return nest.map_structure_up_to(shallow_structure,
enumerated_fn, *args, **kwargs)
def _default_dropout_state_filter_visitor(substate):
if isinstance(substate, LSTMStateTuple):
# Do not perform dropout on the memory state.
return LSTMStateTuple(c=False, h=True)
elif isinstance(substate, tensor_array_ops.TensorArray):
return False
return True
class DropoutWrapper(RNNCell):
"""Operator adding dropout to inputs and outputs of the given cell."""
def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,
state_keep_prob=1.0, variational_recurrent=False,
input_size=None, dtype=None, seed=None,
dropout_state_filter_visitor=None):
"""Create a cell with added input, state, and/or output dropout.
If `variational_recurrent` is set to `True` (**NOT** the default behavior),
then the same dropout mask is applied at every step, as described in:
Y. Gal, Z Ghahramani. "A Theoretically Grounded Application of Dropout in
Recurrent Neural Networks". https://arxiv.org/abs/1512.05287
Otherwise a different dropout mask is applied at every time step.
Note, by default (unless a custom `dropout_state_filter` is provided),
the memory state (`c` component of any `LSTMStateTuple`) passing through
a `DropoutWrapper` is never modified. This behavior is described in the
above article.
Args:
      cell: an RNNCell.
input_keep_prob: unit Tensor or float between 0 and 1, input keep
probability; if it is constant and 1, no input dropout will be added.
output_keep_prob: unit Tensor or float between 0 and 1, output keep
probability; if it is constant and 1, no output dropout will be added.
      state_keep_prob: unit Tensor or float between 0 and 1, state keep
        probability; if it is constant and 1, no state dropout will be added.
State dropout is performed on the outgoing states of the cell.
**Note** the state components to which dropout is applied when
`state_keep_prob` is in `(0, 1)` are also determined by
the argument `dropout_state_filter_visitor` (e.g. by default dropout
is never applied to the `c` component of an `LSTMStateTuple`).
variational_recurrent: Python bool. If `True`, then the same
dropout pattern is applied across all time steps per run call.
If this parameter is set, `input_size` **must** be provided.
input_size: (optional) (possibly nested tuple of) `TensorShape` objects
containing the depth(s) of the input tensors expected to be passed in to
the `DropoutWrapper`. Required and used **iff**
`variational_recurrent = True` and `input_keep_prob < 1`.
dtype: (optional) The `dtype` of the input, state, and output tensors.
Required and used **iff** `variational_recurrent = True`.
seed: (optional) integer, the randomness seed.
dropout_state_filter_visitor: (optional), default: (see below). Function
that takes any hierarchical level of the state and returns
a scalar or depth=1 structure of Python booleans describing
which terms in the state should be dropped out. In addition, if the
function returns `True`, dropout is applied across this sublevel. If
the function returns `False`, dropout is not applied across this entire
sublevel.
Default behavior: perform dropout on all terms except the memory (`c`)
        state of `LSTMStateTuple` objects, and don't try to apply dropout to
`TensorArray` objects:
```
def dropout_state_filter_visitor(s):
          if isinstance(s, LSTMStateTuple):
# Never perform dropout on the c state.
            return LSTMStateTuple(c=False, h=True)
elif isinstance(s, TensorArray):
return False
return True
```
Raises:
TypeError: if `cell` is not an `RNNCell`, or `keep_state_fn` is provided
but not `callable`.
ValueError: if any of the keep_probs are not between 0 and 1.
"""
if not _like_rnncell(cell):
raise TypeError("The parameter cell is not a RNNCell.")
if (dropout_state_filter_visitor is not None
and not callable(dropout_state_filter_visitor)):
raise TypeError("dropout_state_filter_visitor must be callable")
self._dropout_state_filter = (
dropout_state_filter_visitor or _default_dropout_state_filter_visitor)
with ops.name_scope("DropoutWrapperInit"):
def tensor_and_const_value(v):
tensor_value = ops.convert_to_tensor(v)
const_value = tensor_util.constant_value(tensor_value)
return (tensor_value, const_value)
for prob, attr in [(input_keep_prob, "input_keep_prob"),
(state_keep_prob, "state_keep_prob"),
(output_keep_prob, "output_keep_prob")]:
tensor_prob, const_prob = tensor_and_const_value(prob)
if const_prob is not None:
if const_prob < 0 or const_prob > 1:
          raise ValueError("Parameter %s must be between 0 and 1: %g"
% (attr, const_prob))
setattr(self, "_%s" % attr, float(const_prob))
else:
setattr(self, "_%s" % attr, tensor_prob)
# Set cell, variational_recurrent, seed before running the code below
self._cell = cell
self._variational_recurrent = variational_recurrent
self._seed = seed
self._recurrent_input_noise = None
self._recurrent_state_noise = None
self._recurrent_output_noise = None
if variational_recurrent:
if dtype is None:
raise ValueError(
"When variational_recurrent=True, dtype must be provided")
def convert_to_batch_shape(s):
# Prepend a 1 for the batch dimension; for recurrent
# variational dropout we use the same dropout mask for all
# batch elements.
return array_ops.concat(
([1], tensor_shape.TensorShape(s).as_list()), 0)
def batch_noise(s, inner_seed):
shape = convert_to_batch_shape(s)
return random_ops.random_uniform(shape, seed=inner_seed, dtype=dtype)
if (not isinstance(self._input_keep_prob, numbers.Real) or
self._input_keep_prob < 1.0):
if input_size is None:
raise ValueError(
"When variational_recurrent=True and input_keep_prob < 1.0 or "
"is unknown, input_size must be provided")
self._recurrent_input_noise = _enumerated_map_structure_up_to(
input_size,
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("input", i)),
input_size)
self._recurrent_state_noise = _enumerated_map_structure_up_to(
cell.state_size,
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("state", i)),
cell.state_size)
self._recurrent_output_noise = _enumerated_map_structure_up_to(
cell.output_size,
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("output", i)),
cell.output_size)
def _gen_seed(self, salt_prefix, index):
if self._seed is None:
return None
salt = "%s_%d" % (salt_prefix, index)
string = (str(self._seed) + salt).encode("utf-8")
return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self._cell.zero_state(batch_size, dtype)
def _variational_recurrent_dropout_value(
self, index, value, noise, keep_prob):
"""Performs dropout given the pre-calculated noise tensor."""
# uniform [keep_prob, 1.0 + keep_prob)
random_tensor = keep_prob + noise
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
binary_tensor = math_ops.floor(random_tensor)
ret = math_ops.div(value, keep_prob) * binary_tensor
ret.set_shape(value.get_shape())
return ret
def _dropout(self, values, salt_prefix, recurrent_noise, keep_prob,
shallow_filtered_substructure=None):
"""Decides whether to perform standard dropout or recurrent dropout."""
if shallow_filtered_substructure is None:
# Put something so we traverse the entire structure; inside the
# dropout function we check to see if leafs of this are bool or not.
shallow_filtered_substructure = values
if not self._variational_recurrent:
def dropout(i, do_dropout, v):
if not isinstance(do_dropout, bool) or do_dropout:
return nn_ops.dropout(
v, keep_prob=keep_prob, seed=self._gen_seed(salt_prefix, i))
else:
return v
return _enumerated_map_structure_up_to(
shallow_filtered_substructure, dropout,
*[shallow_filtered_substructure, values])
else:
def dropout(i, do_dropout, v, n):
if not isinstance(do_dropout, bool) or do_dropout:
return self._variational_recurrent_dropout_value(i, v, n, keep_prob)
else:
return v
return _enumerated_map_structure_up_to(
shallow_filtered_substructure, dropout,
*[shallow_filtered_substructure, values, recurrent_noise])
def __call__(self, inputs, state, scope=None):
"""Run the cell with the declared dropouts."""
def _should_dropout(p):
return (not isinstance(p, float)) or p < 1
if _should_dropout(self._input_keep_prob):
inputs = self._dropout(inputs, "input",
self._recurrent_input_noise,
self._input_keep_prob)
output, new_state = self._cell(inputs, state, scope)
if _should_dropout(self._state_keep_prob):
# Identify which subsets of the state to perform dropout on and
# which ones to keep.
shallow_filtered_substructure = nest.get_traverse_shallow_structure(
self._dropout_state_filter, new_state)
new_state = self._dropout(new_state, "state",
self._recurrent_state_noise,
self._state_keep_prob,
shallow_filtered_substructure)
if _should_dropout(self._output_keep_prob):
output = self._dropout(output, "output",
self._recurrent_output_noise,
self._output_keep_prob)
return output, new_state
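# Usage sketch (illustrative only): variational recurrent dropout needs
# `input_size` (the input depth, assumed to be 32 here) and `dtype` so the
# per-run noise masks can be pre-built.
#
#   base = tf.nn.rnn_cell.LSTMCell(128)
#   cell = tf.nn.rnn_cell.DropoutWrapper(
#       base, input_keep_prob=0.9, output_keep_prob=0.9, state_keep_prob=0.9,
#       variational_recurrent=True, input_size=tf.TensorShape([32]),
#       dtype=tf.float32)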
class ResidualWrapper(RNNCell):
"""RNNCell wrapper that ensures cell inputs are added to the outputs."""
def __init__(self, cell, residual_fn=None):
"""Constructs a `ResidualWrapper` for `cell`.
Args:
cell: An instance of `RNNCell`.
residual_fn: (Optional) The function to map raw cell inputs and raw cell
outputs to the actual cell outputs of the residual network.
Defaults to calling nest.map_structure on (lambda i, o: i + o), inputs
and outputs.
"""
self._cell = cell
self._residual_fn = residual_fn
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self._cell.zero_state(batch_size, dtype)
def __call__(self, inputs, state, scope=None):
"""Run the cell and then apply the residual_fn on its inputs to its outputs.
Args:
inputs: cell inputs.
state: cell state.
scope: optional cell scope.
Returns:
Tuple of cell outputs and new state.
Raises:
TypeError: If cell inputs and outputs have different structure (type).
ValueError: If cell inputs and outputs have different structure (value).
"""
outputs, new_state = self._cell(inputs, state, scope=scope)
# Ensure shapes match
def assert_shape_match(inp, out):
inp.get_shape().assert_is_compatible_with(out.get_shape())
def default_residual_fn(inputs, outputs):
nest.assert_same_structure(inputs, outputs)
nest.map_structure(assert_shape_match, inputs, outputs)
return nest.map_structure(lambda inp, out: inp + out, inputs, outputs)
res_outputs = (self._residual_fn or default_residual_fn)(inputs, outputs)
return (res_outputs, new_state)
class DeviceWrapper(RNNCell):
"""Operator that ensures an RNNCell runs on a particular device."""
def __init__(self, cell, device):
"""Construct a `DeviceWrapper` for `cell` with device `device`.
Ensures the wrapped `cell` is called with `tf.device(device)`.
Args:
cell: An instance of `RNNCell`.
device: A device string or function, for passing to `tf.device`.
"""
self._cell = cell
self._device = device
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
with ops.device(self._device):
return self._cell.zero_state(batch_size, dtype)
def __call__(self, inputs, state, scope=None):
"""Run the cell on specified device."""
with ops.device(self._device):
return self._cell(inputs, state, scope=scope)
class MultiRNNCell(RNNCell):
"""RNN cell composed sequentially of multiple simple cells."""
def __init__(self, cells, state_is_tuple=True):
"""Create a RNN cell composed sequentially of a number of RNNCells.
Args:
cells: list of RNNCells that will be composed in this order.
state_is_tuple: If True, accepted and returned states are n-tuples, where
`n = len(cells)`. If False, the states are all
concatenated along the column axis. This latter behavior will soon be
deprecated.
Raises:
ValueError: if cells is empty (not allowed), or at least one of the cells
returns a state tuple but the flag `state_is_tuple` is `False`.
"""
super(MultiRNNCell, self).__init__()
if not cells:
raise ValueError("Must specify at least one cell for MultiRNNCell.")
if not nest.is_sequence(cells):
raise TypeError(
"cells must be a list or tuple, but saw: %s." % cells)
self._cells = cells
self._state_is_tuple = state_is_tuple
if not state_is_tuple:
if any(nest.is_sequence(c.state_size) for c in self._cells):
raise ValueError("Some cells return tuples of states, but the flag "
"state_is_tuple is not set. State sizes are: %s"
% str([c.state_size for c in self._cells]))
@property
def state_size(self):
if self._state_is_tuple:
return tuple(cell.state_size for cell in self._cells)
else:
return sum([cell.state_size for cell in self._cells])
@property
def output_size(self):
return self._cells[-1].output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
if self._state_is_tuple:
return tuple(cell.zero_state(batch_size, dtype) for cell in self._cells)
else:
# We know here that state_size of each cell is not a tuple and
# presumably does not contain TensorArrays or anything else fancy
return super(MultiRNNCell, self).zero_state(batch_size, dtype)
def call(self, inputs, state):
"""Run this multi-layer cell on inputs, starting from state."""
cur_state_pos = 0
cur_inp = inputs
new_states = []
for i, cell in enumerate(self._cells):
with vs.variable_scope("cell_%d" % i):
if self._state_is_tuple:
if not nest.is_sequence(state):
raise ValueError(
"Expected state to be a tuple of length %d, but received: %s" %
(len(self.state_size), state))
cur_state = state[i]
else:
cur_state = array_ops.slice(state, [0, cur_state_pos],
[-1, cell.state_size])
cur_state_pos += cell.state_size
cur_inp, new_state = cell(cur_inp, cur_state)
new_states.append(new_state)
new_states = (tuple(new_states) if self._state_is_tuple else
array_ops.concat(new_states, 1))
return cur_inp, new_states
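# Usage sketch (illustrative only): stacking three layers. Each layer should
# get its own cell instance; reusing a single object across layers is a
# well-known pitfall.
#
#   cells = [tf.nn.rnn_cell.BasicLSTMCell(128) for _ in range(3)]
#   stacked = tf.nn.rnn_cell.MultiRNNCell(cells)
#   # stacked.state_size is a 3-tuple of LSTMStateTuple(128, 128)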
class _SlimRNNCell(RNNCell):
"""A simple wrapper for slim.rnn_cells."""
def __init__(self, cell_fn):
"""Create a SlimRNNCell from a cell_fn.
Args:
cell_fn: a function which takes (inputs, state, scope) and produces the
outputs and the new_state. Additionally when called with inputs=None and
state=None it should return (initial_outputs, initial_state).
Raises:
TypeError: if cell_fn is not callable
ValueError: if cell_fn cannot produce a valid initial state.
"""
if not callable(cell_fn):
      raise TypeError("cell_fn %s needs to be callable" % cell_fn)
self._cell_fn = cell_fn
self._cell_name = cell_fn.func.__name__
init_output, init_state = self._cell_fn(None, None)
output_shape = init_output.get_shape()
state_shape = init_state.get_shape()
self._output_size = output_shape.with_rank(2)[1].value
self._state_size = state_shape.with_rank(2)[1].value
if self._output_size is None:
raise ValueError("Initial output created by %s has invalid shape %s" %
(self._cell_name, output_shape))
if self._state_size is None:
raise ValueError("Initial state created by %s has invalid shape %s" %
(self._cell_name, state_shape))
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def __call__(self, inputs, state, scope=None):
scope = scope or self._cell_name
output, state = self._cell_fn(inputs, state, scope=scope)
return output, state
def _linear(args,
output_size,
bias,
bias_initializer=None,
kernel_initializer=None):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_initializer: starting value to initialize the bias
(default is all zeros).
kernel_initializer: starting value to initialize the weight.
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
    ValueError: if one of the arguments has an unspecified or wrong shape.
"""
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape() for a in args]
for shape in shapes:
if shape.ndims != 2:
raise ValueError("linear is expecting 2D arguments: %s" % shapes)
if shape[1].value is None:
raise ValueError("linear expects shape[1] to be provided for shape %s, "
"but saw %s" % (shape, shape[1]))
else:
total_arg_size += shape[1].value
dtype = [a.dtype for a in args][0]
# Now the computation.
scope = vs.get_variable_scope()
with vs.variable_scope(scope) as outer_scope:
weights = vs.get_variable(
_WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size],
dtype=dtype,
initializer=kernel_initializer)
if len(args) == 1:
res = math_ops.matmul(args[0], weights)
else:
res = math_ops.matmul(array_ops.concat(args, 1), weights)
if not bias:
return res
with vs.variable_scope(outer_scope) as inner_scope:
inner_scope.set_partitioner(None)
if bias_initializer is None:
bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype)
biases = vs.get_variable(
_BIAS_VARIABLE_NAME, [output_size],
dtype=dtype,
initializer=bias_initializer)
return nn_ops.bias_add(res, biases)
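# Usage sketch (illustrative only; names are hypothetical): inside a cell's
# variable scope, _linear creates (or reuses) a single
# [total_input_depth, output_size] "kernel" plus an optional "bias".
#
#   with vs.variable_scope("gates"):
#     gate_logits = _linear([inputs, h_prev], 4 * num_units, bias=True)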
| {
"content_hash": "707ed7bac25aacba85b4e35bcff4e660",
"timestamp": "",
"source": "github",
"line_count": 1131,
"max_line_length": 80,
"avg_line_length": 39.37577365163572,
"alnum_prop": 0.650648942381102,
"repo_name": "alivecor/tensorflow",
"id": "25a0ad0a37e33b0732e2ec038615e93d843a7def",
"size": "45223",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/rnn_cell_impl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7666"
},
{
"name": "C",
"bytes": "198380"
},
{
"name": "C++",
"bytes": "29214526"
},
{
"name": "CMake",
"bytes": "640979"
},
{
"name": "Go",
"bytes": "971217"
},
{
"name": "Java",
"bytes": "407618"
},
{
"name": "Jupyter Notebook",
"bytes": "1833674"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38189"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "268983"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "25693552"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "374053"
}
],
"symlink_target": ""
} |
''' The resources module provides the Resources class for easily configuring
how BokehJS code and CSS resources should be located, loaded, and embedded in
Bokeh documents.
Also provides some pre-configured Resources objects:
Attributes:
CDN : load minified BokehJS from CDN
INLINE : provide minified BokehJS from library static directory
'''
from os.path import abspath, join, normpath, realpath, relpath, split, splitext
import sys
import logging
logger = logging.getLogger(__name__)
from . import __version__, settings
def _server_static_dir():
return join(abspath(split(__file__)[0]), "server", "static")
def _static_path(path):
path = normpath(join(_server_static_dir(), path))
if sys.platform == 'cygwin': path = realpath(path)
return path
def _cdn_base_url():
return "http://cdn.pydata.org"
def _get_cdn_urls(version=None, minified=True):
if version is None:
version = __version__.split('-')[0]
min = ".min" if minified else ""
base_url = _cdn_base_url()
result = {
'js_files' : ['%s/bokeh-%s%s.js' % (base_url, version, min)],
'css_files' : ['%s/bokeh-%s%s.css' % (base_url, version, min)],
'messages' : [],
}
if len(__version__.split('-')) > 1:
result['messages'].append({
"type" : "warn",
"text" : "Requesting CDN BokehJS version '%s' from Bokeh development version '%s'. This configuration is unsupported and may not work!" % (version, __version__)
})
return result
def _get_server_urls(root_url, minified=True):
min = ".min" if minified else ""
result = {
'js_files' : ['%sbokehjs/static/js/bokeh%s.js' % (root_url, min)],
'css_files' : ['%sbokehjs/static/css/bokeh%s.css' % (root_url, min)],
'messages' : [],
}
return result
def _inline(paths):
strings = []
for path in paths:
begin = "/* BEGIN %s */" % path
middle = open(path, 'rb').read().decode("utf-8")
end = "/* END %s */" % path
strings.append(begin + '\n' + middle + '\n' + end)
return strings
def _file_paths(files, minified):
if minified:
files = [ root + ".min" + ext for (root, ext) in map(splitext, files) ]
return [ _static_path(file) for file in files ]
class Resources(object):
''' The Resources class encapsulates information relating to loading or
embedding BokehJS code and CSS.
Args:
mode (str) : how should BokehJS be included in output
See below for descriptions of available modes
        version (str, optional) : what version of BokehJS to load
Only valid with the ``'cdn'`` mode
root_dir (str, optional) : root directory for loading BokehJS resources
Only valid with ``'relative'`` and ``'relative-dev'`` modes
minified (bool, optional) : whether JavaScript and CSS should be minified or not (default: True)
        root_url (str, optional) : URL and port of the Bokeh Server to load resources from
Only valid with ``'server'`` and ``'server-dev'`` modes
The following **mode** values are available for configuring a Resource object:
* ``'inline'`` configure to provide entire BokehJS code and CSS inline
    * ``'cdn'`` configure to load BokehJS code and CSS from ``http://cdn.pydata.org``
* ``'server'`` configure to load from a Bokeh Server
* ``'server-dev'`` same as ``server`` but supports non-concatenated JS using ``requirejs``
* ``'relative'`` configure to load relative to the given directory
* ``'relative-dev'`` same as ``relative`` but supports non-concatenated JS using ``requirejs``
* ``'absolute'`` configure to load from the installed Bokeh library static directory
* ``'absolute-dev'`` same as ``absolute`` but supports non-concatenated JS using ``requirejs``
Once configured, a Resource object exposes the following public attributes:
Attributes:
logo_url : location of the BokehJS logo image
js_raw : any raw JS that needs to be placed inside ``<script>`` tags
        css_raw : any raw CSS that needs to be placed inside ``<style>`` tags
        js_files : URLs of any JS files that need to be loaded by ``<script>`` tags
        css_files : URLs of any CSS files that need to be loaded by ``<link>`` tags
        messages : any informational messages concerning this configuration
These attributes are often useful as template parameters when embedding
Bokeh plots.
'''
_default_js_files = ["js/bokeh.js"]
_default_css_files = ["css/bokeh.css"]
_default_js_files_dev = ['js/vendor/requirejs/require.js', 'js/config.js']
_default_css_files_dev = ['css/bokeh-vendor.css', 'css/main.css']
_default_root_dir = "."
_default_root_url = "http://127.0.0.1:5006"
logo_url = "http://bokeh.pydata.org/_static/bokeh-transparent.png"
def __init__(self, mode='inline', version=None, root_dir=None,
minified=True, root_url=None):
self.mode = settings.resources(mode)
self.root_dir = settings.rootdir(root_dir)
self.version = settings.version(version)
self.minified = settings.minified(minified)
if root_url and not root_url.endswith("/"):
logger.warning("root_url should end with a /, adding one")
root_url = root_url + "/"
self._root_url = root_url
if mode not in ['inline', 'cdn', 'server', 'server-dev', 'relative', 'relative-dev', 'absolute', 'absolute-dev']:
raise ValueError("wrong value for 'mode' parameter, expected 'inline', 'cdn', 'server', 'server-dev', 'relative(-dev)' or 'absolute(-dev)', got %r" % self.mode)
if self.root_dir and not mode.startswith("relative"):
raise ValueError("setting 'root_dir' makes sense only when 'mode' is set to 'relative'")
if self.version and not mode.startswith('cdn'):
raise ValueError("setting 'version' makes sense only when 'mode' is set to 'cdn'")
if root_url and not mode.startswith('server'):
raise ValueError("setting 'root_url' makes sense only when 'mode' is set to 'server'")
self.dev = self.mode.endswith('-dev')
if self.dev:
self.mode = self.mode[:-4]
js_paths = self._js_paths(dev=self.dev, minified=self.minified)
css_paths = self._css_paths(dev=self.dev, minified=self.minified)
base_url = _static_path("js")
self.js_raw = []
self.css_raw = []
self.js_files = []
self.css_files = []
self.messages = []
if self.mode == "inline":
self.js_raw = _inline(js_paths)
self.css_raw = _inline(css_paths)
elif self.mode == "relative":
root_dir = self.root_dir or self._default_root_dir
self.js_files = [ relpath(p, root_dir) for p in js_paths ]
self.css_files = [ relpath(p, root_dir) for p in css_paths ]
base_url = relpath(base_url, root_dir)
elif self.mode == "absolute":
self.js_files = list(js_paths)
self.css_files = list(css_paths)
elif self.mode == "cdn":
cdn = _get_cdn_urls(self.version, self.minified)
self.js_files = list(cdn['js_files'])
self.css_files = list(cdn['css_files'])
self.messages.extend(cdn['messages'])
elif self.mode == "server":
server = _get_server_urls(self.root_url, self.minified)
self.js_files = list(server['js_files'])
self.css_files = list(server['css_files'])
self.messages.extend(server['messages'])
if self.dev:
require = 'require.config({ baseUrl: "%s" });' % base_url
self.js_raw.append(require)
@property
def root_url(self):
if self._root_url:
return self._root_url
else:
return self._default_root_url
@property
def conn_string(self):
return self.root_url.replace("http", "ws", 1) + "bokeh/sub"
def _js_paths(self, minified=True, dev=False):
        files = self._default_js_files_dev if dev else self._default_js_files
return _file_paths(files, False if dev else minified)
def _css_paths(self, minified=True, dev=False):
        files = self._default_css_files_dev if dev else self._default_css_files
return _file_paths(files, False if dev else minified)
@property
def js_wrapper(self):
def pad(text, n=4):
return "\n".join([ " "*n + line for line in text.split("\n") ])
wrapper = lambda code: '$(function() {\n%s\n});' % pad(code)
if self.dev:
js_wrapper = lambda code: 'require(["jquery", "main"], function($, Bokeh) {\n%s\n});' % pad(wrapper(code))
else:
js_wrapper = wrapper
return js_wrapper
def _autoload_path(self, elementid):
return self.root_url + "bokeh/autoload.js/%s" % elementid
CDN = Resources(mode="cdn")
INLINE = Resources(mode="inline")
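# Illustrative usage (not part of the original module): the attribute
# lists of a Resources object map directly onto HTML tags, so a template
# might render something like:
#
#     >>> res = Resources(mode="cdn")
#     >>> ['<script src="%s"></script>' % f for f in res.js_files]
#     >>> ['<link rel="stylesheet" href="%s">' % f for f in res.css_files]
#
# In "inline" mode, js_raw/css_raw would instead be emitted inside
# <script> and <style> tags.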
| {
"content_hash": "59907ba9e95c1bb109477dcba5b3ce31",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 172,
"avg_line_length": 37.45454545454545,
"alnum_prop": 0.6065754633715799,
"repo_name": "sahat/bokeh",
"id": "0812bceb26eb6040cb827d1a7d256550ff7c6bd7",
"size": "9064",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bokeh/resources.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "329134"
},
{
"name": "CoffeeScript",
"bytes": "2099237"
},
{
"name": "JavaScript",
"bytes": "2683660"
},
{
"name": "Python",
"bytes": "973217"
},
{
"name": "Scala",
"bytes": "27312"
},
{
"name": "Shell",
"bytes": "12135"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from wex.iterable import flatten
from operator import methodcaller
strip = methodcaller('strip')
def partition(separator, **kw):
""" Returns a function that yields tuples created by partitioning
text using `separator`.
"""
normalize_head = kw.pop('normalize_head', strip)
normalize_tail = kw.pop('normalize_tail', strip)
def _partition(obj):
for s in flatten(obj):
head, sep, tail = s.partition(separator)
if normalize_head:
head = normalize_head(head)
if normalize_tail:
tail = normalize_tail(tail)
if sep:
yield (head, tail)
return _partition
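# Illustrative usage (not part of the original module), assuming
# `flatten` yields the input strings themselves:
#
#     >>> split_kv = partition(':')
#     >>> list(split_kv(['name: Alice', 'no separator here']))
#     [('name', 'Alice')]
#
# Lines that do not contain the separator produce no tuple, since the
# generator only yields when `sep` is non-empty.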
| {
"content_hash": "65f0194b26ac23fb9ec1a1c548f0da8e",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 69,
"avg_line_length": 32.86363636363637,
"alnum_prop": 0.6127247579529738,
"repo_name": "eBay/wextracto",
"id": "7bf57e76139b4f00a3f2599919dc5dd485839e95",
"size": "723",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "wex/string.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "9845"
},
{
"name": "Python",
"bytes": "188654"
}
],
"symlink_target": ""
} |
""" Classes for interpolating values.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp',
'ppform', 'lagrange', 'PPoly', 'BPoly', 'NdPPoly',
'RegularGridInterpolator', 'interpn']
import itertools
import warnings
import functools
import operator
import numpy as np
from numpy import (array, transpose, searchsorted, atleast_1d, atleast_2d,
dot, ravel, poly1d, asarray, intp)
import scipy.linalg
import scipy.special as spec
from scipy.special import comb
from scipy._lib.six import xrange, integer_types, string_types
from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
from ._bsplines import make_interp_spline, BSpline
def prod(x):
"""Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
if len(x) == 0:
return 1
return functools.reduce(operator.mul, x)
def lagrange(x, w):
"""
Return a Lagrange interpolating polynomial.
    Given two 1-D arrays `x` and `w`, returns the Lagrange interpolating
polynomial through the points ``(x, w)``.
Warning: This implementation is numerically unstable. Do not expect to
be able to use more than about 20 points even if they are chosen optimally.
Parameters
----------
x : array_like
`x` represents the x-coordinates of a set of datapoints.
w : array_like
`w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).
Returns
-------
lagrange : numpy.poly1d instance
The Lagrange interpolating polynomial.
"""
M = len(x)
p = poly1d(0.0)
for j in xrange(M):
pt = poly1d(w[j])
for k in xrange(M):
if k == j:
continue
fac = x[j]-x[k]
pt *= poly1d([1.0, -x[k]])/fac
p += pt
return p
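# Example (illustrative, not part of the original source): the
# interpolating polynomial through (0, 0), (1, 1), (2, 4) is x**2,
# so, up to rounding:
#
#     >>> p = lagrange([0, 1, 2], [0, 1, 4])
#     >>> p(3)
#     9.0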
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d(object):
"""
interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=nan)
Interpolate over a 2-D grid.
`x`, `y` and `z` are arrays of values used to approximate some function
f: ``z = f(x, y)``. This class returns a function whose call method uses
spline interpolation to find the value of new points.
If `x` and `y` represent a regular grid, consider using
RectBivariateSpline.
Note that calling `interp2d` with NaNs present in input values results in
undefined behaviour.
Methods
-------
__call__
Parameters
----------
x, y : array_like
Arrays defining the data point coordinates.
If the points lie on a regular grid, `x` can specify the column
coordinates and `y` the row coordinates, for example::
>>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]
Otherwise, `x` and `y` must specify the full coordinates for each
point, for example::
>>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]
If `x` and `y` are multi-dimensional, they are flattened before use.
z : array_like
The values of the function to interpolate at the data points. If
`z` is a multi-dimensional array, it is flattened before use. The
length of a flattened `z` array is either
len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
for each point.
kind : {'linear', 'cubic', 'quintic'}, optional
The kind of spline interpolation to use. Default is 'linear'.
copy : bool, optional
If True, the class makes internal copies of x, y and z.
If False, references may be used. The default is to copy.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data (x,y), a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If omitted (None), values outside
the domain are extrapolated.
See Also
--------
RectBivariateSpline :
Much faster 2D interpolation if your input data is on a grid
bisplrep, bisplev :
Spline interpolation based on FITPACK
BivariateSpline : a more recent wrapper of the FITPACK routines
interp1d : one dimension version of this function
Notes
-----
The minimum number of data points required along the interpolation
axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
quintic interpolation.
The interpolator is constructed by `bisplrep`, with a smoothing factor
of 0. If more control over smoothing is needed, `bisplrep` should be
used directly.
Examples
--------
Construct a 2-D grid and interpolate on it:
>>> from scipy import interpolate
>>> x = np.arange(-5.01, 5.01, 0.25)
>>> y = np.arange(-5.01, 5.01, 0.25)
>>> xx, yy = np.meshgrid(x, y)
>>> z = np.sin(xx**2+yy**2)
>>> f = interpolate.interp2d(x, y, z, kind='cubic')
Now use the obtained interpolation function and plot the result:
>>> import matplotlib.pyplot as plt
>>> xnew = np.arange(-5.01, 5.01, 1e-2)
>>> ynew = np.arange(-5.01, 5.01, 1e-2)
>>> znew = f(xnew, ynew)
>>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
>>> plt.show()
"""
def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=None):
x = ravel(x)
y = ravel(y)
z = asarray(z)
rectangular_grid = (z.size == len(x) * len(y))
if rectangular_grid:
if z.ndim == 2:
if z.shape != (len(y), len(x)):
raise ValueError("When on a regular grid with x.size = m "
"and y.size = n, if z.ndim == 2, then z "
"must have shape (n, m)")
if not np.all(x[1:] >= x[:-1]):
j = np.argsort(x)
x = x[j]
z = z[:, j]
if not np.all(y[1:] >= y[:-1]):
j = np.argsort(y)
y = y[j]
z = z[j, :]
z = ravel(z.T)
else:
z = ravel(z)
if len(x) != len(y):
raise ValueError(
"x and y must have equal lengths for non rectangular grid")
if len(z) != len(x):
raise ValueError(
"Invalid length for input z for non rectangular grid")
try:
kx = ky = {'linear': 1,
'cubic': 3,
'quintic': 5}[kind]
except KeyError:
raise ValueError("Unsupported interpolation type.")
if not rectangular_grid:
# TODO: surfit is really not meant for interpolation!
self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
else:
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
x, y, z, None, None, None, None,
kx=kx, ky=ky, s=0.0)
self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
kx, ky)
self.bounds_error = bounds_error
self.fill_value = fill_value
self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]
self.x_min, self.x_max = np.amin(x), np.amax(x)
self.y_min, self.y_max = np.amin(y), np.amax(y)
def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
"""Interpolate the function.
Parameters
----------
x : 1D array
x-coordinates of the mesh on which to interpolate.
y : 1D array
y-coordinates of the mesh on which to interpolate.
dx : int >= 0, < kx
Order of partial derivatives in x.
dy : int >= 0, < ky
Order of partial derivatives in y.
assume_sorted : bool, optional
If False, values of `x` and `y` can be in any order and they are
sorted first.
If True, `x` and `y` have to be arrays of monotonically
increasing values.
Returns
-------
z : 2D array with shape (len(y), len(x))
The interpolated values.
"""
x = atleast_1d(x)
y = atleast_1d(y)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should both be 1-D arrays")
if not assume_sorted:
x = np.sort(x)
y = np.sort(y)
if self.bounds_error or self.fill_value is not None:
out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
any_out_of_bounds_x = np.any(out_of_bounds_x)
any_out_of_bounds_y = np.any(out_of_bounds_y)
if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
raise ValueError("Values out of range; x must be in %r, y in %r"
% ((self.x_min, self.x_max),
(self.y_min, self.y_max)))
z = fitpack.bisplev(x, y, self.tck, dx, dy)
z = atleast_2d(z)
z = transpose(z)
if self.fill_value is not None:
if any_out_of_bounds_x:
z[:, out_of_bounds_x] = self.fill_value
if any_out_of_bounds_y:
z[out_of_bounds_y, :] = self.fill_value
if len(z) == 1:
z = z[0]
return array(z)
def _check_broadcast_up_to(arr_from, shape_to, name):
"""Helper to check that arr_from broadcasts up to shape_to"""
shape_from = arr_from.shape
if len(shape_to) >= len(shape_from):
for t, f in zip(shape_to[::-1], shape_from[::-1]):
if f != 1 and f != t:
break
else: # all checks pass, do the upcasting that we need later
if arr_from.size != 1 and arr_from.shape != shape_to:
arr_from = np.ones(shape_to, arr_from.dtype) * arr_from
return arr_from.ravel()
# at least one check failed
raise ValueError('%s argument must be able to broadcast up '
'to shape %s but had shape %s'
% (name, shape_to, shape_from))
def _do_extrapolate(fill_value):
"""Helper to check if fill_value == "extrapolate" without warnings"""
return (isinstance(fill_value, string_types) and
fill_value == 'extrapolate')
class interp1d(_Interpolator1D):
"""
Interpolate a 1-D function.
`x` and `y` are arrays of values used to approximate some function f:
``y = f(x)``. This class returns a function whose call method uses
interpolation to find the value of new points.
Note that calling `interp1d` with NaNs present in input values results in
undefined behaviour.
Parameters
----------
x : (N,) array_like
A 1-D array of real values.
y : (...,N,...) array_like
A N-D array of real values. The length of `y` along the interpolation
axis must be equal to the length of `x`.
kind : str or int, optional
Specifies the kind of interpolation as a string
        ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'
where 'slinear', 'quadratic' and 'cubic' refer to a spline
interpolation of first, second or third order) or as an integer
specifying the order of the spline interpolator to use.
Default is 'linear'.
axis : int, optional
Specifies the axis of `y` along which to interpolate.
Interpolation defaults to the last axis of `y`.
copy : bool, optional
If True, the class makes internal copies of x and y.
If False, references to `x` and `y` are used. The default is to copy.
bounds_error : bool, optional
If True, a ValueError is raised any time interpolation is attempted on
a value outside of the range of x (where extrapolation is
necessary). If False, out of bounds values are assigned `fill_value`.
By default, an error is raised unless `fill_value="extrapolate"`.
fill_value : array-like or (array-like, array_like) or "extrapolate", optional
        - If an ndarray (or float), this value will be used to fill in for
requested points outside of the data range. If not provided, then
the default is NaN. The array-like must broadcast properly to the
dimensions of the non-interpolation axes.
- If a two-element tuple, then the first element is used as a
fill value for ``x_new < x[0]`` and the second element is used for
``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g.,
list or ndarray, regardless of shape) is taken to be a single
array-like argument meant to be used for both bounds as
``below, above = fill_value, fill_value``.
.. versionadded:: 0.17.0
- If "extrapolate", then points outside the data range will be
extrapolated.
.. versionadded:: 0.17.0
assume_sorted : bool, optional
If False, values of `x` can be in any order and they are sorted first.
If True, `x` has to be an array of monotonically increasing values.
Methods
-------
__call__
See Also
--------
splrep, splev
Spline interpolation/smoothing based on FITPACK.
UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
interp2d : 2-D interpolation
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import interpolate
>>> x = np.arange(0, 10)
>>> y = np.exp(-x/3.0)
>>> f = interpolate.interp1d(x, y)
>>> xnew = np.arange(0, 9, 0.1)
>>> ynew = f(xnew) # use interpolation function returned by `interp1d`
>>> plt.plot(x, y, 'o', xnew, ynew, '-')
>>> plt.show()
"""
def __init__(self, x, y, kind='linear', axis=-1,
copy=True, bounds_error=None, fill_value=np.nan,
assume_sorted=False):
""" Initialize a 1D linear interpolation class."""
_Interpolator1D.__init__(self, x, y, axis=axis)
self.bounds_error = bounds_error # used by fill_value setter
self.copy = copy
if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
order = {'nearest': 0, 'zero': 0, 'slinear': 1,
'quadratic': 2, 'cubic': 3}[kind]
kind = 'spline'
elif isinstance(kind, int):
order = kind
kind = 'spline'
elif kind not in ('linear', 'nearest'):
raise NotImplementedError("%s is unsupported: Use fitpack "
"routines for other types." % kind)
x = array(x, copy=self.copy)
y = array(y, copy=self.copy)
if not assume_sorted:
ind = np.argsort(x)
x = x[ind]
y = np.take(y, ind, axis=axis)
if x.ndim != 1:
raise ValueError("the x array must have exactly one dimension.")
if y.ndim == 0:
raise ValueError("the y array must have at least one dimension.")
# Force-cast y to a floating-point type, if it's not yet one
if not issubclass(y.dtype.type, np.inexact):
y = y.astype(np.float_)
# Backward compatibility
self.axis = axis % y.ndim
# Interpolation goes internally along the first axis
self.y = y
self._y = self._reshape_yi(self.y)
self.x = x
del y, x # clean up namespace to prevent misuse; use attributes
self._kind = kind
self.fill_value = fill_value # calls the setter, can modify bounds_err
# Adjust to interpolation kind; store reference to *unbound*
# interpolation methods, in order to avoid circular references to self
# stored in the bound instance methods, and therefore delayed garbage
# collection. See: http://docs.python.org/2/reference/datamodel.html
if kind in ('linear', 'nearest'):
# Make a "view" of the y array that is rotated to the interpolation
# axis.
minval = 2
if kind == 'nearest':
# Do division before addition to prevent possible integer
# overflow
self.x_bds = self.x / 2.0
self.x_bds = self.x_bds[1:] + self.x_bds[:-1]
self._call = self.__class__._call_nearest
else:
# Check if we can delegate to numpy.interp (2x-10x faster).
cond = self.x.dtype == np.float_ and self.y.dtype == np.float_
cond = cond and self.y.ndim == 1
cond = cond and not _do_extrapolate(fill_value)
if cond:
self._call = self.__class__._call_linear_np
else:
self._call = self.__class__._call_linear
else:
minval = order + 1
rewrite_nan = False
xx, yy = self.x, self._y
if order > 1:
# Quadratic or cubic spline. If input contains even a single
# nan, then the output is all nans. We cannot just feed data
# with nans to make_interp_spline because it calls LAPACK.
# So, we make up a bogus x and y with no nans and use it
# to get the correct shape of the output, which we then fill
# with nans.
# For slinear or zero order spline, we just pass nans through.
if np.isnan(self.x).any():
xx = np.linspace(min(self.x), max(self.x), len(self.x))
rewrite_nan = True
if np.isnan(self._y).any():
yy = np.ones_like(self._y)
rewrite_nan = True
self._spline = make_interp_spline(xx, yy, k=order,
check_finite=False)
if rewrite_nan:
self._call = self.__class__._call_nan_spline
else:
self._call = self.__class__._call_spline
if len(self.x) < minval:
raise ValueError("x and y arrays must have at "
"least %d entries" % minval)
@property
def fill_value(self):
# backwards compat: mimic a public attribute
return self._fill_value_orig
@fill_value.setter
def fill_value(self, fill_value):
# extrapolation only works for nearest neighbor and linear methods
if _do_extrapolate(fill_value):
if self.bounds_error:
raise ValueError("Cannot extrapolate and raise "
"at the same time.")
self.bounds_error = False
self._extrapolate = True
else:
broadcast_shape = (self.y.shape[:self.axis] +
self.y.shape[self.axis + 1:])
if len(broadcast_shape) == 0:
broadcast_shape = (1,)
# it's either a pair (_below_range, _above_range) or a single value
# for both above and below range
if isinstance(fill_value, tuple) and len(fill_value) == 2:
below_above = [np.asarray(fill_value[0]),
np.asarray(fill_value[1])]
names = ('fill_value (below)', 'fill_value (above)')
for ii in range(2):
below_above[ii] = _check_broadcast_up_to(
below_above[ii], broadcast_shape, names[ii])
else:
fill_value = np.asarray(fill_value)
below_above = [_check_broadcast_up_to(
fill_value, broadcast_shape, 'fill_value')] * 2
self._fill_value_below, self._fill_value_above = below_above
self._extrapolate = False
if self.bounds_error is None:
self.bounds_error = True
# backwards compat: fill_value was a public attr; make it writeable
self._fill_value_orig = fill_value
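    # Example (illustrative, not in the original source): a 2-tuple
    # fill_value assigns separate values below and above the data range,
    # e.g. interp1d(x, y, bounds_error=False, fill_value=(-1.0, 1.0))
    # returns -1.0 for x_new < x[0] and 1.0 for x_new > x[-1].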
def _call_linear_np(self, x_new):
# Note that out-of-bounds values are taken care of in self._evaluate
return np.interp(x_new, self.x, self.y)
def _call_linear(self, x_new):
        # 2. Find where in the original data the values to interpolate
# would be inserted.
# Note: If x_new[n] == x[m], then m is returned by searchsorted.
x_new_indices = searchsorted(self.x, x_new)
# 3. Clip x_new_indices so that they are within the range of
# self.x indices and at least 1. Removes mis-interpolation
# of x_new[n] = x[0]
x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
# 4. Calculate the slope of regions that each x_new value falls in.
lo = x_new_indices - 1
hi = x_new_indices
x_lo = self.x[lo]
x_hi = self.x[hi]
y_lo = self._y[lo]
y_hi = self._y[hi]
# Note that the following two expressions rely on the specifics of the
# broadcasting semantics.
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
# 5. Calculate the actual value for each entry in x_new.
y_new = slope*(x_new - x_lo)[:, None] + y_lo
return y_new
def _call_nearest(self, x_new):
""" Find nearest neighbour interpolated y_new = f(x_new)."""
# 2. Find where in the averaged data the values to interpolate
# would be inserted.
# Note: use side='left' (right) to searchsorted() to define the
# halfway point to be nearest to the left (right) neighbour
x_new_indices = searchsorted(self.x_bds, x_new, side='left')
# 3. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
# 4. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices]
return y_new
def _call_spline(self, x_new):
return self._spline(x_new)
def _call_nan_spline(self, x_new):
out = self._spline(x_new)
out[...] = np.nan
return out
def _evaluate(self, x_new):
# 1. Handle values in x_new that are outside of x. Throw error,
# or return a list of mask array indicating the outofbounds values.
# The behavior is set by the bounds_error variable.
x_new = asarray(x_new)
y_new = self._call(self, x_new)
if not self._extrapolate:
below_bounds, above_bounds = self._check_bounds(x_new)
if len(y_new) > 0:
# Note fill_value must be broadcast up to the proper size
# and flattened to work here
y_new[below_bounds] = self._fill_value_below
y_new[above_bounds] = self._fill_value_above
return y_new
def _check_bounds(self, x_new):
"""Check the inputs for being in the bounds of the interpolated data.
Parameters
----------
x_new : array
Returns
-------
out_of_bounds : bool array
The mask on x_new of values that are out of the bounds.
"""
# If self.bounds_error is True, we raise an error if any x_new values
# fall outside the range of x. Otherwise, we return an array indicating
# which values are outside the boundary region.
below_bounds = x_new < self.x[0]
above_bounds = x_new > self.x[-1]
# !! Could provide more information about which values are out of bounds
if self.bounds_error and below_bounds.any():
raise ValueError("A value in x_new is below the interpolation "
"range.")
if self.bounds_error and above_bounds.any():
raise ValueError("A value in x_new is above the interpolation "
"range.")
# !! Should we emit a warning if some values are out of bounds?
# !! matlab does not.
return below_bounds, above_bounds
class _PPolyBase(object):
"""Base class for piecewise polynomials."""
__slots__ = ('c', 'x', 'extrapolate', 'axis')
def __init__(self, c, x, extrapolate=None, axis=0):
self.c = np.asarray(c)
self.x = np.ascontiguousarray(x, dtype=np.float64)
if extrapolate is None:
extrapolate = True
elif extrapolate != 'periodic':
extrapolate = bool(extrapolate)
self.extrapolate = extrapolate
if not (0 <= axis < self.c.ndim - 1):
raise ValueError("%s must be between 0 and %s" % (axis, c.ndim-1))
self.axis = axis
if axis != 0:
# roll the interpolation axis to be the first one in self.c
# More specifically, the target shape for self.c is (k, m, ...),
# and axis !=0 means that we have c.shape (..., k, m, ...)
# ^
# axis
# So we roll two of them.
self.c = np.rollaxis(self.c, axis+1)
self.c = np.rollaxis(self.c, axis+1)
if self.x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if self.x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if self.c.ndim < 2:
raise ValueError("c must have at least 2 dimensions")
if self.c.shape[0] == 0:
raise ValueError("polynomial must be at least of order 0")
if self.c.shape[1] != self.x.size-1:
raise ValueError("number of coefficients != len(x)-1")
dx = np.diff(self.x)
if not (np.all(dx >= 0) or np.all(dx <= 0)):
raise ValueError("`x` must be strictly increasing or decreasing.")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
@classmethod
def construct_fast(cls, c, x, extrapolate=None, axis=0):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
`c` and `x` must be arrays of the correct shape and type. The
`c` array can only be of dtypes float and complex, and `x`
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
self.axis = axis
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _ensure_c_contiguous(self):
"""
c and x may be modified by the user. The Cython code expects
that they are C contiguous.
"""
if not self.x.flags.c_contiguous:
self.x = self.x.copy()
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
def extend(self, c, x, right=None):
"""
Add additional breakpoints and coefficients to the polynomial.
Parameters
----------
c : ndarray, size (k, m, ...)
Additional coefficients for polynomials in intervals. Note that
the first additional interval will be formed using one of the
`self.x` end points.
x : ndarray, size (m,)
Additional breakpoints. Must be sorted in the same order as
`self.x` and either to the right or to the left of the current
breakpoints.
right
Deprecated argument. Has no effect.
.. deprecated:: 0.19
"""
if right is not None:
warnings.warn("`right` is deprecated and will be removed.")
c = np.asarray(c)
x = np.asarray(x)
if c.ndim < 2:
raise ValueError("invalid dimensions for c")
if x.ndim != 1:
raise ValueError("invalid dimensions for x")
if x.shape[0] != c.shape[1]:
raise ValueError("x and c have incompatible sizes")
if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
raise ValueError("c and self.c have incompatible shapes")
if c.size == 0:
return
dx = np.diff(x)
if not (np.all(dx >= 0) or np.all(dx <= 0)):
raise ValueError("`x` is not sorted.")
if self.x[-1] >= self.x[0]:
if not x[-1] >= x[0]:
raise ValueError("`x` is in the different order "
"than `self.x`.")
if x[0] >= self.x[-1]:
action = 'append'
elif x[-1] <= self.x[0]:
action = 'prepend'
else:
raise ValueError("`x` is neither on the left or on the right "
"from `self.x`.")
else:
if not x[-1] <= x[0]:
raise ValueError("`x` is in the different order "
"than `self.x`.")
if x[0] <= self.x[-1]:
action = 'append'
elif x[-1] >= self.x[0]:
action = 'prepend'
else:
raise ValueError("`x` is neither on the left or on the right "
"from `self.x`.")
dtype = self._get_dtype(c.dtype)
k2 = max(c.shape[0], self.c.shape[0])
c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
dtype=dtype)
if action == 'append':
c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
c2[k2-c.shape[0]:, self.c.shape[1]:] = c
self.x = np.r_[self.x, x]
elif action == 'prepend':
c2[k2-self.c.shape[0]:, :c.shape[1]] = c
c2[k2-c.shape[0]:, c.shape[1]:] = self.c
self.x = np.r_[x, self.x]
self.c = c2
def __call__(self, x, nu=0, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative.
Parameters
----------
x : array_like
Points to evaluate the interpolant at.
nu : int, optional
Order of derivative to evaluate. Must be non-negative.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
x = np.asarray(x)
x_shape, x_ndim = x.shape, x.ndim
x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
# With periodic extrapolation we map x to the segment
# [self.x[0], self.x[-1]].
if extrapolate == 'periodic':
x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0])
extrapolate = False
out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
self._ensure_c_contiguous()
self._evaluate(x, nu, extrapolate, out)
out = out.reshape(x_shape + self.c.shape[2:])
if self.axis != 0:
# transpose to move the calculated values to the interpolation axis
l = list(range(out.ndim))
l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
out = out.transpose(l)
return out
class PPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
local power basis::
S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
where ``k`` is the degree of the polynomial.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
derivative
antiderivative
integrate
solve
roots
extend
from_spline
from_bernstein_basis
construct_fast
See also
--------
BPoly : piecewise polynomials in the Bernstein basis
Notes
-----
High-order polynomials in the power basis can be numerically
unstable. Precision problems can start to appear for orders
larger than 20-30.
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e. compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
pp : PPoly
            Piecewise polynomial of order k2 = k - nu representing the derivative
of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if nu < 0:
return self.antiderivative(-nu)
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
c2 = self.c[:-nu, :].copy()
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Antiderivative is also the indefinite integral of the function,
and derivative is its inverse operation.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e. compute
the first integral. If negative, the derivative is returned.
Returns
-------
pp : PPoly
            Piecewise polynomial of order k2 = k + nu representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
"""
if nu <= 0:
return self.derivative(-nu)
c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
dtype=self.c.dtype)
c[:-nu] = self.c
# divide by the correct rising factorials
factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
self._ensure_c_contiguous()
_ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
self.x, nu - 1)
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
# construct a compatible polynomial
return self.construct_fast(c, self.x, extrapolate, self.axis)
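    # Sanity check (illustrative only): antiderivative followed by
    # derivative recovers the original values up to rounding:
    #
    #     >>> p = PPoly(np.array([[1.0], [0.0], [0.0]]), [0.0, 1.0])
    #     >>> float(p.antiderivative().derivative()(0.5))
    #     0.25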
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over [a, b]
"""
if extrapolate is None:
extrapolate = self.extrapolate
# Swap integration bounds if needed
sign = 1
if b < a:
a, b = b, a
sign = -1
range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
self._ensure_c_contiguous()
# Compute the integral.
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
if n_periods > 0:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xe, False, out=range_int)
range_int *= n_periods
else:
range_int.fill(0)
# Map a to [xs, xe], b is always a + left.
a = xs + (a - xs) % period
b = a + left
# If b <= xe then we need to integrate over [a, b], otherwise
# over [a, xe] and from xs to what is remained.
remainder_int = np.empty_like(range_int)
if b <= xe:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, xe, False, out=remainder_int)
range_int += remainder_int
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xs + left + a - xe, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, bool(extrapolate), out=range_int)
# Return
range_int *= sign
return range_int.reshape(self.c.shape[2:])
def solve(self, y=0., discontinuity=True, extrapolate=None):
"""
        Find real solutions of the equation ``pp(x) == y``.
Parameters
----------
y : float, optional
Right-hand side. Default is zero.
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array whose each element is an
ndarray containing the roots.
Notes
-----
This routine works only on real-valued polynomials.
If the piecewise polynomial contains sections that are
identically zero, the root list will contain the start point
of the corresponding interval, followed by a ``nan`` value.
If the polynomial is discontinuous across a breakpoint, and
there is a sign change across the breakpoint, this is reported
        if the `discontinuity` parameter is True.
Examples
--------
Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
``[-2, 1], [1, 2]``:
>>> from scipy.interpolate import PPoly
>>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
>>> pp.roots()
array([-1., 1.])
"""
if extrapolate is None:
extrapolate = self.extrapolate
self._ensure_c_contiguous()
if np.issubdtype(self.c.dtype, np.complexfloating):
raise ValueError("Root finding is only for "
"real-valued polynomials")
y = float(y)
r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, y, bool(discontinuity),
bool(extrapolate))
if self.c.ndim == 2:
return r[0]
else:
r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
# this for-loop is equivalent to ``r2[...] = r``, but that's broken
# in numpy 1.6.0
for ii, root in enumerate(r):
r2[ii] = root
return r2.reshape(self.c.shape[2:])
def roots(self, discontinuity=True, extrapolate=None):
"""
        Find real roots of the piecewise polynomial.
Parameters
----------
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array whose each element is an
ndarray containing the roots.
See Also
--------
PPoly.solve
"""
return self.solve(0, discontinuity, extrapolate)
@classmethod
def from_spline(cls, tck, extrapolate=None):
"""
Construct a piecewise polynomial from a spline
Parameters
----------
tck
A spline, as returned by `splrep` or a BSpline object.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
"""
if isinstance(tck, BSpline):
t, c, k = tck.tck
if extrapolate is None:
extrapolate = tck.extrapolate
else:
t, c, k = tck
cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
for m in xrange(k, -1, -1):
y = fitpack.splev(t[:-1], tck, der=m)
cvals[k - m, :] = y/spec.gamma(m+1)
return cls.construct_fast(cvals, t, extrapolate)
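    # Illustrative usage (not part of the original source), assuming
    # `splrep` from scipy.interpolate is available:
    #
    #     >>> from scipy.interpolate import splrep
    #     >>> xs = np.linspace(0, 10, 11)
    #     >>> pp = PPoly.from_spline(splrep(xs, np.sin(xs)))
    #
    # `pp` then evaluates the same cubic spline in the power basis.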
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
"""
Construct a piecewise polynomial in the power basis
from a polynomial in Bernstein basis.
Parameters
----------
bp : BPoly
A Bernstein basis polynomial, as created by BPoly
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
"""
dx = np.diff(bp.x)
k = bp.c.shape[0] - 1 # polynomial order
rest = (None,)*(bp.c.ndim-2)
c = np.zeros_like(bp.c)
for a in range(k+1):
factor = (-1)**a * comb(k, a) * bp.c[a]
for s in range(a, k+1):
val = comb(k-a, s-a) * (-1)**s
c[k-s] += factor * val / dx[(slice(None),)+rest]**s
if extrapolate is None:
extrapolate = bp.extrapolate
return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
class BPoly(_PPolyBase):
"""Piecewise polynomial in terms of coefficients and breakpoints.
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
Bernstein polynomial basis::
S = sum(c[a, i] * b(a, k; x) for a in range(k+1)),
where ``k`` is the degree of the polynomial, and::
b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a),
with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is the binomial
coefficient.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
extend
derivative
antiderivative
integrate
construct_fast
from_power_basis
from_derivatives
See also
--------
PPoly : piecewise polynomials in the power basis
Notes
-----
Properties of Bernstein polynomials are well documented in the literature.
Here's a non-exhaustive list:
.. [1] http://en.wikipedia.org/wiki/Bernstein_polynomial
.. [2] Kenneth I. Joy, Bernstein polynomials,
http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
.. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
vol 2011, article ID 829546, :doi:`10.1155/2011/829543`.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> x = [0, 1]
>>> c = [[1], [2], [3]]
>>> bp = BPoly(c, x)
This creates a 2nd order polynomial
.. math::
B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
= 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate_bernstein(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e. compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k - nu representing the derivative of
this polynomial.
"""
if nu < 0:
return self.antiderivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.derivative()
return bp
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
# For a polynomial
# B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
# we use the fact that
# b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
# which leads to
# B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
#
# finally, for an interval [y, y + dy] with dy != 1,
# we need to correct for an extra power of dy
rest = (None,)*(self.c.ndim-2)
k = self.c.shape[0] - 1
dx = np.diff(self.x)[(None, slice(None))+rest]
c2 = k * np.diff(self.c, axis=0) / dx
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e. compute
the first integral. If negative, the derivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k + nu representing the
antiderivative of this polynomial.
Notes
-----
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
"""
if nu <= 0:
return self.derivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.antiderivative()
return bp
# Construct the indefinite integrals on individual intervals
c, x = self.c, self.x
k = c.shape[0]
c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)
c2[1:, ...] = np.cumsum(c, axis=0) / k
delta = x[1:] - x[:-1]
c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]
# Now fix continuity: on the very first interval, take the integration
# constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
# the integration constant is then equal to the jump of the `bp` at x_j.
# The latter is given by the coefficient of B_{n+1, n+1}
# *on the previous interval* (other B. polynomials are zero at the
# breakpoint). Finally, use the fact that BPs form a partition of unity.
c2[:,1:] += np.cumsum(c2[k, :], axis=0)[:-1]
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
return self.construct_fast(c2, x, extrapolate, axis=self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. If 'periodic', periodic
extrapolation is used. If None (default), use `self.extrapolate`.
Returns
-------
array_like
Definite integral of the piecewise polynomial over [a, b]
"""
# XXX: can probably use instead the fact that
        # \int_0^{1} B_{j, n}(x) dx = 1/(n+1)
ib = self.antiderivative()
if extrapolate is None:
extrapolate = self.extrapolate
# ib.extrapolate shouldn't be 'periodic', it is converted to
        # False for 'periodic' in the antiderivative() call.
if extrapolate != 'periodic':
ib.extrapolate = extrapolate
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
# For simplicity and clarity convert to a <= b case.
if a <= b:
sign = 1
else:
a, b = b, a
sign = -1
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
res = n_periods * (ib(xe) - ib(xs))
# Map a and b to [xs, xe].
a = xs + (a - xs) % period
b = a + left
# If b <= xe then we need to integrate over [a, b], otherwise
# over [a, xe] and from xs to what is remained.
if b <= xe:
res += ib(b) - ib(a)
else:
res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs)
return sign * res
else:
return ib(b) - ib(a)
def extend(self, c, x, right=None):
k = max(self.c.shape[0], c.shape[0])
self.c = self._raise_degree(self.c, k - self.c.shape[0])
c = self._raise_degree(c, k - c.shape[0])
return _PPolyBase.extend(self, c, x, right)
extend.__doc__ = _PPolyBase.extend.__doc__
@classmethod
def from_power_basis(cls, pp, extrapolate=None):
"""
Construct a piecewise polynomial in Bernstein basis
from a power basis polynomial.
Parameters
----------
pp : PPoly
A piecewise polynomial in the power basis
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
"""
dx = np.diff(pp.x)
k = pp.c.shape[0] - 1 # polynomial order
rest = (None,)*(pp.c.ndim-2)
c = np.zeros_like(pp.c)
for a in range(k+1):
factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
for j in range(k-a, k+1):
c[j] += factor * comb(j, k-a)
if extrapolate is None:
extrapolate = pp.extrapolate
return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
@classmethod
def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
"""Construct a piecewise polynomial in the Bernstein basis,
compatible with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array_likes
``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]``
orders : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
Notes
-----
If ``k`` derivatives are specified at a breakpoint ``x``, the
constructed polynomial is exactly ``k`` times continuously
differentiable at ``x``, unless the ``order`` is provided explicitly.
In the latter case, the smoothness of the polynomial at
the breakpoint is controlled by the ``order``.
Deduces the number of derivatives to match at each end
from ``order`` and the number of derivatives available. If
possible it uses the same number of derivatives from
each end; if the number is odd it tries to take the
extra one from y2. In any case if not enough derivatives
are available at one end or another it draws enough to
make up the total from the other end.
If the order is too high and not enough derivatives are available,
an exception is raised.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
>>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
Creates a piecewise polynomial `f(x)`, such that
`f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
Based on the number of derivatives provided, the order of the
local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
Notice that no restriction is imposed on the derivatives at
`x = 1` and `x = 2`.
Indeed, the explicit form of the polynomial is::
f(x) = | x * (1 - x), 0 <= x < 1
| 2 * (x - 1), 1 <= x <= 2
So that f'(1-0) = -1 and f'(1+0) = 2
"""
xi = np.asarray(xi)
if len(xi) != len(yi):
raise ValueError("xi and yi need to have the same length")
        if np.any(xi[1:] - xi[:-1] <= 0):
raise ValueError("x coordinates are not in increasing order")
# number of intervals
m = len(xi) - 1
# global poly order is k-1, local orders are <=k and can vary
try:
k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
except TypeError:
raise ValueError("Using a 1D array for y? Please .reshape(-1, 1).")
if orders is None:
orders = [None] * m
else:
if isinstance(orders, (integer_types, np.integer)):
orders = [orders] * m
k = max(k, max(orders))
if any(o <= 0 for o in orders):
raise ValueError("Orders must be positive.")
c = []
for i in range(m):
y1, y2 = yi[i], yi[i+1]
if orders[i] is None:
n1, n2 = len(y1), len(y2)
else:
n = orders[i]+1
n1 = min(n//2, len(y1))
n2 = min(n - n1, len(y2))
                n1 = min(n - n2, len(y1))
if n1+n2 != n:
mesg = ("Point %g has %d derivatives, point %g"
" has %d derivatives, but order %d requested" % (
xi[i], len(y1), xi[i+1], len(y2), orders[i]))
raise ValueError(mesg)
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with"
" length y1 or y2.")
b = BPoly._construct_from_derivatives(xi[i], xi[i+1],
y1[:n1], y2[:n2])
if len(b) < k:
b = BPoly._raise_degree(b, k - len(b))
c.append(b)
c = np.asarray(c)
return cls(c.swapaxes(0, 1), xi, extrapolate)
@staticmethod
def _construct_from_derivatives(xa, xb, ya, yb):
"""Compute the coefficients of a polynomial in the Bernstein basis
given the values and derivatives at the edges.
Return the coefficients of a polynomial in the Bernstein basis
defined on `[xa, xb]` and having the values and derivatives at the
endpoints ``xa`` and ``xb`` as specified by ``ya`` and ``yb``.
The polynomial constructed is of the minimal possible degree, i.e.,
if the lengths of ``ya`` and ``yb`` are ``na`` and ``nb``, the degree
of the polynomial is ``na + nb - 1``.
Parameters
----------
xa : float
Left-hand end point of the interval
xb : float
Right-hand end point of the interval
ya : array_like
Derivatives at ``xa``. ``ya[0]`` is the value of the function, and
``ya[i]`` for ``i > 0`` is the value of the ``i``-th derivative.
yb : array_like
Derivatives at ``xb``.
Returns
-------
array
coefficient array of a polynomial having specified derivatives
Notes
-----
        This uses several facts about Bernstein basis functions.
First of all,
.. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
If B(x) is a linear combination of the form
.. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
        then :math:`B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}`.
Iterating the latter one, one finds for the q-th derivative
.. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
with
        .. math:: Q_a = \sum_{j=0}^{q} (-1)^{j+q} comb(q, j) c_{j+a}
        This way, only `a=0` contributes to :math:`B^{q}(x = xa)`, and
`c_q` are found one by one by iterating `q = 0, ..., na`.
At `x = xb` it's the same with `a = n - q`.
"""
ya, yb = np.asarray(ya), np.asarray(yb)
if ya.shape[1:] != yb.shape[1:]:
raise ValueError('ya and yb have incompatible dimensions.')
dta, dtb = ya.dtype, yb.dtype
if (np.issubdtype(dta, np.complexfloating) or
np.issubdtype(dtb, np.complexfloating)):
dt = np.complex_
else:
dt = np.float_
na, nb = len(ya), len(yb)
n = na + nb
c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)
# compute coefficients of a polynomial degree na+nb-1
# walk left-to-right
for q in range(0, na):
c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
for j in range(0, q):
c[q] -= (-1)**(j+q) * comb(q, j) * c[j]
# now walk right-to-left
for q in range(0, nb):
c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
for j in range(0, q):
c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]
return c
@staticmethod
def _raise_degree(c, d):
"""Raise a degree of a polynomial in the Bernstein basis.
Given the coefficients of a polynomial degree `k`, return (the
coefficients of) the equivalent polynomial of degree `k+d`.
Parameters
----------
c : array_like
coefficient array, 1D
d : integer
Returns
-------
array
coefficient array, 1D array of length `c.shape[0] + d`
Notes
-----
This uses the fact that a Bernstein polynomial `b_{a, k}` can be
identically represented as a linear combination of polynomials of
a higher degree `k+d`:
.. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
comb(d, j) / comb(k+d, a+j)
"""
if d == 0:
return c
k = c.shape[0] - 1
out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
for a in range(c.shape[0]):
f = c[a] * comb(k, a)
for j in range(d+1):
out[a+j] += f * comb(d, j) / comb(k+d, a+j)
return out
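    # Illustrative property (not in the original source): degree raising
    # does not change the polynomial's values. For the constant Bernstein
    # polynomial c = [1.0], raising by d = 2 gives [1.0, 1.0, 1.0], since
    # the b_{a, 2} form a partition of unity:
    #
    #     >>> BPoly._raise_degree(np.array([1.0]), 2)
    #     array([ 1.,  1.,  1.])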
class NdPPoly(object):
"""
Piecewise tensor product polynomial
The value at point `xp = (x', y', z', ...)` is evaluated by first
computing the interval indices `i` such that::
x[0][i[0]] <= x' < x[0][i[0]+1]
x[1][i[1]] <= y' < x[1][i[1]+1]
...
and then computing::
S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]]
* (xp[0] - x[0][i[0]])**m0
* ...
* (xp[n] - x[n][i[n]])**mn
for m0 in range(k[0]+1)
...
for mn in range(k[n]+1))
where ``k[j]`` is the degree of the polynomial in dimension j. This
representation is the piecewise multivariate power basis.
Parameters
----------
c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...)
Polynomial coefficients, with polynomial order `kj` and
`mj+1` intervals for each dimension `j`.
x : ndim-tuple of ndarrays, shapes (mj+1,)
Polynomial breakpoints for each dimension. These must be
sorted in increasing order.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
Attributes
----------
x : tuple of ndarrays
Breakpoints.
c : ndarray
Coefficients of the polynomials.
Methods
-------
__call__
construct_fast
See also
--------
PPoly : piecewise polynomials in 1D
Notes
-----
High-order polynomials in the power basis can be numerically
unstable.
"""
def __init__(self, c, x, extrapolate=None):
self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x)
self.c = np.asarray(c)
if extrapolate is None:
extrapolate = True
self.extrapolate = bool(extrapolate)
ndim = len(self.x)
if any(v.ndim != 1 for v in self.x):
raise ValueError("x arrays must all be 1-dimensional")
if any(v.size < 2 for v in self.x):
raise ValueError("x arrays must all contain at least 2 points")
if c.ndim < 2*ndim:
raise ValueError("c must have at least 2*len(x) dimensions")
if any(np.any(v[1:] - v[:-1] < 0) for v in self.x):
raise ValueError("x-coordinates are not in increasing order")
if any(a != b.size - 1 for a, b in zip(c.shape[ndim:2*ndim], self.x)):
raise ValueError("x and c do not agree on the number of intervals")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
@classmethod
def construct_fast(cls, c, x, extrapolate=None):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
`c` and `x` must be arrays of the correct shape and type. The
`c` array can only be of dtype float or complex, and the `x`
arrays must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
def _ensure_c_contiguous(self):
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
if not isinstance(self.x, tuple):
self.x = tuple(self.x)
def __call__(self, x, nu=None, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative
Parameters
----------
x : array-like
Points to evaluate the interpolant at.
nu : tuple, optional
Orders of derivatives to evaluate. Each must be non-negative.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
y : array-like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
x = _ndim_coords_from_arrays(x)
x_shape = x.shape
x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float_)
if nu is None:
nu = np.zeros((ndim,), dtype=np.intc)
else:
nu = np.asarray(nu, dtype=np.intc)
if nu.ndim != 1 or nu.shape[0] != ndim:
raise ValueError("invalid number of derivative orders nu")
dim1 = prod(self.c.shape[:ndim])
dim2 = prod(self.c.shape[ndim:2*ndim])
dim3 = prod(self.c.shape[2*ndim:])
ks = np.array(self.c.shape[:ndim], dtype=np.intc)
out = np.empty((x.shape[0], dim3), dtype=self.c.dtype)
self._ensure_c_contiguous()
_ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3),
self.x,
ks,
x,
nu,
bool(extrapolate),
out)
return out.reshape(x_shape[:-1] + self.c.shape[2*ndim:])
def _derivative_inplace(self, nu, axis):
"""
Compute 1D derivative along a selected dimension in-place
May result in a non-contiguous c array.
"""
if nu < 0:
return self._antiderivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
# reduce order
if nu == 0:
# noop
return
else:
sl = [slice(None)]*ndim
sl[axis] = slice(None, -nu, None)
c2 = self.c[sl]
if c2.shape[axis] == 0:
# a derivative of order higher than the polynomial degree is zero
shp = list(c2.shape)
shp[axis] = 1
c2 = np.zeros(shp, dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu)
sl = [None]*c2.ndim
sl[axis] = slice(None)
c2 *= factor[sl]
self.c = c2
def _antiderivative_inplace(self, nu, axis):
"""
Compute 1D antiderivative along a selected dimension
May result in a non-contiguous c array.
"""
if nu <= 0:
return self._derivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
perm = list(range(ndim))
perm[0], perm[axis] = perm[axis], perm[0]
perm = perm + list(range(ndim, self.c.ndim))
c = self.c.transpose(perm)
c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:],
dtype=c.dtype)
c2[:-nu] = c
# divide by the correct rising factorials
factor = spec.poch(np.arange(c.shape[0], 0, -1), nu)
c2[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
perm2 = list(range(c2.ndim))
perm2[1], perm2[ndim+axis] = perm2[ndim+axis], perm2[1]
c2 = c2.transpose(perm2)
c2 = c2.copy()
_ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1),
self.x[axis], nu-1)
c2 = c2.transpose(perm2)
c2 = c2.transpose(perm)
# Done
self.c = c2
def derivative(self, nu):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : ndim-tuple of int
Order of derivatives to evaluate for each dimension.
If negative, the antiderivative is returned.
Returns
-------
pp : NdPPoly
Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n])
representing the derivative of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals in each dimension are
considered half-open, ``[a, b)``, except for the last interval
which is closed ``[a, b]``.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._derivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def antiderivative(self, nu):
"""
Construct a new piecewise polynomial representing the antiderivative.
The antiderivative is the indefinite integral of the function,
and differentiation is its inverse operation.
Parameters
----------
nu : ndim-tuple of int
Order of derivatives to evaluate for each dimension.
If negative, the derivative is returned.
Returns
-------
pp : NdPPoly
Piecewise polynomial of orders (k[0] + nu[0], ..., k[n] + nu[n])
representing the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order nu-1, up to floating point
rounding error.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._antiderivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def integrate_1d(self, a, b, axis, extrapolate=None):
r"""
Compute NdPPoly representation for one dimensional definite integral
The result is a piecewise polynomial representing the integral:
.. math::
p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...)
where the dimension integrated over is specified with the
`axis` parameter.
Parameters
----------
a, b : float
Lower and upper bound for integration.
axis : int
Dimension over which to compute the 1D integrals
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : NdPPoly or array-like
Definite integral of the piecewise polynomial over [a, b].
If the polynomial was 1-dimensional, an array is returned,
otherwise, an NdPPoly object.
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
axis = int(axis) % ndim
# reuse 1D integration routines
c = self.c
swap = list(range(c.ndim))
swap.insert(0, swap[axis])
del swap[axis + 1]
swap.insert(1, swap[ndim + axis])
del swap[ndim + axis + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1),
self.x[axis],
extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
# Construct result
if ndim == 1:
return out.reshape(c.shape[2:])
else:
c = out.reshape(c.shape[2:])
x = self.x[:axis] + self.x[axis+1:]
return self.construct_fast(c, x, extrapolate=extrapolate)
def integrate(self, ranges, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
ranges : ndim-tuple of 2-tuples float
Sequence of lower and upper bounds for each dimension,
``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]``
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over
[a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]]
"""
ndim = len(self.x)
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
if not hasattr(ranges, '__len__') or len(ranges) != ndim:
raise ValueError("Range not a sequence of correct length")
self._ensure_c_contiguous()
# Reuse 1D integration routine
c = self.c
for n, (a, b) in enumerate(ranges):
swap = list(range(c.ndim))
swap.insert(1, swap[ndim - n])
del swap[ndim - n + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
c = out.reshape(c.shape[2:])
return c
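# Added usage sketch (ours): for a scalar-valued NdPPoly `p` that is 0 on the
# cell [0, 1) x [0, 1) and 1 on [1, 2) x [0, 1), integrating over the whole
# grid sums (cell value) * (cell area):
#   p.integrate(((0., 2.), (0., 1.)))   # -> 0*1 + 1*1 = 1.0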
class RegularGridInterpolator(object):
"""
Interpolation on a regular grid in arbitrary dimensions
The data must be defined on a regular grid; the grid spacing however may be
uneven. Linear and nearest-neighbour interpolation are supported. After
setting up the interpolator object, the interpolation method (*linear* or
*nearest*) may be chosen at each evaluation.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest". This parameter will become the default for the object's
``__call__`` method. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated.
Methods
-------
__call__
Notes
-----
Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
.. versionadded:: 0.14
Examples
--------
Evaluate a simple example function on the points of a 3D grid:
>>> from scipy.interpolate import RegularGridInterpolator
>>> def f(x, y, z):
... return 2 * x**3 + 3 * y**2 - z
>>> x = np.linspace(1, 4, 11)
>>> y = np.linspace(4, 7, 22)
>>> z = np.linspace(7, 9, 33)
>>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))
``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
Next, define an interpolating function from this data:
>>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)
Evaluate the interpolating function at the two points
``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
>>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
>>> my_interpolating_function(pts)
array([ 125.80469388, 146.30069388])
which is indeed a close approximation to
``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
References
----------
.. [1] Python package *regulargrid* by Johannes Buchner, see
https://pypi.python.org/pypi/regulargrid/
.. [2] Trilinear interpolation. (2013, January 17). In Wikipedia, The Free
Encyclopedia. Retrieved 27 Feb 2013 01:28.
http://en.wikipedia.org/w/index.php?title=Trilinear_interpolation&oldid=533448871
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
and multilinear table interpolation in many dimensions." MATH.
COMPUT. 50.181 (1988): 189-196.
http://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
"""
# this class is based on code originally programmed by Johannes Buchner,
# see https://github.com/JohannesBuchner/regulargrid
def __init__(self, points, values, method="linear", bounds_error=True,
fill_value=np.nan):
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
self.method = method
self.bounds_error = bounds_error
if not hasattr(values, 'ndim'):
# allow reasonable duck-typed values
values = np.asarray(values)
if len(points) > values.ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), values.ndim))
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
if not np.issubdtype(values.dtype, np.inexact):
values = values.astype(float)
self.fill_value = fill_value
if fill_value is not None:
fill_value_dtype = np.asarray(fill_value).dtype
if (hasattr(values, 'dtype') and not
np.can_cast(fill_value_dtype, values.dtype,
casting='same_kind')):
raise ValueError("fill_value must be either 'None' or "
"of a type compatible with values")
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
self.grid = tuple([np.asarray(p) for p in points])
self.values = values
def __call__(self, xi, method=None):
"""
Interpolation at coordinates
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str
The method of interpolation to perform. Supported are "linear" and
"nearest".
"""
method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], ndim))
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(self.grid[i][0] <= p),
np.all(p <= self.grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
if method == "linear":
result = self._evaluate_linear(indices,
norm_distances,
out_of_bounds)
elif method == "nearest":
result = self._evaluate_nearest(indices,
norm_distances,
out_of_bounds)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
# find relevant values
# each i and i+1 represents an edge
edges = itertools.product(*[[i, i + 1] for i in indices])
values = 0.
for edge_indices in edges:
weight = 1.
for ei, i, yi in zip(edge_indices, indices, norm_distances):
weight *= np.where(ei == i, 1 - yi, yi)
values += np.asarray(self.values[edge_indices]) * weight[vslice]
return values
def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
idx_res = []
for i, yi in zip(indices, norm_distances):
idx_res.append(np.where(yi <= .5, i, i + 1))
return self.values[idx_res]
def _find_indices(self, xi):
# find relevant edges between which xi are situated
indices = []
# compute distance to lower edge in unity units
norm_distances = []
# check for out of bounds xi
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
# iterate through dimensions
for x, grid in zip(xi, self.grid):
i = np.searchsorted(grid, x) - 1
i[i < 0] = 0
i[i > grid.size - 2] = grid.size - 2
indices.append(i)
norm_distances.append((x - grid[i]) /
(grid[i + 1] - grid[i]))
if not self.bounds_error:
out_of_bounds += x < grid[0]
out_of_bounds += x > grid[-1]
return indices, norm_distances, out_of_bounds
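# Added illustrative sketch (ours): the weight product built inside
# _evaluate_linear, written out for a single axis. With normalized distance
# t = (x - x_lo) / (x_hi - x_lo) in [0, 1], the lower grid edge gets weight
# (1 - t) and the upper edge weight t -- ordinary linear interpolation; the
# N-d loop above simply multiplies one such factor per dimension.
def _demo_linear_weight(v_lo, v_hi, t):
    # one-axis analogue of `np.where(ei == i, 1 - yi, yi)`
    return (1.0 - t) * v_lo + t * v_hi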
def interpn(points, values, xi, method="linear", bounds_error=True,
fill_value=np.nan):
"""
Multidimensional interpolation on regular grids.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
Notes
-----
.. versionadded:: 0.14
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
regular grid in arbitrary dimensions
RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
"""
# sanity check 'method' kwarg
if method not in ["linear", "nearest", "splinef2d"]:
raise ValueError("interpn only understands the methods 'linear', "
"'nearest', and 'splinef2d'. You provided %s." %
method)
if not hasattr(values, 'ndim'):
values = np.asarray(values)
ndim = values.ndim
if ndim > 2 and method == "splinef2d":
raise ValueError("The method spline2fd can only be used for "
"2-dimensional input data")
if not bounds_error and fill_value is None and method == "splinef2d":
raise ValueError("The method spline2fd does not support extrapolation.")
# sanity check consistency of input dimensions
if len(points) > ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), ndim))
if len(points) != ndim and method == 'splinef2d':
raise ValueError("The method spline2fd can only be used for "
"scalar data with one point per coordinate")
# sanity check input grid
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
grid = tuple([np.asarray(p) for p in points])
# sanity check requested xi
xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
if xi.shape[-1] != len(grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], len(grid)))
for i, p in enumerate(xi.T):
if bounds_error and not np.logical_and(np.all(grid[i][0] <= p),
np.all(p <= grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
# perform interpolation
if method == "linear":
interp = RegularGridInterpolator(points, values, method="linear",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "nearest":
interp = RegularGridInterpolator(points, values, method="nearest",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "splinef2d":
xi_shape = xi.shape
xi = xi.reshape(-1, xi.shape[-1])
# RectBivariateSpline doesn't support fill_value; we need to wrap here
idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
axis=0)
result = np.empty_like(xi[:, 0])
# make a copy of values for RectBivariateSpline
interp = RectBivariateSpline(points[0], points[1], values[:])
result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
result[np.logical_not(idx_valid)] = fill_value
return result.reshape(xi_shape[:-1])
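# Added usage sketch (ours): bilinear interpolation of f(x, y) = x + y on a
# 2x2 grid; the cell centre averages the four corner values:
#   pts = (np.array([0., 1.]), np.array([0., 1.]))
#   vals = np.array([[0., 1.], [1., 2.]])
#   interpn(pts, vals, [[0.5, 0.5]])   # -> array([ 1.])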
# backward compatibility wrapper
class ppform(PPoly):
"""
Deprecated piecewise polynomial class.
New code should use the `PPoly` class instead.
"""
def __init__(self, coeffs, breaks, fill=0.0, sort=False):
warnings.warn("ppform is deprecated -- use PPoly instead",
category=DeprecationWarning)
if sort:
breaks = np.sort(breaks)
else:
breaks = np.asarray(breaks)
PPoly.__init__(self, coeffs, breaks)
self.coeffs = self.c
self.breaks = self.x
self.K = self.coeffs.shape[0]
self.fill = fill
self.a = self.breaks[0]
self.b = self.breaks[-1]
def __call__(self, x):
return PPoly.__call__(self, x, 0, False)
def _evaluate(self, x, nu, extrapolate, out):
PPoly._evaluate(self, x, nu, extrapolate, out)
out[~((x >= self.a) & (x <= self.b))] = self.fill
return out
@classmethod
def fromspline(cls, xk, cvals, order, fill=0.0):
# Note: this spline representation is incompatible with FITPACK
N = len(xk)-1
sivals = np.empty((order+1, N), dtype=float)
for m in xrange(order, -1, -1):
fact = spec.gamma(m+1)
res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
res /= fact
sivals[order-m, :] = res
return cls(sivals, xk, fill=fill)
# The 3 private functions below can be called by splmake().
def _dot0(a, b):
"""Similar to numpy.dot, but sum over last axis of a and 1st axis of b"""
if b.ndim <= 2:
return dot(a, b)
else:
axes = list(range(b.ndim))
axes.insert(-1, 0)
axes.pop(0)
return dot(a, b.transpose(axes))
def _find_smoothest(xk, yk, order, conds=None, B=None):
# construct Bmatrix, and Jmatrix
# e = J*c
# minimize norm(e,2) given B*c=yk
# if desired B can be given
# conds is ignored
N = len(xk)-1
K = order
if B is None:
B = _fitpack._bsplmat(order, xk)
J = _fitpack._bspldismat(order, xk)
u, s, vh = scipy.linalg.svd(B)
ind = K-1
V2 = vh[-ind:,:].T
V1 = vh[:-ind,:].T
A = dot(J.T,J)
tmp = dot(V2.T,A)
Q = dot(tmp,V2)
p = scipy.linalg.solve(Q, tmp)
tmp = dot(V2,p)
tmp = np.eye(N+K) - tmp
tmp = dot(tmp,V1)
tmp = dot(tmp,np.diag(1.0/s))
tmp = dot(tmp,u.T)
return _dot0(tmp, yk)
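# (Added explanatory note, ours.) The chain of dot() calls above solves
#     minimize ||J c||_2   subject to   B c = yk
# via the SVD B = U S Vh: write c = V1 z1 + V2 z2, where V2 spans the null
# space of B. The constraint fixes the particular part c_p = V1 S^{-1} U^T yk,
# and minimizing the objective over the free part gives
#     z2 = -(V2^T A V2)^{-1} V2^T A c_p,   with A = J^T J,
# i.e. c = (I - V2 Q^{-1} V2^T A) c_p with Q = V2^T A V2 -- exactly what is
# assembled before the final _dot0 with yk.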
# conds is a tuple of an array and a vector
# giving the left-hand and the right-hand side
# of the additional equations to add to B
def _find_user(xk, yk, order, conds, B):
lh = conds[0]
rh = conds[1]
B = np.concatenate((B, lh), axis=0)
w = np.concatenate((yk, rh), axis=0)
M, N = B.shape
if (M > N):
raise ValueError("over-specification of conditions")
elif (M < N):
return _find_smoothest(xk, yk, order, None, B)
else:
return scipy.linalg.solve(B, w)
# Remove the 3 private functions above as well when removing splmake
@np.deprecate(message="splmake is deprecated in scipy 0.19.0, "
"use make_interp_spline instead.")
def splmake(xk, yk, order=3, kind='smoothest', conds=None):
"""
Return a representation of a spline given data-points at internal knots
Parameters
----------
xk : array_like
The input array of x values of rank 1
yk : array_like
The input array of y values of rank N. `yk` can be an N-d array to
represent more than one curve, through the same `xk` points. The first
dimension is assumed to be the interpolating dimension and is the same
length of `xk`.
order : int, optional
Order of the spline
kind : str, optional
Can be 'smoothest', 'not_a_knot', 'fixed', 'clamped', 'natural',
'periodic', 'symmetric', 'user', 'mixed' and it is ignored if order < 2
conds : optional
Extra conditions for the fit, passed to the `_find_*` routine selected
by `kind` (e.g. the additional equations used by ``kind='user'``);
ignored by ``'smoothest'``.
Returns
-------
splmake : tuple
Return a (`xk`, `cvals`, `k`) representation of a spline given
data-points where the (internal) knots are at the data-points.
"""
yk = np.asanyarray(yk)
order = int(order)
if order < 0:
raise ValueError("order must not be negative")
if order == 0:
return xk, yk[:-1], order
elif order == 1:
return xk, yk, order
try:
    func = eval('_find_%s' % kind)
except NameError:
    raise NotImplementedError("unknown kind: %r" % (kind,))
# the constraint matrix
B = _fitpack._bsplmat(order, xk)
coefs = func(xk, yk, order, conds, B)
return xk, coefs, order
@np.deprecate(message="spleval is deprecated in scipy 0.19.0, "
"use BSpline instead.")
def spleval(xck, xnew, deriv=0):
"""
Evaluate a fixed spline represented by the given tuple at the new x-values
The `xj` values are the interior knot points. The approximation
region is `xj[0]` to `xj[-1]`. If N+1 is the length of `xj`, then `cvals`
should have length N+k where `k` is the order of the spline.
Parameters
----------
(xj, cvals, k) : tuple
Parameters that define the fixed spline
xj : array_like
Interior knot points
cvals : array_like
Spline coefficients (as returned by `splmake`)
k : int
Order of the spline
xnew : array_like
Locations to calculate spline
deriv : int
Order of the derivative to evaluate (default 0)
Returns
-------
spleval : ndarray
If `cvals` represents more than one curve (`cvals.ndim` > 1) and/or
`xnew` is N-d, then the result is `xnew.shape` + `cvals.shape[1:]`
providing the interpolation of multiple curves.
Notes
-----
Internally, an additional `k`-1 knot points are added on either side of
the spline.
"""
(xj, cvals, k) = xck
oldshape = np.shape(xnew)
xx = np.ravel(xnew)
sh = cvals.shape[1:]
res = np.empty(xx.shape + sh, dtype=cvals.dtype)
for index in np.ndindex(*sh):
sl = (slice(None),) + index
if issubclass(cvals.dtype.type, np.complexfloating):
res[sl].real = _fitpack._bspleval(xx,xj, cvals.real[sl], k, deriv)
res[sl].imag = _fitpack._bspleval(xx,xj, cvals.imag[sl], k, deriv)
else:
res[sl] = _fitpack._bspleval(xx, xj, cvals[sl], k, deriv)
res.shape = oldshape + sh
return res
@np.deprecate(message="spltopp is deprecated in scipy 0.19.0, "
"use PPoly.from_spline instead.")
def spltopp(xk, cvals, k):
"""Return a piece-wise polynomial object from a fixed-spline tuple."""
return ppform.fromspline(xk, cvals, k)
@np.deprecate(message="spline is deprecated in scipy 0.19.0, "
"use Bspline class instead.")
def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None):
"""
Interpolate a curve at new points using a spline fit
Parameters
----------
xk, yk : array_like
The x and y values that define the curve.
xnew : array_like
The x values where spline should estimate the y values.
order : int
Default is 3.
kind : string
One of {'smoothest'}; see `splmake` for the other accepted kinds.
conds : optional
Conditions passed through to `splmake`.
Returns
-------
spline : ndarray
An array of y values; the spline evaluated at the positions `xnew`.
"""
return spleval(splmake(xk, yk, order=order, kind=kind, conds=conds), xnew)
| {
"content_hash": "16b950252219f953481d89a124380550",
"timestamp": "",
"source": "github",
"line_count": 2855,
"max_line_length": 111,
"avg_line_length": 35.33345008756567,
"alnum_prop": 0.5487970498726171,
"repo_name": "pyramania/scipy",
"id": "35f1735dbff718c94feba253a085be03dff326e0",
"size": "100877",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scipy/interpolate/interpolate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4119079"
},
{
"name": "C++",
"bytes": "491714"
},
{
"name": "FORTRAN",
"bytes": "5574493"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "11020282"
},
{
"name": "Shell",
"bytes": "2218"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
class RemoveTable:
""" Generates an SQL command to drop a table """
def __init__(self, table_name):
if isinstance(table_name, str):
    self._table_name = table_name
else:
    self._table_name = table_name.name
@property
def table_name(self) -> str:
""" Returns the name of the table. """
return self._table_name
def __str__(self):
return 'DROP TABLE `%s`;' % self._table_name
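# Added usage sketch (ours): both a plain string and any object exposing a
# `.name` attribute are accepted.
#   str(RemoveTable('logs'))   # -> 'DROP TABLE `logs`;'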
| {
"content_hash": "1f0034b6209811c647a852d6b47430b5",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 52,
"avg_line_length": 30.066666666666666,
"alnum_prop": 0.549889135254989,
"repo_name": "cmancone/mygrations",
"id": "31f781c0e0a0bd7507df99b66fbe171ecb474247",
"size": "451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mygrations/core/operations/remove_table.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "416430"
},
{
"name": "Shell",
"bytes": "331"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import json
import logging
from collections import defaultdict
from functools import partial
from typing import Any, Callable, Dict, List, Set, Tuple, Type, Union
import sqlalchemy as sqla
from flask_appbuilder import Model
from flask_appbuilder.models.decorators import renders
from flask_appbuilder.security.sqla.models import User
from markupsafe import escape, Markup
from sqlalchemy import (
Boolean,
Column,
ForeignKey,
Integer,
MetaData,
String,
Table,
Text,
UniqueConstraint,
)
from sqlalchemy.engine.base import Connection
from sqlalchemy.orm import relationship, sessionmaker, subqueryload
from sqlalchemy.orm.mapper import Mapper
from sqlalchemy.orm.session import object_session
from sqlalchemy.sql import join, select
from sqlalchemy.sql.elements import BinaryExpression
from superset import app, ConnectorRegistry, db, is_feature_enabled, security_manager
from superset.connectors.base.models import BaseDatasource
from superset.connectors.druid.models import DruidColumn, DruidMetric
from superset.connectors.sqla.models import SqlMetric, TableColumn
from superset.extensions import cache_manager
from superset.models.helpers import AuditMixinNullable, ImportExportMixin
from superset.models.slice import Slice
from superset.models.tags import DashboardUpdater
from superset.models.user_attributes import UserAttribute
from superset.tasks.thumbnails import cache_dashboard_thumbnail
from superset.utils import core as utils
from superset.utils.decorators import debounce
from superset.utils.hashing import md5_sha_from_str
from superset.utils.urls import get_url_path
# pylint: disable=too-many-public-methods
metadata = Model.metadata # pylint: disable=no-member
config = app.config
logger = logging.getLogger(__name__)
def copy_dashboard(
_mapper: Mapper, connection: Connection, target: "Dashboard"
) -> None:
dashboard_id = config["DASHBOARD_TEMPLATE_ID"]
if dashboard_id is None:
return
session_class = sessionmaker(autoflush=False)
session = session_class(bind=connection)
new_user = session.query(User).filter_by(id=target.id).first()
# copy template dashboard to user
template = session.query(Dashboard).filter_by(id=int(dashboard_id)).first()
dashboard = Dashboard(
dashboard_title=template.dashboard_title,
position_json=template.position_json,
description=template.description,
css=template.css,
json_metadata=template.json_metadata,
slices=template.slices,
owners=[new_user],
)
session.add(dashboard)
session.commit()
# set dashboard as the welcome dashboard
extra_attributes = UserAttribute(
user_id=target.id, welcome_dashboard_id=dashboard.id
)
session.add(extra_attributes)
session.commit()
sqla.event.listen(User, "after_insert", copy_dashboard)
dashboard_slices = Table(
"dashboard_slices",
metadata,
Column("id", Integer, primary_key=True),
Column("dashboard_id", Integer, ForeignKey("dashboards.id")),
Column("slice_id", Integer, ForeignKey("slices.id")),
UniqueConstraint("dashboard_id", "slice_id"),
)
dashboard_user = Table(
"dashboard_user",
metadata,
Column("id", Integer, primary_key=True),
Column("user_id", Integer, ForeignKey("ab_user.id")),
Column("dashboard_id", Integer, ForeignKey("dashboards.id")),
)
DashboardRoles = Table(
"dashboard_roles",
metadata,
Column("id", Integer, primary_key=True),
Column("dashboard_id", Integer, ForeignKey("dashboards.id"), nullable=False),
Column("role_id", Integer, ForeignKey("ab_role.id"), nullable=False),
)
class Dashboard( # pylint: disable=too-many-instance-attributes
Model, AuditMixinNullable, ImportExportMixin
):
"""The dashboard object!"""
__tablename__ = "dashboards"
id = Column(Integer, primary_key=True)
dashboard_title = Column(String(500))
position_json = Column(utils.MediumText())
description = Column(Text)
css = Column(Text)
json_metadata = Column(Text)
slug = Column(String(255), unique=True)
slices = relationship(Slice, secondary=dashboard_slices, backref="dashboards")
owners = relationship(security_manager.user_model, secondary=dashboard_user)
published = Column(Boolean, default=False)
roles = relationship(security_manager.role_model, secondary=DashboardRoles)
export_fields = [
"dashboard_title",
"position_json",
"json_metadata",
"description",
"css",
"slug",
]
def __repr__(self) -> str:
return f"Dashboard<{self.id or self.slug}>"
@property
def url(self) -> str:
return f"/superset/dashboard/{self.slug or self.id}/"
@property
def datasources(self) -> Set[BaseDatasource]:
# Verbose but efficient database enumeration of dashboard datasources.
datasources_by_cls_model: Dict[Type["BaseDatasource"], Set[int]] = defaultdict(
set
)
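        # (Added note, ours.) Grouping datasource ids by connector model first
        # lets the query below use one IN(...) filter per model class instead
        # of one query per slice.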
for slc in self.slices:
datasources_by_cls_model[slc.cls_model].add(slc.datasource_id)
return {
datasource
for cls_model, datasource_ids in datasources_by_cls_model.items()
for datasource in db.session.query(cls_model)
.filter(cls_model.id.in_(datasource_ids))
.all()
}
@property
def charts(self) -> List[BaseDatasource]:
return [slc.chart for slc in self.slices]
@property
def sqla_metadata(self) -> None:
# pylint: disable=no-member
meta = MetaData(bind=self.get_sqla_engine())
meta.reflect()
@property
def status(self) -> utils.DashboardStatus:
if self.published:
return utils.DashboardStatus.PUBLISHED
return utils.DashboardStatus.DRAFT
@renders("dashboard_title")
def dashboard_link(self) -> Markup:
title = escape(self.dashboard_title or "<empty>")
return Markup(f'<a href="{self.url}">{title}</a>')
@property
def digest(self) -> str:
"""
Returns an MD5 hex digest that makes this dashboard unique
"""
unique_string = f"{self.position_json}.{self.css}.{self.json_metadata}"
return md5_sha_from_str(unique_string)
@property
def thumbnail_url(self) -> str:
"""
Returns a thumbnail URL with a HEX digest. We want to avoid browser cache
if the dashboard has changed
"""
return f"/api/v1/dashboard/{self.id}/thumbnail/{self.digest}/"
@property
def changed_by_name(self) -> str:
if not self.changed_by:
return ""
return str(self.changed_by)
@property
def changed_by_url(self) -> str:
if not self.changed_by:
return ""
return f"/superset/profile/{self.changed_by.username}"
@property
def data(self) -> Dict[str, Any]:
positions = self.position_json
if positions:
positions = json.loads(positions)
return {
"id": self.id,
"metadata": self.params_dict,
"css": self.css,
"dashboard_title": self.dashboard_title,
"published": self.published,
"slug": self.slug,
"slices": [slc.data for slc in self.slices],
"position_json": positions,
"last_modified_time": self.changed_on.replace(microsecond=0).timestamp(),
}
@cache_manager.cache.memoize(
# manage cache version manually
make_name=lambda fname: f"{fname}-v1.0",
unless=lambda: not is_feature_enabled("DASHBOARD_CACHE"),
)
def datasets_trimmed_for_slices(self) -> List[Dict[str, Any]]:
# Verbose but efficient database enumeration of dashboard datasources.
slices_by_datasource: Dict[
Tuple[Type["BaseDatasource"], int], Set[Slice]
] = defaultdict(set)
for slc in self.slices:
slices_by_datasource[(slc.cls_model, slc.datasource_id)].add(slc)
result: List[Dict[str, Any]] = []
for (cls_model, datasource_id), slices in slices_by_datasource.items():
datasource = (
db.session.query(cls_model).filter_by(id=datasource_id).one_or_none()
)
if datasource:
# Filter out unneeded fields from the datasource payload
result.append(datasource.data_for_slices(slices))
return result
@property # type: ignore
def params(self) -> str: # type: ignore
return self.json_metadata
@params.setter
def params(self, value: str) -> None:
self.json_metadata = value
@property
def position(self) -> Dict[str, Any]:
if self.position_json:
return json.loads(self.position_json)
return {}
def update_thumbnail(self) -> None:
url = get_url_path("Superset.dashboard", dashboard_id_or_slug=self.id)
cache_dashboard_thumbnail.delay(url, self.digest, force=True)
@debounce(0.1)
def clear_cache(self) -> None:
cache_manager.cache.delete_memoized(Dashboard.datasets_trimmed_for_slices, self)
@classmethod
@debounce(0.1)
def clear_cache_for_slice(cls, slice_id: int) -> None:
filter_query = select([dashboard_slices.c.dashboard_id], distinct=True).where(
dashboard_slices.c.slice_id == slice_id
)
for (dashboard_id,) in db.engine.execute(filter_query):
cls(id=dashboard_id).clear_cache()
@classmethod
@debounce(0.1)
def clear_cache_for_datasource(cls, datasource_id: int) -> None:
filter_query = select(
[dashboard_slices.c.dashboard_id], distinct=True,
).select_from(
join(
dashboard_slices,
Slice,
(Slice.id == dashboard_slices.c.slice_id)
& (Slice.datasource_id == datasource_id),
)
)
for (dashboard_id,) in db.engine.execute(filter_query):
cls(id=dashboard_id).clear_cache()
@classmethod
def export_dashboards( # pylint: disable=too-many-locals
cls, dashboard_ids: List[int]
) -> str:
copied_dashboards = []
datasource_ids = set()
for dashboard_id in dashboard_ids:
# make sure that dashboard_id is an integer
dashboard_id = int(dashboard_id)
dashboard = (
db.session.query(Dashboard)
.options(subqueryload(Dashboard.slices))
.filter_by(id=dashboard_id)
.first()
)
# remove ids and relations (like owners, created by, slices, ...)
copied_dashboard = dashboard.copy()
for slc in dashboard.slices:
datasource_ids.add((slc.datasource_id, slc.datasource_type))
copied_slc = slc.copy()
# save original id into json
# we need it to update dashboard's json metadata on import
copied_slc.id = slc.id
# add extra params for the import
copied_slc.alter_params(
remote_id=slc.id,
datasource_name=slc.datasource.datasource_name,
schema=slc.datasource.schema,
database_name=slc.datasource.database.name,
)
# set slices without creating ORM relations
slices = copied_dashboard.__dict__.setdefault("slices", [])
slices.append(copied_slc)
json_metadata = json.loads(dashboard.json_metadata)
native_filter_configuration: List[Dict[str, Any]] = json_metadata.get(
"native_filter_configuration", []
)
for native_filter in native_filter_configuration:
session = db.session()
for target in native_filter.get("targets", []):
id_ = target.get("datasetId")
if id_ is None:
continue
datasource = ConnectorRegistry.get_datasource_by_id(session, id_)
datasource_ids.add((datasource.id, datasource.type))
copied_dashboard.alter_params(remote_id=dashboard_id)
copied_dashboards.append(copied_dashboard)
eager_datasources = []
for datasource_id, datasource_type in datasource_ids:
eager_datasource = ConnectorRegistry.get_eager_datasource(
db.session, datasource_type, datasource_id
)
copied_datasource = eager_datasource.copy()
copied_datasource.alter_params(
remote_id=eager_datasource.id,
database_name=eager_datasource.database.name,
)
datasource_class = copied_datasource.__class__
for field_name in datasource_class.export_children:
field_val = getattr(eager_datasource, field_name).copy()
# set children without creating ORM relations
copied_datasource.__dict__[field_name] = field_val
eager_datasources.append(copied_datasource)
return json.dumps(
{"dashboards": copied_dashboards, "datasources": eager_datasources},
cls=utils.DashboardEncoder,
indent=4,
)
@classmethod
def get(cls, id_or_slug: str) -> Dashboard:
session = db.session()
qry = session.query(Dashboard).filter(id_or_slug_filter(id_or_slug))
return qry.one_or_none()
def id_or_slug_filter(id_or_slug: str) -> BinaryExpression:
if id_or_slug.isdigit():
return Dashboard.id == int(id_or_slug)
return Dashboard.slug == id_or_slug
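# Added usage sketch (ours): the filter dispatches on whether the URL fragment
# is numeric (an id) or not (a slug):
#   Dashboard.get("42")      # filters on Dashboard.id == 42
#   Dashboard.get("sales")   # filters on Dashboard.slug == "sales"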
OnDashboardChange = Callable[[Mapper, Connection, Dashboard], Any]
# events for updating tags
if is_feature_enabled("TAGGING_SYSTEM"):
sqla.event.listen(Dashboard, "after_insert", DashboardUpdater.after_insert)
sqla.event.listen(Dashboard, "after_update", DashboardUpdater.after_update)
sqla.event.listen(Dashboard, "after_delete", DashboardUpdater.after_delete)
if is_feature_enabled("THUMBNAILS_SQLA_LISTENERS"):
update_thumbnail: OnDashboardChange = lambda _, __, dash: dash.update_thumbnail()
sqla.event.listen(Dashboard, "after_insert", update_thumbnail)
sqla.event.listen(Dashboard, "after_update", update_thumbnail)
if is_feature_enabled("DASHBOARD_CACHE"):
def clear_dashboard_cache(
_mapper: Mapper,
_connection: Connection,
obj: Union[Slice, BaseDatasource, Dashboard],
check_modified: bool = True,
) -> None:
if check_modified and not object_session(obj).is_modified(obj):
# needed for avoiding excessive cache purging when duplicating a dashboard
return
if isinstance(obj, Dashboard):
obj.clear_cache()
elif isinstance(obj, Slice):
Dashboard.clear_cache_for_slice(slice_id=obj.id)
elif isinstance(obj, BaseDatasource):
Dashboard.clear_cache_for_datasource(datasource_id=obj.id)
elif isinstance(obj, (SqlMetric, TableColumn)):
Dashboard.clear_cache_for_datasource(datasource_id=obj.table_id)
elif isinstance(obj, (DruidMetric, DruidColumn)):
Dashboard.clear_cache_for_datasource(datasource_id=obj.datasource_id)
sqla.event.listen(Dashboard, "after_update", clear_dashboard_cache)
sqla.event.listen(
Dashboard, "after_delete", partial(clear_dashboard_cache, check_modified=False)
)
sqla.event.listen(Slice, "after_update", clear_dashboard_cache)
sqla.event.listen(Slice, "after_delete", clear_dashboard_cache)
sqla.event.listen(
BaseDatasource, "after_update", clear_dashboard_cache, propagate=True
)
# also clear cache on column/metric updates since updates to these will not
# trigger update events for BaseDatasource.
sqla.event.listen(SqlMetric, "after_update", clear_dashboard_cache)
sqla.event.listen(TableColumn, "after_update", clear_dashboard_cache)
sqla.event.listen(DruidMetric, "after_update", clear_dashboard_cache)
sqla.event.listen(DruidColumn, "after_update", clear_dashboard_cache)
| {
"content_hash": "36011ee0d8fda28b5b889ee2ef10cb05",
"timestamp": "",
"source": "github",
"line_count": 445,
"max_line_length": 88,
"avg_line_length": 36.54157303370786,
"alnum_prop": 0.6404280179570752,
"repo_name": "mistercrunch/panoramix",
"id": "d74efa72cde8f7a2be92c2c0257423fd1fbf23a7",
"size": "17046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "superset/models/dashboard.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "46750"
},
{
"name": "HTML",
"bytes": "34140"
},
{
"name": "JavaScript",
"bytes": "81606"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "240195"
},
{
"name": "Shell",
"bytes": "213"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("airports", "0003_auto_20190321_1026"),
]
operations = [
migrations.AlterField(
model_name="airport",
name="id",
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID"),
),
]
| {
"content_hash": "a522599f9dbf4479b5bd1572b0af58f2",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 108,
"avg_line_length": 24.5625,
"alnum_prop": 0.5979643765903307,
"repo_name": "bashu/django-airports",
"id": "0917ee0d174e96c58b80bdaef75e39d1462ab429",
"size": "443",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "airports/migrations/0004_alter_airport_id.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1557"
},
{
"name": "Python",
"bytes": "33769"
}
],
"symlink_target": ""
} |
from __future__ import with_statement, absolute_import
import sys
import time
from contextlib import closing
import psycopg2
from psycopg2.extensions import QuotedString
from . import print_row_progress, status_logger
from .postgres_writer import PostgresWriter
class PostgresDbWriter(PostgresWriter):
"""Class used to stream DDL and/or data
from a MySQL server to a PostgreSQL server.
:Parameters:
- `db_options`: :py:obj:`dict` containing connection specific variables
- `verbose`: whether or not to log progress to :py:obj:`stdout`
"""
class FileObjFaker(object):
"""A file-like class to support streaming
table data directly to :py:meth:`psycopg2.copy_from`.
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
- `data`:
- `processor`:
- `verbose`: whether or not to log progress to :py:obj:`stdout`
"""
def __init__(self, table, data, processor, verbose=False):
self.data = iter(data)
self.table = table
self.processor = processor
self.verbose = verbose
if verbose:
self.idx = 1
self.start_time = time.time()
self.prev_val_len = 0
self.prev_idx = 0
def readline(self, *args, **kwargs):
try:
row = list(self.data.next())
except StopIteration:
if self.verbose:
print('')
return ''
else:
self.processor(self.table, row)
try:
return '%s\n' % ('\t'.join(row))
except UnicodeDecodeError:
return '%s\n' % ('\t'.join(row)).decode('utf-8')
finally:
if self.verbose:
if (self.idx % 20000) == 0:
now = time.time()
elapsed = now - self.start_time
val = '%.2f rows/sec [%s] ' % ((self.idx - self.prev_idx) / elapsed, self.idx)
print_row_progress('%s%s' % (("\b" * self.prev_val_len), val)),
self.prev_val_len = len(val) + 3
self.start_time = now
self.prev_idx = self.idx + 0
self.idx += 1
def read(self, *args, **kwargs):
return self.readline(*args, **kwargs)
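    # (Added note, ours.) psycopg2's copy_from() only ever calls read() /
    # readline() on the object it is handed, so FileObjFaker streams rows from
    # the MySQL reader straight into a PostgreSQL COPY without materializing
    # the whole table in memory.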
def __init__(self, db_options, verbose=False):
self.verbose = verbose
self.db_options = {
'host': db_options['hostname'],
'port': db_options.get('port', 5432),
'database': db_options['database'],
'password': db_options.get('password', None) or '',
'user': db_options['username'],
}
if ':' in db_options['database']:
self.db_options['database'], self.schema = self.db_options['database'].split(':')
else:
self.schema = None
self.open()
def open(self):
self.conn = psycopg2.connect(**self.db_options)
with closing(self.conn.cursor()) as cur:
if self.schema:
cur.execute('SET search_path TO %s' % self.schema)
cur.execute('SET client_encoding = \'UTF8\'')
if self.conn.server_version >= 80200:
cur.execute('SET standard_conforming_strings = off')
cur.execute('SET check_function_bodies = false')
cur.execute('SET client_min_messages = warning')
def query(self, sql, args=(), one=False):
with closing(self.conn.cursor()) as cur:
cur.execute(sql, args)
return cur.fetchone() if one else cur
def execute(self, sql, args=(), many=False):
with closing(self.conn.cursor()) as cur:
if many:
cur.executemany(sql, args)
else:
cur.execute(sql, args)
self.conn.commit()
def copy_from(self, file_obj, table_name, columns):
with closing(self.conn.cursor()) as cur:
cur.copy_from(file_obj,
table=table_name,
columns=columns
)
self.conn.commit()
def close(self):
"""Closes connection to the PostgreSQL server"""
self.conn.close()
def exists(self, relname):
rc = self.query('SELECT COUNT(*) FROM pg_class WHERE relname = %s', (relname, ), one=True)
return rc and int(rc[0]) == 1
@status_logger
def truncate(self, table):
"""Send DDL to truncate the specified `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
truncate_sql, serial_key_sql = super(self.__class__, self).truncate(table)
self.execute(truncate_sql)
if serial_key_sql:
self.execute(serial_key_sql)
@status_logger
def write_table(self, table):
"""Send DDL to create the specified `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
table_sql, serial_key_sql = super(self.__class__, self).write_table(table)
for sql in serial_key_sql + table_sql:
self.execute(sql)
@status_logger
def write_indexes(self, table):
"""Send DDL to create the specified `table` indexes
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
index_sql = super(self.__class__, self).write_indexes(table)
for sql in index_sql:
self.execute(sql)
@status_logger
def write_constraints(self, table):
"""Send DDL to create the specified `table` constraints
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
constraint_sql = super(self.__class__, self).write_constraints(table)
for sql in constraint_sql:
self.execute(sql)
@status_logger
def write_contents(self, table, reader):
"""Write the contents of `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
- `reader`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader` object that allows reading from the data source.
Returns None
"""
f = self.FileObjFaker(table, reader.read(table), self.process_row, self.verbose)
self.copy_from(f, '"%s"' % table.name, ['"%s"' % c['name'] for c in table.columns])
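# Added usage sketch (ours; connection values are placeholders):
#   writer = PostgresDbWriter({'hostname': 'localhost', 'database': 'target',
#                              'username': 'pg'}, verbose=True)
#   writer.write_table(table)              # DDL
#   writer.write_contents(table, reader)   # streamed COPY of the rows
#   writer.close()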
| {
"content_hash": "e5e9e280c87adba2fb81a3f6994dd03c",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 144,
"avg_line_length": 37.056994818652846,
"alnum_prop": 0.5573266219239373,
"repo_name": "anentropic/py-mysql2pgsql",
"id": "5ebace1c06ac06257a9425c2ec736b222c04b566",
"size": "7152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysql2pgsql/lib/postgres_db_writer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64070"
},
{
"name": "Shell",
"bytes": "4527"
}
],
"symlink_target": ""
} |
import json
import time
import unittest2
from unittest2 import SkipTest
import test.functional as tf
from copy import deepcopy
from test.functional.tests import Base, Base2, BaseEnv, Utils
from test.functional import cluster_info
from test.functional.swift_test_client import Account, Connection, \
ResponseError
def setUpModule():
tf.setup_package()
def tearDownModule():
tf.teardown_package()
class TestObjectVersioningEnv(BaseEnv):
versioning_enabled = None # tri-state: None initially, then True/False
location_header_key = 'X-Versions-Location'
account2 = None
@classmethod
def setUp(cls):
super(TestObjectVersioningEnv, cls).setUp()
# Second connection for ACL tests
config2 = deepcopy(tf.config)
config2['account'] = tf.config['account2']
config2['username'] = tf.config['username2']
config2['password'] = tf.config['password2']
cls.conn2 = Connection(config2)
cls.conn2.authenticate()
# avoid getting a prefix that stops halfway through an encoded
# character
prefix = Utils.create_name().decode("utf-8")[:10].encode("utf-8")
cls.versions_container = cls.account.container(prefix + "-versions")
if not cls.versions_container.create():
raise ResponseError(cls.conn.response)
cls.container = cls.account.container(prefix + "-objs")
container_headers = {
cls.location_header_key: cls.versions_container.name}
if not cls.container.create(hdrs=container_headers):
if cls.conn.response.status == 412:
cls.versioning_enabled = False
return
raise ResponseError(cls.conn.response)
container_info = cls.container.info()
# if versioning is off, then cls.location_header_key won't persist
cls.versioning_enabled = 'versions' in container_info
# setup another account to test ACLs
config2 = deepcopy(tf.config)
config2['account'] = tf.config['account2']
config2['username'] = tf.config['username2']
config2['password'] = tf.config['password2']
cls.conn2 = Connection(config2)
cls.storage_url2, cls.storage_token2 = cls.conn2.authenticate()
cls.account2 = cls.conn2.get_account()
cls.account2.delete_containers()
# setup another account with no access to anything to test ACLs
config3 = deepcopy(tf.config)
config3['account'] = tf.config['account']
config3['username'] = tf.config['username3']
config3['password'] = tf.config['password3']
cls.conn3 = Connection(config3)
cls.storage_url3, cls.storage_token3 = cls.conn3.authenticate()
cls.account3 = cls.conn3.get_account()
@classmethod
def tearDown(cls):
if cls.account:
cls.account.delete_containers()
if cls.account2:
cls.account2.delete_containers()
class TestCrossPolicyObjectVersioningEnv(BaseEnv):
# tri-state: None initially, then True/False
versioning_enabled = None
multiple_policies_enabled = None
policies = None
location_header_key = 'X-Versions-Location'
account2 = None
@classmethod
def setUp(cls):
super(TestCrossPolicyObjectVersioningEnv, cls).setUp()
if cls.multiple_policies_enabled is None:
try:
cls.policies = tf.FunctionalStoragePolicyCollection.from_info()
except AssertionError:
pass
if cls.policies and len(cls.policies) > 1:
cls.multiple_policies_enabled = True
else:
cls.multiple_policies_enabled = False
cls.versioning_enabled = True
# We don't actually know the state of versioning, but without
# multiple policies the tests should be skipped anyway. Claiming
# versioning support lets us report the right reason for skipping.
return
policy = cls.policies.select()
version_policy = cls.policies.exclude(name=policy['name']).select()
# Second connection for ACL tests
config2 = deepcopy(tf.config)
config2['account'] = tf.config['account2']
config2['username'] = tf.config['username2']
config2['password'] = tf.config['password2']
cls.conn2 = Connection(config2)
cls.conn2.authenticate()
# avoid getting a prefix that stops halfway through an encoded
# character
prefix = Utils.create_name().decode("utf-8")[:10].encode("utf-8")
cls.versions_container = cls.account.container(prefix + "-versions")
if not cls.versions_container.create(
{'X-Storage-Policy': policy['name']}):
raise ResponseError(cls.conn.response)
cls.container = cls.account.container(prefix + "-objs")
if not cls.container.create(
hdrs={cls.location_header_key: cls.versions_container.name,
'X-Storage-Policy': version_policy['name']}):
if cls.conn.response.status == 412:
cls.versioning_enabled = False
return
raise ResponseError(cls.conn.response)
container_info = cls.container.info()
# if versioning is off, then X-Versions-Location won't persist
cls.versioning_enabled = 'versions' in container_info
# setup another account to test ACLs
config2 = deepcopy(tf.config)
config2['account'] = tf.config['account2']
config2['username'] = tf.config['username2']
config2['password'] = tf.config['password2']
cls.conn2 = Connection(config2)
cls.storage_url2, cls.storage_token2 = cls.conn2.authenticate()
cls.account2 = cls.conn2.get_account()
cls.account2.delete_containers()
# setup another account with no access to anything to test ACLs
config3 = deepcopy(tf.config)
config3['account'] = tf.config['account']
config3['username'] = tf.config['username3']
config3['password'] = tf.config['password3']
cls.conn3 = Connection(config3)
cls.storage_url3, cls.storage_token3 = cls.conn3.authenticate()
cls.account3 = cls.conn3.get_account()
@classmethod
def tearDown(cls):
if cls.account:
cls.account.delete_containers()
if cls.account2:
cls.account2.delete_containers()
class TestObjectVersioningHistoryModeEnv(TestObjectVersioningEnv):
location_header_key = 'X-History-Location'
class TestObjectVersioning(Base):
env = TestObjectVersioningEnv
def setUp(self):
super(TestObjectVersioning, self).setUp()
if self.env.versioning_enabled is False:
raise SkipTest("Object versioning not enabled")
elif self.env.versioning_enabled is not True:
# just some sanity checking
raise Exception(
"Expected versioning_enabled to be True/False, got %r" %
(self.env.versioning_enabled,))
def _tear_down_files(self):
try:
# only delete files and not containers
# as they were configured in self.env
self.env.versions_container.delete_files()
self.env.container.delete_files()
except ResponseError:
pass
def tearDown(self):
super(TestObjectVersioning, self).tearDown()
self._tear_down_files()
def test_clear_version_option(self):
# sanity
self.assertEqual(self.env.container.info()['versions'],
self.env.versions_container.name)
self.env.container.update_metadata(
hdrs={self.env.location_header_key: ''})
self.assertIsNone(self.env.container.info().get('versions'))
# set location back to the way it was
self.env.container.update_metadata(
hdrs={self.env.location_header_key:
self.env.versions_container.name})
self.assertEqual(self.env.container.info()['versions'],
self.env.versions_container.name)
def _test_overwriting_setup(self):
container = self.env.container
versions_container = self.env.versions_container
cont_info = container.info()
self.assertEqual(cont_info['versions'], versions_container.name)
expected_content_types = []
obj_name = Utils.create_name()
versioned_obj = container.file(obj_name)
put_headers = {'Content-Type': 'text/jibberish01',
'Content-Encoding': 'gzip',
'Content-Disposition': 'attachment; filename=myfile'}
versioned_obj.write("aaaaa", hdrs=put_headers)
obj_info = versioned_obj.info()
self.assertEqual('text/jibberish01', obj_info['content_type'])
expected_content_types.append('text/jibberish01')
# the allowed headers are configurable in object server, so we cannot
# assert that content-encoding or content-disposition get *copied* to
# the object version unless they were set on the original PUT, so
# populate expected_headers by making a HEAD on the original object
resp_headers = dict(versioned_obj.conn.response.getheaders())
expected_headers = {}
for k, v in put_headers.items():
if k.lower() in resp_headers:
expected_headers[k] = v
self.assertEqual(0, versions_container.info()['object_count'])
versioned_obj.write("bbbbb", hdrs={'Content-Type': 'text/jibberish02',
'X-Object-Meta-Foo': 'Bar'})
versioned_obj.initialize()
self.assertEqual(versioned_obj.content_type, 'text/jibberish02')
expected_content_types.append('text/jibberish02')
self.assertEqual(versioned_obj.metadata['foo'], 'Bar')
# the old version got saved off
self.assertEqual(1, versions_container.info()['object_count'])
versioned_obj_name = versions_container.files()[0]
prev_version = versions_container.file(versioned_obj_name)
prev_version.initialize()
self.assertEqual("aaaaa", prev_version.read())
self.assertEqual(prev_version.content_type, 'text/jibberish01')
resp_headers = dict(prev_version.conn.response.getheaders())
for k, v in expected_headers.items():
self.assertIn(k.lower(), resp_headers)
self.assertEqual(v, resp_headers[k.lower()])
# make sure the new obj metadata did not leak to the prev. version
self.assertNotIn('foo', prev_version.metadata)
# check that POST does not create a new version
versioned_obj.sync_metadata(metadata={'fu': 'baz'})
self.assertEqual(1, versions_container.info()['object_count'])
expected_content_types.append('text/jibberish02')
# if we overwrite it again, there are two versions
versioned_obj.write("ccccc")
self.assertEqual(2, versions_container.info()['object_count'])
versioned_obj_name = versions_container.files()[1]
prev_version = versions_container.file(versioned_obj_name)
prev_version.initialize()
self.assertEqual("bbbbb", prev_version.read())
self.assertEqual(prev_version.content_type, 'text/jibberish02')
self.assertNotIn('foo', prev_version.metadata)
self.assertIn('fu', prev_version.metadata)
# versioned_obj keeps the newest content
self.assertEqual("ccccc", versioned_obj.read())
# test copy from a different container
src_container = self.env.account.container(Utils.create_name())
self.assertTrue(src_container.create())
src_name = Utils.create_name()
src_obj = src_container.file(src_name)
src_obj.write("ddddd", hdrs={'Content-Type': 'text/jibberish04'})
src_obj.copy(container.name, obj_name)
self.assertEqual("ddddd", versioned_obj.read())
versioned_obj.initialize()
self.assertEqual(versioned_obj.content_type, 'text/jibberish04')
expected_content_types.append('text/jibberish04')
# make sure versions container has the previous version
self.assertEqual(3, versions_container.info()['object_count'])
versioned_obj_name = versions_container.files()[2]
prev_version = versions_container.file(versioned_obj_name)
prev_version.initialize()
self.assertEqual("ccccc", prev_version.read())
# for further use in the mode-specific tests
return (versioned_obj, expected_headers, expected_content_types)
def test_overwriting(self):
versions_container = self.env.versions_container
versioned_obj, expected_headers, expected_content_types = \
self._test_overwriting_setup()
# pop one for the current version
expected_content_types.pop()
self.assertEqual(expected_content_types, [
o['content_type'] for o in versions_container.files(
parms={'format': 'json'})])
# test delete
versioned_obj.delete()
self.assertEqual("ccccc", versioned_obj.read())
expected_content_types.pop()
self.assertEqual(expected_content_types, [
o['content_type'] for o in versions_container.files(
parms={'format': 'json'})])
versioned_obj.delete()
self.assertEqual("bbbbb", versioned_obj.read())
expected_content_types.pop()
self.assertEqual(expected_content_types, [
o['content_type'] for o in versions_container.files(
parms={'format': 'json'})])
versioned_obj.delete()
self.assertEqual("aaaaa", versioned_obj.read())
self.assertEqual(0, versions_container.info()['object_count'])
# verify that all the original object headers have been copied back
obj_info = versioned_obj.info()
self.assertEqual('text/jibberish01', obj_info['content_type'])
resp_headers = dict(versioned_obj.conn.response.getheaders())
for k, v in expected_headers.items():
self.assertIn(k.lower(), resp_headers)
self.assertEqual(v, resp_headers[k.lower()])
versioned_obj.delete()
self.assertRaises(ResponseError, versioned_obj.read)
def assert_most_recent_version(self, obj_name, content,
should_be_dlo=False):
archive_versions = self.env.versions_container.files(parms={
'prefix': '%03x%s/' % (len(obj_name), obj_name),
'reverse': 'yes'})
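        # the versioned_writes middleware archives each object under
        # '<3-hex-digit name length><name>/<timestamp>', e.g. 'pic' becomes
        # '003pic/<timestamp>', so this prefix query lists exactly the
        # archived versions of obj_name, newest first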
archive_file = self.env.versions_container.file(archive_versions[0])
self.assertEqual(content, archive_file.read())
resp_headers = dict(archive_file.conn.response.getheaders())
if should_be_dlo:
self.assertIn('x-object-manifest', resp_headers)
else:
self.assertNotIn('x-object-manifest', resp_headers)
def _test_versioning_dlo_setup(self):
container = self.env.container
versions_container = self.env.versions_container
obj_name = Utils.create_name()
for i in ('1', '2', '3'):
time.sleep(.01) # guarantee that the timestamp changes
obj_name_seg = obj_name + '/' + i
versioned_obj = container.file(obj_name_seg)
versioned_obj.write(i)
# immediately overwrite
versioned_obj.write(i + i)
self.assertEqual(3, versions_container.info()['object_count'])
man_file = container.file(obj_name)
# write a normal file first
man_file.write('old content')
# guarantee that the timestamp changes
time.sleep(.01)
# overwrite with a dlo manifest
man_file.write('', hdrs={"X-Object-Manifest": "%s/%s/" %
(self.env.container.name, obj_name)})
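        # a DLO manifest has no body of its own; reading it concatenates
        # every object under the prefix '<container>/<obj_name>/' in name
        # order, so the segments written above come back as '11' + '22' + '33'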
self.assertEqual(4, versions_container.info()['object_count'])
self.assertEqual("112233", man_file.read())
self.assert_most_recent_version(obj_name, 'old content')
# overwrite the manifest with a normal file
man_file.write('new content')
self.assertEqual(5, versions_container.info()['object_count'])
# new most-recent archive is the dlo
self.assert_most_recent_version(obj_name, '112233', should_be_dlo=True)
return obj_name, man_file
def test_versioning_dlo(self):
obj_name, man_file = self._test_versioning_dlo_setup()
# verify that restore works properly
man_file.delete()
self.assertEqual(4, self.env.versions_container.info()['object_count'])
self.assertEqual("112233", man_file.read())
resp_headers = dict(man_file.conn.response.getheaders())
self.assertIn('x-object-manifest', resp_headers)
self.assert_most_recent_version(obj_name, 'old content')
man_file.delete()
self.assertEqual(3, self.env.versions_container.info()['object_count'])
self.assertEqual("old content", man_file.read())
def test_versioning_container_acl(self):
# create versions container and DO NOT give write access to account2
versions_container = self.env.account.container(Utils.create_name())
self.assertTrue(versions_container.create(hdrs={
'X-Container-Write': ''
}))
# check account2 cannot write to versions container
fail_obj_name = Utils.create_name()
fail_obj = versions_container.file(fail_obj_name)
self.assertRaises(ResponseError, fail_obj.write, "should fail",
cfg={'use_token': self.env.storage_token2})
# create container and give write access to account2
# don't set X-Versions-Location just yet
container = self.env.account.container(Utils.create_name())
self.assertTrue(container.create(hdrs={
'X-Container-Write': self.env.conn2.user_acl}))
# check account2 cannot set X-Versions-Location on container
self.assertRaises(ResponseError, container.update_metadata, hdrs={
self.env.location_header_key: versions_container},
cfg={'use_token': self.env.storage_token2})
# good! now let admin set the X-Versions-Location
# p.s.: sticking a 'x-remove' header here to test precedence
# of both headers. Setting the location should succeed.
self.assertTrue(container.update_metadata(hdrs={
'X-Remove-' + self.env.location_header_key[len('X-'):]:
versions_container,
self.env.location_header_key: versions_container}))
# write object twice to container and check version
obj_name = Utils.create_name()
versioned_obj = container.file(obj_name)
self.assertTrue(versioned_obj.write("never argue with the data",
cfg={'use_token': self.env.storage_token2}))
self.assertEqual(versioned_obj.read(), "never argue with the data")
self.assertTrue(
versioned_obj.write("we don't have no beer, just tequila",
cfg={'use_token': self.env.storage_token2}))
self.assertEqual(versioned_obj.read(),
"we don't have no beer, just tequila")
self.assertEqual(1, versions_container.info()['object_count'])
# read the original uploaded object
for filename in versions_container.files():
backup_file = versions_container.file(filename)
break
self.assertEqual(backup_file.read(), "never argue with the data")
# user3 (some random user with no access to any of account1)
# tries to read from versioned container
self.assertRaises(ResponseError, backup_file.read,
cfg={'use_token': self.env.storage_token3})
# create an object user3 can try to copy
a2_container = self.env.account2.container(Utils.create_name())
a2_container.create(
hdrs={'X-Container-Read': self.env.conn3.user_acl},
cfg={'use_token': self.env.storage_token2})
a2_obj = a2_container.file(Utils.create_name())
self.assertTrue(a2_obj.write("unused",
cfg={'use_token': self.env.storage_token2}))
# user3 cannot write, delete, or copy to/from source container either
number_of_versions = versions_container.info()['object_count']
self.assertRaises(ResponseError, versioned_obj.write,
"some random user trying to write data",
cfg={'use_token': self.env.storage_token3})
self.assertEqual(number_of_versions,
versions_container.info()['object_count'])
self.assertRaises(ResponseError, versioned_obj.delete,
cfg={'use_token': self.env.storage_token3})
self.assertEqual(number_of_versions,
versions_container.info()['object_count'])
self.assertRaises(
ResponseError, versioned_obj.write,
hdrs={'X-Copy-From': '%s/%s' % (a2_container.name, a2_obj.name),
'X-Copy-From-Account': self.env.conn2.account_name},
cfg={'use_token': self.env.storage_token3})
self.assertEqual(number_of_versions,
versions_container.info()['object_count'])
self.assertRaises(
ResponseError, a2_obj.copy_account,
self.env.conn.account_name, container.name, obj_name,
cfg={'use_token': self.env.storage_token3})
self.assertEqual(number_of_versions,
versions_container.info()['object_count'])
# user2 can't read or delete from versions-location
self.assertRaises(ResponseError, backup_file.read,
cfg={'use_token': self.env.storage_token2})
self.assertRaises(ResponseError, backup_file.delete,
cfg={'use_token': self.env.storage_token2})
# but is able to delete from the source container
        # this could be a helpful scenario for dev ops who want to set up
        # just one container to hold the object versions of multiple
        # containers, each of which is owned by a different user
self.assertTrue(versioned_obj.delete(
cfg={'use_token': self.env.storage_token2}))
# tear-down since we create these containers here
# and not in self.env
a2_container.delete_recursive()
versions_container.delete_recursive()
container.delete_recursive()
def _test_versioning_check_acl_setup(self):
container = self.env.container
versions_container = self.env.versions_container
versions_container.create(hdrs={'X-Container-Read': '.r:*,.rlistings'})
obj_name = Utils.create_name()
versioned_obj = container.file(obj_name)
versioned_obj.write("aaaaa")
self.assertEqual("aaaaa", versioned_obj.read())
versioned_obj.write("bbbbb")
self.assertEqual("bbbbb", versioned_obj.read())
# Use token from second account and try to delete the object
org_token = self.env.account.conn.storage_token
self.env.account.conn.storage_token = self.env.conn2.storage_token
try:
with self.assertRaises(ResponseError) as cm:
versioned_obj.delete()
self.assertEqual(403, cm.exception.status)
finally:
self.env.account.conn.storage_token = org_token
# Verify with token from first account
self.assertEqual("bbbbb", versioned_obj.read())
return versioned_obj
def test_versioning_check_acl(self):
versioned_obj = self._test_versioning_check_acl_setup()
versioned_obj.delete()
self.assertEqual("aaaaa", versioned_obj.read())
class TestObjectVersioningUTF8(Base2, TestObjectVersioning):
def tearDown(self):
self._tear_down_files()
super(TestObjectVersioningUTF8, self).tearDown()
class TestCrossPolicyObjectVersioning(TestObjectVersioning):
env = TestCrossPolicyObjectVersioningEnv
def setUp(self):
super(TestCrossPolicyObjectVersioning, self).setUp()
if self.env.multiple_policies_enabled is False:
raise SkipTest('Cross policy test requires multiple policies')
elif self.env.multiple_policies_enabled is not True:
# just some sanity checking
raise Exception("Expected multiple_policies_enabled "
"to be True/False, got %r" % (
                                self.env.multiple_policies_enabled,))
class TestObjectVersioningHistoryMode(TestObjectVersioning):
env = TestObjectVersioningHistoryModeEnv
    # These overridden tests include assertions about deleting versioned
    # objects, whose behavior differs from default object versioning with
    # X-Versions-Location.
def test_overwriting(self):
versions_container = self.env.versions_container
versioned_obj, expected_headers, expected_content_types = \
self._test_overwriting_setup()
# test delete
# at first, delete will succeed with 204
versioned_obj.delete()
expected_content_types.append(
'application/x-deleted;swift_versions_deleted=1')
        # after that, subsequent deletes do not restore any old version,
        # and we get 404 Not Found
for x in range(3):
with self.assertRaises(ResponseError) as cm:
versioned_obj.delete()
self.assertEqual(404, cm.exception.status)
expected_content_types.append(
'application/x-deleted;swift_versions_deleted=1')
# finally, we have 4 versioned items and 4 delete markers total in
# the versions container
self.assertEqual(8, versions_container.info()['object_count'])
self.assertEqual(expected_content_types, [
o['content_type'] for o in versions_container.files(
parms={'format': 'json'})])
# update versioned_obj
versioned_obj.write("eeee", hdrs={'Content-Type': 'text/thanksgiving',
'X-Object-Meta-Bar': 'foo'})
# verify the PUT object is kept successfully
obj_info = versioned_obj.info()
self.assertEqual('text/thanksgiving', obj_info['content_type'])
        # the delete markers are still there
self.assertEqual(8, versions_container.info()['object_count'])
# update versioned_obj
versioned_obj.write("ffff", hdrs={'Content-Type': 'text/teriyaki',
'X-Object-Meta-Food': 'chickin'})
# verify the PUT object is kept successfully
obj_info = versioned_obj.info()
self.assertEqual('text/teriyaki', obj_info['content_type'])
        # the previous object gets archived after the delete markers
self.assertEqual(9, versions_container.info()['object_count'])
versioned_obj.delete()
with self.assertRaises(ResponseError) as cm:
versioned_obj.read()
self.assertEqual(404, cm.exception.status)
self.assertEqual(11, versions_container.info()['object_count'])
def test_versioning_dlo(self):
obj_name, man_file = \
self._test_versioning_dlo_setup()
man_file.delete()
with self.assertRaises(ResponseError) as cm:
man_file.read()
self.assertEqual(404, cm.exception.status)
self.assertEqual(7, self.env.versions_container.info()['object_count'])
expected = ['old content', '112233', 'new content', '']
bodies = [
self.env.versions_container.file(f).read()
for f in self.env.versions_container.files(parms={
'prefix': '%03x%s/' % (len(obj_name), obj_name)})]
self.assertEqual(expected, bodies)
def test_versioning_check_acl(self):
versioned_obj = self._test_versioning_check_acl_setup()
versioned_obj.delete()
with self.assertRaises(ResponseError) as cm:
versioned_obj.read()
self.assertEqual(404, cm.exception.status)
        # we have 3 objects in the versions_container: 'aaaaa', 'bbbbb',
        # and a delete marker with empty content
self.assertEqual(3, self.env.versions_container.info()['object_count'])
files = self.env.versions_container.files()
for actual, expected in zip(files, ['aaaaa', 'bbbbb', '']):
prev_version = self.env.versions_container.file(actual)
self.assertEqual(expected, prev_version.read())
class TestSloWithVersioning(unittest2.TestCase):
def setUp(self):
if 'slo' not in cluster_info:
raise SkipTest("SLO not enabled")
self.conn = Connection(tf.config)
self.conn.authenticate()
self.account = Account(
self.conn, tf.config.get('account', tf.config['username']))
self.account.delete_containers()
# create a container with versioning
self.versions_container = self.account.container(Utils.create_name())
self.container = self.account.container(Utils.create_name())
self.segments_container = self.account.container(Utils.create_name())
if not self.container.create(
hdrs={'X-Versions-Location': self.versions_container.name}):
raise ResponseError(self.conn.response)
if 'versions' not in self.container.info():
raise SkipTest("Object versioning not enabled")
for cont in (self.versions_container, self.segments_container):
if not cont.create():
raise ResponseError(self.conn.response)
# create some segments
self.seg_info = {}
for letter, size in (('a', 1024 * 1024),
('b', 1024 * 1024)):
seg_name = letter
file_item = self.segments_container.file(seg_name)
file_item.write(letter * size)
self.seg_info[seg_name] = {
'size_bytes': size,
'etag': file_item.md5,
'path': '/%s/%s' % (self.segments_container.name, seg_name)}
def _create_manifest(self, seg_name):
        # create a manifest in the versioning-enabled container
file_item = self.container.file("my-slo-manifest")
file_item.write(
json.dumps([self.seg_info[seg_name]]),
parms={'multipart-manifest': 'put'})
return file_item
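    # For reference, the manifest uploaded above is a JSON list with one dict
    # per segment; for segment 'a' it would look roughly like (etag value
    # hypothetical):
    #   [{"path": "/<segments_container>/a", "etag": "<md5>",
    #     "size_bytes": 1048576}]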
def _assert_is_manifest(self, file_item, seg_name):
manifest_body = file_item.read(parms={'multipart-manifest': 'get'})
resp_headers = dict(file_item.conn.response.getheaders())
self.assertIn('x-static-large-object', resp_headers)
self.assertEqual('application/json; charset=utf-8',
file_item.content_type)
try:
manifest = json.loads(manifest_body)
except ValueError:
self.fail("GET with multipart-manifest=get got invalid json")
self.assertEqual(1, len(manifest))
key_map = {'etag': 'hash', 'size_bytes': 'bytes', 'path': 'name'}
for k_client, k_slo in key_map.items():
self.assertEqual(self.seg_info[seg_name][k_client],
manifest[0][k_slo])
def _assert_is_object(self, file_item, seg_name):
file_contents = file_item.read()
self.assertEqual(1024 * 1024, len(file_contents))
self.assertEqual(seg_name, file_contents[0])
self.assertEqual(seg_name, file_contents[-1])
def tearDown(self):
# remove versioning to allow simple container delete
self.container.update_metadata(hdrs={'X-Versions-Location': ''})
self.account.delete_containers()
def test_slo_manifest_version(self):
file_item = self._create_manifest('a')
# sanity check: read the manifest, then the large object
self._assert_is_manifest(file_item, 'a')
self._assert_is_object(file_item, 'a')
# upload new manifest
file_item = self._create_manifest('b')
# sanity check: read the manifest, then the large object
self._assert_is_manifest(file_item, 'b')
self._assert_is_object(file_item, 'b')
versions_list = self.versions_container.files()
self.assertEqual(1, len(versions_list))
version_file = self.versions_container.file(versions_list[0])
# check the version is still a manifest
self._assert_is_manifest(version_file, 'a')
self._assert_is_object(version_file, 'a')
# delete the newest manifest
file_item.delete()
# expect the original manifest file to be restored
self._assert_is_manifest(file_item, 'a')
self._assert_is_object(file_item, 'a')
| {
"content_hash": "b1f6b327aebcfd87a32c7c3ac42bf0f1",
"timestamp": "",
"source": "github",
"line_count": 775,
"max_line_length": 79,
"avg_line_length": 42.36387096774194,
"alnum_prop": 0.6251218323586745,
"repo_name": "psachin/swift",
"id": "f09e43e956f940994ad5b9b72e72926f28527240",
"size": "33448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/test_versioned_writes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "248"
},
{
"name": "PHP",
"bytes": "377"
},
{
"name": "Python",
"bytes": "8807732"
},
{
"name": "Shell",
"bytes": "1804"
}
],
"symlink_target": ""
} |
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import pickle
# LOAD DATA
df = pd.read_csv('data/wheat-2013-supervised-edited.csv')
drop_cols = ['Latitude','Longitude'] + [df.columns[0]]
df.drop(drop_cols,axis=1,inplace=True)
# LOAD SBS FEATURES
with open('SBS_feat_set.plk','rb') as f:
sbs_dict = pickle.load(f)
# PARAMETER TUNER
def tuner(name,*params):
if name == 'Linear':
k, model = 8, RANSACRegressor(base_estimator=LinearRegression(),max_trials=100,min_samples=params[0])
X = np.matrix(df.ix[:,:-1])[:,list(sbs_dict[name][k])]
y = np.array(df.ix[:,-1])
X_std = StandardScaler().fit_transform(X)
y_std = StandardScaler().fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X_std,y_std,test_size=0.25,random_state=42)
elif name == 'Random Forest':
k, model = 4, RandomForestRegressor(n_estimators=params[0],max_depth=params[1],random_state=42)
X = np.matrix(df.ix[:,:-1])[:,list(sbs_dict[name][k])]
y = np.array(df.ix[:,-1])
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.25,random_state=42)
elif name == 'Gradient Boost':
k, model = 8, GradientBoostingRegressor(n_estimators=params[0],max_depth=params[1])
X = np.matrix(df.ix[:,:-1])[:,list(sbs_dict[name][k])]
y = np.array(df.ix[:,-1])
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.25,random_state=42)
results = model.fit(X_train,y_train)
train_score = np.mean(cross_val_score(results,X_train,y_train,cv=8))
test_score = results.score(X_test,y_test)
return train_score, test_score
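# illustrative call (hypothetical hyperparameter values): score a random
# forest with 100 trees of max depth 3 on the SBS-selected features
#   train_r2, test_r2 = tuner('Random Forest', 100, 3)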
# EMPTY DICTS (will fill in w/ train & test scores)
train_dict = {}
test_dict = {}
################################################################
################# LINEAR REGRESSION w/ RANDSAC #################
################################################################
name = 'Linear'
test_scores = []
train_scores= []
param_list = []
min_samp_size = 70000
max_samp_size = 110000
for param in np.arange(min_samp_size,max_samp_size+1,10000):
train_score, test_score = tuner(name,param)
train_scores.append(train_score)
test_scores.append(test_score)
param_list.append(param)
train_dict[name] = train_scores
test_dict[name] = test_scores
################################################################
################## RANDOM FOREST REGRESSOR #####################
################################################################
name = 'Random Forest'
test_scores = []
train_scores= []
param_list = []
min_trees,max_trees = 20,100
min_depth,max_depth = 1,5
for param1 in np.arange(min_trees,max_trees+1,20):
bin1 = []
bin2 = []
param_list.append(param1)
for param2 in np.arange(min_depth,max_depth+1,1):
train_score, test_score = tuner(name,param1,param2)
bin1.append(train_score)
bin2.append(test_score)
train_scores.append(np.array(bin1))
test_scores.append(np.array(bin2))
train_dict[name] = train_scores
test_dict[name] = test_scores
################################################################
################# GRADIENT BOOSTING REGRESSOR #################
################################################################
name = 'Gradient Boost'
test_scores = []
train_scores= []
param_list = []
min_trees,max_trees = 100,500
min_depth,max_depth = 1,5
for param1 in np.arange(min_trees,max_trees+1,100):
bin1 = []
bin2 = []
param_list.append(param1)
for param2 in np.arange(min_depth,max_depth+1,1):
train_score, test_score = tuner(name,param1,param2)
bin1.append(train_score)
bin2.append(test_score)
train_scores.append(np.array(bin1))
test_scores.append(np.array(bin2))
train_dict[name] = train_scores
test_dict[name] = test_scores
# SAVE SCORES!
with open('model_optimization_train_scores.plk','wb') as f:
pickle.dump(train_dict,f)
with open('model_optimization_test_scores.plk','wb') as f:
pickle.dump(test_dict,f) | {
"content_hash": "7950c4779dafa19c9b95c7dd52ed8197",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 109,
"avg_line_length": 37.62068965517241,
"alnum_prop": 0.5976168652612283,
"repo_name": "marwin-ko/projects",
"id": "78924849da70045a0ea10dd66702f89e34fc09b6",
"size": "4385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aerialintel-data_science_challenge/4_model_optimization-run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "13855947"
},
{
"name": "Python",
"bytes": "21214"
}
],
"symlink_target": ""
} |
import ocw.utils as utils
import numpy as np
from scipy.stats import percentileofscore, linregress
class Downscaling:
def __init__(self, ref_dataset, model_present, model_future):
'''
:param ref_dataset: The Dataset to use as the reference dataset (observation)
:type ref_dataset: Dataset
:param model_present: model simulation to be compared with observation
:type model_present: Dataset
:param model_future: model simulation to be calibrated for prediction
:type model_future: Dataset
'''
self.ref_dataset = ref_dataset[~ref_dataset.mask].ravel()
self.model_present = model_present.ravel()
self.model_future = model_future.ravel()
description = "statistical downscaling methods"
def Delta_addition(self):
'''Calculate the mean difference between future and present simulation,
then add the difference to the observed distribution
:returns: downscaled model_present and model_future
'''
ref = self.ref_dataset
model_present = self.model_present
model_future = self.model_future
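        # e.g. if the future run averages 1.5 units higher than the present
        # run, every observed value is shifted up by that mean delta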
return model_present, ref + np.mean(model_future-model_present)
def Delta_correction(self):
'''Calculate the mean difference between observation and present simulation,
then add the difference to the future distribution
:returns: downscaled model_present and model_future
'''
ref = self.ref_dataset
model_present = self.model_present
model_future = self.model_future
return model_present+np.mean(ref) - np.mean(model_present), model_future + np.mean(ref) - np.mean(model_present)
def Quantile_mapping(self):
'''Remove the biases for each quantile value
Wood et al (2004) HYDROLOGIC IMPLICATIONS OF DYNAMICAL AND STATISTICAL APPROACHES TO DOWNSCALING CLIMATE MODEL OUTPUTS
:returns: downscaled model_present and model_future
'''
ref = self.ref_dataset
model_present = self.model_present
model_present_corrected = np.zeros(model_present.size)
model_future = self.model_future
model_future_corrected = np.zeros(model_future.size)
for ival, model_value in enumerate(model_present):
percentile = percentileofscore(model_present, model_value)
model_present_corrected[ival] = np.percentile(ref, percentile)
for ival, model_value in enumerate(model_future):
percentile = percentileofscore(model_future, model_value)
model_future_corrected[ival] = model_value + np.percentile(ref, percentile) - np.percentile(model_present, percentile)
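        # e.g. a future value at the 90th percentile of the future run is
        # shifted by (observed p90 - present-run p90)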
return model_present_corrected, model_future_corrected
def Asynchronous_regression(self):
'''Remove the biases by fitting a linear regression model with ordered observational and model datasets
Stoner et al (2013) An asynchronous regional regression model for statistical downscaling of daily climate variables
:returns: downscaled model_present and model_future
'''
ref_original = self.ref_dataset
model_present = self.model_present
model_present_sorted = np.sort(model_present)
model_future = self.model_future
        ref = np.zeros(model_present.size) # For linear regression, the size of the reference data must be the same as the model data.
for ival, model_value in enumerate(model_present_sorted):
percentile = percentileofscore(model_present_sorted, model_value)
ref[ival] = np.percentile(ref_original, percentile)
slope, intercept = linregress(model_present_sorted, ref)[0:2]
return model_present*slope+intercept, model_future*slope+intercept
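# Illustrative usage sketch (not part of the module; names are hypothetical,
# and ref_dataset is expected to be a masked array):
#   import numpy.ma as ma
#   ds = Downscaling(ma.masked_invalid(obs), model_hist, model_future)
#   present_qm, future_qm = ds.Quantile_mapping()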
| {
"content_hash": "0231390a86902d26a8e4ebf09ee9f53a",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 131,
"avg_line_length": 41.12903225806452,
"alnum_prop": 0.6739869281045752,
"repo_name": "MBoustani/climate",
"id": "75e2adc8ac9bc52e1afd066eca6d57d43d499bbd",
"size": "4613",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ocw/statistical_downscaling.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "24139"
},
{
"name": "CSS",
"bytes": "2516"
},
{
"name": "HTML",
"bytes": "36106"
},
{
"name": "JavaScript",
"bytes": "123324"
},
{
"name": "OpenEdge ABL",
"bytes": "286872"
},
{
"name": "Python",
"bytes": "731441"
},
{
"name": "Ruby",
"bytes": "541"
},
{
"name": "Shell",
"bytes": "15747"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('taxbrain', '0012_auto_20141020_1540'),
]
operations = [
migrations.AlterField(
model_name='outputurl',
name='slug',
field=models.SlugField(max_length=100),
),
]
| {
"content_hash": "8cee9948052dc5bbea57f3297d19c1db",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 51,
"avg_line_length": 21.166666666666668,
"alnum_prop": 0.5931758530183727,
"repo_name": "talumbau/webapp-public",
"id": "bae99a0bf84c44a23b0ee3cfb5b35d3d49466434",
"size": "405",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "webapp/apps/taxbrain/migrations/0013_auto_20141021_1346.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "856744"
},
{
"name": "HTML",
"bytes": "61838"
},
{
"name": "JavaScript",
"bytes": "85905"
},
{
"name": "Python",
"bytes": "378874"
},
{
"name": "Shell",
"bytes": "17"
}
],
"symlink_target": ""
} |
"""This example adds an HTML5 ad to a given AdGroup.
To get ad_group_id, run get_ad_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import urllib2
from googleads import adwords
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def main(client, ad_group_id):
# Initialize appropriate service.
ad_group_ad_service = client.GetService('AdGroupAdService', version='v201802')
# Create HTML5 media.
html5_zip = GetHTML5ZipFromUrl('https://goo.gl/9Y7qI2')
# Create a media bundle containing the zip file with all the HTML5 components.
media_bundle = {
'xsi_type': 'MediaBundle',
'data': html5_zip,
'entryPoint': 'carousel/index.html',
'type': 'MEDIA_BUNDLE'
}
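  # 'entryPoint' names the HTML file inside the zip that the ad loads first;
  # the raw zip bytes are uploaded as the MEDIA_BUNDLE data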
ad_data = {
'uniqueName': 'adData',
'fields': [
{
'name': 'Custom_layout',
'fieldMedia': media_bundle,
'type': 'MEDIA_BUNDLE'
},
{
'name': 'layout',
'fieldText': 'Custom',
'type': 'ENUM'
}
]
}
html5_ad = {
'xsi_type': 'TemplateAd',
'name': 'Ad for HTML5',
'templateId': 419,
'finalUrls': ['https://www.google.com'],
'displayUrl': 'www.google.com?tip=ENTER_YOUR_OWN_FINAL_AND_DISPLAY_URLS',
'dimensions': {
'width': '300',
'height': '250'
},
'templateElements': [ad_data]
}
ad_group_ad = {
'adGroupId': ad_group_id,
'ad': html5_ad,
'status': 'PAUSED'
}
operations = [
{
'operator': 'ADD',
'operand': ad_group_ad
}
]
ads = ad_group_ad_service.mutate(operations)
# Display results.
for ad in ads['value']:
    print ('New HTML5 Ad with id "%s" and display url "%s" was added.'
% (ad['ad']['id'], ad['ad']['displayUrl']))
def GetHTML5ZipFromUrl(url):
"""Retrieve zip file from the given URL."""
response = urllib2.urlopen(url)
# Note: The utf-8 decode is for 2to3 Python 3 compatibility.
return response.read().decode('utf-8')
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID)
| {
"content_hash": "1ca7267568d718760e100f1fb41daa6f",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 80,
"avg_line_length": 25.43617021276596,
"alnum_prop": 0.5938937682977834,
"repo_name": "Aloomaio/googleads-python-lib",
"id": "d0f2810fcd44408add600f074c87752e894fca0b",
"size": "3013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/adwords/v201802/advanced_operations/add_html5_ad.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "491015"
}
],
"symlink_target": ""
} |
from __future__ import print_function
__license__ = """
Copyright (c) 2012-2014, Uwe Schmitt, ETH Zurich, all rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
Neither the name of the mineway GmbH nor the names of its contributors may be
used to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
def expect_exception(fun):
def wrapper(*a, **kw):
try:
fun(*a, **kw)
except Exception:
if 0:
print("info: expected excption. here some more info:")
import traceback
traceback.print_exc()
print
pass
else:
assert False, "%s did not raise exception" % fun
# set name, so that test frame work recognizes wrapped function
wrapper.__name__ = fun.__name__ + "__exception_wrapped"
return wrapper
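# illustrative usage (hypothetical test function): the wrapped test passes
# only because the body raises
#   @expect_exception
#   def test_rejects_bad_input():
#       raise ValueError("boom")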
| {
"content_hash": "084f35386ffe479ff180f8e9c8155f5a",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 79,
"avg_line_length": 42.673469387755105,
"alnum_prop": 0.733142037302726,
"repo_name": "hendrik-cliqz/autowrap",
"id": "fcc69e51be96127dc914ac71c151c681b3b5f68d",
"size": "2091",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "2859168"
},
{
"name": "Python",
"bytes": "263659"
}
],
"symlink_target": ""
} |
import logging
from importer.management.commands._import_base_command import ImportBaseCommand
logger = logging.getLogger(__name__)
class Command(ImportBaseCommand):
help = "Import any oparl object by its id"
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument("ids", nargs="+", help="The oparl ids of the objects")
def handle(self, *args, **options):
importer, _body = self.get_importer(options)
for oparl_id in options["ids"]:
importer.import_anything(oparl_id).save()
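# illustrative invocation (hypothetical OParl id):
#   ./manage.py import_anything https://oparl.example.org/paper/1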
| {
"content_hash": "9854858bba8403af70b612831fa3c84f",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 82,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.6773049645390071,
"repo_name": "meine-stadt-transparent/meine-stadt-transparent",
"id": "ce110892f3d95fb8b5929d54b9deb0dc9e3d9bd4",
"size": "564",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "importer/management/commands/import_anything.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2397"
},
{
"name": "HTML",
"bytes": "158632"
},
{
"name": "JavaScript",
"bytes": "62206"
},
{
"name": "Python",
"bytes": "601144"
},
{
"name": "SCSS",
"bytes": "40214"
},
{
"name": "Shell",
"bytes": "1363"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('plataforma', '0005_auto_20170710_1346'),
]
operations = [
migrations.AddField(
model_name='historicalnodo',
name='contato_facebook',
field=models.URLField(blank=True, null=True, verbose_name='Facebook'),
),
migrations.AddField(
model_name='historicalnodo',
name='contato_whatsapp',
field=models.URLField(blank=True, null=True, verbose_name='WhatsApp'),
),
migrations.AddField(
model_name='historicalnodo',
name='contato_zoom',
field=models.URLField(blank=True, null=True, verbose_name='Zoom'),
),
migrations.AddField(
model_name='nodo',
name='contato_facebook',
field=models.URLField(blank=True, null=True, verbose_name='Facebook'),
),
migrations.AddField(
model_name='nodo',
name='contato_whatsapp',
field=models.URLField(blank=True, null=True, verbose_name='WhatsApp'),
),
migrations.AddField(
model_name='nodo',
name='contato_zoom',
field=models.URLField(blank=True, null=True, verbose_name='Zoom'),
),
]
| {
"content_hash": "bec873cca62c054912d24c4e89a28aba",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 82,
"avg_line_length": 32.27906976744186,
"alnum_prop": 0.5720461095100865,
"repo_name": "bodedev/prospera",
"id": "7879c7dad2304b70f50f48bd52ed7912deddf37c",
"size": "1461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plataforma/migrations/0006_auto_20170710_1349.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25391"
},
{
"name": "HTML",
"bytes": "52767"
},
{
"name": "JavaScript",
"bytes": "4894"
},
{
"name": "Python",
"bytes": "66197"
}
],
"symlink_target": ""
} |
from trac.tests.functional import *
#TODO: split this into multiple smaller testcases
class TestPreferences(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Set preferences for admin user"""
prefs_url = self._tester.url + "/prefs"
tc.follow('Preferences')
tc.url(prefs_url)
tc.notfind('Your preferences have been saved.')
tc.formvalue('userprefs', 'name', ' System Administrator ')
tc.formvalue('userprefs', 'email', ' admin@example.com ')
tc.submit()
tc.find('Your preferences have been saved.')
tc.follow('Date & Time')
tc.url(prefs_url + '/datetime')
tc.formvalue('userprefs', 'tz', 'GMT -10:00')
tc.submit()
tc.find('Your preferences have been saved.')
tc.follow('General')
tc.url(prefs_url)
tc.notfind('Your preferences have been saved.')
tc.find('value="System Administrator"')
tc.find(r'value="admin@example\.com"')
tc.follow('Date & Time')
tc.url(prefs_url + '/datetime')
tc.find('GMT -10:00')
class RegressionTestRev5785(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of the fix in r5785"""
prefs_url = self._tester.url + "/prefs"
tc.follow('Preferences')
tc.url(prefs_url)
tc.follow('Logout')
tc.notfind(internal_error) # See [5785]
tc.follow('Login')
class RegressionTestTicket5765(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5765
Unable to turn off 'Enable access keys' in Preferences
"""
self._tester.go_to_front()
tc.follow('Preferences')
tc.follow('Keyboard Shortcuts')
tc.formvalue('userprefs', 'accesskeys', True)
tc.submit()
tc.find('name="accesskeys".*checked="checked"')
tc.formvalue('userprefs', 'accesskeys', False)
tc.submit()
tc.notfind('name="accesskeys".*checked="checked"')
def functionalSuite(suite=None):
if not suite:
import trac.tests.functional.testcases
suite = trac.tests.functional.testcases.functionalSuite()
suite.addTest(TestPreferences())
suite.addTest(RegressionTestRev5785())
suite.addTest(RegressionTestTicket5765())
return suite
if __name__ == '__main__':
unittest.main(defaultTest='functionalSuite')
| {
"content_hash": "c2fb790ebe618e1357e2f947fcde826f",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 70,
"avg_line_length": 35.18840579710145,
"alnum_prop": 0.6293245469522241,
"repo_name": "dafrito/trac-mirror",
"id": "968d2346597e492ef6a3b70eeb158269beaea7e5",
"size": "2947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trac/prefs/tests/functional.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "78549"
},
{
"name": "JavaScript",
"bytes": "80640"
},
{
"name": "Perl",
"bytes": "2616"
},
{
"name": "Python",
"bytes": "2978350"
},
{
"name": "Shell",
"bytes": "11207"
}
],
"symlink_target": ""
} |
from angr.storage.file import SimFile
from angr.storage.memory import SimMemory
from angr.storage.memory_object import SimMemoryObject
from angr.storage.paged_memory import SimPagedMemory
| {
"content_hash": "05c445f8eeb0e630fd9092941bb011ef",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 54,
"avg_line_length": 47,
"alnum_prop": 0.8617021276595744,
"repo_name": "Ruide/angr-dev",
"id": "7aa6f4301a3ed985770f5a2e475ec4bddfbab59f",
"size": "188",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "simuvex/simuvex/storage/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "2962"
},
{
"name": "Batchfile",
"bytes": "4542"
},
{
"name": "C",
"bytes": "18511978"
},
{
"name": "C++",
"bytes": "295194"
},
{
"name": "Haskell",
"bytes": "2192"
},
{
"name": "Makefile",
"bytes": "12558"
},
{
"name": "OpenEdge ABL",
"bytes": "2415"
},
{
"name": "Perl",
"bytes": "9974"
},
{
"name": "Python",
"bytes": "5611416"
},
{
"name": "Shell",
"bytes": "41791"
}
],
"symlink_target": ""
} |
import unittest
import gopay
from tests.unit.utils import Utils
class TestCreatePayment(unittest.TestCase):
"""TestCreatePayment class
To execute test for certain method properly it is necessary to add prefix 'test' to its name.
"""
def setUp(self):
self.payments = gopay.payments(
{
"goid": Utils.GO_ID,
"clientId": Utils.CLIENT_ID,
"clientSecret": Utils.CLIENT_SECRET,
"gatewayUrl": Utils.GATEWAY_URL,
}
)
def test_create_payment(self):
base_payment = Utils.create_base_payment()
response = self.payments.create_payment(base_payment)
if "error_code" not in str(response.json):
print("Payment: " + str(response.json))
print("Payment id: " + str(response.json["id"]))
print("Payment gwUrl: " + str(response.json["gw_url"]))
else:
print("Error: " + str(response.json))
| {
"content_hash": "77a5b66104d500c06bc548080c852a1a",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 97,
"avg_line_length": 28.852941176470587,
"alnum_prop": 0.5800203873598369,
"repo_name": "gopaycommunity/gopay-python-api",
"id": "ff2fdf07db56bab601e3b3c5c934fe7f81db7841",
"size": "981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/create_payment_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57702"
}
],
"symlink_target": ""
} |
import os, shutil
import glob, zipfile
import re
import pprint
from xml.etree.ElementTree import ElementTree
from collections import OrderedDict
from operator import itemgetter
import logging
import datetime
dateTimeInfo = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
loggingName = "aids_export" + dateTimeInfo + ".log"
logging.basicConfig(filename=loggingName,level=logging.INFO)
itemDirectory = "J:\\"
zipOutDir = "I:\\aids-meta-zip-out\\"
xmlOutputDir = "G:\\aids-drupal-xml\\"
fileMatch = "AP[0-9]+\.tif"
# pull list of SORTED item IDs from folder where XML files are stored
# (assumes Jeff will create one XML file / item and place them in a directory)
def getFileList(itemDirectory, extension):
fileList = {}
for root, sub, files in os.walk(itemDirectory):
for item in files:
if (re.match(fileMatch, item)):
print("adding item " + item)
myFileSize = os.path.getsize(os.path.join(root,item))
fileList[item] = myFileSize
else:
logging.info("Skipping file " + item + " name pattern did not match")
itemIdList = {}
for fileNameKey, fileSize in fileList.items():
print("checking file " + fileNameKey)
if fileNameKey.find(extension) != -1 :
print("Adding found file " + fileNameKey + " of size " + str(fileSize))
itemIdList[fileNameKey.split('.')[0]] = fileSize #get the id only no extension
else:
logging.info("Could not find file name key " + fileNameKey +" with extension " + extension + " name pattern did not match")
# sorted smallest to largest
sortedDict = OrderedDict(sorted(itemIdList.items(), key=itemgetter(1)))
    return sortedDict
# walk through file tree, find all items matching an item ID, return list of files with paths
def findMatchingItems(idStr, itemDirectoryStr):
matchingItems = []
for root, dirs, files in os.walk(itemDirectoryStr):
for item in files:
if (re.match(idStr + "\.", item)):
matchingItems.append(os.path.join(root,item))
    return matchingItems
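# illustrative: findMatchingItems('AP0001', 'J:\\') returns the full path of
# every file named 'AP0001.<ext>' found anywhere under the tree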
#get the list of files that can be added to the zip
#if the data cannot be found it is logged
def getFileSet(idList):
print("create zipable file set called")
filesToAdd = []
for id in idList:
myFiles = findMatchingItems(id, itemDirectory)
print("adding id " + id + " to current set" )
if len(myFiles) == 1:
fileName = id + ".xml"
xmlFile = os.path.join(xmlOutputDir, fileName)
if( os.path.isfile(xmlFile) ):
filesToAdd.append(xmlFile)
filesToAdd.append(myFiles[0])
fileName = id + ".xml"
else:
logging.info("skipping file " + id + " no xml file found")
else:
logging.info("Bad files had len of " + str(len(myFiles)) + " for id " + id)
print("done prcessing zip set")
return filesToAdd
#zip up the list of files into a zip archive
def createZipSet(files, zipFileName):
print("create zip set called " + zipFileName)
with zipfile.ZipFile(zipFileName, 'w', allowZip64=True) as myzip:
for aFile in files:
print("adding file " + os.path.basename(aFile))
myzip.write(aFile, os.path.basename(aFile))
def createFolderSet(files, folderName):
    print("Create folder set called " + folderName)
    # copy each file into the destination folder, creating it if needed
    if not os.path.exists(folderName):
        os.makedirs(folderName)
    for aFile in files:
        shutil.copy(aFile, os.path.join(folderName, os.path.basename(aFile)))
def processSets(offset, maxFilesToProcess, zipOutput):
fileIdList = getFileList(itemDirectory, "tif")
setSize = len(fileIdList)
isZipOutput = False
if(not maxFilesToProcess):
maxFilesToProcess = setSize + 1
if(not offset):
offset = 0
if(zipOutput.lower() == "yes"):
isZipOutput = True
offset = int(offset)
maxFilesToProcess = int(maxFilesToProcess)
setSize = int(setSize)
print ("Max files to process = " + str(maxFilesToProcess))
print ("Offset = " + str(offset))
counter = 1
totalBytes = 0
fileSet = []
startCount = 1
for fileName, fileSize in fileIdList.items():
if( (counter >= offset) and (counter <= maxFilesToProcess) ) :
print("counter = " + str(counter) + " processing file " + fileName + " with size " + str(fileSize))
nextFile = fileName
if( (totalBytes + fileSize) < 2000000000): #keep adding files until 2GB max data set size reached
print("file size " + str(totalBytes + fileSize) + " less than 2Gb")
totalBytes = totalBytes + fileSize
fileSet.append(fileName)
counter = counter + 1
else: #we've hit the 2GB limit write out the data
print("file size " + str(totalBytes + fileSize) + " Larger than 2Gb adding file " + fileName + " to next set")
zipFileSet = getFileSet(fileSet)
if( isZipOutput ):
createZipSet(zipFileSet, zipOutDir +"aep_" + str(startCount) + "_to_" + str(counter) + ".zip")
print("creating zip file set " + zipOutDir +"aep_" + str(startCount) + "_to_" + str(counter) + ".zip size = " + str(totalBytes))
else:
                    createFolderSet(zipFileSet, zipOutDir +"aep_" + str(startCount) + "_to_" + str(counter))
                    print("creating folder file set " + zipOutDir + "aep_" + str(startCount) + "_to_" + str(counter) + " size = " + str(totalBytes))
totalBytes = fileSize
fileSet = []
fileSet.append(fileName)
counter = counter + 1
startCount = counter
print("resetting startCount " + str(startCount) + "offset = " + str(offset) + "")
if(len(fileSet) > 0): #handle the remaining files
zipFileSet = getFileSet(fileSet)
if( isZipOutput):
createZipSet(zipFileSet, zipOutDir +"aep_" + str(startCount) + "_to_" + str(counter - 1) + ".zip")
print("creating zip set " + zipOutDir + "aep_" + str(startCount) + "_to_" + str(counter -1) + ".zip size = " + str(totalBytes))
else:
createFolderSet(zipFileSet, zipOutDir +"aep_" + str(startCount) + "_to_" + str(counter - 1))
print("creating folder set " + zipOutDir + "aep_" + str(startCount) + "_to_" + str(counter -1) + " size = " + str(totalBytes))
# maxFilesPerZip = input("Please enter maximum number of files per zip file: ")
maxFilesToProcess = input("Please enter maximum number of files to process enter to process all: ")
offset = input("Please enter the offset position (inclusive) press enter to start from the beginning: ")
zipOutput = input("Zip output(yes/no) - enter and default is folder output: ")
processSets(offset, maxFilesToProcess, zipOutput)
| {
"content_hash": "779d50a6fa500c75504b8e83cc4a4b5b",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 151,
"avg_line_length": 39.87719298245614,
"alnum_prop": 0.6110866695996481,
"repo_name": "rochester-rcl/islandora-import-scripts",
"id": "1527c84f2d47a6f4fbd2d1f9f8038d60b4284b91",
"size": "6819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aids_by_size_pkg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "194323"
}
],
"symlink_target": ""
} |
"""
Copyright 2014-2016 University of Illinois
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
file: rulesets/admin.py
Author: Jon Gunderson
"""
from __future__ import absolute_import
from django.contrib import admin
from .models import Ruleset
class RulesetAdmin(admin.ModelAdmin):
list_display = ('title', 'tooltip', 'version', 'author')
list_filter = ('author', 'version')
admin.site.register(Ruleset, RulesetAdmin)
| {
"content_hash": "2588cdd91ad0b671ea6cb3362700c843",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 72,
"avg_line_length": 28.53125,
"alnum_prop": 0.764512595837897,
"repo_name": "opena11y/fae2",
"id": "51df7a9c38d1552784c5a68a379291a45472708b",
"size": "913",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fae2/rulesets/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13797"
},
{
"name": "HTML",
"bytes": "370034"
},
{
"name": "Java",
"bytes": "128994"
},
{
"name": "JavaScript",
"bytes": "2157386"
},
{
"name": "Python",
"bytes": "500936"
},
{
"name": "Shell",
"bytes": "2666"
}
],
"symlink_target": ""
} |
import os
import tensorflow as tf
import numpy as np
from tensorflow.contrib.tensorboard.plugins import projector
import argparse
def create_embeddings(sess, log_dir, embedding_file='', tensor_name='embedding'):
""" Add the embeddings to input TensorFlow session and writes a metadata_file containing the words in the vocabulary
:param sess: TF session
:param log_dir: destination directory for the model and metadata (the one to which TensorBoard points)
:param embedding_file: embedding file
:param tensor_name: tensor name
:return:
"""
embedding = None
embedding_dimensions = 0
vocab_size = 0
# write labels
with open(os.path.join(log_dir, tensor_name + '_' + 'metadata.tsv'), 'w') as metadata_file:
with open(embedding_file, 'r') as inputfile:
for i, line in enumerate(inputfile):
line = line.rstrip()
values = line.split()
# the first line is always the header based on what we produce in the embeddings_knn.py
if i == 0:
vocab_size = int(values[0])
embedding_dimensions = int(values[1])
embedding = np.empty((vocab_size, embedding_dimensions), dtype=np.float32)
else:
# accounts for the case of words with spaces
word = ' '.join(values[0:len(values) - embedding_dimensions]).strip()
coefs = np.asarray(values[-embedding_dimensions:], dtype='float32')
embedding[i - 1] = coefs
metadata_file.write(word + '\n')
X = tf.Variable([0.0], name=tensor_name)
place = tf.placeholder(tf.float32, shape=embedding.shape)
set_x = tf.assign(X, place, validate_shape=False)
sess.run(set_x, feed_dict={place: embedding})
def add_multiple_embeddings(log_dir, file_list, name_list):
""" Creates the files necessary for the multiple embeddings
:param log_dir: destination directory for the model and metadata (the one to which TensorBoard points)
:param file_list: list of embeddings files
:param name_list: names of the embeddings files
:return:
"""
# setup a TensorFlow session
tf.reset_default_graph()
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
config = projector.ProjectorConfig()
for i, file in enumerate(file_list):
tensor_name = name_list[i]
print('creating the embedding with the name ' + tensor_name)
create_embeddings(sess, log_dir, embedding_file=file,
tensor_name=tensor_name)
# create a TensorFlow summary writer
summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
embedding_conf = config.embeddings.add()
embedding_conf.tensor_name = tensor_name + ':0'
embedding_conf.metadata_path = os.path.join(tensor_name + '_' + 'metadata.tsv')
projector.visualize_embeddings(summary_writer, config)
# save the model
saver = tf.train.Saver()
saver.save(sess, os.path.join(log_dir, tensor_name + '_' + "model.ckpt"))
print('finished successfully!')
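# illustrative invocation (hypothetical file names); point TensorBoard at the
# same log directory afterwards:
#   python embeddings_formatter.py -f glove.vec w2v.vec -n glove w2v -l log
#   tensorboard --logdir log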
if __name__ == "__main__":
# create_embeddings(embedding_file='/mnt/storage01/lebret/data/embeddings/glove_noswearing_embeddings.vec',
# tensor_name='glove_no_swearing')
parser = argparse.ArgumentParser(description='Create files for tensorboard visualization')
parser.add_argument('-l', '--logfile',
help='Path for log file read by TensorBoard; defaults to "log"; ' +
'file is created if it doesn\'t exist',
dest='logfile')
parser.add_argument('-f', '--files',
help='List of embedding files to be visualized', nargs='+', type=str,
dest='file_list', required=True)
parser.add_argument('-n', '--names',
help='List of names you want to assign to the embeddings', nargs='+', type=str,
dest='name_list', required=True)
options = parser.parse_args()
if options.logfile:
log_dir = options.logfile
else:
log_dir = 'log'
if not os.path.exists(log_dir):
os.makedirs(log_dir)
file_list = options.file_list
name_list = options.name_list
add_multiple_embeddings(log_dir, file_list, name_list)
| {
"content_hash": "9e8c0d186dee05560cdd3fc5264f7df8",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 120,
"avg_line_length": 40.03603603603604,
"alnum_prop": 0.614986498649865,
"repo_name": "harkous/embeddingsviz",
"id": "881948dec46245679d09a0be1b2d831cea1038c3",
"size": "4444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "embeddingsviz/embeddings_formatter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10319"
}
],
"symlink_target": ""
} |