repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable)
|---|---|---|---|---|
tensorflow/federated | refs/heads/master | tensorflow_federated/python/simulation/baselines/stackoverflow/tag_prediction_preprocessing.py | 1 |
# Copyright 2020, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessing library for Stack Overflow tag prediction tasks."""
from typing import Callable, List
import tensorflow as tf
from tensorflow_federated.python.simulation.baselines import client_spec
from tensorflow_federated.python.simulation.baselines.stackoverflow import constants
def build_to_ids_fn(word_vocab: List[str],
tag_vocab: List[str]) -> Callable[[tf.Tensor], tf.Tensor]:
"""Constructs a function mapping examples to sequences of token indices."""
word_vocab_size = len(word_vocab)
word_table_values = tf.range(word_vocab_size, dtype=tf.int64)
word_table = tf.lookup.StaticVocabularyTable(
tf.lookup.KeyValueTensorInitializer(word_vocab, word_table_values),
num_oov_buckets=1)
tag_vocab_size = len(tag_vocab)
tag_table_values = tf.range(tag_vocab_size, dtype=tf.int64)
tag_table = tf.lookup.StaticVocabularyTable(
tf.lookup.KeyValueTensorInitializer(tag_vocab, tag_table_values),
num_oov_buckets=1)
def to_ids(example):
"""Converts a Stack Overflow example to a bag-of-words/tags format."""
sentence = tf.strings.join([example['tokens'], example['title']],
separator=' ')
words = tf.strings.split(sentence)
tokens = word_table.lookup(words)
token_sums = tf.reduce_sum(tf.one_hot(tokens, word_vocab_size), axis=0)
num_tokens = tf.reduce_sum(token_sums)
tokens = tf.math.divide_no_nan(token_sums, num_tokens)
tags = example['tags']
tags = tf.strings.split(tags, sep='|')
tags = tag_table.lookup(tags)
tags = tf.one_hot(tags, tag_vocab_size + 1)
tags = tf.reduce_sum(tags, axis=0)[:tag_vocab_size]
return (tokens, tags)
return to_ids
def create_preprocess_fn(
preprocess_spec: client_spec.ClientSpec,
word_vocab: List[str],
tag_vocab: List[str],
num_parallel_calls: int = tf.data.experimental.AUTOTUNE
) -> Callable[[tf.data.Dataset], tf.data.Dataset]:
"""Creates a preprocessing function for Stack Overflow tag prediction data.
This function creates a `tff.Computation` which takes a dataset, and returns
a preprocessed dataset. This preprocessing shuffles the dataset, repeats it
some number of times, takes a maximum number of examples, and then maps the
elements to tuples of the form (tokens, tags), where tokens are bag-of-words
vectors, and tags are binary vectors indicating that a given tag is associated
with the example.
Args:
preprocess_spec: A `tff.simulation.baselines.ClientSpec` containing
information on how to preprocess clients.
word_vocab: A list of strings representing the in-vocabulary words.
tag_vocab: A list of tokens representing the in-vocabulary tags.
num_parallel_calls: An integer representing the number of parallel calls
used when performing `tf.data.Dataset.map`.
Returns:
A callable taking as input a `tf.data.Dataset`, and returning a
`tf.data.Dataset` formed by preprocessing according to the input arguments.
"""
if not word_vocab:
raise ValueError('word_vocab must be non-empty.')
if not tag_vocab:
raise ValueError('tag_vocab must be non-empty.')
shuffle_buffer_size = preprocess_spec.shuffle_buffer_size
if shuffle_buffer_size is None:
shuffle_buffer_size = constants.DEFAULT_SHUFFLE_BUFFER_SIZE
def preprocess_fn(dataset):
if shuffle_buffer_size > 1:
dataset = dataset.shuffle(shuffle_buffer_size)
if preprocess_spec.num_epochs > 1:
dataset = dataset.repeat(preprocess_spec.num_epochs)
if preprocess_spec.max_elements is not None:
dataset = dataset.take(preprocess_spec.max_elements)
to_ids = build_to_ids_fn(word_vocab, tag_vocab)
dataset = dataset.map(to_ids, num_parallel_calls=num_parallel_calls)
return dataset.batch(preprocess_spec.batch_size)
return preprocess_fn
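

# --- Hedged usage sketch (not part of the original module) -------------------
# Illustrates how the two helpers above might be wired together on a toy
# dataset. The vocabularies and example values are invented, and `ClientSpec`
# is assumed to accept the fields read above (num_epochs, batch_size,
# max_elements, shuffle_buffer_size).
def _example_usage():
  word_vocab = ['tensorflow', 'federated', 'learning']
  tag_vocab = ['python', 'machine-learning']
  spec = client_spec.ClientSpec(num_epochs=1, batch_size=2, max_elements=4)
  preprocess_fn = create_preprocess_fn(spec, word_vocab, tag_vocab)
  raw_data = tf.data.Dataset.from_tensor_slices({
      'tokens': ['federated learning rocks', 'tensorflow'],
      'title': ['an intro', 'tags everywhere'],
      'tags': ['python|machine-learning', 'python'],
  })
  for tokens, tags in preprocess_fn(raw_data):
    # tokens: normalized bag-of-words over word_vocab; tags: multi-hot vector.
    print(tokens.shape, tags.shape)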
|
castlecms/castle.cms | refs/heads/master | castle/cms/tasks/email.py | 1 |
from castle.cms import subscribe
from castle.cms import utils
from castle.cms.browser.utils import Utils
from collective.celery import task
from plone import api
from urllib import urlencode
from zope.globalrequest import getRequest
@task.as_admin()
def send_email(*args, **kwargs):
utils.send_email(*args, **kwargs)
@task.as_admin()
def send_email_to_subscribers(subject, html, categories=None, sender=None):
_utils = Utils(api.portal.get(), getRequest())
public_url = _utils.get_public_url()
check_categories = (categories is not None and len(categories) != 0)
for subscriber in subscribe.all():
if check_categories:
# If the subscriber hasn't chosen any categories, they receive everything
if ('categories' in subscriber and
len(subscriber['categories']) > 0):
# make sure that this message falls under one of
# their categories
if len(categories.intersection(subscriber['categories'])) == 0:
continue
query = urlencode({
'email': subscriber.get('email'),
'code': subscriber.get('code')
})
unsubscribe_url = '%s/@@unsubscribe?%s' % (
public_url.rstrip('/'),
query)
change_url = '%s/@@changesubscription?%s' % (
public_url.rstrip('/'),
query)
unsubscribe_url = unsubscribe_url.encode('utf8')
change_url = change_url.encode('utf8')
# Substitute into a per-subscriber copy so the placeholders are still
# present for the next subscriber in the loop.
subscriber_html = html.replace('{{unsubscribe_url}}', unsubscribe_url)
subscriber_html = subscriber_html.replace('{{change_url}}', change_url)
utils.send_email([subscriber.get('email')], subject, subscriber_html, sender=sender)
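

# --- Hedged illustration (not part of the original module) -------------------
# Mirrors the category filter applied in the loop above, for documentation
# purposes only. It assumes `categories` is a set-like object (the code above
# calls `categories.intersection(...)`) and that subscriber records may carry
# a 'categories' list.
def _receives_message(categories, subscriber):
    if categories is None or len(categories) == 0:
        # No categories chosen for the message: everyone receives it.
        return True
    chosen = subscriber.get('categories') or []
    if len(chosen) == 0:
        # Subscriber expressed no preference: they receive everything.
        return True
    return len(categories.intersection(chosen)) > 0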
|
bhilburn/gnuradio | refs/heads/master | gr-filter/examples/benchmark_filters.py | 17 |
#!/usr/bin/env python
#
# Copyright 2005-2007,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import sys
import time
import random
from optparse import OptionParser
from gnuradio import gr
from gnuradio import blocks, filter
from gnuradio.eng_option import eng_option
def make_random_complex_tuple(L):
result = []
for x in range(L):
result.append(complex(random.uniform(-1000,1000),
random.uniform(-1000,1000)))
return tuple(result)
def benchmark(name, creator, dec, ntaps, total_test_size, block_size):
block_size = 32768
tb = gr.top_block()
taps = make_random_complex_tuple(ntaps)
src = blocks.vector_source_c(make_random_complex_tuple(block_size), True)
head = blocks.head(gr.sizeof_gr_complex, int(total_test_size))
op = creator(dec, taps)
dst = blocks.null_sink(gr.sizeof_gr_complex)
tb.connect(src, head, op, dst)
start = time.time()
tb.run()
stop = time.time()
delta = stop - start
print "%16s: taps: %4d input: %4g, time: %6.3f taps/sec: %10.4g" % (
name, ntaps, total_test_size, delta, ntaps*total_test_size/delta)
def main():
parser = OptionParser(option_class=eng_option)
parser.add_option("-n", "--ntaps", type="int", default=256)
parser.add_option("-t", "--total-input-size", type="eng_float", default=40e6)
parser.add_option("-b", "--block-size", type="intx", default=50000)
parser.add_option("-d", "--decimation", type="int", default=1)
(options, args) = parser.parse_args()
if len(args) != 0:
parser.print_help()
sys.exit(1)
ntaps = options.ntaps
total_input_size = options.total_input_size
block_size = options.block_size
dec = options.decimation
benchmark("filter.fir_filter_ccc", filter.fir_filter_ccc,
dec, ntaps, total_input_size, block_size)
benchmark("filter.fft_filter_ccc", filter.fft_filter_ccc,
dec, ntaps, total_input_size, block_size)
if __name__ == '__main__':
main()
|
smn/garelay | refs/heads/develop | setup.py | 1 |
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst')) as f:
README = f.read()
with open(os.path.join(here, 'requirements.txt')) as f:
requires = filter(None, f.readlines())
with open(os.path.join(here, 'VERSION')) as f:
version = f.read().strip()
setup(name='garelay',
version=version,
description='garelay',
long_description=README,
classifiers=[
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='Praekelt Foundation',
author_email='dev@praekelt.com',
url='None',
license='BSD',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
tests_require=requires,
entry_points={})
|
RAPD/RAPD | refs/heads/master | src/plugins/subcontractors/aimless.py | 1 |
"""Functions for parsing aimless logs"""
"""
This file is part of RAPD
Copyright (C) 2017, Cornell University
All rights reserved.
RAPD is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, version 3.
RAPD is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__created__ = "2017-05-09"
__maintainer__ = "Frank Murphy"
__email__ = "fmurphy@anl.gov"
__status__ = "Development"
# Standard imports
import argparse
# import from collections import OrderedDict
# import datetime
# import glob
# import json
# import logging
# import multiprocessing
import os
from pprint import pprint
# import pymongo
# import re
# import redis
# import shutil
# import subprocess
import sys
# import time
# import unittest
# import urllib2
# import uuid
# RAPD imports
# import commandline_utils
# import detectors.detector_utils as detector_utils
# import utils
# import utils.credits as credits
from utils.r_numbers import try_int, try_float
# Import smartie.py from the installed CCP4 package
# smartie.py is a python script for parsing log files from CCP4
try:
sys.path.append(os.path.join(os.environ["CCP4"], "share", "smartie"))
except KeyError as e:
print "\nError importing smartie from CCP4."
print "Environmental variable %s not set. Exiting." % e
exit(9)
import smartie
# Software dependencies
VERSIONS = {
# "eiger2cbf": ("160415",)
}
def parse_aimless(logfile):
"""
Parses the aimless logfile in order to pull out data for
graphing and the results summary table.
logfile should be input as the name of the log file
Relevant values for the summary table are stored in a dict.
key = name of result value
value = list of three numbers, 1 - Overall
2 - Inner Shell
3 - Outer Shell
Relevant information for creating plots is stored in a dict,
with the following format for each entry (i.e. each plot):
{"<*plot label*>":{
"data":{
"parameters":{<*line parameters*>},
"series":[
{xs : [],
ys : []
}
]
}
"parameters" : {<*plot parameters*>}
}
...
...
}
"""
log = smartie.parselog(logfile)
# print log.nfragments()
# print dir(log.fragment(0))
# Pull out information for the results summary table.
flag = True
summary = log.keytext(0).message().split("\n")
# print summary
# For some reason "Anomalous flag switched ON" is not always
# found, so the line below creates a blank entry for the
# the variable that should be created when that phrase is
# found, eliminating the problem where the program reports that
# the variable anomalous_report is referenced before assignment.
# anomalous_report = ""
int_results = {"anomalous_report": ""}
for line in summary:
# print line, len(line)
if "Space group" in line:
int_results["scaling_spacegroup"] = line.strip().split(": ")[-1]
elif "Average unit cell" in line:
int_results["scaling_unit_cell"] = [try_float(x) for x in line.split()[3:]]
elif "Anomalous flag switched ON" in line:
int_results["text2"] = line
elif "Low resolution limit" in line:
int_results["bins_low"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "High resolution limit" in line:
int_results["bins_high"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Rmerge" in line and "within" in line:
int_results["rmerge_anom"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Rmerge" in line and "all" in line:
int_results["rmerge_norm"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Rmeas" in line and "within" in line:
int_results["rmeas_anom"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Rmeas" in line and "all" in line:
int_results["rmeas_norm"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Rpim" in line and "within" in line:
int_results["rpim_anom"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Rpim" in line and "all" in line:
int_results["rpim_norm"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Rmerge in top intensity bin" in line:
int_results["rmerge_top"] = try_float(line.split()[-3], 0)
elif "Total number of observations" in line:
int_results["total_obs"] = [try_int(x, 0) for x in line.split()[-3:]]
elif "Total number unique" in line:
int_results["unique_obs"] = [try_int(x, 0) for x in line.split()[-3:]]
elif "Mean((I)/sd(I))" in line:
int_results["isigi"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Mn(I) half-set correlation CC(1/2)" in line:
int_results["cc-half"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Completeness" in line:
int_results["completeness"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Multiplicity" in line:
int_results["multiplicity"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Anomalous completeness" in line:
int_results["anom_completeness"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Anomalous multiplicity" in line:
int_results["anom_multiplicity"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "DelAnom correlation between half-sets" in line:
int_results["anom_correlation"] = [try_float(x, 0) for x in line.split()[-3:]]
elif "Mid-Slope of Anom Normal Probability" in line:
int_results["anom_slope"] = [try_float(x, 0) for x in line.split()[-3:]]
# This is now unused due to shifting output
# int_results = {
# "bins_low": [try_float(x, 0) for x in summary[3].split()[-3:]],
# "bins_high": [try_float(x, 0) for x in summary[4].split()[-3:]],
# "rmerge_anom": [try_float(x, 0) for x in summary[6].split()[-3:]],
# "rmerge_norm": [try_float(x, 0) for x in summary[7].split()[-3:]],
# "rmeas_anom": [try_float(x, 0) for x in summary[8].split()[-3:]],
# "rmeas_norm": [try_float(x, 0) for x in summary[9].split()[-3:]],
# "rpim_anom": [try_float(x, 0) for x in summary[10].split()[-3:]],
# "rpim_norm": [try_float(x, 0) for x in summary[11].split()[-3:]],
# "rmerge_top": float(summary[12].split()[-3]),
# "total_obs": [try_int(x) for x in summary[13].split()[-3:]],
# "unique_obs": [try_int(x) for x in summary[14].split()[-3:]],
# "isigi": [try_float(x, 0) for x in summary[15].split()[-3:]],
# "cc-half": [try_float(x, 0) for x in summary[16].split()[-3:]],
# "completeness": [try_float(x, 0) for x in summary[17].split()[-3:]],
# "multiplicity": [try_float(x, 0) for x in summary[18].split()[-3:]],
# "anom_completeness": [try_float(x, 0) for x in summary[21].split()[-3:]],
# "anom_multiplicity": [try_float(x, 0) for x in summary[22].split()[-3:]],
# "anom_correlation": [try_float(x, 0) for x in summary[23].split()[-3:]],
# "anom_slope": [try_float(summary[24].split()[-3])],
# "scaling_spacegroup": space_group,
# "scaling_unit_cell": unit_cell,
# "text2": anomalous_report,
# }
# Smartie can pull table information based on a regular
# expression pattern that matches the table title from
# the aimless log file.
# NOTE : the regular expression must match the beginning
# of the table's title, but does not need to be the entire
# title.
#
# We will use this to pull out the data from tables we are
# interested in.
#
# The beginning of the titles for all common tables in the
# aimless log file are given below, but not all of them
# are currently used to generate a plot.
# scales = "=== Scales v rotation"
rfactor = "Analysis against all Batches"
cchalf = "Correlations CC(1/2)"
cc = "Run pair correlations by resolution"
# anisotropy = "Anisotropy analysis"
vresolution = "Analysis against resolution,"
# anomalous = "Analysis against resolution, with & without"
# rresolution = "Analysis against resolution for each run"
# intensity = "Analysis against intensity"
completeness = "Completeness & multiplicity"
# deviation = "Run 1, standard deviation" # and 2, 3, ...
# all_deviation = "All runs, standard deviation"
# effect = "Effect of parameter variance on sd(I)"
rcp = "Radiation damage"
# pprint(dir(log))
# for table in log.tables():
# print table.title()
# Grab plots - None is plot missing
plots = {}
try:
plots["Rmerge vs Frame"] = {
"x_data": [int(x) for x in \
log.tables(rfactor)[0].col("Batch")],
"y_data": [
{
"data": [try_float(x, 0.0) for x in \
log.tables(rfactor)[0].col("Rmerge")],
"label": "Rmerge",
"pointRadius": 0
},
{
"data": [try_float(x, 0.0) for x in \
log.tables(rfactor)[0].col("SmRmerge")],
"label": "Smoothed",
"pointRadius": 0
}
],
"data": [
{
"parameters": {
"linecolor": "3",
"linelabel": "Rmerge",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [int(x) for x in \
log.tables(rfactor)[0].col("Batch")],
"ys": [try_float(x, 0.0) for x in \
log.tables(rfactor)[0].col("Rmerge")]
}
]
},
{
"parameters": {
"linecolor": "4",
"linelabel": "SmRmerge",
"linetype": "11",
"linewidth": "3",
},
"series": [
{
"xs": [try_int(x) for x in \
log.tables(rfactor)[0].col("Batch")],
"ys": [try_float(x, 0.0) for x in \
log.tables(rfactor)[0].col("SmRmerge")]
}
]
},
{
"parameters": {
"linecolor": "3",
"linelabel": "Rmerge",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [int(x) for x in \
log.tables(rfactor)[0].col("Batch")],
"ys": [try_float(x, 0.0) for x in \
log.tables(rfactor)[0].col("Rmerge")]
}
]
},
{
"parameters": {
"linecolor": "4",
"linelabel": "SmRmerge",
"linetype": "11",
"linewidth": "3",
},
"series": [
{
"xs": [try_int(x) for x in \
log.tables(rfactor)[0].col("Batch")],
"ys": [try_float(x, 0.0) for x in \
log.tables(rfactor)[0].col("SmRmerge")]
}
]
},
],
"parameters": {
"selectlabel": "Rmerge",
"toplabel": "Rmerge vs Batch for all Runs",
"xlabel": "Batch #",
"ylabel": "Rmerge",
},
}
# Plot not present
except IndexError:
plots["Rmerge vs Frame"] = None
try:
plots["Imean/RMS scatter"] = {
"x_data": [int(x) for x in log.tables(rfactor)[0].col("N")],
"y_data": [
{
"data": [try_float(x, 0.0) for x in \
log.tables(rfactor)[0].col("I/rms")],
"label": "I/rms",
"pointRadius": 0
}
],
# "data": [
# {
# "parameters": {
# "linecolor": "3",
# "linelabel": "I/rms",
# "linetype": "11",
# "linewidth": "3",
# },
# "series": [
# {
# "xs" : [int(x) for x in log.tables(rfactor)[0].col("N")],
# "ys" : [try_float(x, 0.0) for x in \
# log.tables(rfactor)[0].col("I/rms")],
# }
# ]
# }
# ],
"parameters": {
"selectlabel": "Imean/RMS",
"toplabel": "Imean / RMS scatter",
"xlabel": "Batch Number",
"ylabel": "Imean/RMS"
}
}
# Plot not present
except IndexError:
plots["Imean/RMS scatter"] = None
try:
plots["Anomalous & Imean CCs vs Resolution"] = {
"x_data": [try_float(x, 0.0) for x in \
log.tables(cchalf)[0].col("1/d^2")],
"y_data": [
{
"data": [try_float(x, 0.0) for x in \
log.tables(cchalf)[0].col("CCanom")],
"label": "CCanom",
"pointRadius": 0
},
{
"data": [try_float(x, 0.0) for x in \
log.tables(cchalf)[0].col("CC1/2")],
"label": "CC1/2",
"pointRadius": 0
}
],
"data": [
{
"parameters": {
"linecolor": "3",
"linelabel": "CCanom",
"linetype": "11",
"linewidth": "3",
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(cchalf)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(cchalf)[0].col("CCanom")],
}
]
},
{
"parameters": {
"linecolor": "4",
"linelabel": "CC1/2",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(cchalf)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(cchalf)[0].col("CC1/2")],
}
]
}
],
"parameters": {
"selectlabel": "CC",
"toplabel": "Anomalous & Imean CCs vs. Resolution",
"xlabel": "Dmid (Angstroms)",
"ylabel": "CC"
}
}
# Plot not present
except IndexError:
plots["Anomalous & Imean CCs vs Resolution"] = None
try:
plots["RMS correlation ratio"] = {
"x_data": [try_float(x, 0.0) for x in \
log.tables(cchalf)[0].col("1/d^2")],
"y_data": [
{
"data": [try_float(x, 0.0) for x in \
log.tables(cchalf)[0].col("RCRanom")],
"label": "RCRanom",
"pointRadius": 0
}
],
"data": [
{
"parameters": {
"linecolor": "3",
"linelabel": "RCRanom",
"linetype": "11",
"linewidth": "3",
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(cchalf)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(cchalf)[0].col("RCRanom")]
}
]
}
],
"parameters": {
"selectlabel": "RCR",
"toplabel": "RMS correlation ratio",
"xlabel": "1/d^2",
"ylabel": "RCR"
}
}
# Plot not present
except IndexError:
plots["RMS correlation ratio"] = None
try:
plots["I/sigma, Mean Mn(I)/sd(Mn(I))"] = {
"x_data": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"y_data": [
{
"data": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("Mn(I/sd)")],
"label": "Mn(I/sd)",
"pointRadius": 0
},
{
"data": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("I/RMS")],
"label": "I/RMS",
"pointRadius": 0
}
],
"data": [
{
"parameters": {
"linecolor": "3",
"linelabel": "I/RMS",
"linetype": "11",
"linewidth": "3",
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("I/RMS")]
}
]
},
{
"parameters": {
"linecolor": "4",
"linelabel": "Mn(I/sd)",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("Mn(I/sd)")]
}
]
}
],
"parameters": {
"selectlabel": "I/σI",
"toplabel": "I/sigma, Mean Mn(I)/sd(Mn(I))",
"xlabel": "1/d^2",
"ylabel": ""
}
}
# Plot not present
except IndexError:
plots["I/sigma, Mean Mn(I)/sd(Mn(I))"] = None
try:
plots["rs_vs_res"] = {
"x_data": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"y_data": [
{
"data": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("Rmrg")],
"label": "Rmerge",
"pointRadius": 0
},
{
"data": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("Rfull")],
"label": "Rfull",
"pointRadius": 0
},
{
"data": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("Rmeas")],
"label": "Rmeas",
"pointRadius": 0
},
{
"data": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("Rpim")],
"label": "Rpim",
"pointRadius": 0
},
],
"data": [
{
"parameters": {
"linecolor": "3",
"linelabel": "Remerge",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("Rmrg")]
}
]
},
{
"parameters": {
"linecolor": "4",
"linelabel": "Rfull",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("Rfull")]
}
]
},
{
"parameters": {
"linecolor": "5",
"linelabel": "Rmeas",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("Rmeas")]
}
]
},
{
"parameters": {
"linecolor": "6",
"linelabel": "Rpim",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("Rpim")]
}
]
}
],
"parameters": {
"selectlabel": "R Factors",
"toplabel": "Rmerge, Rfull, Rmeas, Rpim vs. Resolution",
"xlabel": "Dmid (Angstroms)",
"ylabel": ""
}
}
# Plot not present
except IndexError:
plots["rs_vs_res"] = None
try:
plots["Average I, RMS deviation, and Sd"] = {
"x_data": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"y_data": [
{
"data": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("RMSdev")],
"label": "RMSdev",
"pointRadius": 0
},
{
"data": [try_int(x, 0) for x in \
log.tables(vresolution)[0].col("AvI")],
"label": "AvgI",
"pointRadius": 0
},
{
"data": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("sd")],
"label": "SD",
"pointRadius": 0
}
],
"data": [
{
"parameters": {
"linecolor": "3",
"linelabel": "Average I",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"ys": [try_int(x, 0) for x in log.tables(vresolution)[0].col("AvI")]
}
]
},
{
"parameters": {
"linecolor": "4",
"linelabel": "RMS deviation",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("RMSdev")]
}
]
},
{
"parameters": {
"linecolor": "5",
"linelabel": "std. dev.",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(vresolution)[0].col("sd")]
}
]
}
],
"parameters": {
"selectlabel": "I vs Res",
"toplabel": "Average I, RMS dev., and std. dev.",
"xlabel": "Dmid (Ansgstroms)",
"ylabel": ""
}
}
# Plot not present
except IndexError:
plots["Average I, RMS deviation, and Sd"] = None
try:
plots["Completeness"] = {
"x_data": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("1/d^2")],
"y_data": [
{
"data": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("%poss")],
"label": "All",
"pointRadius": 0
},
{
"data": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("C%poss")],
"label": "C%poss",
"pointRadius": 0
},
{
"data": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("AnoCmp")],
"label": "AnoCmp",
"pointRadius": 0
},
{
"data": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("AnoFrc")],
"label": "AnoFrc",
"pointRadius": 0
}
],
"data": [
{
"parameters": {
"linecolor": "3",
"linelabel": "%poss",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("%poss")]
}
]
},
{
"parameters": {
"linecolor": "4",
"linelabel": "C%poss",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("C%poss")]
}
]
},
{
"parameters": {
"linecolor": "5",
"linelabel": "AnoCmp",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("AnoCmp")]
}
]
},
{
"parameters": {
"linecolor": "6",
"linelabel": "AnoFrc",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("AnoFrc")]
}
]
}
],
"parameters": {
"selectlabel": "Completeness",
"toplabel": "Completeness vs. Resolution",
"xlabel": "Dmid (Angstroms)",
"ylabel": "Percent"
}
}
# Plot not present
except IndexError:
plots["Completeness"] = None
try:
plots["Redundancy"] = {
"x_data": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("1/d^2")],
"y_data": [
{
"data": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("Mlplct")],
"label": "All",
"pointRadius": 0
},
{
"data": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("AnoMlt")],
"label": "Anomalous",
"pointRadius": 0
},
],
"data": [
{
"parameters": {
"linecolor": "3",
"linelabel": "multiplicity",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("Mlplct")]
}
]
},
{
"parameters": {
"linecolor": "4",
"linelabel": "anomalous multiplicity",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("1/d^2")],
"ys": [try_float(x, 0.0) for x in \
log.tables(completeness)[0].col("AnoMlt")]
}
]
}
],
"parameters": {
"selectlabel": "Redundancy",
"toplabel": "Redundancy",
"xlabel": "Dmid (Angstroms)",
"ylabel": ""
}
}
# Plot not present
except IndexError:
plots["Redundancy"] = None
try:
plots["Radiation Damage"] = {
"x_data": [int(x) for x in \
log.tables(rcp)[0].col("Batch")],
"y_data": [
{
"data": [try_float(x, 0.0) for x in \
log.tables(rcp)[0].col("Rcp")],
"label": "RCP",
"pointRadius": 0
}
],
"data": [
{
"parameters": {
"linecolor": "3",
"linelabel": "Rcp",
"linetype": "11",
"linewidth": "3"
},
"series": [
{
"xs": [int(x) for x in \
log.tables(rcp)[0].col("Batch")],
"ys": [try_float(x, 0.0) for x in \
log.tables(rcp)[0].col("Rcp")]
}
]
}
],
"parameters": {
"selectlabel": "RCP",
"toplabel": "Rcp vs. Batch",
"xlabel": "Relative frame difference",
"ylabel": ""
}
}
# Plot not present
except IndexError:
plots["Radiation Damage"] = None
# Return to the main program.
return (plots, int_results)
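

# --- Hedged helper (not part of the original module) --------------------------
# Small illustration of how the plots dictionary returned by parse_aimless
# above could be walked; it relies only on the "x_data"/"y_data" layout built
# in that function.
def summarize_plots(plots):
    """Print one line per plot produced by parse_aimless (illustrative only)."""
    for title, plot in plots.items():
        if plot is None:
            # The corresponding table was missing from the aimless log.
            print "%s: not present" % title
            continue
        labels = ", ".join(series["label"] for series in plot["y_data"])
        print "%s: %d points, series: %s" % (title, len(plot["x_data"]), labels)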
def get_cc(log_file):
"""
Returns the CCs in the section 'Matrix of correlations of E^2 between runs'
"""
# Handle filename
if isinstance(log_file, str):
log_lines = open(log_file, "r").readlines()
elif isinstance(log_file, list):
log_lines = log_file
else:
raise TypeError("Function takes a file name or list of lines from log file")
results = {
"cc": {},
"maximum_resolution": {},
"number": {},
"runs": []
}
# Look through for the lines of interest
in_range = False
for log_line in log_lines:
if "Matrix of correlations of E^2 between runs" in log_line:
in_range = True
in_body = False
if in_range:
# print log_line.rstrip()
if "$TABLE:" in log_line:
in_range = False
# In the body of the table
if in_body:
# print "body>>", log_line.rstrip()
body_split = log_line.split()
if body_split:
# The CC
if body_split[0] == "Run":
# print body_split
from_run = int(body_split[1])
to_run = from_run + 1
for f in body_split[3:]:
results["cc"][(from_run, to_run)] = float(f)
to_run += 1
# Number of reflections used in the CC
elif body_split[0] == "N":
# print from_run, body_split
to_run = from_run + 1
for f in body_split[1:]:
results["number"][(from_run, to_run)] = int(f)
to_run += 1
# Not in the body
else:
# Maximum resolution of the runs
if "maximum resolution" in log_line:
run = int(log_line.split()[1])
max_res = float(log_line.split()[4])
results["maximum_resolution"][run] = max_res
# Header of the table
elif " Run " in log_line:
# print "header", log_line.rstrip()
in_body = True
header_split = log_line.split()
runs = [1] + [ int(i) for i in header_split[1:] ]
results["runs"] = runs
return results
def main():
"""
The main process docstring
This function is called when this module is invoked from
the commandline
"""
print "main"
args = get_commandline()
print args
# res = parse_aimless(args.file)
# pprint(res)
get_cc(args.file)
def get_commandline():
"""
Grabs the commandline
"""
print "get_commandline"
# Parse the commandline arguments
commandline_description = "Parse an aimless log file"
parser = argparse.ArgumentParser(description=commandline_description)
# Directory or files
parser.add_argument(action="store",
dest="file",
default=False,
help="Template for image files")
# Print help message is no arguments
if len(sys.argv[1:])==0:
parser.print_help()
parser.exit()
return parser.parse_args()
if __name__ == "__main__":
# Execute code
main()
|
Cinntax/home-assistant | refs/heads/dev | homeassistant/components/device_tracker/setup.py | 3 |
"""Device tracker helpers."""
import asyncio
from typing import Dict, Any, Callable, Optional
from types import ModuleType
import attr
from homeassistant.core import callback
from homeassistant.setup import async_prepare_setup_platform
from homeassistant.helpers import config_per_platform
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util import dt as dt_util
from homeassistant.const import ATTR_LATITUDE, ATTR_LONGITUDE
from .const import (
DOMAIN,
PLATFORM_TYPE_LEGACY,
CONF_SCAN_INTERVAL,
SCAN_INTERVAL,
SOURCE_TYPE_ROUTER,
LOGGER,
)
@attr.s
class DeviceTrackerPlatform:
"""Class to hold platform information."""
LEGACY_SETUP = (
"async_get_scanner",
"get_scanner",
"async_setup_scanner",
"setup_scanner",
)
name = attr.ib(type=str)
platform = attr.ib(type=ModuleType)
config = attr.ib(type=Dict)
@property
def type(self):
"""Return platform type."""
for methods, platform_type in ((self.LEGACY_SETUP, PLATFORM_TYPE_LEGACY),):
for meth in methods:
if hasattr(self.platform, meth):
return platform_type
return None
async def async_setup_legacy(self, hass, tracker, discovery_info=None):
"""Set up a legacy platform."""
LOGGER.info("Setting up %s.%s", DOMAIN, self.type)
try:
scanner = None
setup = None
if hasattr(self.platform, "async_get_scanner"):
scanner = await self.platform.async_get_scanner(
hass, {DOMAIN: self.config}
)
elif hasattr(self.platform, "get_scanner"):
scanner = await hass.async_add_job(
self.platform.get_scanner, hass, {DOMAIN: self.config}
)
elif hasattr(self.platform, "async_setup_scanner"):
setup = await self.platform.async_setup_scanner(
hass, self.config, tracker.async_see, discovery_info
)
elif hasattr(self.platform, "setup_scanner"):
setup = await hass.async_add_job(
self.platform.setup_scanner,
hass,
self.config,
tracker.see,
discovery_info,
)
else:
raise HomeAssistantError("Invalid legacy device_tracker platform.")
if scanner:
async_setup_scanner_platform(
hass, self.config, scanner, tracker.async_see, self.type
)
return
if not setup:
LOGGER.error("Error setting up platform %s", self.type)
return
except Exception: # pylint: disable=broad-except
LOGGER.exception("Error setting up platform %s", self.type)
async def async_extract_config(hass, config):
"""Extract device tracker config and split between legacy and modern."""
legacy = []
for platform in await asyncio.gather(
*(
async_create_platform_type(hass, config, p_type, p_config)
for p_type, p_config in config_per_platform(config, DOMAIN)
)
):
if platform is None:
continue
if platform.type == PLATFORM_TYPE_LEGACY:
legacy.append(platform)
else:
raise ValueError(
"Unable to determine type for {}: {}".format(
platform.name, platform.type
)
)
return legacy
async def async_create_platform_type(
hass, config, p_type, p_config
) -> Optional[DeviceTrackerPlatform]:
"""Determine type of platform."""
platform = await async_prepare_setup_platform(hass, config, DOMAIN, p_type)
if platform is None:
return None
return DeviceTrackerPlatform(p_type, platform, p_config)
@callback
def async_setup_scanner_platform(
hass: HomeAssistantType,
config: ConfigType,
scanner: Any,
async_see_device: Callable,
platform: str,
):
"""Set up the connect scanner-based platform to device tracker.
This method must be run in the event loop.
"""
interval = config.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL)
update_lock = asyncio.Lock()
scanner.hass = hass
# On the first scan of each MAC we also look up and report the host name
seen: Any = set()
async def async_device_tracker_scan(now: dt_util.dt.datetime):
"""Handle interval matches."""
if update_lock.locked():
LOGGER.warning(
"Updating device list from %s took longer than the scheduled "
"scan interval %s",
platform,
interval,
)
return
async with update_lock:
found_devices = await scanner.async_scan_devices()
for mac in found_devices:
if mac in seen:
host_name = None
else:
host_name = await scanner.async_get_device_name(mac)
seen.add(mac)
try:
extra_attributes = await scanner.async_get_extra_attributes(mac)
except NotImplementedError:
extra_attributes = dict()
kwargs = {
"mac": mac,
"host_name": host_name,
"source_type": SOURCE_TYPE_ROUTER,
"attributes": {
"scanner": scanner.__class__.__name__,
**extra_attributes,
},
}
zone_home = hass.states.get(hass.components.zone.ENTITY_ID_HOME)
if zone_home:
kwargs["gps"] = [
zone_home.attributes[ATTR_LATITUDE],
zone_home.attributes[ATTR_LONGITUDE],
]
kwargs["gps_accuracy"] = 0
hass.async_create_task(async_see_device(**kwargs))
async_track_time_interval(hass, async_device_tracker_scan, interval)
hass.async_create_task(async_device_tracker_scan(None))
|
flochaz/horizon | refs/heads/stable/juno | openstack_dashboard/dashboards/project/access_and_security/keypairs/urls.py | 6 |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.project.access_and_security.keypairs \
import views
urlpatterns = patterns('',
url(r'^create/$', views.CreateView.as_view(), name='create'),
url(r'^import/$', views.ImportView.as_view(), name='import'),
url(r'^(?P<keypair_name>[^/]+)/download/$', views.DownloadView.as_view(),
name='download'),
url(r'^(?P<keypair_name>[^/]+)/generate/$', views.GenerateView.as_view(),
name='generate'),
url(r'^(?P<keypair_name>[^/]+)/(?P<optional>[^/]+)/generate/$',
views.GenerateView.as_view(), name='generate'),
)
|
nhorelik/django-rest-framework | refs/heads/master | rest_framework/utils/model_meta.py | 71 |
"""
Helper function for returning the field information that is associated
with a model class. This includes returning all the forward and reverse
relationships and their associated metadata.
Usage: `get_field_info(model)` returns a `FieldInfo` instance.
"""
import inspect
from collections import namedtuple
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils import six
from rest_framework.compat import OrderedDict
FieldInfo = namedtuple('FieldResult', [
'pk', # Model field instance
'fields', # Dict of field name -> model field instance
'forward_relations', # Dict of field name -> RelationInfo
'reverse_relations', # Dict of field name -> RelationInfo
'fields_and_pk', # Shortcut for 'pk' + 'fields'
'relations' # Shortcut for 'forward_relations' + 'reverse_relations'
])
RelationInfo = namedtuple('RelationInfo', [
'model_field',
'related_model',
'to_many',
'has_through_model'
])
def _resolve_model(obj):
"""
Resolve supplied `obj` to a Django model class.
`obj` must be a Django model class itself, or a string
representation of one. Useful in situations like GH #1225 where
Django may not have resolved a string-based reference to a model in
another model's foreign key definition.
String representations should have the format:
'appname.ModelName'
"""
if isinstance(obj, six.string_types) and len(obj.split('.')) == 2:
app_name, model_name = obj.split('.')
resolved_model = models.get_model(app_name, model_name)
if resolved_model is None:
msg = "Django did not return a model for {0}.{1}"
raise ImproperlyConfigured(msg.format(app_name, model_name))
return resolved_model
elif inspect.isclass(obj) and issubclass(obj, models.Model):
return obj
raise ValueError("{0} is not a Django model".format(obj))
def get_field_info(model):
"""
Given a model class, returns a `FieldInfo` instance, which is a
`namedtuple`, containing metadata about the various field types on the model
including information about their relationships.
"""
opts = model._meta.concrete_model._meta
pk = _get_pk(opts)
fields = _get_fields(opts)
forward_relations = _get_forward_relationships(opts)
reverse_relations = _get_reverse_relationships(opts)
fields_and_pk = _merge_fields_and_pk(pk, fields)
relationships = _merge_relationships(forward_relations, reverse_relations)
return FieldInfo(pk, fields, forward_relations, reverse_relations,
fields_and_pk, relationships)
def _get_pk(opts):
pk = opts.pk
while pk.rel and pk.rel.parent_link:
# If model is a child via multi-table inheritance, use parent's pk.
pk = pk.rel.to._meta.pk
return pk
def _get_fields(opts):
fields = OrderedDict()
for field in [field for field in opts.fields if field.serialize and not field.rel]:
fields[field.name] = field
return fields
def _get_forward_relationships(opts):
"""
Returns an `OrderedDict` of field names to `RelationInfo`.
"""
forward_relations = OrderedDict()
for field in [field for field in opts.fields if field.serialize and field.rel]:
forward_relations[field.name] = RelationInfo(
model_field=field,
related_model=_resolve_model(field.rel.to),
to_many=False,
has_through_model=False
)
# Deal with forward many-to-many relationships.
for field in [field for field in opts.many_to_many if field.serialize]:
forward_relations[field.name] = RelationInfo(
model_field=field,
related_model=_resolve_model(field.rel.to),
to_many=True,
has_through_model=(
not field.rel.through._meta.auto_created
)
)
return forward_relations
def _get_reverse_relationships(opts):
"""
Returns an `OrderedDict` of field names to `RelationInfo`.
"""
# Note that we have a hack here to handle internal API differences for
# this internal API across Django 1.7 -> Django 1.8.
# See: https://code.djangoproject.com/ticket/24208
reverse_relations = OrderedDict()
for relation in opts.get_all_related_objects():
accessor_name = relation.get_accessor_name()
related = getattr(relation, 'related_model', relation.model)
reverse_relations[accessor_name] = RelationInfo(
model_field=None,
related_model=related,
to_many=relation.field.rel.multiple,
has_through_model=False
)
# Deal with reverse many-to-many relationships.
for relation in opts.get_all_related_many_to_many_objects():
accessor_name = relation.get_accessor_name()
related = getattr(relation, 'related_model', relation.model)
reverse_relations[accessor_name] = RelationInfo(
model_field=None,
related_model=related,
to_many=True,
has_through_model=(
(getattr(relation.field.rel, 'through', None) is not None) and
not relation.field.rel.through._meta.auto_created
)
)
return reverse_relations
def _merge_fields_and_pk(pk, fields):
fields_and_pk = OrderedDict()
fields_and_pk['pk'] = pk
fields_and_pk[pk.name] = pk
fields_and_pk.update(fields)
return fields_and_pk
def _merge_relationships(forward_relations, reverse_relations):
return OrderedDict(
list(forward_relations.items()) +
list(reverse_relations.items())
)
def is_abstract_model(model):
"""
Given a model class, returns a boolean True if it is abstract and False if it is not.
"""
return hasattr(model, '_meta') and hasattr(model._meta, 'abstract') and model._meta.abstract
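

# --- Hedged usage sketch (not part of the original module) -------------------
# Shows how `get_field_info` might be inspected; `BlogPost` below is a
# hypothetical model, and the attribute names come from the FieldInfo
# namedtuple defined at the top of this module.
def describe_model(model):
    info = get_field_info(model)
    print('pk: %s' % info.pk.name)
    print('fields: %s' % list(info.fields))
    print('forward relations: %s' % list(info.forward_relations))
    print('reverse relations: %s' % list(info.reverse_relations))

# describe_model(BlogPost)  # BlogPost is hypothetical; requires Django setup.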
|
abhisg/scikit-learn | refs/heads/master | sklearn/__init__.py | 2 |
"""
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.18.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It is used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree', 'discriminant_analysis',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
|
SchrodingersGat/kicad-library-utils | refs/heads/master | schlib/rules/S7_1.py | 2 |
# -*- coding: utf-8 -*-
from rules.rule import *
import re
class Rule(KLCRule):
"""
Create the methods check and fix to use with the kicad lib files.
"""
def __init__(self, component):
super(Rule, self).__init__(component, 'Power-flag symbols follow some special rules/KLC-exceptions')
self.makePinINVISIBLE = False
self.makePinPowerInput = False
self.fixTooManyPins = False
self.fixPinSignalName = False
self.fixNoFootprint = False
def check(self):
"""
Performs the check for this rule.
"""
fail = False
if self.component.isPossiblyPowerSymbol():
if (len(self.component.pins) != 1):
self.error("Power-flag symbols have exactly one pin")
fail = True
self.fixTooManyPins = True
else:
if (self.component.pins[0]['electrical_type'] != 'W'):
self.error("The pin in power-flag symbols has to be of a POWER-INPUT")
fail = True
self.makePinPowerInput = True
if (not self.component.pins[0]['pin_type'].startswith('N')):
self.error("The pin in power-flag symbols has to be INVISIBLE")
fail = True
self.makePinINVISIBLE = True
if ((self.component.pins[0]['name'] != self.component.name) and ('~'+self.component.pins[0]['name'] != self.component.name)):
self.error("The pin name ("+self.component.pins[0]['name']+") in power-flag symbols has to be the same as the component name ("+self.component.name+")")
fail = True
self.fixPinSignalName = True
# footprint field must be empty
if self.component.fields[2]['name'] != '' and self.component.fields[2]['name'] != '""':
self.error("Graphical symbols have no footprint association (footprint was set to '"+self.component.fields[2]['name']+"')")
fail = True
self.fixNoFootprint = True
# FPFilters must be empty
if len(self.component.fplist) > 0:
self.error("Graphical symbols have no footprint filters")
fail = True
self.fixNoFootprint = True
return fail
def fix(self):
"""
Performs the fix for this rule, if possible.
"""
if self.fixTooManyPins:
self.info("FIX for too many pins in power-symbol not supported")
if self.makePinPowerInput:
self.info("FIX: switching pin-type to power-input")
self.component.pins[0]['electrical_type'] = 'W'
if self.makePinINVISIBLE:
self.info("FIX: making pin invisible")
self.component.pins[0]['pin_type'] = 'N'+self.component.pins[0]['pin_type']
if self.fixPinSignalName:
newname = self.component.name
if self.component.name.startswith('~'):
newname = self.component.name[1:len(self.component.name)]
self.info("FIX: change pin name to '"+newname+"'")
self.component.pins[0]['name'] = newname
if self.fixNoFootprint:
self.info("FIX empty footprint association and FPFilters")
self.component.fplist.clear()
self.component.fields[2] = ''
|
jcsaaddupuy/clu-python | refs/heads/master | src/tests/test_configurableagent.py | 1 |
#!/usr/bin/env python2
import unittest
from mock import Mock, MagicMock
from clu.agents.base import ConfigurableCluAgent
from clu.agents import CluAgent, ConfigurableCluAgent, CluException, CluAgentException
class ConfigurableCluAgentTestCase(unittest.TestCase):
def test_init_empty_params(self):
co = ConfigurableCluAgent({})
self.assertTrue(co.config.name == "")
self.assertTrue(co.name == "")
def test_init_one_kwargs_param(self):
co = ConfigurableCluAgent({"name":"myagent"})
self.assertTrue(co.config.name == "myagent")
self.assertTrue(co.name == "myagent")
def test_configcluagent_execute_raises_cluagentexception(self):
cluagent = ConfigurableCluAgent({})
with self.assertRaises(CluException):
cluagent.run()
def test_configcluagent_call_before_execute(self):
cluagent = ConfigurableCluAgent({})
before_execute=Mock()
cluagent.before_execute=before_execute
with self.assertRaises(CluException):
cluagent.run()
before_execute.assert_called_once_with()
def test_configcluagent_call_before_execute_exception(self):
cluagent = ConfigurableCluAgent({})
before_execute=Mock(side_effect=CluAgentException())
cluagent.before_execute=before_execute
with self.assertRaises(CluException):
cluagent.run()
before_execute.assert_called_once_with()
def test_configcluagent_call_before_execute_after(self):
cluagent = ConfigurableCluAgent({})
before_execute=Mock()
after_execute=Mock()
execute=Mock()
cluagent.before_execute=before_execute
cluagent.execute=execute
cluagent.after_execute=after_execute
cluagent.run()
before_execute.assert_called_once_with()
execute.assert_called_once_with()
after_execute.assert_called_once_with()
def test_cluagent_call_ensure_afterexecute_on_run_exception(self):
""" Ensure that ensure_after_execute is called when execute() raise an Exception """
cluagent = ConfigurableCluAgent({})
before_execute=Mock()
after_execute=Mock()
ensure_after_execute=Mock()
execute=Mock(side_effect=Exception("In your face"))
cluagent.before_execute=before_execute
cluagent.execute=execute
cluagent.after_execute=after_execute
cluagent.ensure_after_execute=ensure_after_execute
with(self.assertRaises(CluAgentException)) as ex:
cluagent.run()
before_execute.assert_called_once_with()
execute.assert_called_once_with()
ensure_after_execute.assert_called_once_with()
def test_cluagent_call_ensure_afterexecute_on_after_execute_exception(self):
""" Ensure that ensure_after_execute is called when after_execute() raise an Exception """
cluagent = ConfigurableCluAgent({})
before_execute=Mock()
after_execute=Mock(side_effect=Exception("In you face"))
ensure_after_execute=Mock()
execute=Mock()
cluagent.before_execute=before_execute
cluagent.execute=execute
cluagent.after_execute=after_execute
cluagent.ensure_after_execute=ensure_after_execute
with(self.assertRaises(CluAgentException)) as ex:
cluagent.run()
before_execute.assert_called_once_with()
execute.assert_called_once_with()
after_execute.assert_called_once_with()
ensure_after_execute.assert_called_once_with()
def test_cluagent_call_ensure_execute_exception_raised_on_after_execute_exception(self):
""" Ensure that when execute() and after_execute() raise an Exception, the first is raised"""
ex_ensure_after=Exception("In your face")
ex_after_execute=Exception("In your face su**cker")
after_execute=Mock(side_effect=ex_after_execute)
ensure_after_execute=Mock(side_effect=ex_ensure_after)
cluagent = ConfigurableCluAgent({})
before_execute=Mock()
execute=Mock()
cluagent.before_execute=before_execute
cluagent.execute=execute
cluagent.after_execute=after_execute
cluagent.ensure_after_execute=ensure_after_execute
with(self.assertRaises(CluAgentException)) as e:
cluagent.run()
before_execute.assert_called_once_with()
execute.assert_called_once_with()
after_execute.assert_called_once_with()
ensure_after_execute.assert_called_once_with()
def test_cluagent_call_ensure_afterexecute_on_after_execute_exception_is_first(self):
""" Ensure that when execute() and after_execute() raises an Exception, the first is raised"""
ex_ensure_after=Exception("In your face")
ensure_after_execute=Mock(side_effect=ex_ensure_after)
cluagent = ConfigurableCluAgent({})
before_execute=Mock()
execute=Mock()
after_execute=Mock()
cluagent.before_execute=before_execute
cluagent.execute=execute
cluagent.after_execute=after_execute
cluagent.ensure_after_execute=ensure_after_execute
with(self.assertRaises(CluAgentException)) as e:
cluagent.run()
before_execute.assert_called_once_with()
execute.assert_called_once_with()
after_execute.assert_called_once_with()
ensure_after_execute.assert_called_once_with()
def suite():
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTest(loader.loadTestsFromTestCase(ConfigurableCluAgentTestCase))
return suite
if __name__ == '__main__':# pragma: no cover
unittest.TextTestRunner(verbosity=2).run(suite())
|
conductor-tutti/space_for_us | refs/heads/master | lib/werkzeug/contrib/sessions.py | 295 |
# -*- coding: utf-8 -*-
r"""
werkzeug.contrib.sessions
~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains some helper classes that help one to add session
support to a python WSGI application. For full client-side session
storage see :mod:`~werkzeug.contrib.securecookie` which implements a
secure, client-side session storage.
Application Integration
=======================
::
from werkzeug.contrib.sessions import SessionMiddleware, \
FilesystemSessionStore
app = SessionMiddleware(app, FilesystemSessionStore())
The current session will then appear in the WSGI environment as
`werkzeug.session`. However, it's recommended not to use the middleware
but the stores directly in the application; for very simple scripts a
middleware for sessions could be sufficient.
This module does not implement methods or ways to check if a session is
expired. That should be done by a cronjob and is storage specific. For
example, to prune unused filesystem sessions one could check the modified
time of the files. If sessions are stored in the database, the new()
method should add an expiration timestamp for the session.
For better flexibility it's recommended to not use the middleware but the
store and session object directly in the application dispatching::
session_store = FilesystemSessionStore()
def application(environ, start_response):
request = Request(environ)
sid = request.cookies.get('cookie_name')
if sid is None:
request.session = session_store.new()
else:
request.session = session_store.get(sid)
response = get_the_response_object(request)
if request.session.should_save:
session_store.save(request.session)
response.set_cookie('cookie_name', request.session.sid)
return response(environ, start_response)
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import sys
import tempfile
from os import path
from time import time
from random import random
from hashlib import sha1
from pickle import dump, load, HIGHEST_PROTOCOL
from werkzeug.datastructures import CallbackDict
from werkzeug.utils import dump_cookie, parse_cookie
from werkzeug.wsgi import ClosingIterator
from werkzeug.posixemulation import rename
from werkzeug._compat import PY2, text_type
_sha1_re = re.compile(r'^[a-f0-9]{40}$')
def _urandom():
if hasattr(os, 'urandom'):
return os.urandom(30)
return text_type(random()).encode('ascii')
def generate_key(salt=None):
if salt is None:
salt = repr(salt).encode('ascii')
return sha1(b''.join([
salt,
str(time()).encode('ascii'),
_urandom()
])).hexdigest()
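# Note (added comment): generate_key() returns a 40 character hexadecimal
# SHA-1 digest, which is exactly what the _sha1_re pattern above (and
# SessionStore.is_valid_key below) accepts as a well-formed session id.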
class ModificationTrackingDict(CallbackDict):
__slots__ = ('modified',)
def __init__(self, *args, **kwargs):
def on_update(self):
self.modified = True
self.modified = False
CallbackDict.__init__(self, on_update=on_update)
dict.update(self, *args, **kwargs)
def copy(self):
"""Create a flat copy of the dict."""
missing = object()
result = object.__new__(self.__class__)
for name in self.__slots__:
val = getattr(self, name, missing)
if val is not missing:
setattr(result, name, val)
return result
def __copy__(self):
return self.copy()
class Session(ModificationTrackingDict):
"""Subclass of a dict that keeps track of direct object changes. Changes
in mutable structures are not tracked, for those you have to set
`modified` to `True` by hand.
"""
__slots__ = ModificationTrackingDict.__slots__ + ('sid', 'new')
def __init__(self, data, sid, new=False):
ModificationTrackingDict.__init__(self, data)
self.sid = sid
self.new = new
def __repr__(self):
return '<%s %s%s>' % (
self.__class__.__name__,
dict.__repr__(self),
self.should_save and '*' or ''
)
@property
def should_save(self):
"""True if the session should be saved.
.. versionchanged:: 0.6
By default the session is now only saved if the session is
modified, not if it is new like it was before.
"""
return self.modified
class SessionStore(object):
"""Baseclass for all session stores. The Werkzeug contrib module does not
implement any useful stores besides the filesystem store, application
developers are encouraged to create their own stores.
:param session_class: The session class to use. Defaults to
:class:`Session`.
"""
def __init__(self, session_class=None):
if session_class is None:
session_class = Session
self.session_class = session_class
def is_valid_key(self, key):
"""Check if a key has the correct format."""
return _sha1_re.match(key) is not None
def generate_key(self, salt=None):
"""Simple function that generates a new session key."""
return generate_key(salt)
def new(self):
"""Generate a new session."""
return self.session_class({}, self.generate_key(), True)
def save(self, session):
"""Save a session."""
def save_if_modified(self, session):
"""Save if a session class wants an update."""
if session.should_save:
self.save(session)
def delete(self, session):
"""Delete a session."""
def get(self, sid):
"""Get a session for this sid or a new session object. This method
has to check if the session key is valid and create a new session if
that wasn't the case.
"""
return self.session_class({}, sid, True)
#: used for temporary files by the filesystem session store
_fs_transaction_suffix = '.__wz_sess'
class FilesystemSessionStore(SessionStore):
"""Simple example session store that saves sessions on the filesystem.
This store works best on POSIX systems and Windows Vista / Windows
Server 2008 and newer.
.. versionchanged:: 0.6
`renew_missing` was added. Previously this was considered `True`,
now the default changed to `False` and it can be explicitly
deactivated.
:param path: the path to the folder used for storing the sessions.
If not provided the default temporary directory is used.
:param filename_template: a string template used to give the session
a filename. ``%s`` is replaced with the
session id.
:param session_class: The session class to use. Defaults to
:class:`Session`.
:param renew_missing: set to `True` if you want the store to
give the user a new sid if the session was
not yet saved.
"""
def __init__(self, path=None, filename_template='werkzeug_%s.sess',
session_class=None, renew_missing=False, mode=0o644):
SessionStore.__init__(self, session_class)
if path is None:
path = tempfile.gettempdir()
self.path = path
if isinstance(filename_template, text_type) and PY2:
filename_template = filename_template.encode(
sys.getfilesystemencoding() or 'utf-8')
assert not filename_template.endswith(_fs_transaction_suffix), \
'filename templates may not end with %s' % _fs_transaction_suffix
self.filename_template = filename_template
self.renew_missing = renew_missing
self.mode = mode
def get_session_filename(self, sid):
# out of the box, this should be a strict ASCII subset but
# you might reconfigure the session object to have a more
# arbitrary string.
if isinstance(sid, text_type) and PY2:
sid = sid.encode(sys.getfilesystemencoding() or 'utf-8')
return path.join(self.path, self.filename_template % sid)
def save(self, session):
fn = self.get_session_filename(session.sid)
fd, tmp = tempfile.mkstemp(suffix=_fs_transaction_suffix,
dir=self.path)
f = os.fdopen(fd, 'wb')
try:
dump(dict(session), f, HIGHEST_PROTOCOL)
finally:
f.close()
try:
rename(tmp, fn)
os.chmod(fn, self.mode)
except (IOError, OSError):
pass
def delete(self, session):
fn = self.get_session_filename(session.sid)
try:
os.unlink(fn)
except OSError:
pass
def get(self, sid):
if not self.is_valid_key(sid):
return self.new()
try:
f = open(self.get_session_filename(sid), 'rb')
except IOError:
if self.renew_missing:
return self.new()
data = {}
else:
try:
try:
data = load(f)
except Exception:
data = {}
finally:
f.close()
return self.session_class(data, sid, False)
def list(self):
"""Lists all sessions in the store.
.. versionadded:: 0.6
"""
before, after = self.filename_template.split('%s', 1)
filename_re = re.compile(r'%s(.{5,})%s$' % (re.escape(before),
re.escape(after)))
result = []
for filename in os.listdir(self.path):
#: this is a session that is still being saved.
if filename.endswith(_fs_transaction_suffix):
continue
match = filename_re.match(filename)
if match is not None:
result.append(match.group(1))
return result
class SessionMiddleware(object):
"""A simple middleware that puts the session object of a store provided
into the WSGI environ. It automatically sets cookies and restores
sessions.
However a middleware is not the preferred solution because it won't be as
fast as sessions managed by the application itself and will put a key into
the WSGI environment only relevant for the application which is against
the concept of WSGI.
The cookie parameters are the same as for the :func:`~dump_cookie`
function just prefixed with ``cookie_``. Additionally `max_age` is
called `cookie_age` and not `cookie_max_age` because of backwards
compatibility.
"""
def __init__(self, app, store, cookie_name='session_id',
cookie_age=None, cookie_expires=None, cookie_path='/',
cookie_domain=None, cookie_secure=None,
cookie_httponly=False, environ_key='werkzeug.session'):
self.app = app
self.store = store
self.cookie_name = cookie_name
self.cookie_age = cookie_age
self.cookie_expires = cookie_expires
self.cookie_path = cookie_path
self.cookie_domain = cookie_domain
self.cookie_secure = cookie_secure
self.cookie_httponly = cookie_httponly
self.environ_key = environ_key
def __call__(self, environ, start_response):
cookie = parse_cookie(environ.get('HTTP_COOKIE', ''))
sid = cookie.get(self.cookie_name, None)
if sid is None:
session = self.store.new()
else:
session = self.store.get(sid)
environ[self.environ_key] = session
def injecting_start_response(status, headers, exc_info=None):
if session.should_save:
self.store.save(session)
headers.append(('Set-Cookie', dump_cookie(self.cookie_name,
session.sid, self.cookie_age,
self.cookie_expires, self.cookie_path,
self.cookie_domain, self.cookie_secure,
self.cookie_httponly)))
return start_response(status, headers, exc_info)
return ClosingIterator(self.app(environ, injecting_start_response),
lambda: self.store.save_if_modified(session))
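# --- Illustrative sketch, not part of werkzeug ---
# The SessionStore docstring above encourages applications to implement their
# own stores; the minimal in-memory store below shows which methods a custom
# store typically overrides (save, delete and get). It is only an example and
# loses all data when the process exits.
class _ExampleMemorySessionStore(SessionStore):
    """A dict-backed session store, for illustration only."""

    def __init__(self, session_class=None):
        SessionStore.__init__(self, session_class)
        self._sessions = {}

    def save(self, session):
        self._sessions[session.sid] = dict(session)

    def delete(self, session):
        self._sessions.pop(session.sid, None)

    def get(self, sid):
        if not self.is_valid_key(sid) or sid not in self._sessions:
            return self.new()
        return self.session_class(self._sessions[sid], sid, False)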
|
openatx/uiautomator2
|
refs/heads/master
|
uiautomator2/widget.py
|
1
|
# coding: utf-8
#
import logging
import re
import time
from collections import defaultdict, namedtuple
from functools import partial
from pprint import pprint
from typing import Union
import requests
from logzero import logger, setup_logger
from lxml import etree
import uiautomator2 as u2
import uiautomator2.image as uim
from uiautomator2.image import compare_ssim, draw_point, imread
def xml2nodes(xml_content: Union[str, bytes]):
if isinstance(xml_content, str):
xml_content = xml_content.encode("utf-8")
root = etree.fromstring(xml_content)
nodes = []
for _, n in etree.iterwalk(root):
attrib = dict(n.attrib)
if "bounds" in attrib:
bounds = re.findall(r"(\d+)", attrib.pop("bounds"))
if len(bounds) != 4:
continue
lx, ly, rx, ry = map(int, bounds)
attrib['size'] = (rx - lx, ry - ly)
attrib.pop("index", None)
ok = False
for attrname in ("text", "resource-id", "content-desc"):
if attrname in attrib:
ok = True
break
if ok:
items = []
for k, v in sorted(attrib.items()):
items.append(k + ":" + str(v))
nodes.append('|'.join(items))
return nodes
def hierarchy_sim(xml1: str, xml2: str):
ns1 = xml2nodes(xml1)
ns2 = xml2nodes(xml2)
from collections import Counter
c1 = Counter(ns1)
c2 = Counter(ns2)
same_count = sum(
[min(c1[k], c2[k]) for k in set(c1.keys()).intersection(c2.keys())])
logger.debug("Same count: %d ns1: %d ns2: %d", same_count, len(ns1), len(ns2))
return same_count / (len(ns1) + len(ns2)) * 2
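# Illustrative note (added comment, not from the original author): the value
# returned above is effectively the Dice coefficient over the multisets of
# node signatures, e.g. two dumps sharing 8 signatures out of 10 each give
# 2 * 8 / (10 + 10) = 0.8.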
def read_file_content(filename: str) -> bytes:
with open(filename, "rb") as f:
return f.read()
def safe_xmlstr(s):
return s.replace("$", "-")
def frozendict(d: dict):
items = []
for k, v in sorted(d.items()):
items.append(k + ":" + str(v))
return '|'.join(items)
CompareResult = namedtuple("CompareResult", ["score", "detail"])
Point = namedtuple("Point", ['x', 'y'])
class Widget(object):
__domains = {
"lo": "http://localhost:17310",
}
def __init__(self, d: "u2.Device"):
self._d = d
self._widgets = {}
self._compare_results = {}
self.popups = []
self.logger = setup_logger()
self.logger.setLevel(logging.INFO)
@property
def wait_timeout(self):
return self._d.settings['wait_timeout']
def _get_widget(self, id: str):
if id in self._widgets:
return self._widgets[id]
widget_url = self._id2url(id)
r = requests.get(widget_url, timeout=3)
data = r.json()
self._widgets[id] = data
return data
def _id2url(self, id: str):
fields = re.sub("#.*", "", id).split(
"/") # remove chars after # and split host and id
assert len(fields) <= 2
if len(fields) == 1:
return f"http://localhost:17310/api/v1/widgets/{id}"
host = self.__domains.get(fields[0])
id = fields[1] # ignore the third part
if not re.match("^https?://", host):
host = "http://" + host
return f"{host}/api/v1/widgets/{id}"
def _eq(self, precision: float, a, b):
return abs(a - b) < precision
def _percent_equal(self, precision: float, a, b, asize, bsize):
return abs(a / min(asize) - b / min(bsize)) < precision
def _bounds2rect(self, bounds: str):
"""
Returns:
tuple: (lx, ly, width, height)
"""
if not bounds:
return 0, 0, 0, 0
lx, ly, rx, ry = map(int, re.findall(r"\d+", bounds))
return (lx, ly, rx - lx, ry - ly)
def _compare_node(self, node_a, node_b, size_a, size_b) -> float:
"""
Args:
node_a, node_b: etree.Element
size_a, size_b: tuple size
Returns:
CompareResult
"""
result_key = (node_a, node_b)
if result_key in self._compare_results:
return self._compare_results[result_key]
scores = defaultdict(dict)
# max 1
if node_a.tag == node_b.tag:
scores['class'] = 1
# max 3
for key in ('text', 'resource-id', 'content-desc'):
if node_a.attrib.get(key) == node_b.attrib.get(key):
scores[key] = 1 if node_a.attrib.get(key) else 0.1
# bounds = node_a.attrib.get("bounds")
# pprint(list(map(int, re.findall(r"\d+", bounds))))
ax, ay, aw, ah = self._bounds2rect(node_a.attrib.get("bounds"))
bx, by, bw, bh = self._bounds2rect(node_b.attrib.get("bounds"))
# max 2
peq = partial(self._percent_equal, 1 / 20, asize=size_a, bsize=size_b)
if peq(ax, bx) and peq(ay, by):
scores['left_top'] = 1
if peq(aw, bw) and peq(ah, bh):
scores['size'] = 1
score = round(sum(scores.values()), 1)
result = self._compare_results[result_key] = CompareResult(
score, scores)
return result
def node2string(self, node: etree.Element):
return node.tag + ":" + '|'.join([
node.attrib.get(key, "")
for key in ["text", "resource-id", "content-desc"]
])
def hybird_compare_node(self, node_a, node_b, size_a, size_b):
"""
Returns:
(scores, results)
Return example:
[3.0, 3.2], [CompareResult(score=3.0), CompareResult(score=3.2)]
"""
cmp_node = partial(self._compare_node, size_a=size_a, size_b=size_b)
results = []
results.append(cmp_node(node_a, node_b))
results.append(cmp_node(node_a.getparent(), node_b.getparent()))
a_children = node_a.getparent().getchildren()
b_children = node_b.getparent().getchildren()
if len(a_children) != len(b_children):
return results
children_result = []
a_children.remove(node_a)
b_children.remove(node_b)
for i in range(len(a_children)):
children_result.append(cmp_node(a_children[i], b_children[i]))
results.append(children_result)
return results
def _hybird_result_to_score(self, obj: Union[list, CompareResult]):
"""
Convert hybird_compare_node returns to score
"""
if isinstance(obj, CompareResult):
return obj.score
ret = []
for item in obj:
ret.append(self._hybird_result_to_score(item))
return ret
def replace_etree_node_to_class(self, root: etree.ElementTree):
for node in root.xpath("//node"):
node.tag = safe_xmlstr(node.attrib.pop("class", "") or "node")
return root
def compare_hierarchy(self, node, root, node_wsize, root_wsize):
results = {}
for node2 in root.xpath("/hierarchy//*"):
result = self.hybird_compare_node(node, node2, node_wsize, root_wsize)
results[node2] = result #score
return results
def etree_fromstring(self, s: str):
root = etree.fromstring(s.encode('utf-8'))
return self.replace_etree_node_to_class(root)
def node_center_point(self, node) -> Point:
lx, ly, rx, ry = map(int, re.findall(r"\d+",
node.attrib.get("bounds")))
return Point((lx + rx) // 2, (ly + ry) // 2)
def match(self, widget: dict, hierarchy=None, window_size: tuple = None):
"""
Args:
widget: widget data (dict), as returned by _get_widget
hierarchy (optional): current page hierarchy
window_size (tuple): width and height
Returns:
None or MatchResult(point, score, detail, xpath, node, next_result)
"""
window_size = window_size or self._d.window_size()
hierarchy = hierarchy or self._d.dump_hierarchy()
w = widget.copy()
widget_root = self.etree_fromstring(w['hierarchy'])
widget_node = widget_root.xpath(w['xpath'])[0]
# score every node in the target hierarchy against the widget node
target_root = self.etree_fromstring(hierarchy)
results = self.compare_hierarchy(widget_node, target_root, w['window_size'], window_size) # yapf: disable
# reshape the compare results into plain score lists
scores = {}
for node, result in results.items():
scores[node] = self._hybird_result_to_score(result) # score eg: [3.2, 2.2, [1.0, 1.2]]
# sort nodes by score (best match first)
nodes = list(scores.keys())
nodes.sort(key=lambda n: scores[n], reverse=True)
possible_nodes = nodes[:10]
# compare image
# screenshot = self._d.screenshot()
# for node in possible_nodes:
# bounds = node.attrib.get("bounds")
# lx, ly, rx, ry = bounds = list(map(int, re.findall(r"\d+", bounds)))
# w, h = rx - lx, ry - ly
# crop_image = screenshot.crop(bounds)
# template = imread(w['target_image']['url'])
# try:
# score = compare_ssim(template, crop_image)
# scores[node][0] += score
# except ValueError:
# pass
# nodes.sort(key=lambda n: scores[n], reverse=True)
first, second = nodes[:2]
MatchResult = namedtuple(
"MatchResult",
["point", "score", "detail", "xpath", "node", "next_result"])
def get_result(node, next_result=None):
point = self.node_center_point(node)
xpath = node.getroottree().getpath(node)
return MatchResult(point, scores[node], results[node], xpath,
node, next_result)
return get_result(first, get_result(second))
def exists(self, id: str) -> bool:
pass
def update_widget(self, id, hierarchy, xpath):
url = self._id2url(id)
r = requests.put(url, json={"hierarchy": hierarchy, "xpath": xpath})
print(r.json())
def wait(self, id: str, timeout=None):
"""
Args:
timeout (float): seconds to wait
Returns:
None or Result
"""
timeout = timeout or self.wait_timeout
widget = self._get_widget(id)  # fetch the widget (node) info from the server
begin_time = time.time()
deadline = time.time() + timeout
while time.time() < deadline:
hierarchy = self._d.dump_hierarchy()
hsim = hierarchy_sim(hierarchy, widget['hierarchy'])
app = self._d.app_current()
is_same_activity = widget['activity'] == app['activity']
if not is_same_activity:
print("activity different:", "got", app['activity'], 'expect', widget['activity'])
print("hierarchy: %.1f%%" % hsim)
print("----------------------")
window_size = self._d.window_size()
page_ok = False
if is_same_activity:
if hsim > 0.7:
page_ok = True
if time.time() - begin_time > 10.0 and hsim > 0.6:
page_ok = True
if page_ok:
result = self.match(widget, hierarchy, window_size)
if result.score[0] < 2:
time.sleep(0.5)
continue
if hsim < 0.8:
self.update_widget(id, hierarchy, result.xpath)
return result
time.sleep(1.0)
def click(self, id: str, debug: bool = False, timeout=10):
print("Click", id)
result = self.wait(id, timeout=timeout)
if result is None:
raise RuntimeError("target not found")
x, y = result.point
if debug:
show_click_position(self._d, Point(x, y))
self._d.click(x, y)
# return
# while True:
# hierarchy = self._d.dump_hierarchy()
# hsim = hierarchy_sim(hierarchy, widget['hierarchy'])
# app = self._d.app_current()
# is_same_activity = widget['activity'] == app['activity']
# print("activity same:", is_same_activity)
# print("hierarchy:", hsim)
# window_size = self._d.window_size()
# if is_same_activity and hsim > 0.8:
# result = self.match(widget, hierarchy, window_size)
# pprint(result.score)
# pprint(result.second.score)
# x, y = result.point
# self._d.click(x, y)
# return
# time.sleep(0.1)
# return
def show_click_position(d: u2.Device, point: Point):
# # pprint(result.widget)
# # pprint(dict(result.node.attrib))
im = draw_point(d.screenshot(), point.x, point.y)
im.show()
def main():
d = u2.connect("30.10.93.26")
# d.widget.click("00013#推荐歌单第一首")
d.widget.exists("lo/00019#播放全部")
return
d.widget.click("00019#播放全部")
# d.widget.click("00018#播放暂停")
d.widget.click("00018#播放暂停")
d.widget.click("00021#转到上一层级")
return
d.widget.click("每日推荐")
widget_id = "00009#上新"
widget_id = "00011#每日推荐"
widget_id = "00014#立减20"
result = d.widget.match(widget_id)
# e = Widget(d)
# result = e.match("00003")
# print(result)
# # e.match("00002")
# # result = e.match("00007")
wsize = d.window_size()
from lxml import etree
result = d.widget.match(widget_id)
pprint(result.node.attrib)
pprint(result.score)
pprint(result.detail)
show_click_position(d, result.point)
return
root = etree.parse(
'/Users/shengxiang/Projects/weditor/widgets/00010/hierarchy.xml')
nodes = root.xpath('/hierarchy/node/node/node/node')
a, b = nodes[0], nodes[1]
result = d.widget.hybird_compare_node(a, b, wsize, wsize)
pprint(result)
score = d.widget._hybird_result_to_score(result)
pprint(score)
return
score = d.widget._compare_node(a, b, wsize, wsize)
print(score)
a, b = nodes[0].getparent(), nodes[1].getparent()
score = d.widget._compare_node(a, b, wsize, wsize)
pprint(score)
return
print("score:", result.score)
x, y = result.point
# # pprint(result.widget)
# # pprint(dict(result.node.attrib))
pprint(result.detail)
im = draw_point(d.screenshot(), x, y)
im.show()
if __name__ == "__main__":
main()
|
sriks/titanium_mobile
|
refs/heads/master
|
support/android/androidsdk.py
|
32
|
#!/usr/bin/python
#
# An autodetection utility for the Android SDK
#
import os, sys, platform, glob, subprocess, types, re
DEFAULT_API_LEVEL = 21
android_api_levels = {
3: 'android-1.5',
4: 'android-1.6',
5: 'android-2.0',
6: 'android-2.0.1',
7: 'android-2.1',
8: 'android-2.2',
9: 'android-2.3',
10: 'android-2.3.3',
11: 'android-3.0'
}
class Device:
def __init__(self, name, port=-1, emulator=False, offline=False):
self.name = name
self.port = port
self.emulator = emulator
self.offline = offline
def get_name(self):
return self.name
def get_port(self):
return self.port
def is_emulator(self):
return self.emulator
def is_device(self):
return not self.emulator
def is_offline(self):
return self.offline
class AndroidSDK:
def __init__(self, android_sdk, api_level=DEFAULT_API_LEVEL):
self.android_sdk = self.find_sdk(android_sdk)
if self.android_sdk is None:
raise Exception('No Android SDK directory found')
self.set_api_level(api_level)
def set_api_level(self, level):
self.api_level = level
self.find_platform_dir()
self.find_google_apis_dir()
def try_best_match_api_level(self, level):
# Don't go backwards
if level <= self.api_level:
return
orig_level = self.api_level
orig_platform_dir = self.platform_dir
orig_google_apis_dir = self.google_apis_dir
check_level = level
while check_level > orig_level:
self.find_platform_dir(check_level, False)
if self.platform_dir:
self.api_level = check_level
print "[INFO] Targeting Android SDK version %s" % self.api_level
break
check_level -= 1
if not self.platform_dir:
# Couldn't match. Set it back and return.
self.platform_dir = orig_platform_dir
return
# Now give the Google APIs a chance to match.
check_level = level
while check_level > orig_level:
self.find_google_apis_dir(check_level)
if self.google_apis_dir:
break
check_level -= 1
if not self.google_apis_dir:
# Couldn't match, so set it back to what it was.
self.google_apis_dir = orig_google_apis_dir
def find_sdk(self, supplied):
if platform.system() == 'Windows':
default_dirs = ['C:\\android-sdk', 'C:\\android', 'C:\\Program Files\\android-sdk', 'C:\\Program Files\\android']
else:
default_dirs = ['/opt/android', '/opt/android-sdk', '/usr/android', '/usr/android-sdk']
if 'ANDROID_SDK' in os.environ:
return os.environ['ANDROID_SDK']
if supplied is not None:
return supplied
for default_dir in default_dirs:
if os.path.exists(default_dir):
return default_dir
path = os.environ['PATH']
for dir in path.split(os.pathsep):
if os.path.exists(os.path.join(dir, 'android')) \
or os.path.exists(os.path.join(dir, 'android.exe')):
return dir
return None
def find_dir(self, version, prefix):
dirs = glob.glob(os.path.join(self.android_sdk, prefix+str(version)+"*"))
if len(dirs) > 0:
#grab the first.. good enough?
return dirs[0]
return None
def find_platform_dir(self, api_level=-1, raise_error=True):
if api_level == -1:
api_level = self.api_level
platform_dir = self.find_dir(api_level, os.path.join('platforms', 'android-'))
if platform_dir is None:
old_style_dir = os.path.join(self.android_sdk, 'platforms', android_api_levels[api_level])
if os.path.exists(old_style_dir):
platform_dir = old_style_dir
if platform_dir is None and raise_error:
raise Exception("No \"%s\" or \"%s\" in the Android SDK" % ('android-%s' % api_level, android_api_levels[api_level]))
self.platform_dir = platform_dir
def find_google_apis_dir(self, api_level=-1):
if api_level == -1:
api_level = self.api_level
if 'GOOGLE_APIS' in os.environ:
self.google_apis_dir = os.environ['GOOGLE_APIS']
return self.google_apis_dir
self.google_apis_dir = self.find_dir(api_level, os.path.join('add-ons', 'google_apis-'))
if self.google_apis_dir is None:
self.google_apis_dir = self.find_dir(api_level, os.path.join('add-ons', 'addon?google?apis?google*'))
def get_maps_jar(self):
if self.google_apis_dir is not None:
return os.path.join(self.google_apis_dir, "libs", "maps.jar")
return None
def get_android_jar(self):
if self.platform_dir is not None:
return os.path.join(self.platform_dir, "android.jar")
return None
def get_android_sdk(self):
return self.android_sdk
def get_platform_dir(self):
return self.platform_dir
def get_google_apis_dir(self):
return self.google_apis_dir
def get_platform_tools_dir(self):
if self.platform_dir is not None:
platform_tools = os.path.join(self.platform_dir, 'tools')
if os.path.exists(platform_tools):
return platform_tools
return None
def get_sdk_platform_tools_dir(self):
if self.android_sdk is not None:
sdk_platform_tools = os.path.join(self.android_sdk, 'platform-tools')
if os.path.exists(sdk_platform_tools):
return sdk_platform_tools
return None
def get_build_tools_dir(self):
if self.android_sdk is not None:
build_tools = os.path.join(self.android_sdk, 'build-tools')
if os.path.exists(build_tools):
return build_tools
return None
def get_api_level(self):
return self.api_level
def get_tool(self, topdir, tool):
if topdir is not None:
tool_path = os.path.join(topdir, tool)
if platform.system() == "Windows":
if os.path.exists(tool_path+".exe"): return tool_path+".exe"
elif os.path.exists(tool_path+".bat"): return tool_path+".bat"
else: return None
if os.path.exists(tool_path):
return tool_path
return None
def get_sdk_tool(self, tool):
return self.get_tool(os.path.join(self.android_sdk, 'tools'), tool)
def get_platform_tool(self, tool):
platform_tools_dir = self.get_platform_tools_dir()
sdk_platform_tools_dir = self.get_sdk_platform_tools_dir()
build_tools_dir = self.get_build_tools_dir()
tool_path = None
if platform_tools_dir is not None:
tool_path = self.get_tool(platform_tools_dir, tool)
if tool_path is None and sdk_platform_tools_dir is not None:
tool_path = self.get_tool(sdk_platform_tools_dir, tool)
if tool_path is None or not os.path.exists(tool_path):
tool_path = self.get_sdk_tool(tool)
# Many tools were moved to build-tools/17.0.0 (or something equivalent in windows) in sdk tools r22
if tool_path is None and build_tools_dir is not None:
# Here, we list all the directories in build-tools and check inside
# each one for the tool we are looking for (there can be future versions besides 17.0.0).
for dirname in os.listdir(build_tools_dir):
build_tools_version_dir = os.path.join(build_tools_dir, dirname)
tool_path = self.get_tool(build_tools_version_dir, tool)
if tool_path is not None:
break
return tool_path
def get_dx(self):
return self.get_platform_tool('dx')
def get_dx_jar(self):
platform_tools_dir = self.get_platform_tools_dir()
sdk_platform_tools_dir = self.get_sdk_platform_tools_dir()
build_tools_dir = self.get_build_tools_dir()
dx_jar_path = None
if platform_tools_dir is not None:
dx_jar_path = self.get_lib_dx_jar(platform_tools_dir)
if sdk_platform_tools_dir is not None and dx_jar_path is None:
dx_jar_path = self.get_lib_dx_jar(sdk_platform_tools_dir)
if build_tools_dir is not None and dx_jar_path is None:
for dirname in os.listdir(build_tools_dir):
build_tools_version_dir = os.path.join(build_tools_dir, dirname)
dx_jar_path = self.get_lib_dx_jar(build_tools_version_dir)
if dx_jar_path is not None:
break
return dx_jar_path
def get_lib_dx_jar(self, topdir):
if topdir is not None:
lib_dx_jar_path = os.path.join(topdir, 'lib', 'dx.jar')
if os.path.exists(lib_dx_jar_path):
return lib_dx_jar_path
return None
def get_dexdump(self):
return self.get_platform_tool('dexdump')
def get_zipalign(self):
return self.get_sdk_tool('zipalign')
def get_aapt(self):
# for aapt (and maybe eventually for others) we
# want to favor platform-tools over android-x/tools
# because of new resource qualifiers for honeycomb
sdk_platform_tools_dir = self.get_sdk_platform_tools_dir()
if not sdk_platform_tools_dir is None and os.path.exists(os.path.join(sdk_platform_tools_dir, 'aapt')):
return os.path.join(sdk_platform_tools_dir, 'aapt')
return self.get_platform_tool('aapt')
def get_apkbuilder(self):
return self.get_sdk_tool('apkbuilder')
def get_android(self):
return self.get_sdk_tool('android')
def get_emulator(self):
return self.get_sdk_tool('emulator')
def get_adb(self):
return self.get_platform_tool('adb')
def get_mksdcard(self):
return self.get_sdk_tool('mksdcard')
def get_aidl(self):
return self.get_platform_tool('aidl')
def sdk_path(self, *path):
return os.path.join(self.android_sdk, *path)
def platform_path(self, *path):
return os.path.join(self.platform_dir, *path)
def google_apis_path(self, *path):
return os.path.join(self.google_apis_dir, *path)
def list_devices(self):
adb = self.get_adb()
(out, err) = subprocess.Popen([adb, 'devices'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
if type(err) != types.NoneType and len(err) > 0:
raise Exception(err)
devices = []
for line in out.splitlines():
line = line.strip()
if line.startswith("List of devices"): continue
elif line.startswith("emulator-"):
(name, status) = line.split()
port = int(name[name.index("-")+1:])
offline = False
if status == "offline":
offline = True
devices.append(Device(name, port, True, offline))
elif "device" in line:
name = line.split()[0]
devices.append(Device(name))
return devices
def run_adb(self, args, device_args=None):
adb_args = [self.get_adb()]
if device_args != None:
adb_args.extend(device_args)
adb_args.extend(args)
(out, err) = subprocess.Popen(adb_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
if type(err) != types.NoneType and len(err) > 0:
raise Exception(err)
return out
def list_processes(self, adb_args=None):
out = self.run_adb(['shell', 'ps'], adb_args)
processes = []
for line in out.splitlines():
line = line.strip()
tokens = re.split(r"\s+", line)
if len(tokens) < 2: continue
if tokens[0] == 'USER': continue
process = {"pid": tokens[1], "name": tokens[len(tokens)-1]}
processes.append(process)
return processes
def jdwp_kill(self, app_id, adb_args=None, forward_port=51111):
import socket, struct, uuid
pid = None
for process in self.list_processes(adb_args):
if process['name'] == app_id:
pid = process['pid']
break
if pid == None:
raise Exception("No processes running with the name: %s" % app_id)
out = self.run_adb(['jdwp'], adb_args)
found_pid = False
for line in out.splitlines():
if line == pid:
found_pid = True
break
if not found_pid:
raise Exception("The application %s (PID %s) is not debuggable, and cannot be killed via JDWP" % (app_id, pid))
self.run_adb(['forward', 'tcp:%d' % forward_port, 'jdwp:%s' % pid], adb_args)
jdwp_socket = socket.create_connection(('', forward_port))
jdwp_socket.settimeout(5.0)
jdwp_socket.send('JDWP-Handshake')
try:
handshake = jdwp_socket.recv(14)
except:
jdwp_socket.close()
raise Exception('Timeout when waiting for handshake, make sure no other DDMS debuggers are running (i.e. Eclipse)')
if handshake != 'JDWP-Handshake':
jdwp_socket.close()
raise Exception('Incorrect handshake, make sure the process is still running')
# Taken from Android ddmlib
DDMS_CMD = 0x01
DDMS_CMD_SET = 0xc7
# just a random 32 bit integer should be good enough
packetId = uuid.uuid4().time_low
packetLen = 23
# the string EXIT bitshifted into an integer
EXIT = 1163413844
EXIT_LEN = 4
exitCode = 1
packet = struct.pack('!2I3B3I', packetLen, packetId, 0, DDMS_CMD_SET, DDMS_CMD, EXIT, EXIT_LEN, exitCode)
jdwp_socket.send(packet)
jdwp_socket.close()
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Usage: %s ANDROID_SDK [API]" % sys.argv[0]
print " ANDROID_SDK is the default path to the Android SDK. Use '-' if there is no default path"
print " API (optional) is an Android API version (i.e. 4, 5, 6, 7, 8). The default is 7."
print ""
print "Prints the SDK directory, Android Platform directory, and Google APIs directory"
sys.exit(1)
sdk_path = sys.argv[1]
if sdk_path == '-':
sdk_path = None
api_level = DEFAULT_API_LEVEL
if len(sys.argv) > 2:
api_level = int(sys.argv[2])
try:
sdk = AndroidSDK(sdk_path, api_level)
print "ANDROID_SDK=%s" % sdk.get_android_sdk()
print "ANDROID_API_LEVEL=%d" % sdk.get_api_level()
print "ANDROID_PLATFORM=%s" % sdk.get_platform_dir()
print "GOOGLE_APIS=%s" % sdk.get_google_apis_dir()
except Exception, e:
print >>sys.stderr, e
|
Zentyal/mss
|
refs/heads/samba4-poc
|
modules/davical/__init__.py
|
3
|
# -*- coding: UTF-8 -*-
#
# (c) 2012 Mandriva, http://www.mandriva.com/
#
# This file is part of Mandriva Server Setup
#
# MSS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MSS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MSS; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
def get_config_info():
return ('setup-davical.sh', [])
|
kylemsguy/FPGA-Litecoin-Miner
|
refs/heads/master
|
ICARUS-LX150/MiningSoftware/pyserial-2.6/test/test_iolib.py
|
6
|
#! /usr/bin/env python
#
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# see __init__.py
#
# (C) 2001-2009 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
"""\
Some tests for the serial module.
Part of pyserial (http://pyserial.sf.net) (C)2001-2009 cliechti@gmx.net
Intended to be run on different platforms, to ensure portability of
the code.
This modules contains test for the interaction between Serial and the io
library. This only works on Python 2.6+ that introduced the io library.
For all these tests a simple hardware is required.
Loopback HW adapter:
Shortcut these pin pairs:
TX <-> RX
RTS <-> CTS
DTR <-> DSR
On a 9 pole DSUB these are the pins (2-3) (4-6) (7-8)
"""
import unittest
import sys
if __name__ == '__main__' and sys.version_info < (2, 6):
sys.stderr.write("""\
==============================================================================
WARNING: this test is intended for Python 2.6 and newer where the io library
is available. This seems to be an older version of Python running.
Continuing anyway...
==============================================================================
""")
import io
import serial
# trick to make this test run under 2.6 and 3.x without modification.
# problem is, the io library on 2.6 does NOT accept type 'str' and 3.x doesn't
# like u'nicode' strings with the prefix and does not provide a unicode
# function ('str' is now what 'unicode' used to be)
if sys.version_info >= (3, 0):
def unicode(x): return x
# on which port should the tests be performed:
PORT = 0
class Test_SerialAndIO(unittest.TestCase):
def setUp(self):
self.s = serial.serial_for_url(PORT, timeout=1)
self.io = io.TextIOWrapper(io.BufferedRWPair(self.s, self.s))
def tearDown(self):
self.s.close()
def test_hello_raw(self):
self.io.write(unicode("hello\n"))
self.io.flush() # it is buffering. required to get the data out
hello = self.io.readline()
self.failUnlessEqual(hello, unicode("hello\n"))
if __name__ == '__main__':
import sys
sys.stdout.write(__doc__)
if len(sys.argv) > 1:
PORT = sys.argv[1]
sys.stdout.write("Testing port: %r\n" % PORT)
sys.argv[1:] = ['-v']
# When this module is executed from the command-line, it runs all its tests
unittest.main()
|
NickelMedia/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/port/factory.py
|
118
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory method to retrieve the appropriate port implementation."""
import fnmatch
import optparse
import re
from webkitpy.port import builders
def platform_options(use_globs=False):
return [
optparse.make_option('--platform', action='store',
help=('Glob-style list of platform/ports to use (e.g., "mac*")' if use_globs else 'Platform to use (e.g., "mac-lion")')),
optparse.make_option('--efl', action='store_const', dest='platform',
const=('efl*' if use_globs else 'efl'),
help=('Alias for --platform=efl*' if use_globs else 'Alias for --platform=efl')),
optparse.make_option('--gtk', action='store_const', dest='platform',
const=('gtk*' if use_globs else 'gtk'),
help=('Alias for --platform=gtk*' if use_globs else 'Alias for --platform=gtk')),
optparse.make_option('--qt', action='store_const', dest="platform",
const=('qt*' if use_globs else 'qt'),
help=('Alias for --platform=qt*' if use_globs else 'Alias for --platform=qt')),
]
def configuration_options():
return [
optparse.make_option("-t", "--target", dest="configuration", help="(DEPRECATED)"),
# FIXME: --help should display which configuration is default.
optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
help='Set the configuration to Debug'),
optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
help='Set the configuration to Release'),
optparse.make_option('--32-bit', action='store_const', const='x86', default=None, dest="architecture",
help='use 32-bit binaries by default (x86 instead of x86_64)'),
]
def _builder_options(builder_name):
configuration = "Debug" if re.search(r"[d|D](ebu|b)g", builder_name) else "Release"
is_webkit2 = builder_name.find("WK2") != -1
builder_name = builder_name
return optparse.Values({'builder_name': builder_name, 'configuration': configuration, 'webkit_test_runner': is_webkit2})
class PortFactory(object):
PORT_CLASSES = (
'efl.EflPort',
'gtk.GtkPort',
'mac.MacPort',
'mock_drt.MockDRTPort',
'qt.QtPort',
'test.TestPort',
'win.WinPort',
)
def __init__(self, host):
self._host = host
def _default_port(self, options):
platform = self._host.platform
if platform.is_linux() or platform.is_freebsd():
return 'qt-linux'
elif platform.is_mac():
return 'mac'
elif platform.is_win():
return 'win'
raise NotImplementedError('unknown platform: %s' % platform)
def get(self, port_name=None, options=None, **kwargs):
"""Returns an object implementing the Port interface. If
port_name is None, this routine attempts to guess at the most
appropriate port on this platform."""
port_name = port_name or self._default_port(options)
for port_class in self.PORT_CLASSES:
module_name, class_name = port_class.rsplit('.', 1)
module = __import__(module_name, globals(), locals(), [], -1)
cls = module.__dict__[class_name]
if port_name.startswith(cls.port_name):
port_name = cls.determine_full_port_name(self._host, options, port_name)
return cls(self._host, port_name, options=options, **kwargs)
raise NotImplementedError('unsupported platform: "%s"' % port_name)
def all_port_names(self, platform=None):
"""Return a list of all valid, fully-specified, "real" port names.
This is the list of directories that are used as actual baseline_paths()
by real ports. This does not include any "fake" names like "test"
or "mock-mac", and it does not include any directories that are not.
If platform is not specified, we will glob-match all ports"""
platform = platform or '*'
return fnmatch.filter(builders.all_port_names(), platform)
def get_from_builder_name(self, builder_name):
port_name = builders.port_name_for_builder_name(builder_name)
assert port_name, "unrecognized builder name '%s'" % builder_name
return self.get(port_name, _builder_options(builder_name))
|
yoer/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/logging_tests/views.py
|
126
|
from __future__ import unicode_literals
from django.core.exceptions import SuspiciousOperation, DisallowedHost
def suspicious(request):
raise SuspiciousOperation('dubious')
def suspicious_spec(request):
raise DisallowedHost('dubious')
|
smalyshev/pywikibot-core
|
refs/heads/master
|
scripts/pagefromfile.py
|
3
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
Bot to upload pages from a file.
This bot takes its input from a file that contains a number of
pages to be put on the wiki. The pages should all have the same
begin and end text (which may not overlap).
By default the text should have the intended title of the page
as the first text in bold (that is, between ''' and '''),
you can modify this behavior with command line options.
The default is not to include the begin and
end text in the page, if you want to include that text, use
the -include option.
Specific arguments:
-begin:xxx Specify the text that marks the beginning of a page
-start:xxx (deprecated)
-end:xxx Specify the text that marks the end of a page
-file:xxx Give the filename we are getting our material from
(default: dict.txt)
-include The beginning and end markers should be included
in the page.
-titlestart:xxx Use xxx in place of ''' for identifying the
beginning of page title
-titleend:xxx Use xxx in place of ''' for identifying the
end of page title
-notitle do not include the title, including titlestart, and
titleend, in the page
-nocontent:xxx Skip the page if it already contains this text
(example: -nocontent:"{{infobox")
-noredirect Do not upload to redirect pages
(by default the bot also adds content to redirect pages)
-summary:xxx Use xxx as the edit summary for the upload - if
a page exists, standard messages are appended
after xxx for appending, prepending, or replacement
-autosummary Use MediaWikis autosummary when creating a new page,
overrides -summary in this case
-minor set minor edit flag on page edits
-showdiff show difference between page and page to upload; it forces
-always=False; default to False.
If the page to be uploaded already exists:
-safe do nothing (default)
-appendtop add the text to the top of it
-appendbottom add the text to the bottom of it
-force overwrite the existing page
It's possible to define a separator after the 'append' modes which is added
between the existing and new text. For example -appendtop:foo would add 'foo'
between the parts. The \n (two separate characters) is replaced by the newline
character.
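Example invocation (illustrative; assumes the default markers and a local
dict.txt, run through pywikibot's pwb.py wrapper):
    python pwb.py pagefromfile -file:dict.txt -summary:"Import pages" -appendbottom:\n
This reads entries delimited by {{-start-}} and {{-stop-}}, takes the first
bold text as the page title, and appends the new text (separated by a newline)
to the bottom of pages that already exist.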
"""
#
# (C) Andre Engels, 2004
# (C) Pywikibot team, 2005-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import codecs
import os
import re
from warnings import warn
import pywikibot
from pywikibot import config, Bot, i18n
from pywikibot.exceptions import ArgumentDeprecationWarning
class NoTitle(Exception):
"""No title found."""
def __init__(self, offset):
"""Constructor."""
self.offset = offset
class PageFromFileRobot(Bot):
"""
Responsible for writing pages to the wiki.
Titles and contents are given by a PageFromFileReader.
"""
def __init__(self, reader, **kwargs):
"""Constructor."""
self.availableOptions.update({
'always': True,
'force': False,
'append': None,
'summary': None,
'minor': False,
'autosummary': False,
'nocontent': '',
'redirect': True,
'showdiff': False,
})
super(PageFromFileRobot, self).__init__(**kwargs)
self.availableOptions.update(
{'always': False if self.getOption('showdiff') else True})
self.reader = reader
def run(self):
"""Start file processing and upload content."""
for title, contents in self.reader.run():
self.save(title, contents)
def save(self, title, contents):
"""Upload page content."""
mysite = pywikibot.Site()
page = pywikibot.Page(mysite, title)
self.current_page = page
if self.getOption('summary'):
comment = self.getOption('summary')
else:
comment = i18n.twtranslate(mysite, 'pagefromfile-msg')
comment_top = comment + " - " + i18n.twtranslate(
mysite, 'pagefromfile-msg_top')
comment_bottom = comment + " - " + i18n.twtranslate(
mysite, 'pagefromfile-msg_bottom')
comment_force = "%s *** %s ***" % (
comment, i18n.twtranslate(mysite, 'pagefromfile-msg_force'))
# Remove trailing newlines (cause troubles when creating redirects)
contents = re.sub('^[\r\n]*', '', contents)
if page.exists():
if not self.getOption('redirect') and page.isRedirectPage():
pywikibot.output(u"Page %s is redirect, skipping!" % title)
return
pagecontents = page.get(get_redirect=True)
nocontent = self.getOption('nocontent')
if nocontent and (
nocontent in pagecontents or
nocontent.lower() in pagecontents):
pywikibot.output('Page has %s so it is skipped' % nocontent)
return
if self.getOption('append'):
separator = self.getOption('append')[1]
if separator == r'\n':
separator = '\n'
if self.getOption('append')[0] == 'top':
above, below = contents, pagecontents
comment = comment_top
else:
above, below = pagecontents, contents
comment = comment_bottom
pywikibot.output('Page {0} already exists, appending on {1}!'.format(
title, self.getOption('append')[0]))
contents = above + separator + below
elif self.getOption('force'):
pywikibot.output(u"Page %s already exists, ***overwriting!"
% title)
comment = comment_force
else:
pywikibot.output(u"Page %s already exists, not adding!" % title)
return
else:
if self.getOption('autosummary'):
comment = ''
config.default_edit_summary = ''
self.userPut(page, page.text, contents,
summary=comment,
minor=self.getOption('minor'),
show_diff=self.getOption('showdiff'),
ignore_save_related_errors=True)
class PageFromFileReader(object):
"""
Responsible for reading the file.
The run() method yields a (title, contents) tuple for each found page.
"""
def __init__(self, filename, pageStartMarker, pageEndMarker,
titleStartMarker, titleEndMarker, include, notitle):
"""Constructor.
Check if self.file name exists. If not, ask for a new filename.
User can quit.
"""
self.filename = filename
self.pageStartMarker = pageStartMarker
self.pageEndMarker = pageEndMarker
self.titleStartMarker = titleStartMarker
self.titleEndMarker = titleEndMarker
self.include = include
self.notitle = notitle
def run(self):
"""Read file and yield page title and content."""
pywikibot.output('\n\nReading \'%s\'...' % self.filename)
try:
with codecs.open(self.filename, 'r',
encoding=config.textfile_encoding) as f:
text = f.read()
except IOError as err:
pywikibot.output(str(err))
raise IOError
position = 0
length = 0
while True:
try:
length, title, contents = self.findpage(text[position:])
except AttributeError:
if not length:
pywikibot.output(u'\nStart or end marker not found.')
else:
pywikibot.output(u'End of file.')
break
except NoTitle as err:
pywikibot.output(u'\nNo title found - skipping a page.')
position += err.offset
continue
position += length
yield title, contents
def findpage(self, text):
"""Find page to work on."""
pageR = re.compile(re.escape(self.pageStartMarker) + "(.*?)" +
re.escape(self.pageEndMarker), re.DOTALL)
titleR = re.compile(re.escape(self.titleStartMarker) + "(.*?)" +
re.escape(self.titleEndMarker))
location = pageR.search(text)
if self.include:
contents = location.group()
else:
contents = location.group(1)
try:
title = titleR.search(contents).group(1)
if self.notitle:
# Remove title (to allow creation of redirects)
contents = titleR.sub('', contents, count=1)
except AttributeError:
raise NoTitle(location.end())
else:
return location.end(), title, contents
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
# Adapt these to the file you are using. 'pageStartMarker' and
# 'pageEndMarker' are the beginning and end of each entry. Take text that
# should be included and does not occur elsewhere in the text.
# TODO: make config variables for these.
filename = "dict.txt"
pageStartMarker = "{{-start-}}"
pageEndMarker = "{{-stop-}}"
titleStartMarker = u"'''"
titleEndMarker = u"'''"
options = {}
include = False
notitle = False
for arg in pywikibot.handle_args(args):
if arg.startswith('-start:'):
pageStartMarker = arg[7:]
warn('-start param (text that marks the beginning of a page) has been '
'deprecated in favor of -begin; make sure to use the updated param.',
ArgumentDeprecationWarning)
elif arg.startswith('-begin:'):
pageStartMarker = arg[len('-begin:'):]
elif arg.startswith("-end:"):
pageEndMarker = arg[5:]
elif arg.startswith("-file:"):
filename = arg[6:]
elif arg == "-include":
include = True
elif arg.startswith('-appendbottom'):
options['append'] = ('bottom', arg[len('-appendbottom:'):])
elif arg.startswith('-appendtop'):
options['append'] = ('top', arg[len('-appendtop:'):])
elif arg == "-force":
options['force'] = True
elif arg == "-safe":
options['force'] = False
options['append'] = None
elif arg == "-noredirect":
options['redirect'] = False
elif arg == '-notitle':
notitle = True
elif arg == '-minor':
options['minor'] = True
elif arg.startswith('-nocontent:'):
options['nocontent'] = arg[11:]
elif arg.startswith("-titlestart:"):
titleStartMarker = arg[12:]
elif arg.startswith("-titleend:"):
titleEndMarker = arg[10:]
elif arg.startswith("-summary:"):
options['summary'] = arg[9:]
elif arg == '-autosummary':
options['autosummary'] = True
elif arg == '-showdiff':
options['showdiff'] = True
else:
pywikibot.output(u"Disregarding unknown argument %s." % arg)
failed_filename = False
while not os.path.isfile(filename):
pywikibot.output('\nFile \'%s\' does not exist. ' % filename)
_input = pywikibot.input(
'Please enter the file name [q to quit]:')
if _input == 'q':
failed_filename = True
break
else:
filename = _input
# show help text from the top of this file if reader failed
# or User quit.
if failed_filename:
pywikibot.bot.suggest_help(missing_parameters=['-file'])
return False
else:
reader = PageFromFileReader(filename, pageStartMarker, pageEndMarker,
titleStartMarker, titleEndMarker, include,
notitle)
bot = PageFromFileRobot(reader, **options)
bot.run()
if __name__ == "__main__":
main()
|
ktsitsikas/odemis
|
refs/heads/master
|
src/odemis/acq/drift/__init__.py
|
1
|
# -*- coding: utf-8 -*-
"""
Created on 27 Feb 2014
@author: Kimon Tsitsikas
Copyright © 2013-2014 Éric Piel & Kimon Tsitsikas, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License version 2 as published by the Free
Software Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
Odemis. If not, see http://www.gnu.org/licenses/.
"""
from __future__ import division
import itertools
import logging
import numpy
import threading
import math
from .calculation import CalculateDrift
from .dc_region import GuessAnchorRegion
MIN_RESOLUTION = (20, 20) # seems 10x10 sometimes works, but let's not tempt it
MAX_PIXELS = 128 ** 2 # px
class AnchoredEstimator(object):
"""
Drift estimator based on an "anchor" area. Periodically, a small region
(the anchor) is scanned. By comparing the images of the anchor area over
time, an estimation of the drift is computed.
To use, call .acquire() periodically (and preferably at specific places of
the global acquire, such as at the beginning of a line), and call .estimate()
to measure the drift.
"""
def __init__(self, scanner, detector, region, dwell_time):
"""
scanner (Emitter)
detector (Detector)
region (4 floats)
dwell_time (float)
"""
self._emitter = scanner
self._semd = detector
self._dwell_time = dwell_time
self.orig_drift = (0, 0) # in sem px
self.max_drift = (0, 0) # in sem px
self.raw = [] # first 2 and last 2 anchor areas acquired (in order)
self._acq_sem_complete = threading.Event()
# Calculate initial translation for anchor region acquisition
self._roi = region
center = ((self._roi[0] + self._roi[2]) / 2,
(self._roi[1] + self._roi[3]) / 2)
width = (self._roi[2] - self._roi[0], self._roi[3] - self._roi[1])
shape = self._emitter.shape
# translation is distance from center (situated at 0.5, 0.5), can be floats
self._trans = (shape[0] * (center[0] - 0.5),
shape[1] * (center[1] - 0.5))
# resolution is the maximum resolution at the scale in proportion of the width
# First, try the finest scale (=1)
self._res = (max(1, int(round(shape[0] * width[0] / 1))),
max(1, int(round(shape[1] * width[1] / 1))))
# Demand large enough anchor region for drift calculation
if self._res[0] < MIN_RESOLUTION[0] or self._res[1] < MIN_RESOLUTION[1]:
old_res = tuple(self._res)
self._res = tuple(max(a, b) for a, b in zip(self._res, MIN_RESOLUTION))
logging.warning("Anchor region too small %s, will be set to %s",
old_res, self._res)
# Adjust the scale to the anchor region so the image has at the
# maximum MAX_PIXELS. This way we guarantee that the pixels density of
# the anchor region is enough to calculate the drift and at the same
# time to avoid prolonged exposure times that extremely increase the
# acquisition time.
ratio = math.sqrt(numpy.prod(self._res) / MAX_PIXELS)
self._scale = scanner.scale.clip((ratio, ratio))
# adjust resolution based on the new scale
self._res = (max(MIN_RESOLUTION[0], int(round(shape[0] * width[0] / self._scale[0]))),
max(MIN_RESOLUTION[1], int(round(shape[1] * width[1] / self._scale[1]))))
logging.info("Anchor region defined with scale=%s, res=%s, trans=%s",
self._scale, self._res, self._trans)
self._safety_bounds = (0.99 * (shape[0] / 2), 0.99 * (shape[1] / 2))
self._min_bound = -self._safety_bounds[0] + (max(self._res[0],
self._res[1]) / 2)
self._max_bound = self._safety_bounds[1] - (max(self._res[0],
self._res[1]) / 2)
def acquire(self):
"""
Scan the anchor area
"""
# Save current SEM settings
cur_dwell_time = self._emitter.dwellTime.value
cur_scale = self._emitter.scale.value
cur_resolution = self._emitter.resolution.value
cur_trans = self._emitter.translation.value
try:
self._updateSEMSettings()
logging.debug("E-beam spot to anchor region: %s",
self._emitter.translation.value)
logging.debug("Scanning anchor region with resolution "
"%s and dwelltime %s and scale %s",
self._emitter.resolution.value,
self._emitter.dwellTime.value,
self._emitter.scale.value)
data = self._semd.data.get(asap=False)
if data.shape[::-1] != self._res:
logging.warning("Shape of data is %s instead of %s", data.shape[::-1], self._res)
# TODO: allow to record just every Nth image, and separately record the
# drift after every measurement
# In the mean time, we only save the 1st, 2nd and last two images
if len(self.raw) > 2:
self.raw = self.raw[0:2] + self.raw[-1:]
else:
self.raw = self.raw[0:2]
self.raw.append(data)
finally:
# Restore SEM settings
self._emitter.dwellTime.value = cur_dwell_time
self._emitter.scale.value = cur_scale
self._emitter.resolution.value = cur_resolution
self._emitter.translation.value = cur_trans
def estimate(self):
"""
Estimate the additional drift since previous acquisition+estimation.
Note: It should be only called once after every acquisition.
To read the value again, use .orig_drift.
return (float, float): estimated current drift in X/Y SEM px
"""
# Calculate the drift between the last two frames and
# between the last and first frame
if len(self.raw) > 1:
# Note: prev_drift and orig_drift, don't represent exactly the same
# value as the previous image also had drifted. So we need to
# include also the drift of the previous image.
# Also, CalculateDrift return the shift in image pixels, which is
# different (usually bigger) from the SEM px.
prev_drift = CalculateDrift(self.raw[-2], self.raw[-1], 10)
prev_drift = (prev_drift[0] * self._scale[0] + self.orig_drift[0],
prev_drift[1] * self._scale[1] + self.orig_drift[1])
orig_drift = CalculateDrift(self.raw[0], self.raw[-1], 10)
self.orig_drift = (orig_drift[0] * self._scale[0],
orig_drift[1] * self._scale[1])
logging.debug("Current drift: %s", self.orig_drift)
logging.debug("Previous frame diff: %s", prev_drift)
if (abs(self.orig_drift[0] - prev_drift[0]) > 5 or
abs(self.orig_drift[1] - prev_drift[1]) > 5):
logging.warning("Drift cannot be measured precisely, "
"hesitating between %s and %s px",
self.orig_drift, prev_drift)
# Update max_drift
if math.hypot(*self.orig_drift) > math.hypot(*self.max_drift):
self.max_drift = self.orig_drift
return self.orig_drift
def estimateAcquisitionTime(self):
"""
return (float): estimated time to acquire 1 anchor area
"""
anchor_time = numpy.prod(self._res) * self._dwell_time + 0.01
return anchor_time
@staticmethod
def estimateCorrectionPeriod(period, dwell_time, repetitions):
"""
Convert the correction period (as a time) into a number of pixel
period (float): maximum time between acquisition of the anchor region
in seconds.
dwell_time (float): integration time of each pixel in the drift-
corrected acquisition.
repetitions (tuple of 2 ints): number of pixel in the entire drift-
corrected acquisition.
First value is the fastest dimension scanned (X).
return (iterator yielding 0<int): iterator which yields number of pixels
until next correction
"""
# TODO: implement more clever calculation
pxs_dc_period = []
pxs = int(period // dwell_time) # number of pixels per period
pxs_per_line = repetitions[0]
if pxs >= pxs_per_line:
# Correct every (pxs // pxs_per_line) lines
pxs_dc_period.append((pxs // pxs_per_line) * pxs_per_line)
elif pxs <= 1: # also catches cases that would be 1,1,2,1,...
# Correct every pixel
pxs_dc_period.append(1)
else:
# Correct every X or X+1 pixel
# number of acquisition per line
nacq = int((pxs_per_line * dwell_time) // period)
# average duration of a period when fitted to the line
avgp = pxs_per_line / nacq
tot_pxi = 0 # total pixels rounded down
for i in range(1, nacq):
prev_tot_pxi = tot_pxi
tot_pxi = int(avgp * i)
pxs_dc_period.append(tot_pxi - prev_tot_pxi)
else:
# last one explicit, to avoid floating point errors
pxs_dc_period.append(pxs_per_line - tot_pxi)
logging.debug("Drift correction will be performed every %s pixels",
pxs_dc_period)
return itertools.cycle(pxs_dc_period)
def _updateSEMSettings(self):
"""
Update the scanning area of the SEM according to the anchor region
for drift correction.
"""
# translation is distance from center (situated at 0.5, 0.5), can be floats
# we clip translation inside of bounds in case of huge drift
new_translation = (self._trans[0] - self.orig_drift[0],
self._trans[1] - self.orig_drift[1])
if (abs(new_translation[0]) > self._safety_bounds[0]
or abs(new_translation[1]) > self._safety_bounds[1]):
logging.warning("Generated image may be incorrect due to extensive "
"drift of %s", new_translation)
self._trans = (numpy.clip(new_translation[0], self._min_bound, self._max_bound),
numpy.clip(new_translation[1], self._min_bound, self._max_bound))
# always in this order
self._emitter.scale.value = self._scale
self._emitter.resolution.value = self._res
self._emitter.translation.value = self._trans
self._emitter.dwellTime.value = self._dwell_time
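# --- Illustrative sketch (not part of the original module) --------------------
# A hypothetical, self-contained rework of the arithmetic performed by
# estimateCorrectionPeriod() above, added only to make the period-to-pixels
# conversion concrete. All numbers and names below are assumptions chosen for
# illustration.
def _correction_period_example():
    period, dwell_time, repetitions = 10e-3, 1e-6, (512, 512)
    pxs = int(period // dwell_time)    # 10000 pixels fit within one period
    pxs_per_line = repetitions[0]      # 512 pixels on the fast (X) dimension
    # pxs >= pxs_per_line, so the correction runs every whole number of lines:
    pixels_between_corrections = (pxs // pxs_per_line) * pxs_per_line
    assert pixels_between_corrections == 9728  # 19 full lines of 512 pixels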
|
mogotest/selenium
|
refs/heads/master
|
remote/client/src/py/errorhandler.py
|
1
|
# Copyright 2010 WebDriver committers
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..common.exceptions import ElementNotSelectableException
from ..common.exceptions import ElementNotVisibleException
from ..common.exceptions import InvalidCookieDomainException
from ..common.exceptions import InvalidElementStateException
from ..common.exceptions import NoSuchElementException
from ..common.exceptions import NoSuchFrameException
from ..common.exceptions import NoSuchWindowException
from ..common.exceptions import StaleElementReferenceException
from ..common.exceptions import UnableToSetCookieException
from ..common.exceptions import ErrorInResponseException
class ErrorCode(object):
"""Error codes defined in the WebDriver wire protocol."""
# Keep in sync with org.openqa.selenium.remote.ErrorCodes and errorcodes.h
SUCCESS = 0
NO_SUCH_ELEMENT = 7
NO_SUCH_FRAME = 8
UNKNOWN_COMMAND = 9
STALE_ELEMENT_REFERENCE = 10
ELEMENT_NOT_VISIBLE = 11
INVALID_ELEMENT_STATE = 12
UNKNOWN_ERROR = 13
ELEMENT_IS_NOT_SELECTABLE = 15
XPATH_LOOKUP_ERROR = 19
NO_SUCH_WINDOW = 23
INVALID_COOKIE_DOMAIN = 24
UNABLE_TO_SET_COOKIE = 25
class ErrorHandler(object):
"""Handles errors returned by the WebDriver server."""
def check_response(self, response):
"""Checks that a JSON response from the WebDriver does not have an error.
Args:
response - The JSON response from the WebDriver server as a dictionary
object.
Raises:
The WebDriver exception corresponding to the response's status code if
the response contains an error.
"""
status = response['status']
if status == ErrorCode.SUCCESS:
return
exception_class = ErrorInResponseException
if status == ErrorCode.NO_SUCH_ELEMENT:
exception_class = NoSuchElementException
elif status == ErrorCode.NO_SUCH_FRAME:
exception_class = NoSuchFrameException
elif status == ErrorCode.NO_SUCH_WINDOW:
exception_class = NoSuchWindowException
elif status == ErrorCode.STALE_ELEMENT_REFERENCE:
exception_class = StaleElementReferenceException
elif status == ErrorCode.ELEMENT_NOT_VISIBLE:
exception_class = ElementNotVisibleException
elif status == ErrorCode.INVALID_ELEMENT_STATE:
exception_class = InvalidElementStateException
elif status == ErrorCode.ELEMENT_IS_NOT_SELECTABLE:
exception_class = ElementNotSelectableException
elif status == ErrorCode.INVALID_COOKIE_DOMAIN:
exception_class = InvalidCookieDomainException
elif status == ErrorCode.UNABLE_TO_SET_COOKIE:
exception_class = UnableToSetCookieException
value = response['value']
if type(value) is str:
if exception_class == ErrorInResponseException:
raise exception_class(response, value)
raise exception_class(value)
message = ''
if 'message' in value:
message = value['message']
# TODO: What about 'screen' and 'stackTrace'?
if exception_class == ErrorInResponseException:
raise exception_class(response, message)
raise exception_class(message)
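# --- Illustrative usage sketch (not part of the original module) --------------
# A minimal example of how check_response() is intended to be called on a
# decoded wire-protocol response; the dictionaries below are made up.
if __name__ == '__main__':
    handler = ErrorHandler()
    # A successful response returns silently.
    handler.check_response({'status': ErrorCode.SUCCESS, 'value': None})
    # An error status is mapped to the corresponding exception class.
    try:
        handler.check_response({'status': ErrorCode.NO_SUCH_ELEMENT,
                                'value': {'message': 'Unable to locate element'}})
    except NoSuchElementException as e:
        print('raised as expected: %s' % e)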
|
brandonium21/snowflake
|
refs/heads/master
|
snowflakeEnv/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.py
|
427
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Implementation of the Metadata for Python packages PEPs.
Supports all metadata formats (1.0, 1.1, 1.2, and 2.0 experimental).
"""
from __future__ import unicode_literals
import codecs
from email import message_from_file
import json
import logging
import re
from . import DistlibException, __version__
from .compat import StringIO, string_types, text_type
from .markers import interpret
from .util import extract_by_key, get_extras
from .version import get_scheme, PEP440_VERSION_RE
logger = logging.getLogger(__name__)
class MetadataMissingError(DistlibException):
"""A required metadata is missing"""
class MetadataConflictError(DistlibException):
"""Attempt to read or write metadata fields that are conflictual."""
class MetadataUnrecognizedVersionError(DistlibException):
"""Unknown metadata version number."""
class MetadataInvalidError(DistlibException):
"""A metadata value is invalid"""
# public API of this module
__all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION']
# Encoding used for the PKG-INFO files
PKG_INFO_ENCODING = 'utf-8'
# preferred version. Hopefully will be changed
# to 1.2 once PEP 345 is supported everywhere
PKG_INFO_PREFERRED_VERSION = '1.1'
_LINE_PREFIX = re.compile('\n \|')
_241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'License')
_314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'License', 'Classifier', 'Download-URL', 'Obsoletes',
'Provides', 'Requires')
_314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier',
'Download-URL')
_345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'Maintainer', 'Maintainer-email', 'License',
'Classifier', 'Download-URL', 'Obsoletes-Dist',
'Project-URL', 'Provides-Dist', 'Requires-Dist',
'Requires-Python', 'Requires-External')
_345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python',
'Obsoletes-Dist', 'Requires-External', 'Maintainer',
'Maintainer-email', 'Project-URL')
_426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'Maintainer', 'Maintainer-email', 'License',
'Classifier', 'Download-URL', 'Obsoletes-Dist',
'Project-URL', 'Provides-Dist', 'Requires-Dist',
'Requires-Python', 'Requires-External', 'Private-Version',
'Obsoleted-By', 'Setup-Requires-Dist', 'Extension',
'Provides-Extra')
_426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By',
'Setup-Requires-Dist', 'Extension')
_ALL_FIELDS = set()
_ALL_FIELDS.update(_241_FIELDS)
_ALL_FIELDS.update(_314_FIELDS)
_ALL_FIELDS.update(_345_FIELDS)
_ALL_FIELDS.update(_426_FIELDS)
EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''')
def _version2fieldlist(version):
if version == '1.0':
return _241_FIELDS
elif version == '1.1':
return _314_FIELDS
elif version == '1.2':
return _345_FIELDS
elif version == '2.0':
return _426_FIELDS
raise MetadataUnrecognizedVersionError(version)
def _best_version(fields):
"""Detect the best version depending on the fields used."""
def _has_marker(keys, markers):
for marker in markers:
if marker in keys:
return True
return False
keys = []
for key, value in fields.items():
if value in ([], 'UNKNOWN', None):
continue
keys.append(key)
possible_versions = ['1.0', '1.1', '1.2', '2.0']
# first let's try to see if a field is not part of one of the versions
for key in keys:
if key not in _241_FIELDS and '1.0' in possible_versions:
possible_versions.remove('1.0')
if key not in _314_FIELDS and '1.1' in possible_versions:
possible_versions.remove('1.1')
if key not in _345_FIELDS and '1.2' in possible_versions:
possible_versions.remove('1.2')
if key not in _426_FIELDS and '2.0' in possible_versions:
possible_versions.remove('2.0')
# possible_versions contains the qualifying versions
if len(possible_versions) == 1:
return possible_versions[0] # found !
elif len(possible_versions) == 0:
raise MetadataConflictError('Unknown metadata set')
# let's see if one unique marker is found
is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS)
is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS)
is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS)
if int(is_1_1) + int(is_1_2) + int(is_2_0) > 1:
raise MetadataConflictError('You used incompatible 1.1/1.2/2.0 fields')
# we have the choice, 1.0, or 1.2, or 2.0
# - 1.0 has a broken Summary field but works with all tools
# - 1.1 is to avoid
# - 1.2 fixes Summary but has little adoption
# - 2.0 adds more features and is very new
if not is_1_1 and not is_1_2 and not is_2_0:
# we couldn't find any specific marker
if PKG_INFO_PREFERRED_VERSION in possible_versions:
return PKG_INFO_PREFERRED_VERSION
if is_1_1:
return '1.1'
if is_1_2:
return '1.2'
return '2.0'
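# Illustrative check (not part of distlib): a field set that uses Requires-Dist
# can no longer be 1.0/1.1 metadata, and since Requires-Dist is one of the
# _345_MARKERS, _best_version() settles on '1.2'.
def _best_version_example():
    assert _best_version({'Name': 'demo', 'Version': '1.0',
                          'Requires-Dist': ['requests (>=2.0)']}) == '1.2'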
_ATTR2FIELD = {
'metadata_version': 'Metadata-Version',
'name': 'Name',
'version': 'Version',
'platform': 'Platform',
'supported_platform': 'Supported-Platform',
'summary': 'Summary',
'description': 'Description',
'keywords': 'Keywords',
'home_page': 'Home-page',
'author': 'Author',
'author_email': 'Author-email',
'maintainer': 'Maintainer',
'maintainer_email': 'Maintainer-email',
'license': 'License',
'classifier': 'Classifier',
'download_url': 'Download-URL',
'obsoletes_dist': 'Obsoletes-Dist',
'provides_dist': 'Provides-Dist',
'requires_dist': 'Requires-Dist',
'setup_requires_dist': 'Setup-Requires-Dist',
'requires_python': 'Requires-Python',
'requires_external': 'Requires-External',
'requires': 'Requires',
'provides': 'Provides',
'obsoletes': 'Obsoletes',
'project_url': 'Project-URL',
'private_version': 'Private-Version',
'obsoleted_by': 'Obsoleted-By',
'extension': 'Extension',
'provides_extra': 'Provides-Extra',
}
_PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist')
_VERSIONS_FIELDS = ('Requires-Python',)
_VERSION_FIELDS = ('Version',)
_LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes',
'Requires', 'Provides', 'Obsoletes-Dist',
'Provides-Dist', 'Requires-Dist', 'Requires-External',
'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist',
'Provides-Extra', 'Extension')
_LISTTUPLEFIELDS = ('Project-URL',)
_ELEMENTSFIELD = ('Keywords',)
_UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description')
_MISSING = object()
_FILESAFE = re.compile('[^A-Za-z0-9.]+')
def _get_name_and_version(name, version, for_filename=False):
"""Return the distribution name with version.
If for_filename is true, return a filename-escaped form."""
if for_filename:
# For both name and version any runs of non-alphanumeric or '.'
# characters are replaced with a single '-'. Additionally any
# spaces in the version string become '.'
name = _FILESAFE.sub('-', name)
version = _FILESAFE.sub('-', version.replace(' ', '.'))
return '%s-%s' % (name, version)
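# Illustrative check (not part of distlib) of the escaping rules described in
# the comment above: runs of characters other than alphanumerics and '.' become
# a single '-', and spaces in the version become '.'.
def _fullname_example():
    assert _get_name_and_version('my project', '1.0 beta',
                                 for_filename=True) == 'my-project-1.0.beta'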
class LegacyMetadata(object):
"""The legacy metadata of a release.
Supports versions 1.0, 1.1 and 1.2 (auto-detected). You can
instantiate the class with one of these arguments (or none):
- *path*, the path to a metadata file
- *fileobj* give a file-like object with metadata as content
- *mapping* is a dict-like object
- *scheme* is a version scheme name
"""
# TODO document the mapping API and UNKNOWN default key
def __init__(self, path=None, fileobj=None, mapping=None,
scheme='default'):
if [path, fileobj, mapping].count(None) < 2:
raise TypeError('path, fileobj and mapping are exclusive')
self._fields = {}
self.requires_files = []
self._dependencies = None
self.scheme = scheme
if path is not None:
self.read(path)
elif fileobj is not None:
self.read_file(fileobj)
elif mapping is not None:
self.update(mapping)
self.set_metadata_version()
def set_metadata_version(self):
self._fields['Metadata-Version'] = _best_version(self._fields)
def _write_field(self, fileobj, name, value):
fileobj.write('%s: %s\n' % (name, value))
def __getitem__(self, name):
return self.get(name)
def __setitem__(self, name, value):
return self.set(name, value)
def __delitem__(self, name):
field_name = self._convert_name(name)
try:
del self._fields[field_name]
except KeyError:
raise KeyError(name)
def __contains__(self, name):
return (name in self._fields or
self._convert_name(name) in self._fields)
def _convert_name(self, name):
if name in _ALL_FIELDS:
return name
name = name.replace('-', '_').lower()
return _ATTR2FIELD.get(name, name)
def _default_value(self, name):
if name in _LISTFIELDS or name in _ELEMENTSFIELD:
return []
return 'UNKNOWN'
def _remove_line_prefix(self, value):
return _LINE_PREFIX.sub('\n', value)
def __getattr__(self, name):
if name in _ATTR2FIELD:
return self[name]
raise AttributeError(name)
#
# Public API
#
# dependencies = property(_get_dependencies, _set_dependencies)
def get_fullname(self, filesafe=False):
"""Return the distribution name with version.
If filesafe is true, return a filename-escaped form."""
return _get_name_and_version(self['Name'], self['Version'], filesafe)
def is_field(self, name):
"""return True if name is a valid metadata key"""
name = self._convert_name(name)
return name in _ALL_FIELDS
def is_multi_field(self, name):
name = self._convert_name(name)
return name in _LISTFIELDS
def read(self, filepath):
"""Read the metadata values from a file path."""
fp = codecs.open(filepath, 'r', encoding='utf-8')
try:
self.read_file(fp)
finally:
fp.close()
def read_file(self, fileob):
"""Read the metadata values from a file object."""
msg = message_from_file(fileob)
self._fields['Metadata-Version'] = msg['metadata-version']
# When reading, get all the fields we can
for field in _ALL_FIELDS:
if field not in msg:
continue
if field in _LISTFIELDS:
# we can have multiple lines
values = msg.get_all(field)
if field in _LISTTUPLEFIELDS and values is not None:
values = [tuple(value.split(',')) for value in values]
self.set(field, values)
else:
# single line
value = msg[field]
if value is not None and value != 'UNKNOWN':
self.set(field, value)
self.set_metadata_version()
def write(self, filepath, skip_unknown=False):
"""Write the metadata fields to filepath."""
fp = codecs.open(filepath, 'w', encoding='utf-8')
try:
self.write_file(fp, skip_unknown)
finally:
fp.close()
def write_file(self, fileobject, skip_unknown=False):
"""Write the PKG-INFO format data to a file object."""
self.set_metadata_version()
for field in _version2fieldlist(self['Metadata-Version']):
values = self.get(field)
if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']):
continue
if field in _ELEMENTSFIELD:
self._write_field(fileobject, field, ','.join(values))
continue
if field not in _LISTFIELDS:
if field == 'Description':
values = values.replace('\n', '\n |')
values = [values]
if field in _LISTTUPLEFIELDS:
values = [','.join(value) for value in values]
for value in values:
self._write_field(fileobject, field, value)
def update(self, other=None, **kwargs):
"""Set metadata values from the given iterable `other` and kwargs.
Behavior is like `dict.update`: If `other` has a ``keys`` method,
they are looped over and ``self[key]`` is assigned ``other[key]``.
Else, ``other`` is an iterable of ``(key, value)`` iterables.
Keys that don't match a metadata field or that have an empty value are
dropped.
"""
def _set(key, value):
if key in _ATTR2FIELD and value:
self.set(self._convert_name(key), value)
if not other:
# other is None or empty container
pass
elif hasattr(other, 'keys'):
for k in other.keys():
_set(k, other[k])
else:
for k, v in other:
_set(k, v)
if kwargs:
for k, v in kwargs.items():
_set(k, v)
def set(self, name, value):
"""Control then set a metadata field."""
name = self._convert_name(name)
if ((name in _ELEMENTSFIELD or name == 'Platform') and
not isinstance(value, (list, tuple))):
if isinstance(value, string_types):
value = [v.strip() for v in value.split(',')]
else:
value = []
elif (name in _LISTFIELDS and
not isinstance(value, (list, tuple))):
if isinstance(value, string_types):
value = [value]
else:
value = []
if logger.isEnabledFor(logging.WARNING):
project_name = self['Name']
scheme = get_scheme(self.scheme)
if name in _PREDICATE_FIELDS and value is not None:
for v in value:
# check that the values are valid
if not scheme.is_valid_matcher(v.split(';')[0]):
logger.warning(
'%r: %r is not valid (field %r)',
project_name, v, name)
# FIXME this rejects UNKNOWN, is that right?
elif name in _VERSIONS_FIELDS and value is not None:
if not scheme.is_valid_constraint_list(value):
logger.warning('%r: %r is not a valid version (field %r)',
project_name, value, name)
elif name in _VERSION_FIELDS and value is not None:
if not scheme.is_valid_version(value):
logger.warning('%r: %r is not a valid version (field %r)',
project_name, value, name)
if name in _UNICODEFIELDS:
if name == 'Description':
value = self._remove_line_prefix(value)
self._fields[name] = value
def get(self, name, default=_MISSING):
"""Get a metadata field."""
name = self._convert_name(name)
if name not in self._fields:
if default is _MISSING:
default = self._default_value(name)
return default
if name in _UNICODEFIELDS:
value = self._fields[name]
return value
elif name in _LISTFIELDS:
value = self._fields[name]
if value is None:
return []
res = []
for val in value:
if name not in _LISTTUPLEFIELDS:
res.append(val)
else:
# That's for Project-URL
res.append((val[0], val[1]))
return res
elif name in _ELEMENTSFIELD:
value = self._fields[name]
if isinstance(value, string_types):
return value.split(',')
return self._fields[name]
def check(self, strict=False):
"""Check if the metadata is compliant. If strict is True then raise if
no Name or Version are provided"""
self.set_metadata_version()
# XXX should check the versions (if the file was loaded)
missing, warnings = [], []
for attr in ('Name', 'Version'): # required by PEP 345
if attr not in self:
missing.append(attr)
if strict and missing != []:
msg = 'missing required metadata: %s' % ', '.join(missing)
raise MetadataMissingError(msg)
for attr in ('Home-page', 'Author'):
if attr not in self:
missing.append(attr)
# checking metadata 1.2 (XXX needs to check 1.1, 1.0)
if self['Metadata-Version'] != '1.2':
return missing, warnings
scheme = get_scheme(self.scheme)
def are_valid_constraints(value):
for v in value:
if not scheme.is_valid_matcher(v.split(';')[0]):
return False
return True
for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints),
(_VERSIONS_FIELDS,
scheme.is_valid_constraint_list),
(_VERSION_FIELDS,
scheme.is_valid_version)):
for field in fields:
value = self.get(field, None)
if value is not None and not controller(value):
warnings.append('Wrong value for %r: %s' % (field, value))
return missing, warnings
def todict(self, skip_missing=False):
"""Return fields as a dict.
Field names will be converted to use the underscore-lowercase style
instead of hyphen-mixed case (i.e. home_page instead of Home-page).
"""
self.set_metadata_version()
mapping_1_0 = (
('metadata_version', 'Metadata-Version'),
('name', 'Name'),
('version', 'Version'),
('summary', 'Summary'),
('home_page', 'Home-page'),
('author', 'Author'),
('author_email', 'Author-email'),
('license', 'License'),
('description', 'Description'),
('keywords', 'Keywords'),
('platform', 'Platform'),
('classifier', 'Classifier'),
('download_url', 'Download-URL'),
)
data = {}
for key, field_name in mapping_1_0:
if not skip_missing or field_name in self._fields:
data[key] = self[field_name]
if self['Metadata-Version'] == '1.2':
mapping_1_2 = (
('requires_dist', 'Requires-Dist'),
('requires_python', 'Requires-Python'),
('requires_external', 'Requires-External'),
('provides_dist', 'Provides-Dist'),
('obsoletes_dist', 'Obsoletes-Dist'),
('project_url', 'Project-URL'),
('maintainer', 'Maintainer'),
('maintainer_email', 'Maintainer-email'),
)
for key, field_name in mapping_1_2:
if not skip_missing or field_name in self._fields:
if key != 'project_url':
data[key] = self[field_name]
else:
data[key] = [','.join(u) for u in self[field_name]]
elif self['Metadata-Version'] == '1.1':
mapping_1_1 = (
('provides', 'Provides'),
('requires', 'Requires'),
('obsoletes', 'Obsoletes'),
)
for key, field_name in mapping_1_1:
if not skip_missing or field_name in self._fields:
data[key] = self[field_name]
return data
def add_requirements(self, requirements):
if self['Metadata-Version'] == '1.1':
# we can't have 1.1 metadata *and* Setuptools requires
for field in ('Obsoletes', 'Requires', 'Provides'):
if field in self:
del self[field]
self['Requires-Dist'] += requirements
# Mapping API
# TODO could add iter* variants
def keys(self):
return list(_version2fieldlist(self['Metadata-Version']))
def __iter__(self):
for key in self.keys():
yield key
def values(self):
return [self[key] for key in self.keys()]
def items(self):
return [(key, self[key]) for key in self.keys()]
def __repr__(self):
return '<%s %s %s>' % (self.__class__.__name__, self.name,
self.version)
METADATA_FILENAME = 'pydist.json'
class Metadata(object):
"""
The metadata of a release. This implementation uses 2.0 (JSON)
metadata where possible. If not possible, it wraps a LegacyMetadata
instance which handles the key-value metadata format.
"""
METADATA_VERSION_MATCHER = re.compile('^\d+(\.\d+)*$')
NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I)
VERSION_MATCHER = PEP440_VERSION_RE
SUMMARY_MATCHER = re.compile('.{1,2047}')
METADATA_VERSION = '2.0'
GENERATOR = 'distlib (%s)' % __version__
MANDATORY_KEYS = {
'name': (),
'version': (),
'summary': ('legacy',),
}
INDEX_KEYS = ('name version license summary description author '
'author_email keywords platform home_page classifiers '
'download_url')
DEPENDENCY_KEYS = ('extras run_requires test_requires build_requires '
'dev_requires provides meta_requires obsoleted_by '
'supports_environments')
SYNTAX_VALIDATORS = {
'metadata_version': (METADATA_VERSION_MATCHER, ()),
'name': (NAME_MATCHER, ('legacy',)),
'version': (VERSION_MATCHER, ('legacy',)),
'summary': (SUMMARY_MATCHER, ('legacy',)),
}
__slots__ = ('_legacy', '_data', 'scheme')
def __init__(self, path=None, fileobj=None, mapping=None,
scheme='default'):
if [path, fileobj, mapping].count(None) < 2:
raise TypeError('path, fileobj and mapping are exclusive')
self._legacy = None
self._data = None
self.scheme = scheme
#import pdb; pdb.set_trace()
if mapping is not None:
try:
self._validate_mapping(mapping, scheme)
self._data = mapping
except MetadataUnrecognizedVersionError:
self._legacy = LegacyMetadata(mapping=mapping, scheme=scheme)
self.validate()
else:
data = None
if path:
with open(path, 'rb') as f:
data = f.read()
elif fileobj:
data = fileobj.read()
if data is None:
# Initialised with no args - to be added
self._data = {
'metadata_version': self.METADATA_VERSION,
'generator': self.GENERATOR,
}
else:
if not isinstance(data, text_type):
data = data.decode('utf-8')
try:
self._data = json.loads(data)
self._validate_mapping(self._data, scheme)
except ValueError:
# Note: MetadataUnrecognizedVersionError does not
# inherit from ValueError (it's a DistlibException,
# which should not inherit from ValueError).
# The ValueError comes from the json.load - if that
# succeeds and we get a validation error, we want
# that to propagate
self._legacy = LegacyMetadata(fileobj=StringIO(data),
scheme=scheme)
self.validate()
common_keys = set(('name', 'version', 'license', 'keywords', 'summary'))
none_list = (None, list)
none_dict = (None, dict)
mapped_keys = {
'run_requires': ('Requires-Dist', list),
'build_requires': ('Setup-Requires-Dist', list),
'dev_requires': none_list,
'test_requires': none_list,
'meta_requires': none_list,
'extras': ('Provides-Extra', list),
'modules': none_list,
'namespaces': none_list,
'exports': none_dict,
'commands': none_dict,
'classifiers': ('Classifier', list),
'source_url': ('Download-URL', None),
'metadata_version': ('Metadata-Version', None),
}
del none_list, none_dict
def __getattribute__(self, key):
common = object.__getattribute__(self, 'common_keys')
mapped = object.__getattribute__(self, 'mapped_keys')
if key in mapped:
lk, maker = mapped[key]
if self._legacy:
if lk is None:
result = None if maker is None else maker()
else:
result = self._legacy.get(lk)
else:
value = None if maker is None else maker()
if key not in ('commands', 'exports', 'modules', 'namespaces',
'classifiers'):
result = self._data.get(key, value)
else:
# special cases for PEP 459
sentinel = object()
result = sentinel
d = self._data.get('extensions')
if d:
if key == 'commands':
result = d.get('python.commands', value)
elif key == 'classifiers':
d = d.get('python.details')
if d:
result = d.get(key, value)
else:
d = d.get('python.exports')
if d:
result = d.get(key, value)
if result is sentinel:
result = value
elif key not in common:
result = object.__getattribute__(self, key)
elif self._legacy:
result = self._legacy.get(key)
else:
result = self._data.get(key)
return result
def _validate_value(self, key, value, scheme=None):
if key in self.SYNTAX_VALIDATORS:
pattern, exclusions = self.SYNTAX_VALIDATORS[key]
if (scheme or self.scheme) not in exclusions:
m = pattern.match(value)
if not m:
raise MetadataInvalidError('%r is an invalid value for '
'the %r property' % (value,
key))
def __setattr__(self, key, value):
self._validate_value(key, value)
common = object.__getattribute__(self, 'common_keys')
mapped = object.__getattribute__(self, 'mapped_keys')
if key in mapped:
lk, _ = mapped[key]
if self._legacy:
if lk is None:
raise NotImplementedError
self._legacy[lk] = value
elif key not in ('commands', 'exports', 'modules', 'namespaces',
'classifiers'):
self._data[key] = value
else:
# special cases for PEP 459
d = self._data.setdefault('extensions', {})
if key == 'commands':
d['python.commands'] = value
elif key == 'classifiers':
d = d.setdefault('python.details', {})
d[key] = value
else:
d = d.setdefault('python.exports', {})
d[key] = value
elif key not in common:
object.__setattr__(self, key, value)
else:
if key == 'keywords':
if isinstance(value, string_types):
value = value.strip()
if value:
value = value.split()
else:
value = []
if self._legacy:
self._legacy[key] = value
else:
self._data[key] = value
@property
def name_and_version(self):
return _get_name_and_version(self.name, self.version, True)
@property
def provides(self):
if self._legacy:
result = self._legacy['Provides-Dist']
else:
result = self._data.setdefault('provides', [])
s = '%s (%s)' % (self.name, self.version)
if s not in result:
result.append(s)
return result
@provides.setter
def provides(self, value):
if self._legacy:
self._legacy['Provides-Dist'] = value
else:
self._data['provides'] = value
def get_requirements(self, reqts, extras=None, env=None):
"""
Base method to get dependencies, given a set of extras
to satisfy and an optional environment context.
:param reqts: A list of sometimes-wanted dependencies,
perhaps dependent on extras and environment.
:param extras: A list of optional components being requested.
:param env: An optional environment for marker evaluation.
"""
if self._legacy:
result = reqts
else:
result = []
extras = get_extras(extras or [], self.extras)
for d in reqts:
if 'extra' not in d and 'environment' not in d:
# unconditional
include = True
else:
if 'extra' not in d:
# Not extra-dependent - only environment-dependent
include = True
else:
include = d.get('extra') in extras
if include:
# Not excluded because of extras, check environment
marker = d.get('environment')
if marker:
include = interpret(marker, env)
if include:
result.extend(d['requires'])
for key in ('build', 'dev', 'test'):
e = ':%s:' % key
if e in extras:
extras.remove(e)
# A recursive call, but it should terminate since 'test'
# has been removed from the extras
reqts = self._data.get('%s_requires' % key, [])
result.extend(self.get_requirements(reqts, extras=extras,
env=env))
return result
@property
def dictionary(self):
if self._legacy:
return self._from_legacy()
return self._data
@property
def dependencies(self):
if self._legacy:
raise NotImplementedError
else:
return extract_by_key(self._data, self.DEPENDENCY_KEYS)
@dependencies.setter
def dependencies(self, value):
if self._legacy:
raise NotImplementedError
else:
self._data.update(value)
def _validate_mapping(self, mapping, scheme):
if mapping.get('metadata_version') != self.METADATA_VERSION:
raise MetadataUnrecognizedVersionError()
missing = []
for key, exclusions in self.MANDATORY_KEYS.items():
if key not in mapping:
if scheme not in exclusions:
missing.append(key)
if missing:
msg = 'Missing metadata items: %s' % ', '.join(missing)
raise MetadataMissingError(msg)
for k, v in mapping.items():
self._validate_value(k, v, scheme)
def validate(self):
if self._legacy:
missing, warnings = self._legacy.check(True)
if missing or warnings:
logger.warning('Metadata: missing: %s, warnings: %s',
missing, warnings)
else:
self._validate_mapping(self._data, self.scheme)
def todict(self):
if self._legacy:
return self._legacy.todict(True)
else:
result = extract_by_key(self._data, self.INDEX_KEYS)
return result
def _from_legacy(self):
assert self._legacy and not self._data
result = {
'metadata_version': self.METADATA_VERSION,
'generator': self.GENERATOR,
}
lmd = self._legacy.todict(True) # skip missing ones
for k in ('name', 'version', 'license', 'summary', 'description',
'classifier'):
if k in lmd:
if k == 'classifier':
nk = 'classifiers'
else:
nk = k
result[nk] = lmd[k]
kw = lmd.get('Keywords', [])
if kw == ['']:
kw = []
result['keywords'] = kw
keys = (('requires_dist', 'run_requires'),
('setup_requires_dist', 'build_requires'))
for ok, nk in keys:
if ok in lmd and lmd[ok]:
result[nk] = [{'requires': lmd[ok]}]
result['provides'] = self.provides
author = {}
maintainer = {}
return result
LEGACY_MAPPING = {
'name': 'Name',
'version': 'Version',
'license': 'License',
'summary': 'Summary',
'description': 'Description',
'classifiers': 'Classifier',
}
def _to_legacy(self):
def process_entries(entries):
reqts = set()
for e in entries:
extra = e.get('extra')
env = e.get('environment')
rlist = e['requires']
for r in rlist:
if not env and not extra:
reqts.add(r)
else:
marker = ''
if extra:
marker = 'extra == "%s"' % extra
if env:
if marker:
marker = '(%s) and %s' % (env, marker)
else:
marker = env
reqts.add(';'.join((r, marker)))
return reqts
assert self._data and not self._legacy
result = LegacyMetadata()
nmd = self._data
for nk, ok in self.LEGACY_MAPPING.items():
if nk in nmd:
result[ok] = nmd[nk]
r1 = process_entries(self.run_requires + self.meta_requires)
r2 = process_entries(self.build_requires + self.dev_requires)
if self.extras:
result['Provides-Extra'] = sorted(self.extras)
result['Requires-Dist'] = sorted(r1)
result['Setup-Requires-Dist'] = sorted(r2)
# TODO: other fields such as contacts
return result
def write(self, path=None, fileobj=None, legacy=False, skip_unknown=True):
if [path, fileobj].count(None) != 1:
raise ValueError('Exactly one of path and fileobj is needed')
self.validate()
if legacy:
if self._legacy:
legacy_md = self._legacy
else:
legacy_md = self._to_legacy()
if path:
legacy_md.write(path, skip_unknown=skip_unknown)
else:
legacy_md.write_file(fileobj, skip_unknown=skip_unknown)
else:
if self._legacy:
d = self._from_legacy()
else:
d = self._data
if fileobj:
json.dump(d, fileobj, ensure_ascii=True, indent=2,
sort_keys=True)
else:
with codecs.open(path, 'w', 'utf-8') as f:
json.dump(d, f, ensure_ascii=True, indent=2,
sort_keys=True)
def add_requirements(self, requirements):
if self._legacy:
self._legacy.add_requirements(requirements)
else:
run_requires = self._data.setdefault('run_requires', [])
always = None
for entry in run_requires:
if 'environment' not in entry and 'extra' not in entry:
always = entry
break
if always is None:
always = { 'requires': requirements }
run_requires.insert(0, always)
else:
rset = set(always['requires']) | set(requirements)
always['requires'] = sorted(rset)
def __repr__(self):
name = self.name or '(no name)'
version = self.version or 'no version'
return '<%s %s %s (%s)>' % (self.__class__.__name__,
self.metadata_version, name, version)
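# --- Illustrative usage sketch (not part of distlib) --------------------------
# A minimal example of the LegacyMetadata mapping API defined above: keys may be
# given in attribute form ('name'), values are checked in set(), and
# _best_version() picks the Metadata-Version that gets written out. The project
# data below is made up.
if __name__ == '__main__':
    md = LegacyMetadata(mapping={'name': 'demo', 'version': '0.1',
                                 'summary': 'A demo distribution'})
    print('%s %s' % (md['Name'], md['Version']))   # demo 0.1
    print(md['Metadata-Version'])                  # 1.1, the preferred version
    buf = StringIO()
    md.write_file(buf, skip_unknown=True)
    print(buf.getvalue())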
|
natefoo/pip
|
refs/heads/develop
|
pip/cmdoptions.py
|
34
|
"""
shared options and groups
The principle here is to define options once, but *not* instantiate them
globally. One reason being that options with action='append' can carry state
between parses. pip parses general options twice internally, and shouldn't
pass on state. To be consistent, all options will follow this design.
"""
from __future__ import absolute_import
from functools import partial
from optparse import OptionGroup, SUPPRESS_HELP, Option
import warnings
from pip.index import (
FormatControl, fmt_ctl_handle_mutual_exclude, fmt_ctl_no_binary,
fmt_ctl_no_use_wheel)
from pip.models import PyPI
from pip.locations import USER_CACHE_DIR, src_prefix
from pip.utils.hashes import STRONG_HASHES
def make_option_group(group, parser):
"""
Return an OptionGroup object
group -- assumed to be dict with 'name' and 'options' keys
parser -- an optparse Parser
"""
option_group = OptionGroup(parser, group['name'])
for option in group['options']:
option_group.add_option(option())
return option_group
def resolve_wheel_no_use_binary(options):
if not options.use_wheel:
control = options.format_control
fmt_ctl_no_use_wheel(control)
def check_install_build_global(options, check_options=None):
"""Disable wheels if per-setup.py call options are set.
:param options: The OptionParser options to update.
:param check_options: The options to check, if not supplied defaults to
options.
"""
if check_options is None:
check_options = options
def getname(n):
return getattr(check_options, n, None)
names = ["build_options", "global_options", "install_options"]
if any(map(getname, names)):
control = options.format_control
fmt_ctl_no_binary(control)
warnings.warn(
'Disabling all use of wheels due to the use of --build-options '
'/ --global-options / --install-options.', stacklevel=2)
###########
# options #
###########
help_ = partial(
Option,
'-h', '--help',
dest='help',
action='help',
help='Show help.')
isolated_mode = partial(
Option,
"--isolated",
dest="isolated_mode",
action="store_true",
default=False,
help=(
"Run pip in an isolated mode, ignoring environment variables and user "
"configuration."
),
)
require_virtualenv = partial(
Option,
# Run only if inside a virtualenv, bail if not.
'--require-virtualenv', '--require-venv',
dest='require_venv',
action='store_true',
default=False,
help=SUPPRESS_HELP)
verbose = partial(
Option,
'-v', '--verbose',
dest='verbose',
action='count',
default=0,
help='Give more output. Option is additive, and can be used up to 3 times.'
)
version = partial(
Option,
'-V', '--version',
dest='version',
action='store_true',
help='Show version and exit.')
quiet = partial(
Option,
'-q', '--quiet',
dest='quiet',
action='count',
default=0,
help='Give less output.')
log = partial(
Option,
"--log", "--log-file", "--local-log",
dest="log",
metavar="path",
help="Path to a verbose appending log."
)
no_input = partial(
Option,
# Don't ask for input
'--no-input',
dest='no_input',
action='store_true',
default=False,
help=SUPPRESS_HELP)
proxy = partial(
Option,
'--proxy',
dest='proxy',
type='str',
default='',
help="Specify a proxy in the form [user:passwd@]proxy.server:port.")
retries = partial(
Option,
'--retries',
dest='retries',
type='int',
default=5,
help="Maximum number of retries each connection should attempt "
"(default %default times).")
timeout = partial(
Option,
'--timeout', '--default-timeout',
metavar='sec',
dest='timeout',
type='float',
default=15,
help='Set the socket timeout (default %default seconds).')
default_vcs = partial(
Option,
# The default version control system for editables, e.g. 'svn'
'--default-vcs',
dest='default_vcs',
type='str',
default='',
help=SUPPRESS_HELP)
skip_requirements_regex = partial(
Option,
# A regex to be used to skip requirements
'--skip-requirements-regex',
dest='skip_requirements_regex',
type='str',
default='',
help=SUPPRESS_HELP)
def exists_action():
return Option(
# Option when path already exist
'--exists-action',
dest='exists_action',
type='choice',
choices=['s', 'i', 'w', 'b'],
default=[],
action='append',
metavar='action',
help="Default action when a path already exists: "
"(s)witch, (i)gnore, (w)ipe, (b)ackup.")
cert = partial(
Option,
'--cert',
dest='cert',
type='str',
metavar='path',
help="Path to alternate CA bundle.")
client_cert = partial(
Option,
'--client-cert',
dest='client_cert',
type='str',
default=None,
metavar='path',
help="Path to SSL client certificate, a single file containing the "
"private key and the certificate in PEM format.")
index_url = partial(
Option,
'-i', '--index-url', '--pypi-url',
dest='index_url',
metavar='URL',
default=PyPI.simple_url,
help='Base URL of Python Package Index (default %default).')
def extra_index_url():
return Option(
'--extra-index-url',
dest='extra_index_urls',
metavar='URL',
action='append',
default=[],
help='Extra URLs of package indexes to use in addition to --index-url.'
)
no_index = partial(
Option,
'--no-index',
dest='no_index',
action='store_true',
default=False,
help='Ignore package index (only looking at --find-links URLs instead).')
def find_links():
return Option(
'-f', '--find-links',
dest='find_links',
action='append',
default=[],
metavar='url',
help="If a url or path to an html file, then parse for links to "
"archives. If a local path or file:// url that's a directory,"
"then look for archives in the directory listing.")
def allow_external():
return Option(
"--allow-external",
dest="allow_external",
action="append",
default=[],
metavar="PACKAGE",
help=SUPPRESS_HELP,
)
allow_all_external = partial(
Option,
"--allow-all-external",
dest="allow_all_external",
action="store_true",
default=False,
help=SUPPRESS_HELP,
)
def trusted_host():
return Option(
"--trusted-host",
dest="trusted_hosts",
action="append",
metavar="HOSTNAME",
default=[],
help="Mark this host as trusted, even though it does not have valid "
"or any HTTPS.",
)
# Remove after 7.0
no_allow_external = partial(
Option,
"--no-allow-external",
dest="allow_all_external",
action="store_false",
default=False,
help=SUPPRESS_HELP,
)
# Remove --allow-insecure after 7.0
def allow_unsafe():
return Option(
"--allow-unverified", "--allow-insecure",
dest="allow_unverified",
action="append",
default=[],
metavar="PACKAGE",
help=SUPPRESS_HELP,
)
# Remove after 7.0
no_allow_unsafe = partial(
Option,
"--no-allow-insecure",
dest="allow_all_insecure",
action="store_false",
default=False,
help=SUPPRESS_HELP
)
# Remove after 1.5
process_dependency_links = partial(
Option,
"--process-dependency-links",
dest="process_dependency_links",
action="store_true",
default=False,
help="Enable the processing of dependency links.",
)
def constraints():
return Option(
'-c', '--constraint',
dest='constraints',
action='append',
default=[],
metavar='file',
help='Constrain versions using the given constraints file. '
'This option can be used multiple times.')
def requirements():
return Option(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help='Install from the given requirements file. '
'This option can be used multiple times.')
def editable():
return Option(
'-e', '--editable',
dest='editables',
action='append',
default=[],
metavar='path/url',
help=('Install a project in editable mode (i.e. setuptools '
'"develop mode") from a local project path or a VCS url.'),
)
src = partial(
Option,
'--src', '--source', '--source-dir', '--source-directory',
dest='src_dir',
metavar='dir',
default=src_prefix,
help='Directory to check out editable projects into. '
'The default in a virtualenv is "<venv path>/src". '
'The default for global installs is "<current dir>/src".'
)
# XXX: deprecated, remove in 9.0
use_wheel = partial(
Option,
'--use-wheel',
dest='use_wheel',
action='store_true',
default=True,
help=SUPPRESS_HELP,
)
# XXX: deprecated, remove in 9.0
no_use_wheel = partial(
Option,
'--no-use-wheel',
dest='use_wheel',
action='store_false',
default=True,
help=('Do not find and prefer wheel archives when searching indexes and '
'find-links locations. DEPRECATED in favour of --no-binary.'),
)
def _get_format_control(values, option):
"""Get a format_control object."""
return getattr(values, option.dest)
def _handle_no_binary(option, opt_str, value, parser):
existing = getattr(parser.values, option.dest)
fmt_ctl_handle_mutual_exclude(
value, existing.no_binary, existing.only_binary)
def _handle_only_binary(option, opt_str, value, parser):
existing = getattr(parser.values, option.dest)
fmt_ctl_handle_mutual_exclude(
value, existing.only_binary, existing.no_binary)
def no_binary():
return Option(
"--no-binary", dest="format_control", action="callback",
callback=_handle_no_binary, type="str",
default=FormatControl(set(), set()),
help="Do not use binary packages. Can be supplied multiple times, and "
"each time adds to the existing value. Accepts either :all: to "
"disable all binary packages, :none: to empty the set, or one or "
"more package names with commas between them. Note that some "
"packages are tricky to compile and may fail to install when "
"this option is used on them.")
def only_binary():
return Option(
"--only-binary", dest="format_control", action="callback",
callback=_handle_only_binary, type="str",
default=FormatControl(set(), set()),
help="Do not use source packages. Can be supplied multiple times, and "
"each time adds to the existing value. Accepts either :all: to "
"disable all source packages, :none: to empty the set, or one or "
"more package names with commas between them. Packages without "
"binary distributions will fail to install when this option is "
"used on them.")
cache_dir = partial(
Option,
"--cache-dir",
dest="cache_dir",
default=USER_CACHE_DIR,
metavar="dir",
help="Store the cache data in <dir>."
)
no_cache = partial(
Option,
"--no-cache-dir",
dest="cache_dir",
action="store_false",
help="Disable the cache.",
)
no_deps = partial(
Option,
'--no-deps', '--no-dependencies',
dest='ignore_dependencies',
action='store_true',
default=False,
help="Don't install package dependencies.")
build_dir = partial(
Option,
'-b', '--build', '--build-dir', '--build-directory',
dest='build_dir',
metavar='dir',
help='Directory to unpack packages into and build in.'
)
install_options = partial(
Option,
'--install-option',
dest='install_options',
action='append',
metavar='options',
help="Extra arguments to be supplied to the setup.py install "
"command (use like --install-option=\"--install-scripts=/usr/local/"
"bin\"). Use multiple --install-option options to pass multiple "
"options to setup.py install. If you are using an option with a "
"directory path, be sure to use absolute path.")
global_options = partial(
Option,
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the install command.")
no_clean = partial(
Option,
'--no-clean',
action='store_true',
default=False,
help="Don't clean up build directories.")
pre = partial(
Option,
'--pre',
action='store_true',
default=False,
help="Include pre-release and development versions. By default, "
"pip only finds stable versions.")
disable_pip_version_check = partial(
Option,
"--disable-pip-version-check",
dest="disable_pip_version_check",
action="store_true",
default=False,
help="Don't periodically check PyPI to determine whether a new version "
"of pip is available for download. Implied with --no-index.")
# Deprecated, Remove later
always_unzip = partial(
Option,
'-Z', '--always-unzip',
dest='always_unzip',
action='store_true',
help=SUPPRESS_HELP,
)
def _merge_hash(option, opt_str, value, parser):
"""Given a value spelled "algo:digest", append the digest to a list
pointed to in a dict by the algo name."""
if not parser.values.hashes:
parser.values.hashes = {}
try:
algo, digest = value.split(':', 1)
except ValueError:
parser.error('Arguments to %s must be a hash name '
'followed by a value, like --hash=sha256:abcde...' %
opt_str)
if algo not in STRONG_HASHES:
parser.error('Allowed hash algorithms for %s are %s.' %
(opt_str, ', '.join(STRONG_HASHES)))
parser.values.hashes.setdefault(algo, []).append(digest)
hash = partial(
Option,
'--hash',
# Hash values eventually end up in InstallRequirement.hashes due to
# __dict__ copying in process_line().
dest='hashes',
action='callback',
callback=_merge_hash,
type='string',
help="Verify that the package's archive matches this "
'hash before installing. Example: --hash=sha256:abcdef...')
require_hashes = partial(
Option,
'--require-hashes',
dest='require_hashes',
action='store_true',
default=False,
help='Require a hash to check each requirement against, for '
'repeatable installs. This option is implied when any package in a '
'requirements file has a --hash option.')
##########
# groups #
##########
general_group = {
'name': 'General Options',
'options': [
help_,
isolated_mode,
require_virtualenv,
verbose,
version,
quiet,
log,
no_input,
proxy,
retries,
timeout,
default_vcs,
skip_requirements_regex,
exists_action,
trusted_host,
cert,
client_cert,
cache_dir,
no_cache,
disable_pip_version_check,
]
}
non_deprecated_index_group = {
'name': 'Package Index Options',
'options': [
index_url,
extra_index_url,
no_index,
find_links,
process_dependency_links,
]
}
index_group = {
'name': 'Package Index Options (including deprecated options)',
'options': non_deprecated_index_group['options'] + [
allow_external,
allow_all_external,
no_allow_external,
allow_unsafe,
no_allow_unsafe,
]
}
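# --- Illustrative usage sketch (not part of pip) ------------------------------
# How the shared definitions above are meant to be consumed: the partials and
# factory functions are instantiated per parse and attached to an optparse
# parser via make_option_group(). The parser below is only a stand-in for pip's
# own parser (which likewise disables optparse's built-in --help).
if __name__ == '__main__':
    from optparse import OptionParser
    parser = OptionParser(prog='demo', add_help_option=False)
    parser.add_option_group(make_option_group(general_group, parser))
    opts, args = parser.parse_args(['-q', '--retries', '3'])
    print('%s %s' % (opts.quiet, opts.retries))  # 1 3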
|
bukepo/openthread
|
refs/heads/master
|
tools/harness-automation/cases/router_5_1_2.py
|
18
|
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class Router_5_1_2(HarnessCase):
role = HarnessCase.ROLE_ROUTER
case = '5 1 2'
golden_devices_required = 3
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
|
angelapper/edx-platform
|
refs/heads/master
|
openedx/features/course_bookmarks/plugins.py
|
8
|
"""
Platform plugins to support course bookmarks.
"""
from courseware.access import has_access
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from openedx.features.course_experience.course_tools import CourseTool
from student.models import CourseEnrollment
class CourseBookmarksTool(CourseTool):
"""
The course bookmarks tool.
"""
@classmethod
def analytics_id(cls):
"""
Returns an id to uniquely identify this tool in analytics events.
"""
return 'edx.bookmarks'
@classmethod
def is_enabled(cls, request, course_key):
"""
The bookmarks tool is only enabled for enrolled users or staff.
"""
if has_access(request.user, 'staff', course_key):
return True
return CourseEnrollment.is_enrolled(request.user, course_key)
@classmethod
def title(cls):
"""
Returns the title of this tool.
"""
return _('Bookmarks')
@classmethod
def icon_classes(cls):
"""
Returns the icon classes needed to represent this tool.
"""
return 'fa fa-bookmark'
@classmethod
def url(cls, course_key):
"""
Returns the URL for this tool for the specified course key.
"""
return reverse('openedx.course_bookmarks.home', args=[course_key])
|
dsiganos/git-repo
|
refs/heads/master
|
command.py
|
9
|
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import optparse
import platform
import re
import sys
from error import NoSuchProjectError
from error import InvalidProjectGroupsError
class Command(object):
"""Base class for any command line action in repo.
"""
common = False
manifest = None
_optparse = None
def WantPager(self, opt):
return False
def ReadEnvironmentOptions(self, opts):
""" Set options from environment variables. """
env_options = self._RegisteredEnvironmentOptions()
for env_key, opt_key in env_options.items():
# Get the user-set option value if any
opt_value = getattr(opts, opt_key)
# If the value is set, it means the user has passed it as a command
# line option, and we should use that. Otherwise we can try to set it
# with the value from the corresponding environment variable.
if opt_value is not None:
continue
env_value = os.environ.get(env_key)
if env_value is not None:
setattr(opts, opt_key, env_value)
return opts
@property
def OptionParser(self):
if self._optparse is None:
try:
me = 'repo %s' % self.NAME
usage = self.helpUsage.strip().replace('%prog', me)
except AttributeError:
usage = 'repo %s' % self.NAME
self._optparse = optparse.OptionParser(usage = usage)
self._Options(self._optparse)
return self._optparse
def _Options(self, p):
"""Initialize the option parser.
"""
def _RegisteredEnvironmentOptions(self):
"""Get options that can be set from environment variables.
Return a dictionary mapping environment variable name
to option key name that it can override.
Example: {'REPO_MY_OPTION': 'my_option'}
Will allow the option with key value 'my_option' to be set
from the value in the environment variable named 'REPO_MY_OPTION'.
Note: This does not work properly for options that are explicitly
set to None by the user, or options that are defined with a
default value other than None.
"""
return {}
def Usage(self):
"""Display usage and terminate.
"""
self.OptionParser.print_usage()
sys.exit(1)
def Execute(self, opt, args):
"""Perform the action, after option parsing is complete.
"""
raise NotImplementedError
def _ResetPathToProjectMap(self, projects):
self._by_path = dict((p.worktree, p) for p in projects)
def _UpdatePathToProjectMap(self, project):
self._by_path[project.worktree] = project
def _GetProjectByPath(self, path):
project = None
if os.path.exists(path):
oldpath = None
while path \
and path != oldpath \
and path != self.manifest.topdir:
try:
project = self._by_path[path]
break
except KeyError:
oldpath = path
path = os.path.dirname(path)
else:
try:
project = self._by_path[path]
except KeyError:
pass
return project
def GetProjects(self, args, missing_ok=False, submodules_ok=False):
"""A list of projects that match the arguments.
"""
all_projects = self.manifest.projects
result = []
mp = self.manifest.manifestProject
groups = mp.config.GetString('manifest.groups')
if not groups:
groups = 'default,platform-' + platform.system().lower()
groups = [x for x in re.split(r'[,\s]+', groups) if x]
if not args:
all_projects_list = list(all_projects.values())
derived_projects = {}
for project in all_projects_list:
if submodules_ok or project.sync_s:
derived_projects.update((p.name, p)
for p in project.GetDerivedSubprojects())
all_projects_list.extend(derived_projects.values())
for project in all_projects_list:
if ((missing_ok or project.Exists) and
project.MatchesGroups(groups)):
result.append(project)
else:
self._ResetPathToProjectMap(all_projects.values())
for arg in args:
project = all_projects.get(arg)
if not project:
path = os.path.abspath(arg).replace('\\', '/')
project = self._GetProjectByPath(path)
# If it's not a derived project, update path->project mapping and
# search again, as arg might actually point to a derived subproject.
if (project and not project.Derived and
(submodules_ok or project.sync_s)):
search_again = False
for subproject in project.GetDerivedSubprojects():
self._UpdatePathToProjectMap(subproject)
search_again = True
if search_again:
project = self._GetProjectByPath(path) or project
if not project:
raise NoSuchProjectError(arg)
if not missing_ok and not project.Exists:
raise NoSuchProjectError(arg)
if not project.MatchesGroups(groups):
raise InvalidProjectGroupsError(arg)
result.append(project)
def _getpath(x):
return x.relpath
result.sort(key=_getpath)
return result
def FindProjects(self, args):
result = []
patterns = [re.compile(r'%s' % a, re.IGNORECASE) for a in args]
for project in self.GetProjects(''):
for pattern in patterns:
if pattern.search(project.name) or pattern.search(project.relpath):
result.append(project)
break
result.sort(key=lambda project: project.relpath)
return result
# pylint: disable=W0223
# Pylint warns that the `InteractiveCommand` and `PagedCommand` classes do not
# override method `Execute` which is abstract in `Command`. Since that method
# is always implemented in classes derived from `InteractiveCommand` and
# `PagedCommand`, this warning can be suppressed.
class InteractiveCommand(Command):
"""Command which requires user interaction on the tty and
must not run within a pager, even if the user asks to.
"""
def WantPager(self, opt):
return False
class PagedCommand(Command):
"""Command which defaults to output in a pager, as its
display tends to be larger than one screen full.
"""
def WantPager(self, opt):
return True
# pylint: enable=W0223
class MirrorSafeCommand(object):
"""Command permits itself to run within a mirror,
and does not require a working directory.
"""
|
wimac/home
|
refs/heads/master
|
Dropbox/skel/bin/sick-beard/cherrypy/lib/caching.py
|
35
|
import datetime
import threading
import time
import cherrypy
from cherrypy.lib import cptools, httputil
class Cache(object):
def get(self):
raise NotImplementedError
def put(self, obj, size):
raise NotImplementedError
def delete(self):
raise NotImplementedError
def clear(self):
raise NotImplementedError
# ------------------------------- Memory Cache ------------------------------- #
class AntiStampedeCache(dict):
def wait(self, key, timeout=5, debug=False):
"""Return the cached value for the given key, or None.
If timeout is not None (the default), and the value is already
being calculated by another thread, wait until the given timeout has
elapsed. If the value is available before the timeout expires, it is
returned. If not, None is returned, and a sentinel placed in the cache
to signal other threads to wait.
If timeout is None, no waiting is performed nor sentinels used.
"""
value = self.get(key)
if isinstance(value, threading._Event):
if timeout is None:
# Ignore the other thread and recalc it ourselves.
if debug:
cherrypy.log('No timeout', 'TOOLS.CACHING')
return None
# Wait until it's done or times out.
if debug:
cherrypy.log('Waiting up to %s seconds' % timeout, 'TOOLS.CACHING')
value.wait(timeout)
if value.result is not None:
# The other thread finished its calculation. Use it.
if debug:
cherrypy.log('Result!', 'TOOLS.CACHING')
return value.result
# Timed out. Stick an Event in the slot so other threads wait
# on this one to finish calculating the value.
if debug:
cherrypy.log('Timed out', 'TOOLS.CACHING')
e = threading.Event()
e.result = None
dict.__setitem__(self, key, e)
return None
elif value is None:
# Stick an Event in the slot so other threads wait
# on this one to finish calculating the value.
if debug:
                cherrypy.log('No cached value; placing sentinel', 'TOOLS.CACHING')
e = threading.Event()
e.result = None
dict.__setitem__(self, key, e)
return value
def __setitem__(self, key, value):
"""Set the cached value for the given key."""
existing = self.get(key)
dict.__setitem__(self, key, value)
if isinstance(existing, threading._Event):
# Set Event.result so other threads waiting on it have
# immediate access without needing to poll the cache again.
existing.result = value
existing.set()
class MemoryCache(Cache):
"""An in-memory cache for varying response content.
Each key in self.store is a URI, and each value is an AntiStampedeCache.
The response for any given URI may vary based on the values of
"selecting request headers"; that is, those named in the Vary
response header. We assume the list of header names to be constant
for each URI throughout the lifetime of the application, and store
that list in self.store[uri].selecting_headers.
The items contained in self.store[uri] have keys which are tuples of request
header values (in the same order as the names in its selecting_headers),
and values which are the actual responses.
"""
maxobjects = 1000
maxobj_size = 100000
maxsize = 10000000
delay = 600
antistampede_timeout = 5
expire_freq = 0.1
debug = False
def __init__(self):
self.clear()
# Run self.expire_cache in a separate daemon thread.
t = threading.Thread(target=self.expire_cache, name='expire_cache')
self.expiration_thread = t
if hasattr(threading.Thread, "daemon"):
# Python 2.6+
t.daemon = True
else:
t.setDaemon(True)
t.start()
def clear(self):
"""Reset the cache to its initial, empty state."""
self.store = {}
self.expirations = {}
self.tot_puts = 0
self.tot_gets = 0
self.tot_hist = 0
self.tot_expires = 0
self.tot_non_modified = 0
self.cursize = 0
def expire_cache(self):
# expire_cache runs in a separate thread which the servers are
# not aware of. It's possible that "time" will be set to None
# arbitrarily, so we check "while time" to avoid exceptions.
# See tickets #99 and #180 for more information.
while time:
now = time.time()
# Must make a copy of expirations so it doesn't change size
# during iteration
for expiration_time, objects in self.expirations.items():
if expiration_time <= now:
for obj_size, uri, sel_header_values in objects:
try:
del self.store[uri][sel_header_values]
self.tot_expires += 1
self.cursize -= obj_size
except KeyError:
# the key may have been deleted elsewhere
pass
del self.expirations[expiration_time]
time.sleep(self.expire_freq)
def get(self):
"""Return the current variant if in the cache, else None."""
request = cherrypy.serving.request
self.tot_gets += 1
uri = cherrypy.url(qs=request.query_string)
uricache = self.store.get(uri)
if uricache is None:
return None
header_values = [request.headers.get(h, '')
for h in uricache.selecting_headers]
header_values.sort()
variant = uricache.wait(key=tuple(header_values),
timeout=self.antistampede_timeout,
debug=self.debug)
if variant is not None:
self.tot_hist += 1
return variant
def put(self, variant, size):
"""Store the current variant in the cache."""
request = cherrypy.serving.request
response = cherrypy.serving.response
uri = cherrypy.url(qs=request.query_string)
uricache = self.store.get(uri)
if uricache is None:
uricache = AntiStampedeCache()
uricache.selecting_headers = [
e.value for e in response.headers.elements('Vary')]
self.store[uri] = uricache
if len(self.store) < self.maxobjects:
total_size = self.cursize + size
# checks if there's space for the object
if (size < self.maxobj_size and total_size < self.maxsize):
# add to the expirations list
expiration_time = response.time + self.delay
bucket = self.expirations.setdefault(expiration_time, [])
bucket.append((size, uri, uricache.selecting_headers))
# add to the cache
header_values = [request.headers.get(h, '')
for h in uricache.selecting_headers]
header_values.sort()
uricache[tuple(header_values)] = variant
self.tot_puts += 1
self.cursize = total_size
def delete(self):
"""Remove ALL cached variants of the current resource."""
uri = cherrypy.url(qs=cherrypy.serving.request.query_string)
self.store.pop(uri, None)
def get(invalid_methods=("POST", "PUT", "DELETE"), debug=False, **kwargs):
"""Try to obtain cached output. If fresh enough, raise HTTPError(304).
If POST, PUT, or DELETE:
* invalidates (deletes) any cached response for this resource
* sets request.cached = False
* sets request.cacheable = False
else if a cached copy exists:
* sets request.cached = True
* sets request.cacheable = False
* sets response.headers to the cached values
* checks the cached Last-Modified response header against the
current If-(Un)Modified-Since request headers; raises 304
if necessary.
* sets response.status and response.body to the cached values
* returns True
otherwise:
* sets request.cached = False
* sets request.cacheable = True
* returns False
"""
request = cherrypy.serving.request
response = cherrypy.serving.response
if not hasattr(cherrypy, "_cache"):
# Make a process-wide Cache object.
cherrypy._cache = kwargs.pop("cache_class", MemoryCache)()
# Take all remaining kwargs and set them on the Cache object.
for k, v in kwargs.items():
setattr(cherrypy._cache, k, v)
cherrypy._cache.debug = debug
# POST, PUT, DELETE should invalidate (delete) the cached copy.
# See http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.10.
if request.method in invalid_methods:
if debug:
cherrypy.log('request.method %r in invalid_methods %r' %
(request.method, invalid_methods), 'TOOLS.CACHING')
cherrypy._cache.delete()
request.cached = False
request.cacheable = False
return False
if 'no-cache' in [e.value for e in request.headers.elements('Pragma')]:
request.cached = False
request.cacheable = True
return False
cache_data = cherrypy._cache.get()
request.cached = bool(cache_data)
request.cacheable = not request.cached
if request.cached:
# Serve the cached copy.
max_age = cherrypy._cache.delay
for v in [e.value for e in request.headers.elements('Cache-Control')]:
atoms = v.split('=', 1)
directive = atoms.pop(0)
if directive == 'max-age':
if len(atoms) != 1 or not atoms[0].isdigit():
raise cherrypy.HTTPError(400, "Invalid Cache-Control header")
max_age = int(atoms[0])
break
elif directive == 'no-cache':
if debug:
cherrypy.log('Ignoring cache due to Cache-Control: no-cache',
'TOOLS.CACHING')
request.cached = False
request.cacheable = True
return False
if debug:
cherrypy.log('Reading response from cache', 'TOOLS.CACHING')
s, h, b, create_time = cache_data
age = int(response.time - create_time)
if (age > max_age):
if debug:
cherrypy.log('Ignoring cache due to age > %d' % max_age,
'TOOLS.CACHING')
request.cached = False
request.cacheable = True
return False
# Copy the response headers. See http://www.cherrypy.org/ticket/721.
response.headers = rh = httputil.HeaderMap()
for k in h:
dict.__setitem__(rh, k, dict.__getitem__(h, k))
# Add the required Age header
response.headers["Age"] = str(age)
try:
# Note that validate_since depends on a Last-Modified header;
# this was put into the cached copy, and should have been
# resurrected just above (response.headers = cache_data[1]).
cptools.validate_since()
except cherrypy.HTTPRedirect, x:
if x.status == 304:
cherrypy._cache.tot_non_modified += 1
raise
# serve it & get out from the request
response.status = s
response.body = b
else:
if debug:
cherrypy.log('request is not cached', 'TOOLS.CACHING')
return request.cached
def tee_output():
request = cherrypy.serving.request
if 'no-store' in request.headers.values('Cache-Control'):
return
def tee(body):
"""Tee response.body into a list."""
if ('no-cache' in response.headers.values('Pragma') or
'no-store' in response.headers.values('Cache-Control')):
for chunk in body:
yield chunk
return
output = []
for chunk in body:
output.append(chunk)
yield chunk
# save the cache data
body = ''.join(output)
cherrypy._cache.put((response.status, response.headers or {},
body, response.time), len(body))
response = cherrypy.serving.response
response.body = tee(response.body)
def expires(secs=0, force=False, debug=False):
"""Tool for influencing cache mechanisms using the 'Expires' header.
'secs' must be either an int or a datetime.timedelta, and indicates the
number of seconds between response.time and when the response should
expire. The 'Expires' header will be set to (response.time + secs).
    If 'secs' is zero, the 'Expires' header is set to a fixed date in the past, and
the following "cache prevention" headers are also set:
'Pragma': 'no-cache'
'Cache-Control': 'no-cache, must-revalidate'
If 'force' is False (the default), the following headers are checked:
'Etag', 'Last-Modified', 'Age', 'Expires'. If any are already present,
none of the above response headers are set.
"""
response = cherrypy.serving.response
headers = response.headers
cacheable = False
if not force:
# some header names that indicate that the response can be cached
for indicator in ('Etag', 'Last-Modified', 'Age', 'Expires'):
if indicator in headers:
cacheable = True
break
if not cacheable and not force:
if debug:
cherrypy.log('request is not cacheable', 'TOOLS.EXPIRES')
else:
if debug:
cherrypy.log('request is cacheable', 'TOOLS.EXPIRES')
if isinstance(secs, datetime.timedelta):
secs = (86400 * secs.days) + secs.seconds
if secs == 0:
if force or ("Pragma" not in headers):
headers["Pragma"] = "no-cache"
if cherrypy.serving.request.protocol >= (1, 1):
if force or "Cache-Control" not in headers:
headers["Cache-Control"] = "no-cache, must-revalidate"
# Set an explicit Expires date in the past.
expiry = httputil.HTTPDate(1169942400.0)
else:
expiry = httputil.HTTPDate(response.time + secs)
if force or "Expires" not in headers:
headers["Expires"] = expiry
|
izapolsk/integration_tests
|
refs/heads/master
|
cfme/fixtures/artifactor_plugin.py
|
1
|
"""An example config::
artifactor:
log_dir: /home/test/workspace/cfme_tests/artiout
per_run: test #test, run, None
reuse_dir: True
squash_exceptions: False
threaded: False
server_address: 127.0.0.1
server_port: 21212
server_enabled: True
plugins:
``log_dir`` is the destination for all artifacts
    ``per_run`` denotes if the test artifacts should be grouped by run, test, or None
``reuse_dir`` if this is False and Artifactor comes across a dir that has
already been used, it will die
"""
import atexit
import os
import subprocess
from threading import RLock
import diaper
import pytest
from artifactor import ArtifactorClient
from cfme.fixtures.pytest_store import store
from cfme.fixtures.pytest_store import write_line
from cfme.markers.polarion import extract_polarion_ids
from cfme.utils.appliance import find_appliance
from cfme.utils.blockers import Blocker
from cfme.utils.blockers import BZ
from cfme.utils.conf import credentials
from cfme.utils.conf import env
from cfme.utils.log import logger
from cfme.utils.net import net_check
from cfme.utils.net import random_port
from cfme.utils.wait import wait_for
UNDER_TEST = False  # set to True for tests that exercise artifactor
# Create a list of all our passwords for use with the sanitize request later in this module
# Filter out all Nones as it will mess the output up.
words = [word for word
in {v.get('password') for v in credentials.values()}
if word is not None]
def get_test_idents(item):
try:
return item.location[2], item.location[0]
except AttributeError:
try:
return item.fspath.strpath, None
except AttributeError:
return (None, None)
def get_name(obj):
return (getattr(obj, '_param_name', None) or
getattr(obj, 'name', None) or
str(obj))
class DummyClient(object):
def fire_hook(self, *args, **kwargs):
return
def terminate(self):
return
def task_status(self):
return
def __bool__(self):
# DummyClient is always False,
        # so it's easy to see if we have an artifactor client
return False
def get_client(art_config, pytest_config):
if art_config and not UNDER_TEST:
port = getattr(pytest_config.option, 'artifactor_port', None) or \
art_config.get('server_port') or random_port()
pytest_config.option.artifactor_port = port
art_config['server_port'] = port
return ArtifactorClient(
art_config['server_address'], art_config['server_port'])
else:
return DummyClient()
def spawn_server(config, art_client):
if store.slave_manager or UNDER_TEST:
return None
import subprocess
cmd = ['miq-artifactor-server', '--port', str(art_client.port)]
if config.getvalue('run_id'):
cmd.append('--run-id')
cmd.append(str(config.getvalue('run_id')))
proc = subprocess.Popen(cmd)
return proc
session_ver = None
session_build = None
session_stream = None
session_fw_version = None
def pytest_addoption(parser):
parser.addoption("--run-id", action="store", default=None,
help="A run id to assist in logging")
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
if config.getoption('--help'):
return
art_client = get_client(
art_config=env.get('artifactor', {}),
pytest_config=config)
# just in case
if not store.slave_manager:
with diaper:
atexit.register(shutdown, config)
if art_client:
config._art_proc = spawn_server(config, art_client)
wait_for(
net_check,
func_args=[art_client.port, '127.0.0.1'],
func_kwargs={'force': True},
num_sec=10, message="wait for artifactor to start")
art_client.ready = True
else:
config._art_proc = None
from cfme.utils.log import artifactor_handler
artifactor_handler.artifactor = art_client
if store.slave_manager:
artifactor_handler.slaveid = store.slaveid
config._art_client = art_client
def fire_art_hook(config, hook, **hook_args):
client = getattr(config, '_art_client', None)
if client is None:
assert UNDER_TEST, 'missing artifactor is only valid for inprocess tests'
else:
return client.fire_hook(hook, **hook_args)
def fire_art_test_hook(node, hook, **hook_args):
name, location = get_test_idents(node)
return fire_art_hook(
node.config, hook,
test_name=name,
test_location=location,
**hook_args)
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_protocol(item):
global session_ver
global session_build
global session_stream
appliance = find_appliance(item)
if not session_ver:
session_ver = str(appliance.version)
session_build = appliance.build
session_stream = appliance.version.stream()
if str(session_ver) not in session_build:
session_build = "{}-{}".format(str(session_ver), session_build)
session_fw_version = None
try:
proc = subprocess.Popen(['git', 'describe', '--tags'],
stdout=subprocess.PIPE)
proc.wait()
session_fw_version = proc.stdout.read().strip()
except Exception:
pass # already set session_fw_version to None
fire_art_hook(
item.config, 'session_info',
version=session_ver,
build=session_build,
stream=session_stream,
fw_version=session_fw_version
)
tier = item.get_closest_marker('tier')
if tier:
tier = tier.args[0]
requirement = item.get_closest_marker('requirement')
if requirement:
requirement = requirement.args[0]
param_dict = {}
try:
params = item.callspec.params
param_dict = {p: get_name(v) for p, v in params.items()}
except Exception:
pass # already set param_dict
ip = appliance.hostname
    # This pre_start_test hook is needed so that filedump is able to get the test
    # object set up before the logger starts logging, as the logger fires a nested
    # hook to the filedumper and we can't specify the order in riggerlib.
meta = item.get_closest_marker('meta')
if meta and 'blockers' in meta.kwargs:
blocker_spec = meta.kwargs['blockers']
blockers = []
for blocker in blocker_spec:
if isinstance(blocker, int):
blockers.append(BZ(blocker).url)
else:
blockers.append(Blocker.parse(blocker).url)
else:
blockers = []
fire_art_test_hook(
item, 'pre_start_test',
slaveid=store.slaveid, ip=ip)
fire_art_test_hook(
item, 'start_test',
slaveid=store.slaveid, ip=ip,
tier=tier, requirement=requirement, param_dict=param_dict, issues=blockers)
yield
def pytest_runtest_teardown(item, nextitem):
name, location = get_test_idents(item)
app = find_appliance(item)
ip = app.hostname
fire_art_test_hook(
item, 'finish_test',
slaveid=store.slaveid, ip=ip, wait_for_task=True)
fire_art_test_hook(item, 'sanitize', words=words)
jenkins_data = {
'build_url': os.environ.get('BUILD_URL'),
'build_number': os.environ.get('BUILD_NUMBER'),
'git_commit': os.environ.get('GIT_COMMIT'),
'job_name': os.environ.get('JOB_NAME')
}
param_dict = None
try:
caps = app.browser.widgetastic.selenium.capabilities
param_dict = {
'browserName': caps.get('browserName', 'Unknown'),
'browserPlatform': caps.get('platformName', caps.get('platform', 'Unknown')),
'browserVersion': caps.get('browserVersion', caps.get('version', 'Unknown'))
}
except Exception:
logger.exception("Couldn't grab browser env_vars")
pass # already set param_dict
fire_art_test_hook(
item, 'ostriz_send', env_params=param_dict,
slaveid=store.slaveid, polarion_ids=extract_polarion_ids(item), jenkins=jenkins_data)
def pytest_runtest_logreport(report):
if store.slave_manager:
return # each node does its own reporting
config = store.config # tech debt
name, location = get_test_idents(report)
xfail = hasattr(report, 'wasxfail')
if hasattr(report, 'skipped'):
if report.skipped:
fire_art_hook(
config, 'filedump',
test_location=location, test_name=name,
description="Short traceback",
contents=report.longreprtext,
file_type="short_tb", group_id="skipped")
fire_art_hook(
config, 'report_test',
test_location=location, test_name=name,
test_xfail=xfail, test_when=report.when,
test_outcome=report.outcome,
test_phase_duration=report.duration)
fire_art_hook(config, 'build_report')
@pytest.hookimpl(hookwrapper=True)
def pytest_unconfigure(config):
yield
shutdown(config)
lock = RLock()
def shutdown(config):
app = find_appliance(config, require=False)
if app is not None:
with lock:
proc = config._art_proc
if proc and proc.returncode is None:
if not store.slave_manager:
write_line('collecting artifacts')
fire_art_hook(config, 'finish_session')
if not store.slave_manager:
config._art_client.terminate()
proc.wait()
|
michael-dev2rights/ansible
|
refs/heads/ansible-d2r
|
lib/ansible/modules/network/aci/aci_bd.py
|
22
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_bd
short_description: Manage Bridge Domains (BD) on Cisco ACI Fabrics (fv:BD)
description:
- Manages Bridge Domains (BD) on Cisco ACI Fabrics.
- More information from the internal APIC class
I(fv:BD) at U(https://developer.cisco.com/media/mim-ref/MO-fvBD.html).
author:
- Swetha Chunduri (@schunduri)
- Dag Wieers (@dagwieers)
- Jacob McGill (@jmcgill298)
requirements:
- ACI Fabric 1.0(3f)+
version_added: '2.4'
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
options:
arp_flooding:
description:
- Determines if the Bridge Domain should flood ARP traffic.
- The APIC defaults new Bridge Domains to C(no).
choices: [ no, yes ]
default: no
bd:
description:
- The name of the Bridge Domain.
aliases: [ bd_name, name ]
bd_type:
description:
- The type of traffic on the Bridge Domain.
- The APIC defaults new Bridge Domains to C(ethernet).
choices: [ ethernet, fc ]
default: ethernet
description:
description:
- Description for the Bridge Domain.
enable_multicast:
description:
    - Determines if PIM is enabled.
- The APIC defaults new Bridge Domains to C(no).
choices: [ no, yes ]
default: no
enable_routing:
description:
- Determines if IP forwarding should be allowed.
- The APIC defaults new Bridge Domains to C(yes).
choices: [ no, yes ]
default: yes
endpoint_clear:
description:
- Clears all End Points in all Leaves when C(yes).
- The APIC defaults new Bridge Domains to C(no).
- The value is not reset to disabled once End Points have been cleared; that requires a second task.
choices: [ no, yes ]
default: no
endpoint_move_detect:
description:
- Determines if GARP should be enabled to detect when End Points move.
- The APIC defaults new Bridge Domains to C(garp).
choices: [ default, garp ]
default: garp
endpoint_retention_action:
description:
- Determines if the Bridge Domain should inherit or resolve the End Point Retention Policy.
    - The APIC defaults new Bridge Domain End Point Retention Policies to C(resolve).
choices: [ inherit, resolve ]
default: resolve
endpoint_retention_policy:
description:
- The name of the End Point Retention Policy the Bridge Domain should use when
overriding the default End Point Retention Policy.
igmp_snoop_policy:
description:
- The name of the IGMP Snooping Policy the Bridge Domain should use when
overriding the default IGMP Snooping Policy.
ip_learning:
description:
- Determines if the Bridge Domain should learn End Point IPs.
- The APIC defaults new Bridge Domains to C(yes).
choices: [ no, yes ]
ipv6_nd_policy:
description:
- The name of the IPv6 Neighbor Discovery Policy the Bridge Domain should use when
      overriding the default IPv6 ND Policy.
l2_unknown_unicast:
description:
- Determines what forwarding method to use for unknown l2 destinations.
- The APIC defaults new Bridge domains to C(proxy).
choices: [ proxy, flood ]
default: proxy
l3_unknown_multicast:
description:
- Determines the forwarding method to use for unknown multicast destinations.
    - The APIC defaults new Bridge Domains to C(flood).
choices: [ flood, opt-flood ]
default: flood
limit_ip_learn:
description:
- Determines if the BD should limit IP learning to only subnets owned by the Bridge Domain.
- The APIC defaults new Bridge Domains to C(yes).
choices: [ no, yes ]
default: yes
multi_dest:
description:
- Determines the forwarding method for L2 multicast, broadcast, and link layer traffic.
- The APIC defaults new Bridge Domains to C(bd-flood).
choices: [ bd-flood, drop, encap-flood ]
default: bd-flood
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
tenant:
description:
- The name of the Tenant.
aliases: [ tenant_name ]
vrf:
description:
- The name of the VRF.
aliases: [ vrf_name ]
'''
EXAMPLES = r'''
- name: Add Bridge Domain
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: false
state: present
tenant: prod
bd: web_servers
vrf: prod_vrf
- name: Add an FC Bridge Domain
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: false
state: present
tenant: prod
bd: storage
bd_type: fc
vrf: fc_vrf
enable_routing: no
- name: Modify a Bridge Domain
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: true
state: present
tenant: prod
bd: web_servers
arp_flooding: yes
l2_unknown_unicast: flood
- name: Query All Bridge Domains
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: true
state: query
- name: Query a Bridge Domain
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: true
state: query
tenant: prod
bd: web_servers
- name: Delete a Bridge Domain
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: true
state: absent
tenant: prod
bd: web_servers
'''
RETURN = r''' # '''
from ansible.module_utils.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec
argument_spec.update(
arp_flooding=dict(choices=['no', 'yes']),
bd=dict(type='str', aliases=['bd_name', 'name']),
bd_type=dict(type='str', choices=['ethernet', 'fc']),
description=dict(type='str'),
enable_multicast=dict(type='str', choices=['no', 'yes']),
enable_routing=dict(type='str', choices=['no', 'yes']),
endpoint_clear=dict(type='str', choices=['no', 'yes']),
endpoint_move_detect=dict(type='str', choices=['default', 'garp']),
endpoint_retention_action=dict(type='str', choices=['inherit', 'resolve']),
endpoint_retention_policy=dict(type='str'),
igmp_snoop_policy=dict(type='str'),
ip_learning=dict(type='str', choices=['no', 'yes']),
ipv6_nd_policy=dict(type='str'),
l2_unknown_unicast=dict(choices=['proxy', 'flood']),
l3_unknown_multicast=dict(choices=['flood', 'opt-flood']),
limit_ip_learn=dict(type='str', choices=['no', 'yes']),
multi_dest=dict(choices=['bd-flood', 'drop', 'encap-flood']),
state=dict(choices=['absent', 'present', 'query'], type='str', default='present'),
tenant=dict(type='str', aliases=['tenant_name']),
vrf=dict(type='str', aliases=['vrf_name']),
gateway_ip=dict(type='str', removed_in_version='2.4'), # Deprecated starting from v2.4
method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'), # Deprecated starting from v2.6
scope=dict(type='str', removed_in_version='2.4'), # Deprecated starting from v2.4
subnet_mask=dict(type='str', removed_in_version='2.4'), # Deprecated starting from v2.4
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['bd', 'tenant']],
['state', 'present', ['bd', 'tenant']],
],
)
arp_flooding = module.params['arp_flooding']
bd = module.params['bd']
bd_type = module.params['bd_type']
if bd_type == 'ethernet':
# ethernet type is represented as regular, but that is not clear to the users
bd_type = 'regular'
description = module.params['description']
enable_multicast = module.params['enable_multicast']
enable_routing = module.params['enable_routing']
endpoint_clear = module.params['endpoint_clear']
endpoint_move_detect = module.params['endpoint_move_detect']
if endpoint_move_detect == 'default':
# the ACI default setting is an empty string, but that is not a good input value
endpoint_move_detect = ''
endpoint_retention_action = module.params['endpoint_retention_action']
endpoint_retention_policy = module.params['endpoint_retention_policy']
igmp_snoop_policy = module.params['igmp_snoop_policy']
ip_learning = module.params['ip_learning']
ipv6_nd_policy = module.params['ipv6_nd_policy']
l2_unknown_unicast = module.params['l2_unknown_unicast']
l3_unknown_multicast = module.params['l3_unknown_multicast']
limit_ip_learn = module.params['limit_ip_learn']
multi_dest = module.params['multi_dest']
state = module.params['state']
vrf = module.params['vrf']
# Give warning when fvSubnet parameters are passed as those have been moved to the aci_subnet module
if module.params['gateway_ip'] or module.params['subnet_mask'] or module.params['scope']:
module._warnings = ["The support for managing Subnets has been moved to its own module, aci_subnet. \
            The new module still supports 'gateway_ip' and 'subnet_mask' along with more features"]
aci = ACIModule(module)
aci.construct_url(root_class="tenant", subclass_1="bd", child_classes=['fvRsCtx', 'fvRsIgmpsn', 'fvRsBDToNdP', 'fvRsBdToEpRet'])
aci.get_existing()
if state == 'present':
# Filter out module params with null values
aci.payload(
aci_class='fvBD',
class_config=dict(
arpFlood=arp_flooding,
descr=description,
epClear=endpoint_clear,
epMoveDetectMode=endpoint_move_detect,
ipLearning=ip_learning,
limitIpLearnToSubnets=limit_ip_learn,
mcastAllow=enable_multicast,
multiDstPktAct=multi_dest,
name=bd,
type=bd_type,
unicastRoute=enable_routing,
unkMacUcastAct=l2_unknown_unicast,
unkMcastAct=l3_unknown_multicast,
),
child_configs=[
{'fvRsCtx': {'attributes': {'tnFvCtxName': vrf}}},
{'fvRsIgmpsn': {'attributes': {'tnIgmpSnoopPolName': igmp_snoop_policy}}},
{'fvRsBDToNdP': {'attributes': {'tnNdIfPolName': ipv6_nd_policy}}},
{'fvRsBdToEpRet': {'attributes': {'resolveAct': endpoint_retention_action, 'tnFvEpRetPolName': endpoint_retention_policy}}},
],
)
# generate config diff which will be used as POST request body
aci.get_diff(aci_class='fvBD')
# submit changes if module not in check_mode and the proposed is different than existing
aci.post_config()
elif state == 'absent':
aci.delete_config()
module.exit_json(**aci.result)
if __name__ == "__main__":
main()
|
Gebesa-Dev/Addons-gebesa
|
refs/heads/9.0
|
sales_order_dealer/models/mrp_production.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2018, Esther Cisneros
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import fields, models
class MrpProduction(models.Model):
_name = 'mrp.production'
_inherit = 'mrp.production'
dealer_id = fields.Many2one('res.partner', string="Comerciante",)
|
elec-otago/agbase
|
refs/heads/master
|
testing/selenium-tests/test_user_change_farm_permission.py
|
1
|
from end_to_end_test import EndToEndTest
import test_config as config
#=============================================================
# This tests that an account with permission to manage a farm
# can give another account management permissions.
# Requires test farm, test farmer, and test farm manager to
# be created first.
#=============================================================
class UserChangeFarmPermissionTest(EndToEndTest):
def test(self):
print "\nTEST user change farm permission"
# Login as test farmer
self.login_user(config.test_farmer_email, config.test_password)
# Navigate to the test farm page
self.navigation.click_farms_dropdown_farm(config.test_farm)
# Give farm manager ability to add new data
self.farm_page.create_farm_permission(
config.test_manager_first + " " + config.test_manager_last,
config.manager_permission)
has_permission = self.farm_page.is_member_in_table(
config.test_manager_first,
config.test_manager_last,
config.manager_permission)
self.assertTrue(has_permission)
|
USStateDept/FPA_Core
|
refs/heads/master
|
openspending/tests/model/test_country.py
|
2
|
import json
import urllib2
from flask import url_for, current_app
from openspending.core import db
from openspending.model.country import Country
from openspending.tests.base import ControllerTestCase
from openspending.command.geometry import create as createcountries
class TestCountryModel(ControllerTestCase):
def setUp(self):
super(TestCountryModel, self).setUp()
createcountries()
def tearDown(self):
pass
def test_all_countries(self):
result = Country.get_all_json()
assert len(result['data']) == 198
assert len(result['data'][0]['regions']) == 8
def test_properties_regions(self):
tempobj = Country.by_gid(1)
assert len(tempobj.regions.keys()) == 10
assert tempobj.label == "Aruba"
    def test_properties_sovereignty(self):
tempobj = Country.by_gid(1)
assert tempobj.sovereignty == "Netherlands"
|
fzadow/CATMAID
|
refs/heads/master
|
scripts/database/backup-database.py
|
4
|
#!/usr/bin/env python
# This is a small helper script to back up a CATMAID
# database.
# For example, I'm calling this script from cron with the following
# crontab entry, which will cause a backup to happen every 8 hours at
# 20 past the hour:
#
# 20 0-23/8 * * * CATMAID_CONFIGURATION=$HOME/.catmaid-db.whatever $HOME/catmaid/scripts/backup-database.py /mnt/catmaid-backups/
# You will need to create a .pgpass file so that your password can be
# found.
# You may need to install psycopg2, e.g. with:
# sudo apt-get install python-psycopg2
# Requires the file .catmaid-db to be present in your
# home directory, with the following format:
#
# host: localhost
# database: catmaid
# username: catmaid_user
# password: password_of_your_catmaid_user
import sys
import os
from common import db_database, db_username, db_password
from subprocess import check_call
import getpass
from psycopg2 import IntegrityError
from datetime import datetime
if len(sys.argv) != 2:
print >> sys.stderr, "Usage: %s <BACKUP-DIRECTORY>" % (sys.argv[0],)
sys.exit(1)
destination_directory = sys.argv[1]
output_filename = os.path.join(destination_directory,
datetime.now().strftime('%Y-%m-%dT%H-%M-%S'))
# You must specify your password in ~/.pgpass, as described here:
# http://www.postgresql.org/docs/current/static/libpq-pgpass.html
dump_command = ['pg_dump',
'--clean',
'-U',
db_username,
'--no-password',
db_database]
with open(output_filename, "w") as fp:
check_call(dump_command, stdout=fp)
check_call(['bzip2', output_filename])
|
alanmcruickshank/superset-dev
|
refs/heads/master
|
superset/utils.py
|
1
|
"""Utility functions used across Superset"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import object
from datetime import date, datetime, time, timedelta
import decimal
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate
import functools
import json
import logging
import os
import signal
import smtplib
import sys
import uuid
import zlib
import celery
from dateutil.parser import parse
from flask import flash, Markup, redirect, render_template, request, url_for
from flask_appbuilder._compat import as_unicode
from flask_appbuilder.const import (
FLAMSG_ERR_SEC_ACCESS_DENIED,
LOGMSG_ERR_SEC_ACCESS_DENIED,
PERMISSION_PREFIX,
)
from flask_babel import gettext as __
from flask_cache import Cache
import markdown as md
import numpy
import parsedatetime
from past.builtins import basestring
from pydruid.utils.having import Having
import pytz
import sqlalchemy as sa
from sqlalchemy import event, exc, select
from sqlalchemy.types import TEXT, TypeDecorator
logging.getLogger('MARKDOWN').setLevel(logging.INFO)
PY3K = sys.version_info >= (3, 0)
EPOCH = datetime(1970, 1, 1)
DTTM_ALIAS = '__timestamp'
class SupersetException(Exception):
pass
class SupersetTimeoutException(SupersetException):
pass
class SupersetSecurityException(SupersetException):
pass
class MetricPermException(SupersetException):
pass
class NoDataException(SupersetException):
pass
class SupersetTemplateException(SupersetException):
pass
def can_access(sm, permission_name, view_name, user):
"""Protecting from has_access failing from missing perms/view"""
if user.is_anonymous():
return sm.is_item_public(permission_name, view_name)
return sm._has_view_access(user, permission_name, view_name)
def flasher(msg, severity=None):
"""Flask's flash if available, logging call if not"""
try:
flash(msg, severity)
except RuntimeError:
if severity == 'danger':
logging.error(msg)
else:
logging.info(msg)
class memoized(object): # noqa
"""Decorator that caches a function's return value each time it is called
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
value = self.func(*args)
self.cache[args] = value
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
def __get__(self, obj, objtype):
"""Support instance methods."""
return functools.partial(self.__call__, obj)
def js_string_to_python(item):
return None if item in ('null', 'undefined') else item
def string_to_num(s):
"""Converts a string to an int/float
Returns ``None`` if it can't be converted
>>> string_to_num('5')
5
>>> string_to_num('5.2')
5.2
>>> string_to_num(10)
10
>>> string_to_num(10.1)
10.1
>>> string_to_num('this is not a string') is None
True
"""
if isinstance(s, (int, float)):
return s
if s.isdigit():
return int(s)
try:
return float(s)
except ValueError:
return None
class DimSelector(Having):
def __init__(self, **args):
# Just a hack to prevent any exceptions
Having.__init__(self, type='equalTo', aggregation=None, value=None)
self.having = {
'having': {
'type': 'dimSelector',
'dimension': args['dimension'],
'value': args['value'],
},
}
def list_minus(l, minus):
"""Returns l without what is in minus
>>> list_minus([1, 2, 3], [2])
[1, 3]
"""
return [o for o in l if o not in minus]
def parse_human_datetime(s):
"""
Returns ``datetime.datetime`` from human readable strings
>>> from datetime import date, timedelta
>>> from dateutil.relativedelta import relativedelta
>>> parse_human_datetime('2015-04-03')
datetime.datetime(2015, 4, 3, 0, 0)
>>> parse_human_datetime('2/3/1969')
datetime.datetime(1969, 2, 3, 0, 0)
>>> parse_human_datetime('now') <= datetime.now()
True
>>> parse_human_datetime('yesterday') <= datetime.now()
True
>>> date.today() - timedelta(1) == parse_human_datetime('yesterday').date()
True
>>> year_ago_1 = parse_human_datetime('one year ago').date()
>>> year_ago_2 = (datetime.now() - relativedelta(years=1) ).date()
>>> year_ago_1 == year_ago_2
True
"""
if not s:
return None
try:
dttm = parse(s)
except Exception:
try:
cal = parsedatetime.Calendar()
parsed_dttm, parsed_flags = cal.parseDT(s)
# when time is not extracted, we 'reset to midnight'
if parsed_flags & 2 == 0:
parsed_dttm = parsed_dttm.replace(hour=0, minute=0, second=0)
dttm = dttm_from_timtuple(parsed_dttm.utctimetuple())
except Exception as e:
logging.exception(e)
raise ValueError("Couldn't parse date string [{}]".format(s))
return dttm
def dttm_from_timtuple(d):
return datetime(
d.tm_year, d.tm_mon, d.tm_mday, d.tm_hour, d.tm_min, d.tm_sec)
def parse_human_timedelta(s):
"""
Returns ``datetime.datetime`` from natural language time deltas
>>> parse_human_datetime('now') <= datetime.now()
True
"""
cal = parsedatetime.Calendar()
dttm = dttm_from_timtuple(datetime.now().timetuple())
d = cal.parse(s, dttm)[0]
d = datetime(d.tm_year, d.tm_mon, d.tm_mday, d.tm_hour, d.tm_min, d.tm_sec)
return d - dttm
class JSONEncodedDict(TypeDecorator):
"""Represents an immutable structure as a json-encoded string."""
impl = TEXT
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
def datetime_f(dttm):
"""Formats datetime to take less room when it is recent"""
if dttm:
dttm = dttm.isoformat()
now_iso = datetime.now().isoformat()
if now_iso[:10] == dttm[:10]:
dttm = dttm[11:]
elif now_iso[:4] == dttm[:4]:
dttm = dttm[5:]
return '<nobr>{}</nobr>'.format(dttm)
def base_json_conv(obj):
if isinstance(obj, numpy.int64):
return int(obj)
elif isinstance(obj, numpy.bool_):
return bool(obj)
elif isinstance(obj, set):
return list(obj)
elif isinstance(obj, decimal.Decimal):
return float(obj)
elif isinstance(obj, uuid.UUID):
return str(obj)
elif isinstance(obj, timedelta):
return str(obj)
def json_iso_dttm_ser(obj):
"""
json serializer that deals with dates
>>> dttm = datetime(1970, 1, 1)
>>> json.dumps({'dttm': dttm}, default=json_iso_dttm_ser)
'{"dttm": "1970-01-01T00:00:00"}'
"""
val = base_json_conv(obj)
if val is not None:
return val
if isinstance(obj, datetime):
obj = obj.isoformat()
elif isinstance(obj, date):
obj = obj.isoformat()
elif isinstance(obj, time):
obj = obj.isoformat()
else:
raise TypeError(
'Unserializable object {} of type {}'.format(obj, type(obj)))
return obj
def datetime_to_epoch(dttm):
if dttm.tzinfo:
epoch_with_tz = pytz.utc.localize(EPOCH)
return (dttm - epoch_with_tz).total_seconds() * 1000
return (dttm - EPOCH).total_seconds() * 1000
def now_as_float():
return datetime_to_epoch(datetime.utcnow())
def json_int_dttm_ser(obj):
"""json serializer that deals with dates"""
val = base_json_conv(obj)
if val is not None:
return val
if isinstance(obj, datetime):
obj = datetime_to_epoch(obj)
elif isinstance(obj, date):
obj = (obj - EPOCH.date()).total_seconds() * 1000
else:
raise TypeError(
'Unserializable object {} of type {}'.format(obj, type(obj)))
return obj
def json_dumps_w_dates(payload):
return json.dumps(payload, default=json_int_dttm_ser)
def error_msg_from_exception(e):
"""Translate exception into error message
    Databases have different ways to handle exceptions. This function attempts
to make sense of the exception object and construct a human readable
sentence.
TODO(bkyryliuk): parse the Presto error message from the connection
created via create_engine.
engine = create_engine('presto://localhost:3506/silver') -
gives an e.message as the str(dict)
presto.connect('localhost', port=3506, catalog='silver') - as a dict.
The latter version is parsed correctly by this function.
"""
msg = ''
if hasattr(e, 'message'):
if isinstance(e.message, dict):
msg = e.message.get('message')
elif e.message:
msg = '{}'.format(e.message)
return msg or '{}'.format(e)
def markdown(s, markup_wrap=False):
s = md.markdown(s or '', [
'markdown.extensions.tables',
'markdown.extensions.fenced_code',
'markdown.extensions.codehilite',
])
if markup_wrap:
s = Markup(s)
return s
def readfile(file_path):
with open(file_path) as f:
content = f.read()
return content
def generic_find_constraint_name(table, columns, referenced, db):
"""Utility to find a constraint name in alembic migrations"""
t = sa.Table(table, db.metadata, autoload=True, autoload_with=db.engine)
for fk in t.foreign_key_constraints:
if (fk.referred_table.name == referenced and
set(fk.column_keys) == columns):
return fk.name
def get_datasource_full_name(database_name, datasource_name, schema=None):
if not schema:
return '[{}].[{}]'.format(database_name, datasource_name)
return '[{}].[{}].[{}]'.format(database_name, schema, datasource_name)
def get_schema_perm(database, schema):
if schema:
return '[{}].[{}]'.format(database, schema)
def validate_json(obj):
if obj:
try:
json.loads(obj)
except Exception:
raise SupersetException('JSON is not valid')
def table_has_constraint(table, name, db):
"""Utility to find a constraint name in alembic migrations"""
t = sa.Table(table, db.metadata, autoload=True, autoload_with=db.engine)
for c in t.constraints:
if c.name == name:
return True
return False
class timeout(object):
"""
To be used in a ``with`` block and timeout its content.
"""
def __init__(self, seconds=1, error_message='Timeout'):
self.seconds = seconds
self.error_message = error_message
def handle_timeout(self, signum, frame):
logging.error('Process timed out')
raise SupersetTimeoutException(self.error_message)
def __enter__(self):
try:
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
except ValueError as e:
logging.warning("timeout can't be used in the current context")
logging.exception(e)
def __exit__(self, type, value, traceback):
try:
signal.alarm(0)
except ValueError as e:
logging.warning("timeout can't be used in the current context")
logging.exception(e)
def pessimistic_connection_handling(some_engine):
@event.listens_for(some_engine, 'engine_connect')
def ping_connection(connection, branch):
if branch:
# 'branch' refers to a sub-connection of a connection,
# we don't want to bother pinging on these.
return
# turn off 'close with result'. This flag is only used with
# 'connectionless' execution, otherwise will be False in any case
save_should_close_with_result = connection.should_close_with_result
connection.should_close_with_result = False
try:
# run a SELECT 1. use a core select() so that
# the SELECT of a scalar value without a table is
# appropriately formatted for the backend
connection.scalar(select([1]))
except exc.DBAPIError as err:
# catch SQLAlchemy's DBAPIError, which is a wrapper
# for the DBAPI's exception. It includes a .connection_invalidated
# attribute which specifies if this connection is a 'disconnect'
# condition, which is based on inspection of the original exception
# by the dialect in use.
if err.connection_invalidated:
# run the same SELECT again - the connection will re-validate
# itself and establish a new connection. The disconnect detection
# here also causes the whole connection pool to be invalidated
# so that all stale connections are discarded.
connection.scalar(select([1]))
else:
raise
finally:
# restore 'close with result'
connection.should_close_with_result = save_should_close_with_result
class QueryStatus(object):
"""Enum-type class for query statuses"""
STOPPED = 'stopped'
FAILED = 'failed'
PENDING = 'pending'
RUNNING = 'running'
SCHEDULED = 'scheduled'
SUCCESS = 'success'
TIMED_OUT = 'timed_out'
def notify_user_about_perm_udate(
granter, user, role, datasource, tpl_name, config):
msg = render_template(tpl_name, granter=granter, user=user, role=role,
datasource=datasource)
logging.info(msg)
subject = __('[Superset] Access to the datasource %(name)s was granted',
name=datasource.full_name)
send_email_smtp(user.email, subject, msg, config, bcc=granter.email,
dryrun=not config.get('EMAIL_NOTIFICATIONS'))
def send_email_smtp(to, subject, html_content, config, files=None,
dryrun=False, cc=None, bcc=None, mime_subtype='mixed'):
"""
Send an email with html content, eg:
send_email_smtp(
'test@example.com', 'foo', '<b>Foo</b> bar',['/dev/null'], dryrun=True)
"""
smtp_mail_from = config.get('SMTP_MAIL_FROM')
to = get_email_address_list(to)
msg = MIMEMultipart(mime_subtype)
msg['Subject'] = subject
msg['From'] = smtp_mail_from
msg['To'] = ', '.join(to)
recipients = to
if cc:
cc = get_email_address_list(cc)
msg['CC'] = ', '.join(cc)
recipients = recipients + cc
if bcc:
# don't add bcc in header
bcc = get_email_address_list(bcc)
recipients = recipients + bcc
msg['Date'] = formatdate(localtime=True)
mime_text = MIMEText(html_content, 'html')
msg.attach(mime_text)
for fname in files or []:
basename = os.path.basename(fname)
with open(fname, 'rb') as f:
msg.attach(
MIMEApplication(
f.read(),
Content_Disposition="attachment; filename='%s'" % basename,
Name=basename))
send_MIME_email(smtp_mail_from, recipients, msg, config, dryrun=dryrun)
def send_MIME_email(e_from, e_to, mime_msg, config, dryrun=False):
SMTP_HOST = config.get('SMTP_HOST')
SMTP_PORT = config.get('SMTP_PORT')
SMTP_USER = config.get('SMTP_USER')
SMTP_PASSWORD = config.get('SMTP_PASSWORD')
SMTP_STARTTLS = config.get('SMTP_STARTTLS')
SMTP_SSL = config.get('SMTP_SSL')
if not dryrun:
s = smtplib.SMTP_SSL(SMTP_HOST, SMTP_PORT) if SMTP_SSL else \
smtplib.SMTP(SMTP_HOST, SMTP_PORT)
if SMTP_STARTTLS:
s.starttls()
if SMTP_USER and SMTP_PASSWORD:
s.login(SMTP_USER, SMTP_PASSWORD)
logging.info('Sent an alert email to ' + str(e_to))
s.sendmail(e_from, e_to, mime_msg.as_string())
s.quit()
else:
logging.info('Dryrun enabled, email notification content is below:')
logging.info(mime_msg.as_string())
def get_email_address_list(address_string):
if isinstance(address_string, basestring):
if ',' in address_string:
address_string = address_string.split(',')
elif ';' in address_string:
address_string = address_string.split(';')
else:
address_string = [address_string]
return address_string
def has_access(f):
"""
Use this decorator to enable granular security permissions to your
methods. Permissions will be associated to a role, and roles are
associated to users.
    By default the permission's name is the method's name.
Forked from the flask_appbuilder.security.decorators
TODO(bkyryliuk): contribute it back to FAB
"""
if hasattr(f, '_permission_name'):
permission_str = f._permission_name
else:
permission_str = f.__name__
def wraps(self, *args, **kwargs):
permission_str = PERMISSION_PREFIX + f._permission_name
if self.appbuilder.sm.has_access(permission_str,
self.__class__.__name__):
return f(self, *args, **kwargs)
else:
logging.warning(
LOGMSG_ERR_SEC_ACCESS_DENIED.format(permission_str,
self.__class__.__name__))
flash(as_unicode(FLAMSG_ERR_SEC_ACCESS_DENIED), 'danger')
# adds next arg to forward to the original path once user is logged in.
return redirect(
url_for(
self.appbuilder.sm.auth_view.__class__.__name__ + '.login',
next=request.path))
f._permission_name = permission_str
return functools.update_wrapper(wraps, f)
def choicify(values):
"""Takes an iterable and makes an iterable of tuples with it"""
return [(v, v) for v in values]
def setup_cache(app, cache_config):
"""Setup the flask-cache on a flask app"""
if cache_config and cache_config.get('CACHE_TYPE') != 'null':
return Cache(app, config=cache_config)
def zlib_compress(data):
"""
Compress things in a py2/3 safe fashion
>>> json_str = '{"test": 1}'
>>> blob = zlib_compress(json_str)
"""
if PY3K:
if isinstance(data, str):
return zlib.compress(bytes(data, 'utf-8'))
return zlib.compress(data)
return zlib.compress(data)
def zlib_decompress_to_string(blob):
"""
Decompress things to a string in a py2/3 safe fashion
>>> json_str = '{"test": 1}'
>>> blob = zlib_compress(json_str)
>>> got_str = zlib_decompress_to_string(blob)
>>> got_str == json_str
True
"""
if PY3K:
if isinstance(blob, bytes):
decompressed = zlib.decompress(blob)
else:
decompressed = zlib.decompress(bytes(blob, 'utf-8'))
return decompressed.decode('utf-8')
return zlib.decompress(blob)
_celery_app = None
def get_celery_app(config):
global _celery_app
if _celery_app:
return _celery_app
_celery_app = celery.Celery(config_source=config.get('CELERY_CONFIG'))
return _celery_app
def merge_extra_filters(form_data):
# extra_filters are temporary/contextual filters that are external
# to the slice definition. We use those for dynamic interactive
# filters like the ones emitted by the "Filter Box" visualization
if 'extra_filters' in form_data:
        # __from and __to are special extra_filters that target time
# boundaries. The rest of extra_filters are simple
# [column_name in list_of_values]. `__` prefix is there to avoid
# potential conflicts with column that would be named `from` or `to`
if 'filters' not in form_data:
form_data['filters'] = []
date_options = {
'__from': 'since',
'__to': 'until',
'__time_col': 'granularity_sqla',
'__time_grain': 'time_grain_sqla',
'__time_origin': 'druid_time_origin',
'__granularity': 'granularity',
}
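        # Illustrative input/output (editor's sketch; the column name is made up):
        #   form_data = {'filters': [],
        #                'extra_filters': [
        #                    {'col': '__from', 'op': 'in', 'val': '7 days ago'},
        #                    {'col': 'country', 'op': 'in', 'val': ['US']}]}
        # After merging, form_data['since'] == '7 days ago', the country filter
        # is appended to form_data['filters'], and 'extra_filters' is removed.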
# Grab list of existing filters 'keyed' on the column and operator
def get_filter_key(f):
return f['col'] + '__' + f['op']
existing_filters = {}
for existing in form_data['filters']:
existing_filters[get_filter_key(existing)] = existing['val']
for filtr in form_data['extra_filters']:
# Pull out time filters/options and merge into form data
if date_options.get(filtr['col']):
if filtr.get('val'):
form_data[date_options[filtr['col']]] = filtr['val']
elif filtr['val'] and len(filtr['val']):
# Merge column filters
filter_key = get_filter_key(filtr)
if filter_key in existing_filters:
# Check if the filter already exists
if isinstance(filtr['val'], list):
if isinstance(existing_filters[filter_key], list):
# Add filters for unequal lists
# order doesn't matter
if (
sorted(existing_filters[filter_key]) !=
sorted(filtr['val'])
):
form_data['filters'] += [filtr]
else:
form_data['filters'] += [filtr]
else:
# Do not add filter if same value already exists
if filtr['val'] != existing_filters[filter_key]:
form_data['filters'] += [filtr]
else:
# Filter not found, add it
form_data['filters'] += [filtr]
# Remove extra filters from the form data since no longer needed
del form_data['extra_filters']
|
elba7r/lite-system
|
refs/heads/master
|
erpnext/schools/doctype/student_log/student_log.py
|
54
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class StudentLog(Document):
pass
|
andrea4ever/l10n-italy
|
refs/heads/8.0
|
__unported__/l10n_it_DDT_webkit/__init__.py
|
3
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013
# Associazione OpenERP Italia (<http://www.openerp-italia.org>)
#
# Copyright (c) 2013 Agile Business Group (http://www.agilebg.com)
# @author Lorenzo Battistini
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import report
|
balloob/home-assistant
|
refs/heads/dev
|
homeassistant/components/zoneminder/camera.py
|
27
|
"""Support for ZoneMinder camera streaming."""
import logging
from homeassistant.components.mjpeg.camera import (
CONF_MJPEG_URL,
CONF_STILL_IMAGE_URL,
MjpegCamera,
filter_urllib3_logging,
)
from homeassistant.const import CONF_NAME, CONF_VERIFY_SSL
from . import DOMAIN as ZONEMINDER_DOMAIN
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the ZoneMinder cameras."""
filter_urllib3_logging()
cameras = []
for zm_client in hass.data[ZONEMINDER_DOMAIN].values():
monitors = zm_client.get_monitors()
if not monitors:
_LOGGER.warning("Could not fetch monitors from ZoneMinder host: %s")
return
for monitor in monitors:
_LOGGER.info("Initializing camera %s", monitor.id)
cameras.append(ZoneMinderCamera(monitor, zm_client.verify_ssl))
add_entities(cameras)
class ZoneMinderCamera(MjpegCamera):
"""Representation of a ZoneMinder Monitor Stream."""
def __init__(self, monitor, verify_ssl):
"""Initialize as a subclass of MjpegCamera."""
device_info = {
CONF_NAME: monitor.name,
CONF_MJPEG_URL: monitor.mjpeg_image_url,
CONF_STILL_IMAGE_URL: monitor.still_image_url,
CONF_VERIFY_SSL: verify_ssl,
}
super().__init__(device_info)
self._is_recording = None
self._is_available = None
self._monitor = monitor
@property
def should_poll(self):
"""Update the recording state periodically."""
return True
def update(self):
"""Update our recording state from the ZM API."""
_LOGGER.debug("Updating camera state for monitor %i", self._monitor.id)
self._is_recording = self._monitor.is_recording
self._is_available = self._monitor.is_available
@property
def is_recording(self):
"""Return whether the monitor is in alarm mode."""
return self._is_recording
@property
def available(self):
"""Return True if entity is available."""
return self._is_available
|
dsemi/Flexget
|
refs/heads/develop
|
flexget/plugins/__init__.py
|
44
|
"""Standard plugin package."""
|
nickdechant/teletraan
|
refs/heads/master
|
deploy-agent/deployd/common/executor.py
|
6
|
# Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import subprocess
import logging
import os
import signal
import stat
import time
import traceback
from deployd.common.types import DeployReport, PingStatus, PRE_STAGE_STEPS, AgentStatus
log = logging.getLogger(__name__)
class Executor(object):
def __init__(self, callback=None, config=None):
self._ping_server = callback
if not config:
return
self._config = config
self.update_configs(config)
def update_configs(self, config):
self.LOG_FILENAME = config.get_subprocess_log_name()
self.MAX_RUNNING_TIME = config.get_subprocess_running_timeout()
self.MIN_RUNNING_TIME = config.get_agent_ping_interval()
self.MAX_RETRY = config.get_subproces_max_retry()
self.MAX_TAIL_BYTES = config.get_subprocess_max_log_bytes()
self.PROCESS_POLL_INTERVAL = config.get_subprocess_poll_interval()
self.BACK_OFF = config.get_backoff_factor()
self.MAX_SLEEP_INTERVAL = config.get_subprocess_max_sleep_interval()
self._config = config
log.debug('Executor configs have been updated: '
'PING_INTERVAL={}, TIME_OUT={}, MAX_RETRY={}'.format(self.MIN_RUNNING_TIME,
self.MAX_RUNNING_TIME,
self.MAX_RETRY))
def get_subprocess_output(self, fd, file_pos):
curr_pos = fd.tell()
fd.seek(file_pos, 0)
output = fd.read()
fd.seek(curr_pos, 0)
return output[-(self.MAX_TAIL_BYTES+1):-1]
def run_cmd(self, cmd, **kw):
if not isinstance(cmd, list):
cmd = cmd.split(' ')
cmd_str = ' '.join(cmd)
log.info('Running: {} with {} retries.'.format(cmd_str, self.MAX_RETRY))
deploy_report = DeployReport(status_code=AgentStatus.UNKNOWN,
error_code=0,
retry_times=0)
process_interval = self.PROCESS_POLL_INTERVAL
start = datetime.datetime.now()
init_start = datetime.datetime.now()
total_retry = 0
with open(self.LOG_FILENAME, 'a+') as fdout:
while total_retry < self.MAX_RETRY:
try:
fdout.seek(0, 2)
file_pos = fdout.tell()
process = subprocess.Popen(cmd, stdout=fdout, stderr=fdout,
preexec_fn=os.setsid, **kw)
while process.poll() is None:
start, deploy_report = \
self.ping_server_if_possible(start, cmd, deploy_report)
"""
terminate case 1:
the server changed the deploy goal, return to the agent to handle next
deploy step
"""
if deploy_report.status_code == AgentStatus.ABORTED_BY_SERVER:
Executor._kill_process(process)
return deploy_report
"""
terminate case 2:
the script gets timeout error, return to the agent to report to the server
"""
if (datetime.datetime.now() - init_start).seconds >= self.MAX_RUNNING_TIME:
Executor._kill_process(process)
# the best way to get output is to tail the log
deploy_report.output_msg = self.get_subprocess_output(fd=fdout,
file_pos=file_pos)
log.info("Exceed max running time: {}.".format(self.MAX_RUNNING_TIME))
log.info("Output from subprocess: {}".format(deploy_report.output_msg))
deploy_report.status_code = AgentStatus.SCRIPT_TIMEOUT
deploy_report.error_code = 1
return deploy_report
# sleep some seconds before next poll
sleep_time = self._get_sleep_interval(start, self.PROCESS_POLL_INTERVAL)
time.sleep(sleep_time)
# finish executing sub process
deploy_report.error_code = process.returncode
deploy_report.output_msg = self.get_subprocess_output(fd=fdout,
file_pos=file_pos)
if process.returncode == 0:
log.info('Running: {} succeeded.'.format(cmd_str))
deploy_report.status_code = AgentStatus.SUCCEEDED
return deploy_report
except Exception:
error_msg = traceback.format_exc()
deploy_report.error_code = 1
deploy_report.output_msg = error_msg
log.error(error_msg)
# fails when:
# subprocess execution fails
# popen throws
deploy_report.status_code = AgentStatus.SCRIPT_FAILED
deploy_report.retry_times += 1
total_retry += 1
"""
Terminate case 3:
Too many failed retries, return to the agent and report to the server.
"""
if total_retry >= self.MAX_RETRY:
deploy_report.status_code = AgentStatus.TOO_MANY_RETRY
return deploy_report
init_start = datetime.datetime.now() # reset the initial start time
log.info('Failed: {}, at {} retry. Error:\n{}'.format(cmd_str,
deploy_report.retry_times,
deploy_report.output_msg))
sleep_time = self._get_sleep_interval(start, process_interval)
time.sleep(sleep_time)
start, deploy_report = self.ping_server_if_possible(start, cmd, deploy_report)
if deploy_report.status_code == AgentStatus.ABORTED_BY_SERVER:
return deploy_report
# sleep the rest of the time
if process_interval - sleep_time > 0:
time.sleep(process_interval - sleep_time)
# exponential backoff
process_interval = min(process_interval * self.BACK_OFF, self.MAX_SLEEP_INTERVAL)
deploy_report.status_code = AgentStatus.TOO_MANY_RETRY
return deploy_report
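# Clarifying note (added, not in the original source): the retry loop above
# sleeps roughly process_interval seconds between failed attempts and then
# grows that interval exponentially, i.e. process_interval = min(
# process_interval * BACK_OFF, MAX_SLEEP_INTERVAL). With hypothetical config
# values PROCESS_POLL_INTERVAL=10s, BACK_OFF=2 and MAX_SLEEP_INTERVAL=60s the
# successive waits would be 10, 20, 40, 60, 60, ... seconds.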
def ping_server_if_possible(self, start, cmd_str, deploy_report):
now = datetime.datetime.now()
processed_time = (now - start).seconds
log.debug("start: {}, now: {}, process: {}".format(start, now, processed_time))
if processed_time >= self.MIN_RUNNING_TIME and self._ping_server:
start = now
log.info('Exceed min running time: {}, '
'reporting to the server'.format(self.MIN_RUNNING_TIME))
result = self._ping_server(deploy_report)
if result == PingStatus.PLAN_CHANGED:
deploy_report.status_code = AgentStatus.ABORTED_BY_SERVER
log.info('Deploy goal has changed, '
'aborting the current command {}.'.format(' '.join(cmd_str)))
return start, deploy_report
def _get_sleep_interval(self, start, interval):
now = datetime.datetime.now()
max_sleep_seconds = self.MIN_RUNNING_TIME - (now - start).seconds
return min(interval, max(max_sleep_seconds, 1))
@staticmethod
def _kill_process(process):
try:
os.killpg(process.pid, signal.SIGKILL)
except Exception as e:
log.debug('Failed to kill process: {}'.format(e))
def execute_command(self, script):
try:
deploy_step = os.getenv('DEPLOY_STEP')
if not os.path.exists(self._config.get_script_directory()):
"""if the teletraan directory does not exist in the pre stage steps. It
means it's a newly added host (never deployed before). Show a warning message
and exit. Otherwise, we treat it as an agent failure (nothing to execute)
"""
error_msg = "teletraan directory cannot be found " \
"in the tar ball in step {}!".format(deploy_step)
if deploy_step in PRE_STAGE_STEPS:
log.warning(error_msg)
return DeployReport(status_code=AgentStatus.SUCCEEDED)
else:
log.error(error_msg)
return DeployReport(status_code=AgentStatus.AGENT_FAILED, error_code=1,
retry_times=1, output_msg=error_msg)
script = os.path.join(self._config.get_script_directory(), script)
if not os.path.exists(script):
if deploy_step == 'RESTARTING':
# RESTARTING script is required
error_msg = 'RESTARTING script does not exist.'
log.error(error_msg)
return DeployReport(status_code=AgentStatus.AGENT_FAILED, error_code=1,
retry_times=1, output_msg=error_msg)
else:
log.info('script: {} does not exist.'.format(script))
return DeployReport(status_code=AgentStatus.SUCCEEDED)
os.chdir(self._config.get_script_directory())
# change the mode of the script
st = os.stat(script)
os.chmod(script, st.st_mode | stat.S_IXUSR)
return self.run_cmd(script)
except Exception as e:
error_msg = str(e)
log.error('Failed to execute command: {}. Reason: {}'.format(script, error_msg))
log.error(traceback.format_exc())
return DeployReport(status_code=AgentStatus.AGENT_FAILED,
error_code=1,
output_msg=error_msg)
|
crosswalk-project/blink-crosswalk
|
refs/heads/master
|
Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py
|
35
|
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.net.buildbot import BuildBot, Builder, Build
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models import test_failures
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
class BuilderTest(unittest.TestCase):
def _mock_test_result(self, testname):
return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])
def _install_fetch_build(self, failure):
def _mock_fetch_build(build_number):
build = Build(
builder=self.builder,
build_number=build_number,
revision=build_number + 1000,
is_green=build_number < 4
)
return build
self.builder._fetch_build = _mock_fetch_build
def setUp(self):
self.buildbot = BuildBot()
self.builder = Builder(u"Test Builder \u2661", self.buildbot)
self._install_fetch_build(lambda build_number: ["test1", "test2"])
def test_latest_layout_test_results(self):
self.builder.fetch_layout_test_results = lambda results_url: LayoutTestResults(None)
self.builder.accumulated_results_url = lambda: "http://dummy_url.org"
self.assertTrue(self.builder.latest_layout_test_results())
def test_build_caching(self):
self.assertEqual(self.builder.build(10), self.builder.build(10))
def test_build_and_revision_for_filename(self):
expectations = {
"r47483 (1)/" : (47483, 1),
"r47483 (1).zip" : (47483, 1),
"random junk": None,
}
for filename, revision_and_build in expectations.items():
self.assertEqual(self.builder._revision_and_build_for_filename(filename), revision_and_build)
def test_file_info_list_to_revision_to_build_list(self):
file_info_list = [
{"filename": "r47483 (1)/"},
{"filename": "r47483 (1).zip"},
{"filename": "random junk"},
]
builds_and_revisions_list = [(47483, 1), (47483, 1)]
self.assertEqual(self.builder._file_info_list_to_revision_to_build_list(file_info_list), builds_and_revisions_list)
def test_fetch_build(self):
buildbot = BuildBot()
builder = Builder(u"Test Builder \u2661", buildbot)
def mock_fetch_build_dictionary(self, build_number):
build_dictionary = {
"sourceStamp": {
"revision": None, # revision=None means a trunk build started from the force-build button on the builder page.
},
"number": int(build_number),
# Intentionally missing the 'results' key, meaning it's a "pass" build.
}
return build_dictionary
buildbot._fetch_build_dictionary = mock_fetch_build_dictionary
self.assertIsNotNone(builder._fetch_build(1))
def test_results_url(self):
builder = BuildBot().builder_with_name('WebKit Mac10.8 (dbg)')
self.assertEqual(builder.results_url(),
'https://storage.googleapis.com/chromium-layout-test-archives/WebKit_Mac10_8__dbg_')
def test_accumulated_results_url(self):
builder = BuildBot().builder_with_name('WebKit Mac10.8 (dbg)')
self.assertEqual(builder.accumulated_results_url(),
'https://storage.googleapis.com/chromium-layout-test-archives/WebKit_Mac10_8__dbg_/results/layout-test-results')
class BuildBotTest(unittest.TestCase):
_example_one_box_status = '''
<table>
<tr>
<td class="box"><a href="builders/Windows%20Debug%20%28Tests%29">Windows Debug (Tests)</a></td>
<td align="center" class="LastBuild box success"><a href="builders/Windows%20Debug%20%28Tests%29/builds/3693">47380</a><br />build<br />successful</td>
<td align="center" class="Activity building">building<br />ETA in<br />~ 14 mins<br />at 13:40</td>
<tr>
<td class="box"><a href="builders/SnowLeopard%20Intel%20Release">SnowLeopard Intel Release</a></td>
<td class="LastBuild box" >no build</td>
<td align="center" class="Activity building">building<br />< 1 min</td>
<tr>
<td class="box"><a href="builders/Qt%20Linux%20Release">Qt Linux Release</a></td>
<td align="center" class="LastBuild box failure"><a href="builders/Qt%20Linux%20Release/builds/654">47383</a><br />failed<br />compile-webkit</td>
<td align="center" class="Activity idle">idle<br />3 pending</td>
<tr>
<td class="box"><a href="builders/Qt%20Windows%2032-bit%20Debug">Qt Windows 32-bit Debug</a></td>
<td align="center" class="LastBuild box failure"><a href="builders/Qt%20Windows%2032-bit%20Debug/builds/2090">60563</a><br />failed<br />failed<br />slave<br />lost</td>
<td align="center" class="Activity building">building<br />ETA in<br />~ 5 mins<br />at 08:25</td>
</table>
'''
_expected_example_one_box_parsings = [
{
'is_green': True,
'build_number' : 3693,
'name': u'Windows Debug (Tests)',
'built_revision': 47380,
'activity': 'building',
'pending_builds': 0,
},
{
'is_green': False,
'build_number' : None,
'name': u'SnowLeopard Intel Release',
'built_revision': None,
'activity': 'building',
'pending_builds': 0,
},
{
'is_green': False,
'build_number' : 654,
'name': u'Qt Linux Release',
'built_revision': 47383,
'activity': 'idle',
'pending_builds': 3,
},
{
'is_green': True,
'build_number' : 2090,
'name': u'Qt Windows 32-bit Debug',
'built_revision': 60563,
'activity': 'building',
'pending_builds': 0,
},
]
def test_status_parsing(self):
buildbot = BuildBot()
soup = BeautifulSoup(self._example_one_box_status)
status_table = soup.find("table")
input_rows = status_table.findAll('tr')
for x in range(len(input_rows)):
status_row = input_rows[x]
expected_parsing = self._expected_example_one_box_parsings[x]
builder = buildbot._parse_builder_status_from_row(status_row)
# Make sure we aren't parsing more or less than we expect
self.assertEqual(builder.keys(), expected_parsing.keys())
for key, expected_value in expected_parsing.items():
self.assertEqual(builder[key], expected_value, ("Builder %d parse failure for key: %s: Actual='%s' Expected='%s'" % (x, key, builder[key], expected_value)))
def test_builder_with_name(self):
buildbot = BuildBot()
builder = buildbot.builder_with_name("Test Builder")
self.assertEqual(builder.name(), "Test Builder")
self.assertEqual(builder.url(), "http://build.chromium.org/p/chromium.webkit/builders/Test%20Builder")
self.assertEqual(builder.url_encoded_name(), "Test%20Builder")
self.assertEqual(builder.results_url(), "https://storage.googleapis.com/chromium-layout-test-archives/Test_Builder")
# Override _fetch_build_dictionary function to not touch the network.
def mock_fetch_build_dictionary(self, build_number):
build_dictionary = {
"sourceStamp": {
"revision" : 2 * build_number,
},
"number" : int(build_number),
"results" : build_number % 2, # 0 means pass
}
return build_dictionary
buildbot._fetch_build_dictionary = mock_fetch_build_dictionary
build = builder.build(10)
self.assertEqual(build.builder(), builder)
self.assertEqual(build.url(), "http://build.chromium.org/p/chromium.webkit/builders/Test%20Builder/builds/10")
self.assertEqual(build.results_url(), "https://storage.googleapis.com/chromium-layout-test-archives/Test_Builder/r20%20%2810%29")
self.assertEqual(build.revision(), 20)
self.assertTrue(build.is_green())
build = build.previous_build()
self.assertEqual(build.builder(), builder)
self.assertEqual(build.url(), "http://build.chromium.org/p/chromium.webkit/builders/Test%20Builder/builds/9")
self.assertEqual(build.results_url(), "https://storage.googleapis.com/chromium-layout-test-archives/Test_Builder/r18%20%289%29")
self.assertEqual(build.revision(), 18)
self.assertFalse(build.is_green())
self.assertIsNone(builder.build(None))
_example_directory_listing = '''
<h1>Directory listing for /results/SnowLeopard Intel Leaks/</h1>
<table>
<tr class="alt">
<th>Filename</th>
<th>Size</th>
<th>Content type</th>
<th>Content encoding</th>
</tr>
<tr class="directory ">
<td><a href="r47483%20%281%29/"><b>r47483 (1)/</b></a></td>
<td><b></b></td>
<td><b>[Directory]</b></td>
<td><b></b></td>
</tr>
<tr class="file alt">
<td><a href="r47484%20%282%29.zip">r47484 (2).zip</a></td>
<td>89K</td>
<td>[application/zip]</td>
<td></td>
</tr>
'''
_expected_files = [
{
"filename" : "r47483 (1)/",
"size" : "",
"type" : "[Directory]",
"encoding" : "",
},
{
"filename" : "r47484 (2).zip",
"size" : "89K",
"type" : "[application/zip]",
"encoding" : "",
},
]
def test_parse_build_to_revision_map(self):
buildbot = BuildBot()
files = buildbot._parse_twisted_directory_listing(self._example_directory_listing)
self.assertEqual(self._expected_files, files)
_fake_builder_page = '''
<body>
<div class="content">
<h1>Some Builder</h1>
<p>(<a href="../waterfall?show=Some Builder">view in waterfall</a>)</p>
<div class="column">
<h2>Recent Builds:</h2>
<table class="info">
<tr>
<th>Time</th>
<th>Revision</th>
<th>Result</th> <th>Build #</th>
<th>Info</th>
</tr>
<tr class="alt">
<td>Jan 10 15:49</td>
<td><span class="revision" title="Revision 104643"><a href="http://trac.webkit.org/changeset/104643">104643</a></span></td>
<td class="success">failure</td> <td><a href=".../37604">#37604</a></td>
<td class="left">Build successful</td>
</tr>
<tr class="">
<td>Jan 10 15:32</td>
<td><span class="revision" title="Revision 104636"><a href="http://trac.webkit.org/changeset/104636">104636</a></span></td>
<td class="success">failure</td> <td><a href=".../37603">#37603</a></td>
<td class="left">Build successful</td>
</tr>
<tr class="alt">
<td>Jan 10 15:18</td>
<td><span class="revision" title="Revision 104635"><a href="http://trac.webkit.org/changeset/104635">104635</a></span></td>
<td class="success">success</td> <td><a href=".../37602">#37602</a></td>
<td class="left">Build successful</td>
</tr>
<tr class="">
<td>Jan 10 14:51</td>
<td><span class="revision" title="Revision 104633"><a href="http://trac.webkit.org/changeset/104633">104633</a></span></td>
<td class="failure">failure</td> <td><a href=".../37601">#37601</a></td>
<td class="left">Failed compile-webkit</td>
</tr>
</table>
</body>'''
_fake_builder_page_without_success = '''
<body>
<table>
<tr class="alt">
<td>Jan 10 15:49</td>
<td><span class="revision" title="Revision 104643"><a href="http://trac.webkit.org/changeset/104643">104643</a></span></td>
<td class="success">failure</td>
</tr>
<tr class="">
<td>Jan 10 15:32</td>
<td><span class="revision" title="Revision 104636"><a href="http://trac.webkit.org/changeset/104636">104636</a></span></td>
<td class="success">failure</td>
</tr>
<tr class="alt">
<td>Jan 10 15:18</td>
<td><span class="revision" title="Revision 104635"><a href="http://trac.webkit.org/changeset/104635">104635</a></span></td>
<td class="success">failure</td>
</tr>
<tr class="">
<td>Jan 10 11:58</td>
<td><span class="revision" title="Revision ??"><a href="http://trac.webkit.org/changeset/%3F%3F">??</a></span></td>
<td class="retry">retry</td>
</tr>
<tr class="">
<td>Jan 10 14:51</td>
<td><span class="revision" title="Revision 104633"><a href="http://trac.webkit.org/changeset/104633">104633</a></span></td>
<td class="failure">failure</td>
</tr>
</table>
</body>'''
def test_revisions_for_builder(self):
buildbot = BuildBot()
buildbot._fetch_builder_page = lambda builder: builder.page
builder_with_success = Builder('Some builder', None)
builder_with_success.page = self._fake_builder_page
self.assertEqual(buildbot._revisions_for_builder(builder_with_success), [(104643, False), (104636, False), (104635, True), (104633, False)])
builder_without_success = Builder('Some builder', None)
builder_without_success.page = self._fake_builder_page_without_success
self.assertEqual(buildbot._revisions_for_builder(builder_without_success), [(104643, False), (104636, False), (104635, False), (104633, False)])
def test_find_green_revision(self):
buildbot = BuildBot()
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (3, True)],
'Builder 2': [(1, True), (3, False)],
'Builder 3': [(1, True), (3, True)],
}), 1)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, False), (3, True)],
'Builder 2': [(1, True), (3, True)],
'Builder 3': [(1, True), (3, True)],
}), 3)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (2, True)],
'Builder 2': [(1, False), (2, True), (3, True)],
'Builder 3': [(1, True), (3, True)],
}), None)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (2, True)],
'Builder 2': [(1, True), (2, True), (3, True)],
'Builder 3': [(1, True), (3, True)],
}), 2)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, False), (2, True)],
'Builder 2': [(1, True), (3, True)],
'Builder 3': [(1, True), (3, True)],
}), None)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (3, True)],
'Builder 2': [(1, False), (2, True), (3, True), (4, True)],
'Builder 3': [(2, True), (4, True)],
}), 3)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (3, True)],
'Builder 2': [(1, False), (2, True), (3, True), (4, False)],
'Builder 3': [(2, True), (4, True)],
}), None)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (3, True)],
'Builder 2': [(1, False), (2, True), (3, True), (4, False)],
'Builder 3': [(2, True), (3, True), (4, True)],
}), 3)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (2, True)],
'Builder 2': [],
'Builder 3': [(1, True), (2, True)],
}), None)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (3, False), (5, True), (10, True), (12, False)],
'Builder 2': [(1, True), (3, False), (7, True), (9, True), (12, False)],
'Builder 3': [(1, True), (3, True), (7, True), (11, False), (12, True)],
}), 7)
def _fetch_build(self, build_number):
if build_number == 5:
return "correct build"
return "wrong build"
def _fetch_revision_to_build_map(self):
return {'r5': 5, 'r2': 2, 'r3': 3}
def test_latest_cached_build(self):
b = Builder('builder', BuildBot())
b._fetch_build = self._fetch_build
b._fetch_revision_to_build_map = self._fetch_revision_to_build_map
self.assertEqual("correct build", b.latest_cached_build())
def results_url(self):
return "some-url"
def test_results_zip_url(self):
b = Build(None, 123, 123, False)
b.results_url = self.results_url
self.assertEqual("some-url.zip", b.results_zip_url())
|
g-weatherill/hmtk
|
refs/heads/master
|
hmtk/faults/mfd/anderson_luco_area_mmax.py
|
1
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# LICENSE
#
# Copyright (c) 2010-2013, GEM Foundation, G. Weatherill, M. Pagani,
# D. Monelli.
#
# The Hazard Modeller's Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
#License as published by the Free Software Foundation, either version
#3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
#DISCLAIMER
#
# The software Hazard Modeller's Toolkit (hmtk) provided herein
#is released as a prototype implementation on behalf of
# scientists and engineers working within the GEM Foundation (Global
#Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEM's OpenQuake suite
# (http://www.globalquakemodel.org/openquake) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEM's OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the hazard scientific staff of the GEM Model Facility
# (hazard@globalquakemodel.org).
#
# The Hazard Modeller's Toolkit (hmtk) is therefore distributed WITHOUT
#ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
#FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
#for more details.
#
# The GEM Foundation, and the authors of the software, assume no
# liability for use of the software.
"""
Module :mod: mfd.anderson_luco_area_mmax implements :class:
AndersonLucoAreaMmax. This calculates the magnitude occurrence rate on a fault
given a known slip value using the exponential models described by
Anderson & Luco (1983) referring to the rupture area of the maximum earthquake.
Anderson, J. G., and Luco, J. E. (1983) "Consequences of slip rate constraints
on earthquake recurrence relations". Bull. Seis. Soc. Am. 73(2) 471 - 496
"""
import abc
import numpy as np
from hmtk.faults.mfd.base import BaseMFDfromSlip
C_VALUE = 16.05
D_VALUE = 1.5
class BaseRecurrenceModel(object):
'''
Abstract base class to implement cumulative value formula
'''
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def cumulative_value(self, slip, mmax, mag_value, bbar, dbar, beta):
'''
Returns the rate of earthquakes with M > mag_value
'''
raise NotImplementedError
class Type1RecurrenceModel(BaseRecurrenceModel):
'''
Calculate N(M > mag_value) using Anderson & Luco Type 1 formula as
inverse of formula I.10 of Table 2 in Anderson & Luco (1983).
'''
def cumulative_value(self, slip, mmax, mag_value, bbar, dbar, beta):
'''
Returns the rate of events with M > mag_value
:param float slip:
Slip rate in mm/yr
:param float mmax:
Maximum magnitude
:param float mag_value:
Magnitude value
:param float bbar:
\bar{b} parameter (effectively = b * log(10.))
:param float dbar:
\bar{d} parameter
:param float beta:
Beta value of formula defined in Eq. 20 of Anderson & Luco (1983)
'''
delta_m = (mmax - mag_value)
a_1 = self._get_a1_value(bbar, dbar, slip / 10., beta, mmax)
return a_1 * np.exp(bbar * delta_m) * (delta_m > 0.0)
@staticmethod
def _get_a1_value(bbar, dbar, slip, beta, mmax):
"""
Returns the A1 value defined in I.9 (Table 2)
"""
return ((dbar - bbar) / dbar) * (slip / beta) *\
np.exp(-(dbar / 2.) * mmax)
class Type2RecurrenceModel(BaseRecurrenceModel):
'''
Calculate N(M > mag_value) using Anderson & Luco Type 2 formula as
inverse of formula II.9 of Table 3 in Anderson & Luco (1983).
'''
def cumulative_value(self, slip, mmax, mag_value, bbar, dbar, beta):
'''
Returns the rate of events with M > mag_value
:param float slip:
Slip rate in mm/yr
:param float mmax:
Maximum magnitude
:param float mag_value:
Magnitude value
:param float bbar:
\bar{b} parameter (effectively = b * log(10.))
:param float dbar:
\bar{d} parameter
:param float beta:
Beta value of formula defined in Eq. 20 of Anderson & Luco (1983)
'''
delta_m = mmax - mag_value
a_2 = self._get_a2_value(bbar, dbar, slip / 10., beta, mmax)
return a_2 * (np.exp(bbar * delta_m) - 1.0) * (delta_m > 0.0)
@staticmethod
def _get_a2_value(bbar, dbar, slip, beta, mmax):
"""
Returns the A2 value defined in II.8 (Table 3)
"""
return ((dbar - bbar) / bbar) * (slip / beta) *\
np.exp(-(dbar / 2.) * mmax)
class Type3RecurrenceModel(BaseRecurrenceModel):
'''
Calculate N(M > mag_value) using Anderson & Luco Type 3 formula as
inverse of formula III.9 of Table 4 in Anderson & Luco (1983).
'''
def cumulative_value(self, slip, mmax, mag_value, bbar, dbar, beta):
'''
Returns the rate of events with M > mag_value
:param float slip:
Slip rate in mm/yr
:param float mmax:
Maximum magnitude
:param float mag_value:
Magnitude value
:param float bbar:
\bar{b} parameter (effectively = b * log(10.))
:param float dbar:
\bar{d} parameter
:param float beta:
Beta value of formula defined in Eq. 20 of Anderson & Luco (1983)
'''
delta_m = mmax - mag_value
a_3 = self._get_a3_value(bbar, dbar, slip / 10., beta, mmax)
central_term = np.exp(bbar * delta_m) - 1.0 - (bbar * delta_m)
return a_3 * central_term * (delta_m > 0.0)
@staticmethod
def _get_a3_value(bbar, dbar, slip, beta, mmax):
"""
Returns the A3 value defined in III.4 (Table 4)
"""
return (dbar * (dbar - bbar) / (bbar ** 2.)) * (slip / beta) *\
np.exp(-(dbar / 2.) * mmax)
RECURRENCE_MAP = {'First': Type1RecurrenceModel(),
'Second': Type2RecurrenceModel(),
'Third': Type3RecurrenceModel()}
class AndersonLucoAreaMmax(BaseMFDfromSlip):
'''
Class to implement the fault activity rate calculators
of Anderson & Luco (1983), referring to the area of the maximum earthquake
:param str mfd_type:
Type of magnitude frequency distribution
:param float mfd_weight:
Weight of the mfd distribution (for subsequent logic tree processing)
:param float bin_width:
Width of the magnitude bin (rates are given for the centre point)
:param float mmin:
Minimum magnitude
:param float mmax:
Maximum magnitude
:param float mmax_sigma:
Uncertainty on maximum magnitude
:param float b_value:
Exponent (b-value) for the magnitude frequency distribution
:param numpy.ndarray occurrence_rate:
Activity rates for magnitude in the range mmin to mmax in steps of
bin_width
'''
def setUp(self, mfd_conf):
'''
Input core configuration parameters as specified in the
configuration file
:param dict mfd_conf:
Configuration file containing the following attributes:
* 'Type' - Choose between the 1st, 2nd or 3rd type of recurrence
model {'First' | 'Second' | 'Third'}
* 'Model_Weight' - Logic tree weight of model type (float)
* 'MFD_spacing' - Width of MFD bin (float)
* 'Minimum_Magnitude' - Minimum magnitude of activity rates (float)
* 'b_value' - Tuple of (b-value, b-value uncertainty)
* 'Maximum_Magnitude' - Maximum magnitude on fault (if not defined
will use scaling relation)
* 'Maximum_Magnitude_Uncertainty' - Uncertainty
on maximum magnitude
(If not defined and the MSR has a sigma term then this will be
taken from sigma)
'''
self.mfd_type = mfd_conf['Model_Type']
self.mfd_model = 'Anderson & Luco (Mmax) ' + self.mfd_type
self.mfd_weight = mfd_conf['Model_Weight']
self.bin_width = mfd_conf['MFD_spacing']
self.mmin = mfd_conf['Minimum_Magnitude']
self.mmax = None
self.mmax_sigma = None
self.b_value = mfd_conf['b_value'][0]
self.b_value_sigma = mfd_conf['b_value'][1]
self.occurrence_rate = None
def get_mmax(self, mfd_conf, msr, rake, area):
'''
Gets the mmax for the fault - reading directly from the config file
or using the msr otherwise
:param dict mfd_conf:
Configuration file (see setUp for parameters)
:param msr:
Instance of :class: nhlib.scalerel
:param float rake:
Rake of the fault (in range -180 to 180)
:param float area:
Area of the fault surface (km^2)
'''
if mfd_conf['Maximum_Magnitude']:
self.mmax = mfd_conf['Maximum_Magnitude']
else:
self.mmax = msr.get_median_mag(area, rake)
self.mmax_sigma = mfd_conf.get(
'Maximum_Magnitude_Uncertainty', None) or msr.get_std_dev_mag(rake)
def get_mfd(self, slip, fault_width, shear_modulus=30.0,
disp_length_ratio=1.25E-5):
'''
Calculates activity rate on the fault
:param float slip:
Slip rate in mm/yr
:param fault_width:
Width of the fault (km)
:param float shear_modulus:
Shear modulus of the fault (GPa)
:param float disp_length_ratio:
Displacement to length ratio (dimensionless)
:returns:
* Minimum Magnitude (float)
* Bin width (float)
* Occurrence Rates (numpy.ndarray)
'''
beta = np.sqrt((disp_length_ratio * (10.0 ** C_VALUE)) /
((shear_modulus * 1.0E10) * (fault_width * 1E5)))
dbar = D_VALUE * np.log(10.0)
bbar = self.b_value * np.log(10.0)
mag = np.arange(self.mmin - (self.bin_width / 2.),
self.mmax + self.bin_width,
self.bin_width)
if bbar > dbar:
print ('b-value larger than 1.5 will produce invalid results in '
'Anderson & Luco models')
self.occurrence_rate = np.nan * np.ones(len(mag) - 1)
return self.mmin, self.bin_width, self.occurrence_rate
self.occurrence_rate = np.zeros(len(mag) - 1, dtype=float)
for ival in range(0, len(mag) - 1):
self.occurrence_rate[ival] = \
RECURRENCE_MAP[self.mfd_type].cumulative_value(
slip, self.mmax, mag[ival], bbar, dbar, beta) - \
RECURRENCE_MAP[self.mfd_type].cumulative_value(
slip, self.mmax, mag[ival + 1], bbar, dbar, beta)
if self.occurrence_rate[ival] < 0.:
self.occurrence_rate[ival] = 0.
return self.mmin, self.bin_width, self.occurrence_rate
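# ---------------------------------------------------------------------------
# Illustrative usage sketch (added; not part of the original hmtk module).
# Every number below is hypothetical, and a real application would normally
# let get_mmax() derive mmax and mmax_sigma from a magnitude scaling relation
# rather than assigning them by hand.
if __name__ == '__main__':
    example_conf = {'Model_Type': 'First',   # 'First' | 'Second' | 'Third'
                    'Model_Weight': 1.0,
                    'MFD_spacing': 0.1,
                    'Minimum_Magnitude': 5.0,
                    'Maximum_Magnitude': 7.5,
                    'Maximum_Magnitude_Uncertainty': 0.25,
                    'b_value': (1.0, 0.05)}
    model = AndersonLucoAreaMmax()
    model.setUp(example_conf)
    # get_mmax(example_conf, msr, rake, area) would normally populate these
    model.mmax = example_conf['Maximum_Magnitude']
    model.mmax_sigma = example_conf['Maximum_Magnitude_Uncertainty']
    mmin, bin_width, rates = model.get_mfd(slip=5.0, fault_width=20.0)
    print('Incremental rates from M=%.1f in %.1f bins: %s'
          % (mmin, bin_width, rates))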
|
evanbiederstedt/RRBSfun
|
refs/heads/master
|
scripts/repeat_finder_scripts/repeat_finder_RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.G.py
|
1
|
import glob
import numpy as np
import pandas as pd
from numpy import nan
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/RRBS_anno_clean")
repeats = pd.read_csv("repeats_hg19.csv")
annofiles = glob.glob("RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.G*")
def between_range(row):
subset = repeats.loc[(row["chr"] == repeats.chr) & (row.start >= repeats.start) & (row.start <= repeats.end), :]
if subset.empty:
return np.nan
return subset.repeat_class
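# Note added for clarity (not in the original script): between_range() returns
# the repeat_class of whichever repeats_hg19.csv interval(s) on the same
# chromosome contain the read position (a pandas Series if several overlap),
# and NaN when the position falls outside every annotated repeat.  For
# example, a row with chr == 'chr1' and start == 12345 is tagged with the
# class of any repeat interval spanning position 12345 on chr1.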
#newdf1 = pd.DataFrame()
for filename in annofiles:
df = pd.read_table(filename)
df["hg19_repeats"] = df.apply(between_range, axis = 1)
df.to_csv(str("repeatregions_") + filename + ".csv", index=False)
|
darktears/chromium-crosswalk
|
refs/heads/master
|
tools/win/new_analyze_warnings/retrieve_warnings.py
|
43
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This retrieves the latest warnings from the Chrome /analyze build machine, and
does a diff.
This script is intended to be run from retrieve_latest_warnings.bat which
fills out the functionality.
"""
import urllib
import sys
import glob
import os
if len(sys.argv) < 2:
print "Missing build number."
sys.exit(10)
buildNumber = int(sys.argv[1])
baseURL = "http://build.chromium.org/p/chromium.fyi/builders/" + \
"Chromium%20Windows%20Analyze/"
print "Finding recent builds on %s" % baseURL
baseData = urllib.urlopen(baseURL).read()
recentOff = baseData.find("Recent Builds:")
buildPattern = 'success</td> <td><a href="' + \
'../../builders/Chromium%20Windows%20Analyze/builds/'
# For some reason I couldn't get regular expressions to work on this data.
latestBuildOff = baseData.find(buildPattern, recentOff) + len(buildPattern)
if latestBuildOff < len(buildPattern):
print "Couldn't find successful build."
sys.exit(10)
latestEndOff = baseData.find('"', latestBuildOff)
latestBuildStr = baseData[latestBuildOff:latestEndOff]
maxBuildNumber = int(latestBuildStr)
if buildNumber > maxBuildNumber:
print "Requested build number (%d) is too high. Maximum is %d." % \
(buildNumber, maxBuildNumber)
sys.exit(10)
# Treat negative numbers specially
if sys.argv[1][0] == '-':
buildNumber = maxBuildNumber + buildNumber
if buildNumber < 0:
buildNumber = 0
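# Clarifying example (added): a negative argument is interpreted relative to
# the newest successful build, so "-1" retrieves maxBuildNumber - 1, and any
# request that would go below build 0 is clamped to build 0.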
print "Retrieving build number %d of %d" % (buildNumber, maxBuildNumber)
# Find the last summary results in the current directory
results = glob.glob("analyze*_summary.txt")
results.sort()
previous = "%04d" % (buildNumber - 1)
if results:
possiblePrevious = results[-1][7:11]
if int(possiblePrevious) == buildNumber:
if len(results) > 1:
previous = results[-2][7:11]
else:
previous = possiblePrevious
dataURL = baseURL + "builds/" + str(buildNumber) + "/steps/compile/logs/stdio"
revisionURL = baseURL + "builds/" + str(buildNumber)
# Retrieve the revision
revisionData = urllib.urlopen(revisionURL).read()
key = "Got Revision</td><td>"
Off = revisionData.find(key) + len(key)
if Off > len(key):
revision = revisionData[Off: Off + 40]
print "Revision is '%s'" % revision
print "Environment variables can be set with set_analyze_revision.bat"
payload = "set ANALYZE_REVISION=%s\r\n" % revision
payload += "set ANALYZE_BUILD_NUMBER=%04d\r\n" % buildNumber
payload += "set ANALYZE_PREV_BUILD_NUMBER=%s\r\n" % previous
open("set_analyze_revision.bat", "wt").write(payload)
# Retrieve the raw warning data
print "Retrieving raw build results. Please wait."
data = urllib.urlopen(dataURL).read()
if data.count("status: SUCCESS") == 0:
print "Build failed or is incomplete."
else:
# Fix up "'" and '"'
data = data.replace("'", "'").replace(""", '"')
# Fix up '<' and '>'
data = data.replace("<", "<").replace(">", ">")
# Fix up '&'
data = data.replace("&", "&")
# Fix up random spans
data = data.replace('</span><span class="stdout">', '')
# Fix up the source paths to match my local /analyze repo
if "ANALYZE_REPO" in os.environ:
sourcePath = r"e:\b\build\slave\chromium_windows_analyze\build\src"
destPath = os.path.join(os.environ["ANALYZE_REPO"], "src")
data = data.replace(sourcePath, destPath)
outputName = "analyze%04d_full.txt" % buildNumber
open(outputName, "w").write(data)
print "Done. Data is in %s" % outputName
else:
print "No revision information found!"
|
Teagan42/home-assistant
|
refs/heads/dev
|
homeassistant/components/yeelight/binary_sensor.py
|
3
|
"""Sensor platform support for yeelight."""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import DATA_UPDATED, DATA_YEELIGHT
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Yeelight sensors."""
if not discovery_info:
return
device = hass.data[DATA_YEELIGHT][discovery_info["host"]]
if device.is_nightlight_supported:
_LOGGER.debug("Adding nightlight mode sensor for %s", device.name)
add_entities([YeelightNightlightModeSensor(device)])
class YeelightNightlightModeSensor(BinarySensorDevice):
"""Representation of a Yeelight nightlight mode sensor."""
def __init__(self, device):
"""Initialize nightlight mode sensor."""
self._device = device
@callback
def _schedule_immediate_update(self):
self.async_schedule_update_ha_state()
async def async_added_to_hass(self):
"""Handle entity which will be added."""
async_dispatcher_connect(
self.hass,
DATA_UPDATED.format(self._device.ipaddr),
self._schedule_immediate_update,
)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._device.name} nightlight"
@property
def is_on(self):
"""Return true if nightlight mode is on."""
return self._device.is_nightlight_enabled
|
vks/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/pywebsocket/src/test/test_dispatch.py
|
488
|
#!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for dispatch module."""
import os
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket import dispatch
from mod_pywebsocket import handshake
from test import mock
_TEST_HANDLERS_DIR = os.path.join(
os.path.split(__file__)[0], 'testdata', 'handlers')
_TEST_HANDLERS_SUB_DIR = os.path.join(_TEST_HANDLERS_DIR, 'sub')
class DispatcherTest(unittest.TestCase):
"""A unittest for dispatch module."""
def test_normalize_path(self):
self.assertEqual(os.path.abspath('/a/b').replace('\\', '/'),
dispatch._normalize_path('/a/b'))
self.assertEqual(os.path.abspath('/a/b').replace('\\', '/'),
dispatch._normalize_path('\\a\\b'))
self.assertEqual(os.path.abspath('/a/b').replace('\\', '/'),
dispatch._normalize_path('/a/c/../b'))
self.assertEqual(os.path.abspath('abc').replace('\\', '/'),
dispatch._normalize_path('abc'))
def test_converter(self):
converter = dispatch._create_path_to_resource_converter('/a/b')
# Python built by MSC inserts a drive name like 'C:\' via realpath().
# Converter Generator expands provided path using realpath() and uses
# the path including a drive name to verify the prefix.
os_root = os.path.realpath('/')
self.assertEqual('/h', converter(os_root + 'a/b/h_wsh.py'))
self.assertEqual('/c/h', converter(os_root + 'a/b/c/h_wsh.py'))
self.assertEqual(None, converter(os_root + 'a/b/h.py'))
self.assertEqual(None, converter('a/b/h_wsh.py'))
converter = dispatch._create_path_to_resource_converter('a/b')
self.assertEqual('/h', converter(dispatch._normalize_path(
'a/b/h_wsh.py')))
converter = dispatch._create_path_to_resource_converter('/a/b///')
self.assertEqual('/h', converter(os_root + 'a/b/h_wsh.py'))
self.assertEqual('/h', converter(dispatch._normalize_path(
'/a/b/../b/h_wsh.py')))
converter = dispatch._create_path_to_resource_converter(
'/a/../a/b/../b/')
self.assertEqual('/h', converter(os_root + 'a/b/h_wsh.py'))
converter = dispatch._create_path_to_resource_converter(r'\a\b')
self.assertEqual('/h', converter(os_root + r'a\b\h_wsh.py'))
self.assertEqual('/h', converter(os_root + r'a/b/h_wsh.py'))
def test_enumerate_handler_file_paths(self):
paths = list(
dispatch._enumerate_handler_file_paths(_TEST_HANDLERS_DIR))
paths.sort()
self.assertEqual(8, len(paths))
expected_paths = [
os.path.join(_TEST_HANDLERS_DIR, 'abort_by_user_wsh.py'),
os.path.join(_TEST_HANDLERS_DIR, 'blank_wsh.py'),
os.path.join(_TEST_HANDLERS_DIR, 'origin_check_wsh.py'),
os.path.join(_TEST_HANDLERS_DIR, 'sub',
'exception_in_transfer_wsh.py'),
os.path.join(_TEST_HANDLERS_DIR, 'sub', 'non_callable_wsh.py'),
os.path.join(_TEST_HANDLERS_DIR, 'sub', 'plain_wsh.py'),
os.path.join(_TEST_HANDLERS_DIR, 'sub',
'wrong_handshake_sig_wsh.py'),
os.path.join(_TEST_HANDLERS_DIR, 'sub',
'wrong_transfer_sig_wsh.py'),
]
for expected, actual in zip(expected_paths, paths):
self.assertEqual(expected, actual)
def test_source_handler_file(self):
self.assertRaises(
dispatch.DispatchException, dispatch._source_handler_file, '')
self.assertRaises(
dispatch.DispatchException, dispatch._source_handler_file, 'def')
self.assertRaises(
dispatch.DispatchException, dispatch._source_handler_file, '1/0')
self.failUnless(dispatch._source_handler_file(
'def web_socket_do_extra_handshake(request):pass\n'
'def web_socket_transfer_data(request):pass\n'))
def test_source_warnings(self):
dispatcher = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
warnings = dispatcher.source_warnings()
warnings.sort()
expected_warnings = [
(os.path.realpath(os.path.join(
_TEST_HANDLERS_DIR, 'blank_wsh.py')) +
': web_socket_do_extra_handshake is not defined.'),
(os.path.realpath(os.path.join(
_TEST_HANDLERS_DIR, 'sub', 'non_callable_wsh.py')) +
': web_socket_do_extra_handshake is not callable.'),
(os.path.realpath(os.path.join(
_TEST_HANDLERS_DIR, 'sub', 'wrong_handshake_sig_wsh.py')) +
': web_socket_do_extra_handshake is not defined.'),
(os.path.realpath(os.path.join(
_TEST_HANDLERS_DIR, 'sub', 'wrong_transfer_sig_wsh.py')) +
': web_socket_transfer_data is not defined.'),
]
self.assertEquals(4, len(warnings))
for expected, actual in zip(expected_warnings, warnings):
self.assertEquals(expected, actual)
def test_do_extra_handshake(self):
dispatcher = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
request = mock.MockRequest()
request.ws_resource = '/origin_check'
request.ws_origin = 'http://example.com'
dispatcher.do_extra_handshake(request) # Must not raise exception.
request.ws_origin = 'http://bad.example.com'
try:
dispatcher.do_extra_handshake(request)
self.fail('Could not catch HandshakeException with 403 status')
except handshake.HandshakeException, e:
self.assertEquals(403, e.status)
except Exception, e:
self.fail('Unexpected exception: %r' % e)
def test_abort_extra_handshake(self):
dispatcher = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
request = mock.MockRequest()
request.ws_resource = '/abort_by_user'
self.assertRaises(handshake.AbortedByUserException,
dispatcher.do_extra_handshake, request)
def test_transfer_data(self):
dispatcher = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
request = mock.MockRequest(connection=mock.MockConn('\xff\x00'))
request.ws_resource = '/origin_check'
request.ws_protocol = 'p1'
dispatcher.transfer_data(request)
self.assertEqual('origin_check_wsh.py is called for /origin_check, p1'
'\xff\x00',
request.connection.written_data())
request = mock.MockRequest(connection=mock.MockConn('\xff\x00'))
request.ws_resource = '/sub/plain'
request.ws_protocol = None
dispatcher.transfer_data(request)
self.assertEqual('sub/plain_wsh.py is called for /sub/plain, None'
'\xff\x00',
request.connection.written_data())
request = mock.MockRequest(connection=mock.MockConn('\xff\x00'))
request.ws_resource = '/sub/plain?'
request.ws_protocol = None
dispatcher.transfer_data(request)
self.assertEqual('sub/plain_wsh.py is called for /sub/plain?, None'
'\xff\x00',
request.connection.written_data())
request = mock.MockRequest(connection=mock.MockConn('\xff\x00'))
request.ws_resource = '/sub/plain?q=v'
request.ws_protocol = None
dispatcher.transfer_data(request)
self.assertEqual('sub/plain_wsh.py is called for /sub/plain?q=v, None'
'\xff\x00',
request.connection.written_data())
def test_transfer_data_no_handler(self):
dispatcher = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
for resource in ['/blank', '/sub/non_callable',
'/sub/no_wsh_at_the_end', '/does/not/exist']:
request = mock.MockRequest(connection=mock.MockConn(''))
request.ws_resource = resource
request.ws_protocol = 'p2'
try:
dispatcher.transfer_data(request)
self.fail()
except dispatch.DispatchException, e:
self.failUnless(str(e).find('No handler') != -1)
except Exception:
self.fail()
def test_transfer_data_handler_exception(self):
dispatcher = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
request = mock.MockRequest(connection=mock.MockConn(''))
request.ws_resource = '/sub/exception_in_transfer'
request.ws_protocol = 'p3'
try:
dispatcher.transfer_data(request)
self.fail()
except Exception, e:
self.failUnless(str(e).find('Intentional') != -1,
'Unexpected exception: %s' % e)
def test_abort_transfer_data(self):
dispatcher = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
request = mock.MockRequest()
request.ws_resource = '/abort_by_user'
self.assertRaises(handshake.AbortedByUserException,
dispatcher.transfer_data, request)
def test_scan_dir(self):
disp = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
self.assertEqual(4, len(disp._handler_suite_map))
self.failUnless('/origin_check' in disp._handler_suite_map)
self.failUnless(
'/sub/exception_in_transfer' in disp._handler_suite_map)
self.failUnless('/sub/plain' in disp._handler_suite_map)
def test_scan_sub_dir(self):
disp = dispatch.Dispatcher(_TEST_HANDLERS_DIR, _TEST_HANDLERS_SUB_DIR)
self.assertEqual(2, len(disp._handler_suite_map))
self.failIf('/origin_check' in disp._handler_suite_map)
self.failUnless(
'/sub/exception_in_transfer' in disp._handler_suite_map)
self.failUnless('/sub/plain' in disp._handler_suite_map)
def test_scan_sub_dir_as_root(self):
disp = dispatch.Dispatcher(_TEST_HANDLERS_SUB_DIR,
_TEST_HANDLERS_SUB_DIR)
self.assertEqual(2, len(disp._handler_suite_map))
self.failIf('/origin_check' in disp._handler_suite_map)
self.failIf('/sub/exception_in_transfer' in disp._handler_suite_map)
self.failIf('/sub/plain' in disp._handler_suite_map)
self.failUnless('/exception_in_transfer' in disp._handler_suite_map)
self.failUnless('/plain' in disp._handler_suite_map)
def test_scan_dir_must_under_root(self):
dispatch.Dispatcher('a/b', 'a/b/c') # OK
dispatch.Dispatcher('a/b///', 'a/b') # OK
self.assertRaises(dispatch.DispatchException,
dispatch.Dispatcher, 'a/b/c', 'a/b')
def test_resource_path_alias(self):
disp = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
disp.add_resource_path_alias('/', '/origin_check')
self.assertEqual(5, len(disp._handler_suite_map))
self.failUnless('/origin_check' in disp._handler_suite_map)
self.failUnless(
'/sub/exception_in_transfer' in disp._handler_suite_map)
self.failUnless('/sub/plain' in disp._handler_suite_map)
self.failUnless('/' in disp._handler_suite_map)
self.assertRaises(dispatch.DispatchException,
disp.add_resource_path_alias, '/alias', '/not-exist')
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
|
timdiels/chicken_turtle_util
|
refs/heads/master
|
pytil/__init__.py
|
1
|
# Copyright (C) 2016 VIB/BEG/UGent - Tim Diels <timdiels.m@gmail.com>
#
# This file is part of pytil.
#
# pytil is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pytil is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pytil. If not, see <http://www.gnu.org/licenses/>.
__version__ = '7.0.1.dev'
|
vitan/hue
|
refs/heads/master
|
desktop/core/ext-py/Pygments-1.3.1/pygments/util.py
|
56
|
# -*- coding: utf-8 -*-
"""
pygments.util
~~~~~~~~~~~~~
Utility functions.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import codecs
split_path_re = re.compile(r'[/\\ ]')
doctype_lookup_re = re.compile(r'''(?smx)
(<\?.*?\?>)?\s*
<!DOCTYPE\s+(
[a-zA-Z_][a-zA-Z0-9]*\s+
[a-zA-Z_][a-zA-Z0-9]*\s+
"[^"]*")
[^>]*>
''')
tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?</.+?>(?uism)')
class ClassNotFound(ValueError):
"""
If one of the get_*_by_* functions didn't find a matching class.
"""
class OptionError(Exception):
pass
def get_choice_opt(options, optname, allowed, default=None, normcase=False):
string = options.get(optname, default)
if normcase:
string = string.lower()
if string not in allowed:
raise OptionError('Value for option %s must be one of %s' %
(optname, ', '.join(map(str, allowed))))
return string
def get_bool_opt(options, optname, default=None):
string = options.get(optname, default)
if isinstance(string, bool):
return string
elif isinstance(string, int):
return bool(string)
elif not isinstance(string, basestring):
raise OptionError('Invalid type %r for option %s; use '
'1/0, yes/no, true/false, on/off' % (
string, optname))
elif string.lower() in ('1', 'yes', 'true', 'on'):
return True
elif string.lower() in ('0', 'no', 'false', 'off'):
return False
else:
raise OptionError('Invalid value %r for option %s; use '
'1/0, yes/no, true/false, on/off' % (
string, optname))
def get_int_opt(options, optname, default=None):
string = options.get(optname, default)
try:
return int(string)
except TypeError:
raise OptionError('Invalid type %r for option %s; you '
'must give an integer value' % (
string, optname))
except ValueError:
raise OptionError('Invalid value %r for option %s; you '
'must give an integer value' % (
string, optname))
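# Illustrative only (added; not part of the original Pygments source): with
# options = {'stripnl': 'yes', 'tabsize': '8'}, get_bool_opt(options, 'stripnl')
# returns True and get_int_opt(options, 'tabsize') returns 8; values that fit
# neither accepted form raise OptionError.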
def get_list_opt(options, optname, default=None):
val = options.get(optname, default)
if isinstance(val, basestring):
return val.split()
elif isinstance(val, (list, tuple)):
return list(val)
else:
raise OptionError('Invalid type %r for option %s; you '
'must give a list value' % (
val, optname))
def docstring_headline(obj):
if not obj.__doc__:
return ''
res = []
for line in obj.__doc__.strip().splitlines():
if line.strip():
res.append(" " + line.strip())
else:
break
return ''.join(res).lstrip()
def make_analysator(f):
"""
Return a static text analysation function that
returns float values.
"""
def text_analyse(text):
rv = f(text)
if not rv:
return 0.0
return min(1.0, max(0.0, float(rv)))
text_analyse.__doc__ = f.__doc__
return staticmethod(text_analyse)
def shebang_matches(text, regex):
"""
Check if the given regular expression matches the last part of the
shebang if one exists.
>>> from pygments.util import shebang_matches
>>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/startsomethingwith python',
... r'python(2\.\d)?')
True
It also checks for common windows executable file extensions::
>>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
True
Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does
the same as ``'perl -e'``)
Note that this method automatically searches the whole string (eg:
the regular expression is wrapped in ``'^$'``)
"""
index = text.find('\n')
if index >= 0:
first_line = text[:index].lower()
else:
first_line = text.lower()
if first_line.startswith('#!'):
try:
found = [x for x in split_path_re.split(first_line[2:].strip())
if x and not x.startswith('-')][-1]
except IndexError:
return False
regex = re.compile('^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
if regex.search(found) is not None:
return True
return False
def doctype_matches(text, regex):
"""
Check if the doctype matches a regular expression (if present).
Note that this method only checks the first part of a DOCTYPE.
eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
"""
m = doctype_lookup_re.match(text)
if m is None:
return False
doctype = m.group(2)
return re.compile(regex).match(doctype.strip()) is not None
def html_doctype_matches(text):
"""
Check if the file looks like it has a html doctype.
"""
return doctype_matches(text, r'html\s+PUBLIC\s+"-//W3C//DTD X?HTML.*')
_looks_like_xml_cache = {}
def looks_like_xml(text):
"""
Check if a doctype exists or if we have some tags.
"""
key = hash(text)
try:
return _looks_like_xml_cache[key]
except KeyError:
m = doctype_lookup_re.match(text)
if m is not None:
return True
rv = tag_re.search(text[:1000]) is not None
_looks_like_xml_cache[key] = rv
return rv
# Python 2/3 compatibility
if sys.version_info < (3,0):
b = bytes = str
u_prefix = 'u'
import StringIO, cStringIO
BytesIO = cStringIO.StringIO
StringIO = StringIO.StringIO
uni_open = codecs.open
else:
import builtins
bytes = builtins.bytes
u_prefix = ''
def b(s):
if isinstance(s, str):
return bytes(map(ord, s))
elif isinstance(s, bytes):
return s
else:
raise TypeError("Invalid argument %r for b()" % (s,))
import io
BytesIO = io.BytesIO
StringIO = io.StringIO
uni_open = builtins.open
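# Behaviour note (added; not in the original source): under Python 3 the b()
# helper above converts text to bytes one code point at a time, so
# b('abc') == b'abc', while bytes input is returned unchanged; under Python 2,
# b is simply an alias for str.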
|
RGood/praw
|
refs/heads/master
|
tests/unit/models/reddit/test_wikipage.py
|
6
|
import pickle
from praw.models import Subreddit, WikiPage
from ... import UnitTest
class TestWikiPage(UnitTest):
def test_equality(self):
page1 = WikiPage(self.reddit, subreddit=Subreddit(self.reddit, 'a'),
name='x')
page2 = WikiPage(self.reddit, subreddit=Subreddit(self.reddit, 'a'),
name='2')
page3 = WikiPage(self.reddit, subreddit=Subreddit(self.reddit, 'b'),
name='1')
page4 = WikiPage(self.reddit, subreddit=Subreddit(self.reddit, 'A'),
name='x')
page5 = WikiPage(self.reddit, subreddit=Subreddit(self.reddit, 'a'),
name='X')
assert page1 == page1
assert page2 == page2
assert page3 == page3
assert page1 != page2
assert page1 != page3
assert page1 == page4
assert page1 == page5
def test_hash(self):
page1 = WikiPage(self.reddit, subreddit=Subreddit(self.reddit, 'a'),
name='x')
page2 = WikiPage(self.reddit, subreddit=Subreddit(self.reddit, 'a'),
name='2')
page3 = WikiPage(self.reddit, subreddit=Subreddit(self.reddit, 'b'),
name='1')
page4 = WikiPage(self.reddit, subreddit=Subreddit(self.reddit, 'A'),
name='x')
page5 = WikiPage(self.reddit, subreddit=Subreddit(self.reddit, 'a'),
name='X')
assert hash(page1) == hash(page1)
assert hash(page2) == hash(page2)
assert hash(page3) == hash(page3)
assert hash(page1) != hash(page2)
assert hash(page1) != hash(page3)
assert hash(page1) == hash(page4)
assert hash(page1) == hash(page5)
def test_pickle(self):
page = WikiPage(self.reddit, subreddit=Subreddit(self.reddit, 'a'),
name='x')
for level in range(pickle.HIGHEST_PROTOCOL + 1):
other = pickle.loads(pickle.dumps(page, protocol=level))
assert page == other
def test_repr(self):
page = WikiPage(self.reddit, subreddit=Subreddit(self.reddit, 'a'),
name='x')
assert repr(page) == ('WikiPage(subreddit=Subreddit(display_name=\'a\''
'), name=\'x\')')
def test_str(self):
page = WikiPage(self.reddit, subreddit=Subreddit(self.reddit, 'a'),
name='x')
assert str(page) == 'a/x'
|
protonyx/labtronyx
|
refs/heads/master
|
labtronyx/common/jsonrpc.py
|
1
|
"""
JSON RPC Python class for PTX-RPC
Conforms to the JSON RPC 2.0 Spec (http://www.jsonrpc.org/specification) with
a small addition to allow for both positional and keyword arguments
This class can either be instantiated with a JSON encoded string or used as
a utility helper class
"""
import json
from . import errors
from .rpc import RpcRequest, RpcResponse
def get_content_type():
return 'application/json'
#===============================================================================
# Error Type
#===============================================================================
def error_encode(obj):
return {'jsonrpc': '2.0',
'id': obj.id,
'error': {'code': obj.code, 'message': obj.message}}
class JsonRpc_Error(errors.RpcError):
code = None
message = None
data = None
def __init__(self, **rpc_dict):
RuntimeError.__init__(self)
self.id = rpc_dict.get('id', None)
if 'error' in rpc_dict:
error = rpc_dict.get('error', {})
self.code = error.get('code', None)
self.message = error.get('message', None)
def __str__(self):
return repr(str(self.message))
def export(self):
return error_encode(self)
class JsonRpc_ParseError(errors.RpcInvalidPacket, JsonRpc_Error):
code = -32700
message = 'Invalid JSON was received by the server.'
class JsonRpc_InvalidRequest(errors.RpcInvalidPacket, JsonRpc_Error):
code = -32600
message = 'The JSON sent is not a valid Request object.'
class JsonRpc_MethodNotFound(errors.RpcMethodNotFound, JsonRpc_Error):
code = -32601
message = 'The method does not exist / is not available.'
class JsonRpc_InvalidParams(errors.RpcServerException, JsonRpc_Error):
code = -32602
message = 'Invalid method parameter(s).'
class JsonRpc_InternalError(errors.RpcServerException, JsonRpc_Error):
code = -32603
message = 'Internal JSON-RPC error.'
class JsonRpc_ServerException(errors.RpcServerException, JsonRpc_Error):
code = -32000
message = 'An unhandled server exception occurred'
JsonRpcErrors = {
-32700: JsonRpc_ParseError,
-32600: JsonRpc_InvalidRequest,
-32601: JsonRpc_MethodNotFound,
-32602: JsonRpc_InvalidParams,
-32603: JsonRpc_InternalError,
-32000: JsonRpc_ServerException
# -32000 to -32099 are reserved server-errors
}
JsonRpc_error_map = {
errors.RpcError: JsonRpc_InternalError,
errors.RpcInvalidPacket: JsonRpc_InvalidRequest,
errors.RpcMethodNotFound: JsonRpc_MethodNotFound,
errors.RpcServerException: JsonRpc_ServerException
}
#===============================================================================
# Request Type
#===============================================================================
class JsonRpc_Request(RpcRequest):
def __init__(self, **rpc_dict):
self.id = rpc_dict.get('id', None)
self.method = rpc_dict.get('method', '')
# decode arguments
args = rpc_dict.get('params', [])
kwargs = rpc_dict.get('kwargs', {})
if type(args) == dict:
self.kwargs = args
self.args = []
else:
self.kwargs = kwargs
self.args = args
def export(self):
# Slight modification of the JSON RPC 2.0 specification to allow
# both positional and named parameters
# Adds kwargs variable to object only when both are present
out = {'jsonrpc': '2.0',
'id': self.id,
'method': self.method}
if len(self.args) > 0:
out['params'] = self.args
if len(self.kwargs) > 0:
out['kwargs'] = self.kwargs
elif len(self.args) == 0:
out['params'] = self.kwargs
return out
#===============================================================================
# Response Type
#===============================================================================
class JsonRpc_Response(RpcResponse):
def __init__(self, **rpc_dict):
self.id = rpc_dict.get('id', None)
self.result = rpc_dict.get('result', None)
def export(self):
ret = {'jsonrpc': '2.0',
'id': self.id,
'result': self.result}
return ret
def buildResponse(*args, **kwargs):
"""
Factory function to build RpcResponse objects
:rtype: JsonRpc_Response
"""
return JsonRpc_Response(*args, **kwargs)
#===============================================================================
# JSON RPC Handlers
#===============================================================================
def _parseJsonRpcObject(rpc_dict):
"""
Takes a dictionary and determines if it is an RPC request or response
"""
if rpc_dict.get('jsonrpc') == '2.0':
if 'method' in rpc_dict.keys() and type(rpc_dict.get('method')) is unicode:
# Request object
req = RpcRequest(**rpc_dict)
req.kwargs = rpc_dict.get('kwargs', {})
req.args = rpc_dict.get('params', [])
if type(req.args) == dict:
req.kwargs = req.args
req.args = []
# if len(args) > 0 and len(kwargs) > 0:
# # Multiple parameter types
# req.args = args
# req.kwargs = kwargs
# elif len(args) > 0 and len(kwargs) == 0:
# # Only positional parameters
# req.args = args
# req.kwargs = {}
# elif len(args) == 0 and len(kwargs) > 0:
# # Only keyword parameters
# req.args = []
# req.kwargs = kwargs
# else:
# # No parameters?
# req.args = args
# req.kwargs = kwargs
return req
elif 'id' in rpc_dict.keys() and 'result' in rpc_dict.keys():
# Result response object
return RpcResponse(**rpc_dict)
elif 'id' in rpc_dict.keys() and 'error' in rpc_dict.keys():
# Error response object
error_code = rpc_dict['error'].get('code', -32700)
err_obj = JsonRpcErrors.get(error_code, JsonRpc_ParseError)
return err_obj(**rpc_dict)
else:
return JsonRpc_InvalidRequest(**rpc_dict)
else:
return JsonRpc_InvalidRequest()
def decode(data):
"""
:param data:
:return: (requests, responses, errors)
"""
requests = []
responses = []
rpc_errors = []
try:
req = json.loads(data)
if type(req) == list:
# Batch request
for sub_req in req:
try:
res = _parseJsonRpcObject(sub_req)
if isinstance(res, RpcRequest):
requests.append(res)
elif isinstance(res, RpcResponse):
responses.append(res)
elif isinstance(res, errors.RpcError):
rpc_errors.append(res)
except:
rpc_errors.append(JsonRpc_InvalidRequest())
if len(req) == 0:
rpc_errors.append(JsonRpc_InvalidRequest())
elif type(req) == dict:
# Single request
res = _parseJsonRpcObject(req)
if isinstance(res, RpcRequest):
requests.append(res)
elif isinstance(res, RpcResponse):
responses.append(res)
elif isinstance(res, errors.RpcError):
rpc_errors.append(res)
else:
rpc_errors.append(JsonRpc_ParseError())
except Exception as e:
# No JSON object could be decoded
rpc_errors.append(JsonRpc_ParseError())
return (requests, responses, rpc_errors)
def encode(requests, responses):
"""
:param requests:
:param responses:
:return: str
"""
ret = []
for rpc_obj in requests + responses:
if type(rpc_obj) == RpcRequest:
rpc_obj.__class__ = JsonRpc_Request
if type(rpc_obj) == RpcResponse:
rpc_obj.__class__ = JsonRpc_Response
if isinstance(rpc_obj, errors.RpcError) and type(rpc_obj) not in JsonRpcErrors.values():
rpc_obj.__class__ = JsonRpc_error_map.get(type(rpc_obj), JsonRpc_InternalError)
rpc_dict = rpc_obj.export()
ret.append(rpc_dict)
if len(ret) == 1:
return str(json.dumps(ret[0]))
elif len(ret) > 1:
return str(json.dumps(ret))
else:
return ''
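# Editor's usage sketch (not part of the original module): a minimal, hedged
# illustration of the positional + keyword parameter extension described in
# the module docstring. The 'scale' method and its arguments are hypothetical.
def _demo_request_encoding():
    req = JsonRpc_Request(id=1, method='scale', params=[2, 3],
                          kwargs={'unit': 'mV'})
    # Serializes to a JSON-RPC 2.0 object carrying both the standard 'params'
    # member and the non-standard 'kwargs' member, e.g.
    # '{"jsonrpc": "2.0", "id": 1, "method": "scale",
    #   "params": [2, 3], "kwargs": {"unit": "mV"}}'
    return encode([req], [])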
|
Zlash65/erpnext
|
refs/heads/develop
|
erpnext/hr/doctype/employee_onboarding/employee_onboarding.py
|
6
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from erpnext.hr.utils import EmployeeBoardingController
from frappe.model.mapper import get_mapped_doc
class IncompleteTaskError(frappe.ValidationError): pass
class EmployeeOnboarding(EmployeeBoardingController):
def validate(self):
super(EmployeeOnboarding, self).validate()
def validate_employee_creation(self):
if self.docstatus != 1:
frappe.throw(_("Submit this to create the Employee record"))
else:
for activity in self.activities:
if not activity.required_for_employee_creation:
continue
else:
task_status = frappe.db.get_value("Task", activity.task, "status")
if task_status not in ["Completed", "Cancelled"]:
frappe.throw(_("All the mandatory Task for employee creation hasn't been done yet."), IncompleteTaskError)
def on_submit(self):
super(EmployeeOnboarding, self).on_submit()
def on_update_after_submit(self):
self.create_task_and_notify_user()
def on_cancel(self):
super(EmployeeOnboarding, self).on_cancel()
@frappe.whitelist()
def make_employee(source_name, target_doc=None):
doc = frappe.get_doc("Employee Onboarding", source_name)
doc.validate_employee_creation()
def set_missing_values(source, target):
target.personal_email = frappe.db.get_value("Job Applicant", source.job_applicant, "email_id")
target.status = "Active"
doc = get_mapped_doc("Employee Onboarding", source_name, {
"Employee Onboarding": {
"doctype": "Employee",
"field_map": {
"first_name": "employee_name",
"employee_grade": "grade",
}}
}, target_doc, set_missing_values)
return doc
|
suncycheng/intellij-community
|
refs/heads/master
|
python/lib/Lib/sre_parse.py
|
116
|
#
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# XXX: show string offset and offending character for all errors
import sys
from sre_constants import *
def set(seq):
s = {}
for elem in seq:
s[elem] = 1
return s
SPECIAL_CHARS = ".\\[{()*+?^$|"
REPEAT_CHARS = "*+?{"
DIGITS = set("0123456789")
OCTDIGITS = set("01234567")
HEXDIGITS = set("0123456789abcdefABCDEF")
WHITESPACE = set(" \t\n\r\v\f")
ESCAPES = {
r"\a": (LITERAL, ord("\a")),
r"\b": (LITERAL, ord("\b")),
r"\f": (LITERAL, ord("\f")),
r"\n": (LITERAL, ord("\n")),
r"\r": (LITERAL, ord("\r")),
r"\t": (LITERAL, ord("\t")),
r"\v": (LITERAL, ord("\v")),
r"\\": (LITERAL, ord("\\"))
}
CATEGORIES = {
r"\A": (AT, AT_BEGINNING_STRING), # start of string
r"\b": (AT, AT_BOUNDARY),
r"\B": (AT, AT_NON_BOUNDARY),
r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
r"\Z": (AT, AT_END_STRING), # end of string
}
FLAGS = {
# standard flags
"i": SRE_FLAG_IGNORECASE,
"L": SRE_FLAG_LOCALE,
"m": SRE_FLAG_MULTILINE,
"s": SRE_FLAG_DOTALL,
"x": SRE_FLAG_VERBOSE,
# extensions
"t": SRE_FLAG_TEMPLATE,
"u": SRE_FLAG_UNICODE,
}
class Pattern:
# master pattern object. keeps track of global attributes
def __init__(self):
self.flags = 0
self.open = []
self.groups = 1
self.groupdict = {}
def opengroup(self, name=None):
gid = self.groups
self.groups = gid + 1
if name is not None:
ogid = self.groupdict.get(name, None)
if ogid is not None:
raise error, ("redefinition of group name %s as group %d; "
"was group %d" % (repr(name), gid, ogid))
self.groupdict[name] = gid
self.open.append(gid)
return gid
def closegroup(self, gid):
self.open.remove(gid)
def checkgroup(self, gid):
return gid < self.groups and gid not in self.open
class SubPattern:
# a subpattern, in intermediate form
def __init__(self, pattern, data=None):
self.pattern = pattern
if data is None:
data = []
self.data = data
self.width = None
def dump(self, level=0):
nl = 1
seqtypes = type(()), type([])
for op, av in self.data:
print level*" " + op,; nl = 0
if op == "in":
# member sublanguage
print; nl = 1
for op, a in av:
print (level+1)*" " + op, a
elif op == "branch":
print; nl = 1
i = 0
for a in av[1]:
if i > 0:
print level*" " + "or"
a.dump(level+1); nl = 1
i = i + 1
elif type(av) in seqtypes:
for a in av:
if isinstance(a, SubPattern):
if not nl: print
a.dump(level+1); nl = 1
else:
print a, ; nl = 0
else:
print av, ; nl = 0
if not nl: print
def __repr__(self):
return repr(self.data)
def __len__(self):
return len(self.data)
def __delitem__(self, index):
del self.data[index]
def __getitem__(self, index):
return self.data[index]
def __setitem__(self, index, code):
self.data[index] = code
def __getslice__(self, start, stop):
return SubPattern(self.pattern, self.data[start:stop])
def insert(self, index, code):
self.data.insert(index, code)
def append(self, code):
self.data.append(code)
def getwidth(self):
# determine the width (min, max) for this subpattern
if self.width:
return self.width
lo = hi = 0L
UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY)
REPEATCODES = (MIN_REPEAT, MAX_REPEAT)
for op, av in self.data:
if op is BRANCH:
i = sys.maxint
j = 0
for av in av[1]:
l, h = av.getwidth()
i = min(i, l)
j = max(j, h)
lo = lo + i
hi = hi + j
elif op is CALL:
i, j = av.getwidth()
lo = lo + i
hi = hi + j
elif op is SUBPATTERN:
i, j = av[1].getwidth()
lo = lo + i
hi = hi + j
elif op in REPEATCODES:
i, j = av[2].getwidth()
lo = lo + long(i) * av[0]
hi = hi + long(j) * av[1]
elif op in UNITCODES:
lo = lo + 1
hi = hi + 1
elif op == SUCCESS:
break
self.width = int(min(lo, sys.maxint)), int(min(hi, sys.maxint))
return self.width
class Tokenizer:
def __init__(self, string):
self.string = string
self.index = 0
self.__next()
def __next(self):
if self.index >= len(self.string):
self.next = None
return
char = self.string[self.index]
if char[0] == "\\":
try:
c = self.string[self.index + 1]
except IndexError:
raise error, "bogus escape (end of line)"
char = char + c
self.index = self.index + len(char)
self.next = char
def match(self, char, skip=1):
if char == self.next:
if skip:
self.__next()
return 1
return 0
def get(self):
this = self.next
self.__next()
return this
def tell(self):
return self.index, self.next
def seek(self, index):
self.index, self.next = index
def isident(char):
return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_"
def isdigit(char):
return "0" <= char <= "9"
def isname(name):
# check that group name is a valid string
if not isident(name[0]):
return False
for char in name[1:]:
if not isident(char) and not isdigit(char):
return False
return True
def _class_escape(source, escape):
# handle escape code inside character class
code = ESCAPES.get(escape)
if code:
return code
code = CATEGORIES.get(escape)
if code:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape (exactly two digits)
while source.next in HEXDIGITS and len(escape) < 4:
escape = escape + source.get()
escape = escape[2:]
if len(escape) != 2:
raise error, "bogus escape: %s" % repr("\\" + escape)
return LITERAL, int(escape, 16) & 0xff
elif c in OCTDIGITS:
# octal escape (up to three digits)
while source.next in OCTDIGITS and len(escape) < 4:
escape = escape + source.get()
escape = escape[1:]
return LITERAL, int(escape, 8) & 0xff
elif c in DIGITS:
raise error, "bogus escape: %s" % repr(escape)
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error, "bogus escape: %s" % repr(escape)
def _escape(source, escape, state):
# handle escape code in expression
code = CATEGORIES.get(escape)
if code:
return code
code = ESCAPES.get(escape)
if code:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape
while source.next in HEXDIGITS and len(escape) < 4:
escape = escape + source.get()
if len(escape) != 4:
raise ValueError
return LITERAL, int(escape[2:], 16) & 0xff
elif c == "0":
# octal escape
while source.next in OCTDIGITS and len(escape) < 4:
escape = escape + source.get()
return LITERAL, int(escape[1:], 8) & 0xff
elif c in DIGITS:
# octal escape *or* decimal group reference (sigh)
if source.next in DIGITS:
escape = escape + source.get()
if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
source.next in OCTDIGITS):
# got three octal digits; this is an octal escape
escape = escape + source.get()
return LITERAL, int(escape[1:], 8) & 0xff
# not an octal escape, so this is a group reference
group = int(escape[1:])
if group < state.groups:
if not state.checkgroup(group):
raise error, "cannot refer to open group"
return GROUPREF, group
raise ValueError
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error, "bogus escape: %s" % repr(escape)
def _parse_sub(source, state, nested=1):
# parse an alternation: a|b|c
items = []
itemsappend = items.append
sourcematch = source.match
while 1:
itemsappend(_parse(source, state))
if sourcematch("|"):
continue
if not nested:
break
if not source.next or sourcematch(")", 0):
break
else:
raise error, "pattern not properly closed"
if len(items) == 1:
return items[0]
subpattern = SubPattern(state)
subpatternappend = subpattern.append
# check if all items share a common prefix
while 1:
prefix = None
for item in items:
if not item:
break
if prefix is None:
prefix = item[0]
elif item[0] != prefix:
break
else:
# all subitems start with a common "prefix".
# move it out of the branch
for item in items:
del item[0]
subpatternappend(prefix)
continue # check next one
break
# check if the branch can be replaced by a character set
for item in items:
if len(item) != 1 or item[0][0] != LITERAL:
break
else:
# we can store this as a character set instead of a
# branch (the compiler may optimize this even more)
set = []
setappend = set.append
for item in items:
setappend(item[0])
subpatternappend((IN, set))
return subpattern
subpattern.append((BRANCH, (None, items)))
return subpattern
def _parse_sub_cond(source, state, condgroup):
item_yes = _parse(source, state)
if source.match("|"):
item_no = _parse(source, state)
if source.match("|"):
raise error, "conditional backref with more than two branches"
else:
item_no = None
if source.next and not source.match(")", 0):
raise error, "pattern not properly closed"
subpattern = SubPattern(state)
subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
return subpattern
_PATTERNENDERS = set("|)")
_ASSERTCHARS = set("=!<")
_LOOKBEHINDASSERTCHARS = set("=!")
_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
def _parse(source, state):
# parse a simple pattern
subpattern = SubPattern(state)
# precompute constants into local variables
subpatternappend = subpattern.append
sourceget = source.get
sourcematch = source.match
_len = len
PATTERNENDERS = _PATTERNENDERS
ASSERTCHARS = _ASSERTCHARS
LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS
REPEATCODES = _REPEATCODES
while 1:
if source.next in PATTERNENDERS:
break # end of subpattern
this = sourceget()
if this is None:
break # end of pattern
if state.flags & SRE_FLAG_VERBOSE:
# skip whitespace and comments
if this in WHITESPACE:
continue
if this == "#":
while 1:
this = sourceget()
if this in (None, "\n"):
break
continue
if this and this[0] not in SPECIAL_CHARS:
subpatternappend((LITERAL, ord(this)))
elif this == "[":
# character set
set = []
setappend = set.append
## if sourcematch(":"):
## pass # handle character classes
if sourcematch("^"):
setappend((NEGATE, None))
# check remaining characters
start = set[:]
while 1:
this = sourceget()
if this == "]" and set != start:
break
elif this and this[0] == "\\":
code1 = _class_escape(source, this)
elif this:
code1 = LITERAL, ord(this)
else:
raise error, "unexpected end of regular expression"
if sourcematch("-"):
# potential range
this = sourceget()
if this == "]":
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
setappend((LITERAL, ord("-")))
break
elif this:
if this[0] == "\\":
code2 = _class_escape(source, this)
else:
code2 = LITERAL, ord(this)
if code1[0] != LITERAL or code2[0] != LITERAL:
raise error, "bad character range"
lo = code1[1]
hi = code2[1]
if hi < lo:
raise error, "bad character range"
setappend((RANGE, (lo, hi)))
else:
raise error, "unexpected end of regular expression"
else:
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
# XXX: <fl> should move set optimization to compiler!
if _len(set)==1 and set[0][0] is LITERAL:
subpatternappend(set[0]) # optimization
elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
subpatternappend((NOT_LITERAL, set[1][1])) # optimization
else:
# XXX: <fl> should add charmap optimization here
subpatternappend((IN, set))
elif this and this[0] in REPEAT_CHARS:
# repeat previous item
if this == "?":
min, max = 0, 1
elif this == "*":
min, max = 0, MAXREPEAT
elif this == "+":
min, max = 1, MAXREPEAT
elif this == "{":
if source.next == "}":
subpatternappend((LITERAL, ord(this)))
continue
here = source.tell()
min, max = 0, MAXREPEAT
lo = hi = ""
while source.next in DIGITS:
lo = lo + source.get()
if sourcematch(","):
while source.next in DIGITS:
hi = hi + sourceget()
else:
hi = lo
if not sourcematch("}"):
subpatternappend((LITERAL, ord(this)))
source.seek(here)
continue
if lo:
min = int(lo)
if hi:
max = int(hi)
if max < min:
raise error, "bad repeat interval"
else:
raise error, "not supported"
# figure out which item to repeat
if subpattern:
item = subpattern[-1:]
else:
item = None
if not item or (_len(item) == 1 and item[0][0] == AT):
raise error, "nothing to repeat"
if item[0][0] in REPEATCODES:
raise error, "multiple repeat"
if sourcematch("?"):
subpattern[-1] = (MIN_REPEAT, (min, max, item))
else:
subpattern[-1] = (MAX_REPEAT, (min, max, item))
elif this == ".":
subpatternappend((ANY, None))
elif this == "(":
group = 1
name = None
condgroup = None
if sourcematch("?"):
group = 0
# options
if sourcematch("P"):
# python extensions
if sourcematch("<"):
# named group: skip forward to end of name
name = ""
while 1:
char = sourceget()
if char is None:
raise error, "unterminated name"
if char == ">":
break
name = name + char
group = 1
if not isname(name):
raise error, "bad character in group name"
elif sourcematch("="):
# named backreference
name = ""
while 1:
char = sourceget()
if char is None:
raise error, "unterminated name"
if char == ")":
break
name = name + char
if not isname(name):
raise error, "bad character in group name"
gid = state.groupdict.get(name)
if gid is None:
raise error, "unknown group name"
subpatternappend((GROUPREF, gid))
continue
else:
char = sourceget()
if char is None:
raise error, "unexpected end of pattern"
raise error, "unknown specifier: ?P%s" % char
elif sourcematch(":"):
# non-capturing group
group = 2
elif sourcematch("#"):
# comment
while 1:
if source.next is None or source.next == ")":
break
sourceget()
if not sourcematch(")"):
raise error, "unbalanced parenthesis"
continue
elif source.next in ASSERTCHARS:
# lookahead assertions
char = sourceget()
dir = 1
if char == "<":
if source.next not in LOOKBEHINDASSERTCHARS:
raise error, "syntax error"
dir = -1 # lookbehind
char = sourceget()
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error, "unbalanced parenthesis"
if char == "=":
subpatternappend((ASSERT, (dir, p)))
else:
subpatternappend((ASSERT_NOT, (dir, p)))
continue
elif sourcematch("("):
# conditional backreference group
condname = ""
while 1:
char = sourceget()
if char is None:
raise error, "unterminated name"
if char == ")":
break
condname = condname + char
group = 2
if isname(condname):
condgroup = state.groupdict.get(condname)
if condgroup is None:
raise error, "unknown group name"
else:
try:
condgroup = int(condname)
except ValueError:
raise error, "bad character in group name"
else:
# flags
if not source.next in FLAGS:
raise error, "unexpected end of pattern"
while source.next in FLAGS:
state.flags = state.flags | FLAGS[sourceget()]
if group:
# parse group contents
if group == 2:
# anonymous group
group = None
else:
group = state.opengroup(name)
if condgroup:
p = _parse_sub_cond(source, state, condgroup)
else:
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error, "unbalanced parenthesis"
if group is not None:
state.closegroup(group)
subpatternappend((SUBPATTERN, (group, p)))
else:
while 1:
char = sourceget()
if char is None:
raise error, "unexpected end of pattern"
if char == ")":
break
raise error, "unknown extension"
elif this == "^":
subpatternappend((AT, AT_BEGINNING))
elif this == "$":
subpattern.append((AT, AT_END))
elif this and this[0] == "\\":
code = _escape(source, this, state)
subpatternappend(code)
else:
raise error, "parser error"
return subpattern
def parse(str, flags=0, pattern=None):
# parse 're' pattern into list of (opcode, argument) tuples
source = Tokenizer(str)
if pattern is None:
pattern = Pattern()
pattern.flags = flags
pattern.str = str
p = _parse_sub(source, pattern, 0)
tail = source.get()
if tail == ")":
raise error, "unbalanced parenthesis"
elif tail:
raise error, "bogus characters at end of regular expression"
if flags & SRE_FLAG_DEBUG:
p.dump()
if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
# the VERBOSE flag was switched on inside the pattern. to be
# on the safe side, we'll parse the whole thing again...
return parse(str, p.pattern.flags)
return p
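# Editor's usage sketch (illustrative, not part of the original module):
# parse() returns a SubPattern whose .data is a list of (opcode, argument)
# tuples; for the pattern "ab*" the result looks roughly like
#   [(LITERAL, 97), (MAX_REPEAT, (0, MAXREPEAT, [(LITERAL, 98)]))]
# where the opcode names are the constants imported from sre_constants.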
def parse_template(source, pattern):
# parse 're' replacement string into list of literals and
# group references
s = Tokenizer(source)
sget = s.get
p = []
a = p.append
def literal(literal, p=p, pappend=a):
if p and p[-1][0] is LITERAL:
p[-1] = LITERAL, p[-1][1] + literal
else:
pappend((LITERAL, literal))
sep = source[:0]
if type(sep) is type(""):
makechar = chr
else:
makechar = unichr
while 1:
this = sget()
if this is None:
break # end of replacement string
if this and this[0] == "\\":
# group
c = this[1:2]
if c == "g":
name = ""
if s.match("<"):
while 1:
char = sget()
if char is None:
raise error, "unterminated group name"
if char == ">":
break
name = name + char
if not name:
raise error, "bad group name"
try:
index = int(name)
if index < 0:
raise error, "negative group number"
except ValueError:
if not isname(name):
raise error, "bad character in group name"
try:
index = pattern.groupindex[name]
except KeyError:
raise IndexError, "unknown group name"
a((MARK, index))
elif c == "0":
if s.next in OCTDIGITS:
this = this + sget()
if s.next in OCTDIGITS:
this = this + sget()
literal(makechar(int(this[1:], 8) & 0xff))
elif c in DIGITS:
isoctal = False
if s.next in DIGITS:
this = this + sget()
if (c in OCTDIGITS and this[2] in OCTDIGITS and
s.next in OCTDIGITS):
this = this + sget()
isoctal = True
literal(makechar(int(this[1:], 8) & 0xff))
if not isoctal:
a((MARK, int(this[1:])))
else:
try:
this = makechar(ESCAPES[this][1])
except KeyError:
pass
literal(this)
else:
literal(this)
# convert template to groups and literals lists
i = 0
groups = []
groupsappend = groups.append
literals = [None] * len(p)
for c, s in p:
if c is MARK:
groupsappend((i, s))
# literal[i] is already None
else:
literals[i] = s
i = i + 1
return groups, literals
def expand_template(template, match):
g = match.group
sep = match.string[:0]
groups, literals = template
literals = literals[:]
try:
for index, group in groups:
literals[index] = s = g(group)
if s is None:
raise error, "unmatched group"
except IndexError:
raise error, "invalid group reference"
return sep.join(literals)
|
pipermerriam/django
|
refs/heads/master
|
tests/utils_tests/test_numberformat.py
|
307
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from decimal import Decimal
from sys import float_info
from unittest import TestCase
from django.utils.numberformat import format as nformat
class TestNumberFormat(TestCase):
def test_format_number(self):
self.assertEqual(nformat(1234, '.'), '1234')
self.assertEqual(nformat(1234.2, '.'), '1234.2')
self.assertEqual(nformat(1234, '.', decimal_pos=2), '1234.00')
self.assertEqual(nformat(1234, '.', grouping=2, thousand_sep=','),
'1234')
self.assertEqual(nformat(1234, '.', grouping=2, thousand_sep=',',
force_grouping=True), '12,34')
self.assertEqual(nformat(-1234.33, '.', decimal_pos=1), '-1234.3')
def test_format_string(self):
self.assertEqual(nformat('1234', '.'), '1234')
self.assertEqual(nformat('1234.2', '.'), '1234.2')
self.assertEqual(nformat('1234', '.', decimal_pos=2), '1234.00')
self.assertEqual(nformat('1234', '.', grouping=2, thousand_sep=','),
'1234')
self.assertEqual(nformat('1234', '.', grouping=2, thousand_sep=',',
force_grouping=True), '12,34')
self.assertEqual(nformat('-1234.33', '.', decimal_pos=1), '-1234.3')
self.assertEqual(nformat('10000', '.', grouping=3,
thousand_sep='comma', force_grouping=True),
'10comma000')
def test_large_number(self):
most_max = ('{}179769313486231570814527423731704356798070567525844996'
'598917476803157260780028538760589558632766878171540458953'
'514382464234321326889464182768467546703537516986049910576'
'551282076245490090389328944075868508455133942304583236903'
'222948165808559332123348274797826204144723168738177180919'
'29988125040402618412485836{}')
most_max2 = ('{}35953862697246314162905484746340871359614113505168999'
'31978349536063145215600570775211791172655337563430809179'
'07028764928468642653778928365536935093407075033972099821'
'15310256415249098018077865788815173701691026788460916647'
'38064458963316171186642466965495956524082894463374763543'
'61838599762500808052368249716736')
int_max = int(float_info.max)
self.assertEqual(nformat(int_max, '.'), most_max.format('', '8'))
self.assertEqual(nformat(int_max + 1, '.'), most_max.format('', '9'))
self.assertEqual(nformat(int_max * 2, '.'), most_max2.format(''))
self.assertEqual(nformat(0 - int_max, '.'), most_max.format('-', '8'))
self.assertEqual(nformat(-1 - int_max, '.'), most_max.format('-', '9'))
self.assertEqual(nformat(-2 * int_max, '.'), most_max2.format('-'))
def test_decimal_numbers(self):
self.assertEqual(nformat(Decimal('1234'), '.'), '1234')
self.assertEqual(nformat(Decimal('1234.2'), '.'), '1234.2')
self.assertEqual(nformat(Decimal('1234'), '.', decimal_pos=2), '1234.00')
self.assertEqual(nformat(Decimal('1234'), '.', grouping=2, thousand_sep=','), '1234')
self.assertEqual(nformat(Decimal('1234'), '.', grouping=2, thousand_sep=',', force_grouping=True), '12,34')
self.assertEqual(nformat(Decimal('-1234.33'), '.', decimal_pos=1), '-1234.3')
self.assertEqual(nformat(Decimal('0.00000001'), '.', decimal_pos=8), '0.00000001')
def test_decimal_subclass(self):
class EuroDecimal(Decimal):
"""
Wrapper for Decimal which prefixes each amount with the € symbol.
"""
def __format__(self, specifier, **kwargs):
amount = super(EuroDecimal, self).__format__(specifier, **kwargs)
return '€ {}'.format(amount)
price = EuroDecimal('1.23')
self.assertEqual(nformat(price, ','), '€ 1,23')
|
LumPenPacK/NetworkExtractionFromImages
|
refs/heads/master
|
osx_build/nefi2_osx_amd64_xcode_2015/site-packages/numpy_1.11/numpy/testing/setup.py
|
151
|
#!/usr/bin/env python
from __future__ import division, print_function
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('testing', parent_package, top_path)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(maintainer="NumPy Developers",
maintainer_email="numpy-dev@numpy.org",
description="NumPy test module",
url="http://www.numpy.org",
license="NumPy License (BSD Style)",
configuration=configuration,
)
|
commtrack/temp-aquatest
|
refs/heads/master
|
apps/xformmanager/tests/xsd_checker.py
|
1
|
from xformmanager.tests.util import *
from xformmanager.xformdef import FormDef
from decimal import Decimal
from datetime import *
import unittest
class CompatibleTestCase(unittest.TestCase):
def setUp(self):
self.f1 = FormDef.from_file( get_file("data/versioning/base.xsd") )
def testSame(self):
""" testSame """
diff = self.f1.get_differences(self.f1)
self.assertTrue(diff.is_empty())
def testAddAndRemove(self):
""" testAddAndRemove """
self.f1 = FormDef.from_file( get_file("data/versioning/base.xsd") )
f2 = FormDef.from_file( get_file("data/versioning/field_added.xsd") )
diff = self.f1.get_differences(f2)
self.assertFalse(diff.is_empty())
self.assertTrue(len(diff.fields_added)==3)
self.assertTrue(len(diff.fields_changed)==0)
self.assertTrue(len(diff.fields_removed)==0)
diff = f2.get_differences(self.f1)
self.assertFalse(diff.is_empty())
self.assertTrue(len(diff.fields_added)==0)
self.assertTrue(len(diff.fields_removed)==3)
self.assertTrue(len(diff.fields_changed)==0)
def testChangeEnumAddAndRemove(self):
""" testChangeEnumAddAndRemove """
f2 = FormDef.from_file( get_file("data/versioning/field_changed_enum.xsd") )
diff = self.f1.get_differences(f2)
self.assertFalse(diff.is_empty())
self.assertTrue(len(diff.fields_added)==0)
self.assertTrue(len(diff.fields_changed)==0)
self.assertTrue(len(diff.fields_removed)==0)
self.assertTrue(len(diff.types_changed)==1)
diff = f2.get_differences(self.f1)
self.assertFalse(diff.is_empty())
self.assertTrue(len(diff.fields_added)==0)
self.assertTrue(len(diff.fields_changed)==0)
self.assertTrue(len(diff.fields_removed)==0)
self.assertTrue(len(diff.types_changed)==1)
def testChangeLeafRepeats(self):
""" testChangeLeafRepeats """
# make repeatable
f2 = FormDef.from_file( get_file("data/versioning/field_changed_repeatable_leaf.xsd") )
diff = self.f1.get_differences(f2)
self.assertFalse(diff.is_empty())
self.assertTrue(len(diff.fields_added)==0)
self.assertTrue(len(diff.fields_removed)==0)
self.assertTrue(len(diff.fields_changed)==1)
# make not repeatable
diff = f2.get_differences(self.f1)
self.assertFalse(diff.is_empty())
self.assertTrue(len(diff.fields_added)==0)
self.assertTrue(len(diff.fields_removed)==0)
self.assertTrue(len(diff.fields_changed)==1)
def testChangeNodeRepeats(self):
""" testChangeNodeRepeats """
# make repeatable
f1 = FormDef.from_file( get_file("data/versioning/repeats.xsd") )
f2 = FormDef.from_file( get_file("data/versioning/field_changed_repeatable_node.xsd") )
diff = f1.get_differences(f2)
self.assertFalse(diff.is_empty())
self.assertTrue(len(diff.fields_added)==0)
self.assertTrue(len(diff.fields_removed)==0)
# when the parent becomes repeatable, both parent and child have changed
self.assertTrue(len(diff.fields_changed)==2)
# make not repeatable
diff = f2.get_differences(f1)
self.assertFalse(diff.is_empty())
self.assertTrue(len(diff.fields_added)==0)
self.assertTrue(len(diff.fields_removed)==0)
# when the parent becomes repeatable, both parent and child have changed
self.assertTrue(len(diff.fields_changed)==2)
def testChangeType(self):
""" testChangeType """
f2 = FormDef.from_file( get_file("data/versioning/field_changed_type.xsd") )
diff = self.f1.get_differences(f2)
self.assertFalse(diff.is_empty())
self.assertTrue(len(diff.fields_added)==0)
self.assertTrue(len(diff.fields_removed)==0)
self.assertTrue(len(diff.fields_changed)==3)
|
Artimi/neng
|
refs/heads/master
|
neng/cmaes.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Copyright (C) 2013 Petr Šebek
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHERWISE
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import collections
import logging
import numpy as np
import scipy.optimize
class CMAES(object):
"""
    Class CMAES represents the Covariance Matrix Adaptation Evolution
    Strategy (CMA-ES) algorithm. It provides function minimization.
"""
def __init__(self, func, N, sigma=0.3, xmean=None):
"""
:param func: function to be minimized
:type func: function
        :param N: number of parameters of the function
:type N: int
:param sigma: step size of method
:type sigma: float
        :param xmean: initial point; if None, a random one is generated
:type xmean: np.array
"""
self.func = func
self.N = N
self.store_parameters = {'func': func,
'N': N,
'sigma': sigma,
'xmean': xmean,
}
self.stopeval = 1e4 * self.N / 2
self.stopfitness = 1e-10
self.eigenval = 0
# generation loop
self.counteval = 0
self.generation = 0
#stop criteria
self.stop_criteria = ("Fitness",
"MaxEval",
"NoEffectAxis",
"NoEffectCoord",
#"Stagnation",
"ConditionCov"
"TolXUp",
"TolFun",
"TolX")
self.tolfun = 1e-12
self.tolxup = 1e4
self.condition_cov_max = 1e14
self.lamda = int(4 + 3 * np.log(self.N))
self.initVariables(sigma, xmean)
def initVariables(self, sigma, xmean, lamda_factor=1):
"""
        Initialize variables that can change after a restart of the method,
        essentially those that depend on lamda.
:param sigma: step size
:type sigma: float
:param xmean: initial point
:type xmean: np.array
        :param lamda_factor: factor by which the old lambda is multiplied; used when restarting the method
:type lamda_factor: float
"""
self.sigma = sigma
if xmean is None:
self.xmean = np.random.rand(self.N)
else:
self.xmean = xmean
self.status = -1
# strategy parameter setting: selection
self.lamda *= lamda_factor
self.mu = self.lamda / 2
self.weights = np.array([np.log(self.mu + 0.5) - np.log(i)
for i in range(1, int(self.mu) + 1)])
self.mu = int(self.mu)
self.weights = self.weights / np.sum(self.weights)
self.mueff = 1 / np.sum(self.weights ** 2)
# strategy parameter setting: adaptation
self.cc = (4 + self.mueff / self.N) / (self.N + 4 + 2 * self.mueff / self.N)
self.cs = (self.mueff + 2) / (self.N + self.mueff + 5)
self.c1 = 2 / ((self.N + 1.3) ** 2 + self.mueff)
self.cmu = min(1 - self.c1, 2 * (self.mueff - 2 + 1 / self.mueff) /
((self.N + 2) ** 2 + self.mueff))
self.damps = 1 + 2 * max(0, np.sqrt((self.mueff - 1) /
(self.N + 1)) - 1) + self.cs
# initialize dynamic (internal) strategy parameters and constants
self.pc = np.zeros((1, self.N))
self.ps = np.zeros_like(self.pc)
self.B = np.eye(self.N)
self.D = np.eye(self.N)
self.C = np.identity(self.N)
self.chiN = self.N ** 0.5 * (1 - 1 / (4 * self.N) + 1 /
(21 * self.N ** 2))
# termination
self.tolx = 1e-12 * self.sigma
self.short_history_len = 10 + np.ceil(30 * self.N / self.lamda)
self.long_history_len_down = 120 + 30 * self.N / self.lamda
self.long_history_len_up = 20000
self.history = {}
self.history['short_best'] = collections.deque()
self.history['long_best'] = collections.deque()
self.history['long_median'] = collections.deque()
def newGeneration(self):
"""
Generate new generation of individuals.
:rtype: np.array
:return: new generation
"""
self.generation += 1
self.arz = np.random.randn(self.lamda, self.N)
self.arx = self.xmean + self.sigma * np.dot(np.dot(self.B, self.D), self.arz.T).T
return self.arx
def update(self, arfitness):
"""
        Update the internal state of the method from the newly evaluated generation.
        :param arfitness: list of function values of the individuals
:type arfitness: list
"""
self.counteval += self.lamda
self.arfitness = arfitness
# sort by fitness and compute weighted mean into xmean
self.arindex = np.argsort(self.arfitness)
self.arfitness = self.arfitness[self.arindex]
self.xmean = np.dot(self.arx[self.arindex[:self.mu]].T, self.weights)
self.zmean = np.dot(self.arz[self.arindex[:self.mu]].T, self.weights)
self.ps = np.dot((1 - self.cs), self.ps) + np.dot((np.sqrt(self.cs * (2 - self.cs) * self.mueff)),
np.dot(self.B, self.zmean))
self.hsig = np.linalg.norm(self.ps) / np.sqrt(
1 - (1 - self.cs) ** (2 * self.counteval / self.lamda)) / self.chiN < 1.4 + 2 / (self.N + 1)
self.pc = np.dot((1 - self.cc), self.pc) + np.dot(
np.dot(self.hsig, np.sqrt(self.cc * (2 - self.cc) * self.mueff)),
np.dot(np.dot(self.B, self.D), self.zmean))
# adapt covariance matrix C
self.C = np.dot((1 - self.c1 - self.cmu), self.C) \
+ np.dot(self.c1, ((self.pc * self.pc.T)
+ np.dot((1 - self.hsig) * self.cc * (2 - self.cc), self.C))) \
+ np.dot(self.cmu,
np.dot(np.dot(np.dot(np.dot(self.B, self.D), self.arz[self.arindex[:self.mu]].T),
np.diag(self.weights)),
(np.dot(np.dot(self.B, self.D), self.arz[self.arindex[:self.mu]].T)).T))
# adapt step size sigma
self.sigma = self.sigma * np.exp((self.cs / self.damps) * (np.linalg.norm(self.ps) / self.chiN - 1))
# diagonalization
if self.counteval - self.eigenval > self.lamda / (self.c1 + self.cmu) / self.N / 10:
self.eigenval = self.counteval
self.C = np.triu(self.C) + np.triu(self.C, 1).T
self.D, self.B = np.linalg.eig(self.C)
self.D = np.diag(np.sqrt(self.D))
#history
self.history['short_best'].append(arfitness[0])
if len(self.history['short_best']) >= self.short_history_len:
self.history['short_best'].popleft()
if self.generation % 5 == 0: # last 20 %
self.history['long_best'].append(arfitness[0])
self.history['long_median'].append(np.median(arfitness))
if len(self.history['long_best']) >= self.long_history_len_up:
self.history['long_best'].popleft()
self.history['long_median'].popleft()
self.checkStop()
if self.generation % 20 == 0:
self.logState()
def fmin(self):
"""
        Method for the actual function minimization. Iterates until a
        termination criterion is met. If an unsuccessful termination criterion
        is met, the method is restarted with a doubled population. If the
        maximum number of evaluations is reached or the function is acceptably
        minimized, iteration ends and the result is returned.
:rtype: scipy.optimize.OptimizeResult
"""
while self.status != 0 and self.status != 1:
if self.status > 2:
logging.warning("Restart due to %s", self.stop_criteria[self.status])
self.restart(2)
pop = self.newGeneration()
values = np.empty(pop.shape[0])
for i in range(pop.shape[0]):
values[i] = self.func(pop[i])
self.update(values)
return self.result
def restart(self, lamda_factor):
"""
Restart whole method to initial state, but with population multiplied
by lamda_factor.
:param lamda_factor: multiply factor
:type lamda_factor: int
"""
self.initVariables(self.store_parameters['sigma'], np.random.rand(self.N), lamda_factor=lamda_factor)
def checkStop(self):
"""
        Termination criteria of the method. They are checked every iteration;
        if any of them is true, the computation should end.
        :return: True if some termination criterion was met, False otherwise
:rtype: bool
"""
i = self.generation % self.N
self.stop_conditions = (self.arfitness[0] <= self.stopfitness,
self.counteval > self.stopeval,
sum(self.xmean == self.xmean + 0.1 * self.sigma * self.D[i] * self.B[:, i]) == self.N,
np.any(self.xmean == self.xmean + 0.2 * self.sigma * np.sqrt(np.diag(self.C))),
#len(self.history['long_median']) > self.long_history_len_down and \
#np.median(list(itertools.islice(self.history['long_median'], int(0.7*len(self.history['long_median'])), None))) <= \
#np.median(list(itertools.islice(self.history['long_median'],int(0.3*len(self.history['long_median']))))),
np.linalg.cond(self.C) > self.condition_cov_max,
self.sigma * np.max(self.D) >= self.tolxup,
max(self.history['short_best']) - min(self.history['short_best']) <= self.tolfun and
self.arfitness[-1] - self.arfitness[0] <= self.tolfun,
np.all(self.sigma * self.pc < self.tolx) and np.all(
self.sigma * np.sqrt(np.diag(self.C)) < self.tolx)
)
        if np.any(self.stop_conditions):
            self.status = self.stop_conditions.index(True)
            return True
        return False  # no termination criterion met
def logState(self):
"""
Function for logging the progress of method.
"""
logging.debug(
"generation: {generation:<5}, v: {v_function:<6.2e}, sigma: {sigma:.2e}, best: {best}, xmean: {xmean}".format(
generation=self.generation, best=[round(x, 8) for x in self.arx[self.arindex[0]]],
v_function=self.arfitness[0], sigma=self.sigma, xmean=self.xmean))
@property
def result(self):
"""
Result of computation. Not returned while minimization is in progress.
:return: result of computation
:rtype: scipy.optimize.OptimizeResult
"""
if self.status < 0:
raise AttributeError("Result is not ready yet, cmaes is not finished")
else:
self._result = scipy.optimize.OptimizeResult()
self._result['x'] = self.arx[self.arindex[0]]
self._result['fun'] = self.arfitness[0]
self._result['nfev'] = self.counteval
if self.status == 0:
self._result['success'] = True
self._result['status'] = self.status
self._result['message'] = "Optimization terminated successfully."
else:
self._result['success'] = False
self._result['status'] = self.status
self._result['message'] = self.stop_criteria[self.status]
return self._result
def fmin(func, N):
"""
    Convenience function so that other modules can run the minimization with a single call.
:param func: function to be minimized
:type func: function
:param N: number of parameters of given function
:type N: int
    :return: statistics of the computation together with the result
:rtype: scipy.optimize.OptimizeResult
"""
c = CMAES(func, N)
return c.fmin()
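# Editor's usage sketch (not part of the original module): a minimal, hedged
# example of the module-level fmin() helper, minimizing a 2-dimensional
# sphere function. Evaluation counts and the exact minimizer are stochastic.
def _demo_sphere_minimization():
    def sphere(x):
        # sum of squares; the minimum is 0 at the origin
        return float(np.sum(x ** 2))
    result = fmin(sphere, 2)
    # result['x'] should end up close to the origin and result['fun'] close
    # to zero (or the run stops on MaxEval and reports the best point found)
    return result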
|
petrjasek/superdesk-core
|
refs/heads/master
|
content_api/companies/__init__.py
|
1
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from content_api.companies.resource import CompaniesResource
import superdesk
from superdesk.services import BaseService
def init_app(app):
endpoint_name = "companies"
service = BaseService(endpoint_name, backend=superdesk.get_backend())
CompaniesResource(endpoint_name, app=app, service=service)
|
kxepal/simpleubjson
|
refs/heads/master
|
simpleubjson/tools/inspect.py
|
4
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2014 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file LICENSE, which
# you should have received as part of this distribution.
#
import sys
import simpleubjson
from ..draft8 import Draft8Decoder
from ..draft9 import Draft9Decoder
from ..exceptions import EarlyEndOfStreamError
def pprint(data, output=sys.stdout, allow_noop=True,
indent=' ' * 4, max_level=None, spec='draft-9'):
"""Pretty prints ubjson data using the handy [ ]-notation to represent it in
readable form. Example::
[{]
[S] [i] [2] [id]
[I] [1234567890]
[S] [i] [4] [name]
[S] [i] [3] [bob]
[}]
:param data: `.read([size])`-able object or source string with ubjson data.
:param output: `.write([data])`-able object.
:param allow_noop: Allow emit :const:`~simpleubjson.NOOP` or not.
    :param indent: Indentation string.
    :param max_level: Max level of inspection of nested containers. By default
                      there is no limit, but you may hit the system recursion
                      limit.
    :param spec: UBJSON specification. Draft-8 and Draft-9 specifications
                 are supported via the ``draft-8`` or ``draft-9`` keys.
:type spec: str
"""
def maybe_write(data, level):
if max_level is None or level <= max_level:
output.write('%s' % (indent * level))
output.write(data)
output.flush()
def inspect_draft8(decoder, level, container_size):
while 1:
try:
tag, length, value = decoder.next_tlv()
utag = tag.decode()
except EarlyEndOfStreamError:
break
# standalone markers
if length is None and value is None:
if utag == 'E':
maybe_write('[%s]\n' % (utag,), level - 1)
return
else:
maybe_write('[%s]\n' % (utag,), level)
# sized containers
elif length is not None and value is None:
maybe_write('[%s] [%s]\n' % (utag, length), level)
if utag in 'oO':
length = length == 255 and length or length * 2
inspect_draft8(decoder, level + 1, length)
            # plain values
elif length is None and value is not None:
value = decoder.dispatch[tag](decoder, tag, length, value)
maybe_write('[%s] [%s]\n' % (utag, value), level)
# sized values
else:
value = decoder.dispatch[tag](decoder, tag, length, value)
maybe_write('[%s] [%s] [%s]\n' % (utag, length, value), level)
if container_size != 255:
container_size -= 1
if not container_size:
return
def inspect_draft9(decoder, level, *args):
while 1:
try:
tag, length, value = decoder.next_tlv()
utag = tag.decode()
except EarlyEndOfStreamError:
break
# standalone markers
if length is None and value is None:
if utag in ']}':
level -= 1
maybe_write('[%s]\n' % (utag,), level)
if utag in '{[':
level += 1
            # plain values
elif length is None and value is not None:
value = decoder.dispatch[tag](decoder, tag, length, value)
maybe_write('[%s] [%s]\n' % (utag, value), level)
# sized values
else:
value = decoder.dispatch[tag](decoder, tag, length, value)
pattern = '[%s] [%s] [%s] [%s]\n'
# very dirty hack to show size as marker and value
_decoder = Draft9Decoder(simpleubjson.encode(length, spec=spec))
tlv = _decoder.next_tlv()
args = tuple([utag, tlv[0].decode(), tlv[2], value])
maybe_write(pattern % args, level)
if spec.lower() in ['draft8', 'draft-8']:
decoder = Draft8Decoder(data, allow_noop)
inspect = inspect_draft8
elif spec.lower() in ['draft9', 'draft-9']:
decoder = Draft9Decoder(data, allow_noop)
inspect = inspect_draft9
else:
raise ValueError('Unknown or unsupported specification %s' % spec)
inspect(decoder, 0, 255)
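# Editor's usage sketch (not part of the original module): encode a small
# document with simpleubjson and inspect the resulting byte stream, assuming
# simpleubjson.encode() accepts the same 'spec' keys as this function.
def _demo_pprint():
    payload = simpleubjson.encode({'id': 1234567890, 'name': 'bob'},
                                  spec='draft-9')
    pprint(payload, spec='draft-9')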
|
richpolis/siveinpy
|
refs/heads/master
|
env/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/constants.py
|
3007
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
_debug = 0
eDetecting = 0
eFoundIt = 1
eNotMe = 2
eStart = 0
eError = 1
eItsMe = 2
SHORTCUT_THRESHOLD = 0.95
|
freshplanet/AppEngine-SocketPool
|
refs/heads/master
|
tlslite/utils/rijndael.py
|
3
|
# Authors:
# Bram Cohen
# Trevor Perrin - various changes
#
# See the LICENSE file for legal information regarding use of this file.
# Also see Bram Cohen's statement below
"""
A pure python (slow) implementation of rijndael with a decent interface
To include -
from rijndael import rijndael
To do a key setup -
r = rijndael(key, block_size = 16)
key must be a string of length 16, 24, or 32
blocksize must be 16, 24, or 32. Default is 16
To use -
ciphertext = r.encrypt(plaintext)
plaintext = r.decrypt(ciphertext)
If any strings are of the wrong length a ValueError is thrown
"""
# ported from the Java reference code by Bram Cohen, bram@gawth.com, April 2001
# this code is public domain, unless someone makes
# an intellectual property claim against the reference
# code, in which case it can be made public domain by
# deleting all the comments and renaming all the variables
import copy
import string
#-----------------------
#TREV - ADDED BECAUSE THERE'S WARNINGS ABOUT INT OVERFLOW BEHAVIOR CHANGING IN
#2.4.....
import os
if os.name != "java":
import exceptions
if hasattr(exceptions, "FutureWarning"):
import warnings
warnings.filterwarnings("ignore", category=FutureWarning, append=1)
#-----------------------
shifts = [[[0, 0], [1, 3], [2, 2], [3, 1]],
[[0, 0], [1, 5], [2, 4], [3, 3]],
[[0, 0], [1, 7], [3, 5], [4, 4]]]
# [keysize][block_size]
num_rounds = {16: {16: 10, 24: 12, 32: 14}, 24: {16: 12, 24: 12, 32: 14}, 32: {16: 14, 24: 14, 32: 14}}
A = [[1, 1, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 0, 0, 0, 1]]
# produce log and alog tables, needed for multiplying in the
# field GF(2^m) (generator = 3)
alog = [1]
for i in xrange(255):
j = (alog[-1] << 1) ^ alog[-1]
if j & 0x100 != 0:
j ^= 0x11B
alog.append(j)
log = [0] * 256
for i in xrange(1, 255):
log[alog[i]] = i
# multiply two elements of GF(2^m)
def mul(a, b):
if a == 0 or b == 0:
return 0
return alog[(log[a & 0xFF] + log[b & 0xFF]) % 255]
# substitution box based on F^{-1}(x)
box = [[0] * 8 for i in xrange(256)]
box[1][7] = 1
for i in xrange(2, 256):
j = alog[255 - log[i]]
for t in xrange(8):
box[i][t] = (j >> (7 - t)) & 0x01
B = [0, 1, 1, 0, 0, 0, 1, 1]
# affine transform: box[i] <- B + A*box[i]
cox = [[0] * 8 for i in xrange(256)]
for i in xrange(256):
for t in xrange(8):
cox[i][t] = B[t]
for j in xrange(8):
cox[i][t] ^= A[t][j] * box[i][j]
# S-boxes and inverse S-boxes
S = [0] * 256
Si = [0] * 256
for i in xrange(256):
S[i] = cox[i][0] << 7
for t in xrange(1, 8):
S[i] ^= cox[i][t] << (7-t)
Si[S[i] & 0xFF] = i
# T-boxes
G = [[2, 1, 1, 3],
[3, 2, 1, 1],
[1, 3, 2, 1],
[1, 1, 3, 2]]
AA = [[0] * 8 for i in xrange(4)]
for i in xrange(4):
for j in xrange(4):
AA[i][j] = G[i][j]
AA[i][i+4] = 1
for i in xrange(4):
pivot = AA[i][i]
if pivot == 0:
t = i + 1
while AA[t][i] == 0 and t < 4:
t += 1
assert t != 4, 'G matrix must be invertible'
for j in xrange(8):
AA[i][j], AA[t][j] = AA[t][j], AA[i][j]
pivot = AA[i][i]
for j in xrange(8):
if AA[i][j] != 0:
AA[i][j] = alog[(255 + log[AA[i][j] & 0xFF] - log[pivot & 0xFF]) % 255]
for t in xrange(4):
if i != t:
for j in xrange(i+1, 8):
AA[t][j] ^= mul(AA[i][j], AA[t][i])
AA[t][i] = 0
iG = [[0] * 4 for i in xrange(4)]
for i in xrange(4):
for j in xrange(4):
iG[i][j] = AA[i][j + 4]
def mul4(a, bs):
if a == 0:
return 0
r = 0
for b in bs:
r <<= 8
if b != 0:
r = r | mul(a, b)
return r
T1 = []
T2 = []
T3 = []
T4 = []
T5 = []
T6 = []
T7 = []
T8 = []
U1 = []
U2 = []
U3 = []
U4 = []
for t in xrange(256):
s = S[t]
T1.append(mul4(s, G[0]))
T2.append(mul4(s, G[1]))
T3.append(mul4(s, G[2]))
T4.append(mul4(s, G[3]))
s = Si[t]
T5.append(mul4(s, iG[0]))
T6.append(mul4(s, iG[1]))
T7.append(mul4(s, iG[2]))
T8.append(mul4(s, iG[3]))
U1.append(mul4(t, iG[0]))
U2.append(mul4(t, iG[1]))
U3.append(mul4(t, iG[2]))
U4.append(mul4(t, iG[3]))
# round constants
rcon = [1]
r = 1
for t in xrange(1, 30):
r = mul(2, r)
rcon.append(r)
del A
del AA
del pivot
del B
del G
del box
del log
del alog
del i
del j
del r
del s
del t
del mul
del mul4
del cox
del iG
class rijndael:
def __init__(self, key, block_size = 16):
if block_size != 16 and block_size != 24 and block_size != 32:
raise ValueError('Invalid block size: ' + str(block_size))
if len(key) != 16 and len(key) != 24 and len(key) != 32:
raise ValueError('Invalid key size: ' + str(len(key)))
self.block_size = block_size
ROUNDS = num_rounds[len(key)][block_size]
BC = block_size // 4
# encryption round keys
Ke = [[0] * BC for i in xrange(ROUNDS + 1)]
# decryption round keys
Kd = [[0] * BC for i in xrange(ROUNDS + 1)]
ROUND_KEY_COUNT = (ROUNDS + 1) * BC
KC = len(key) // 4
# copy user material bytes into temporary ints
tk = []
for i in xrange(0, KC):
tk.append((ord(key[i * 4]) << 24) | (ord(key[i * 4 + 1]) << 16) |
(ord(key[i * 4 + 2]) << 8) | ord(key[i * 4 + 3]))
# copy values into round key arrays
t = 0
j = 0
while j < KC and t < ROUND_KEY_COUNT:
Ke[t // BC][t % BC] = tk[j]
Kd[ROUNDS - (t // BC)][t % BC] = tk[j]
j += 1
t += 1
tt = 0
rconpointer = 0
while t < ROUND_KEY_COUNT:
# extrapolate using phi (the round key evolution function)
tt = tk[KC - 1]
tk[0] ^= (S[(tt >> 16) & 0xFF] & 0xFF) << 24 ^ \
(S[(tt >> 8) & 0xFF] & 0xFF) << 16 ^ \
(S[ tt & 0xFF] & 0xFF) << 8 ^ \
(S[(tt >> 24) & 0xFF] & 0xFF) ^ \
(rcon[rconpointer] & 0xFF) << 24
rconpointer += 1
if KC != 8:
for i in xrange(1, KC):
tk[i] ^= tk[i-1]
else:
for i in xrange(1, KC // 2):
tk[i] ^= tk[i-1]
tt = tk[KC // 2 - 1]
tk[KC // 2] ^= (S[ tt & 0xFF] & 0xFF) ^ \
(S[(tt >> 8) & 0xFF] & 0xFF) << 8 ^ \
(S[(tt >> 16) & 0xFF] & 0xFF) << 16 ^ \
(S[(tt >> 24) & 0xFF] & 0xFF) << 24
for i in xrange(KC // 2 + 1, KC):
tk[i] ^= tk[i-1]
# copy values into round key arrays
j = 0
while j < KC and t < ROUND_KEY_COUNT:
Ke[t // BC][t % BC] = tk[j]
Kd[ROUNDS - (t // BC)][t % BC] = tk[j]
j += 1
t += 1
# inverse MixColumn where needed
for r in xrange(1, ROUNDS):
for j in xrange(BC):
tt = Kd[r][j]
Kd[r][j] = U1[(tt >> 24) & 0xFF] ^ \
U2[(tt >> 16) & 0xFF] ^ \
U3[(tt >> 8) & 0xFF] ^ \
U4[ tt & 0xFF]
self.Ke = Ke
self.Kd = Kd
def encrypt(self, plaintext):
if len(plaintext) != self.block_size:
raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(plaintext)))
Ke = self.Ke
BC = self.block_size // 4
ROUNDS = len(Ke) - 1
if BC == 4:
SC = 0
elif BC == 6:
SC = 1
else:
SC = 2
s1 = shifts[SC][1][0]
s2 = shifts[SC][2][0]
s3 = shifts[SC][3][0]
a = [0] * BC
# temporary work array
t = []
# plaintext to ints + key
for i in xrange(BC):
t.append((ord(plaintext[i * 4 ]) << 24 |
ord(plaintext[i * 4 + 1]) << 16 |
ord(plaintext[i * 4 + 2]) << 8 |
ord(plaintext[i * 4 + 3]) ) ^ Ke[0][i])
# apply round transforms
for r in xrange(1, ROUNDS):
for i in xrange(BC):
a[i] = (T1[(t[ i ] >> 24) & 0xFF] ^
T2[(t[(i + s1) % BC] >> 16) & 0xFF] ^
T3[(t[(i + s2) % BC] >> 8) & 0xFF] ^
T4[ t[(i + s3) % BC] & 0xFF] ) ^ Ke[r][i]
t = copy.copy(a)
# last round is special
result = []
for i in xrange(BC):
tt = Ke[ROUNDS][i]
result.append((S[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
result.append((S[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
result.append((S[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
result.append((S[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF)
return string.join(map(chr, result), '')
def decrypt(self, ciphertext):
if len(ciphertext) != self.block_size:
            raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(ciphertext)))
Kd = self.Kd
BC = self.block_size // 4
ROUNDS = len(Kd) - 1
if BC == 4:
SC = 0
elif BC == 6:
SC = 1
else:
SC = 2
s1 = shifts[SC][1][1]
s2 = shifts[SC][2][1]
s3 = shifts[SC][3][1]
a = [0] * BC
# temporary work array
t = [0] * BC
# ciphertext to ints + key
for i in xrange(BC):
t[i] = (ord(ciphertext[i * 4 ]) << 24 |
ord(ciphertext[i * 4 + 1]) << 16 |
ord(ciphertext[i * 4 + 2]) << 8 |
ord(ciphertext[i * 4 + 3]) ) ^ Kd[0][i]
# apply round transforms
for r in xrange(1, ROUNDS):
for i in xrange(BC):
a[i] = (T5[(t[ i ] >> 24) & 0xFF] ^
T6[(t[(i + s1) % BC] >> 16) & 0xFF] ^
T7[(t[(i + s2) % BC] >> 8) & 0xFF] ^
T8[ t[(i + s3) % BC] & 0xFF] ) ^ Kd[r][i]
t = copy.copy(a)
# last round is special
result = []
for i in xrange(BC):
tt = Kd[ROUNDS][i]
result.append((Si[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
result.append((Si[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
result.append((Si[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
result.append((Si[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF)
return string.join(map(chr, result), '')
def encrypt(key, block):
return rijndael(key, len(block)).encrypt(block)
def decrypt(key, block):
return rijndael(key, len(block)).decrypt(block)
def test():
def t(kl, bl):
b = 'b' * bl
r = rijndael('a' * kl, bl)
assert r.decrypt(r.encrypt(b)) == b
t(16, 16)
t(16, 24)
t(16, 32)
t(24, 16)
t(24, 24)
t(24, 32)
t(32, 16)
t(32, 24)
t(32, 32)
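# Hedged usage sketch (not part of the original module): the module-level
# encrypt()/decrypt() helpers above build a one-off key schedule per call and
# operate on a single raw block. Key and block lengths must each be 16, 24 or
# 32 bytes; the Python 2 byte-string handling (ord/chr) above is assumed.
def _demo_single_block():
    key = 'k' * 16              # illustrative 128-bit key
    block = '0123456789abcdef'  # one 16-byte block of plaintext
    ciphertext = encrypt(key, block)
    assert decrypt(key, ciphertext) == block
    return ciphertext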
|
arbrandes/edx-platform
|
refs/heads/master
|
openedx/features/announcements/migrations/0001_initial.py
|
4
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Announcement',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('content', models.CharField(default='lorem ipsum', max_length=1000)),
('active', models.BooleanField(default=True)),
],
),
]
|
IONISx/edx-platform
|
refs/heads/master
|
common/lib/xmodule/xmodule/modulestore/__init__.py
|
11
|
"""
This module provides an abstraction for working with XModuleDescriptors
that are stored in a database and accessible using their Location as an identifier
"""
import logging
import re
import json
import datetime
from pytz import UTC
from collections import defaultdict
import collections
from contextlib import contextmanager
import threading
from operator import itemgetter
from sortedcontainers import SortedListWithKey
from abc import ABCMeta, abstractmethod
from contracts import contract, new_contract
from xblock.plugin import default_select
from .exceptions import InvalidLocationError, InsufficientSpecificationError
from xmodule.errortracker import make_error_tracker
from xmodule.assetstore import AssetMetadata
from opaque_keys.edx.keys import CourseKey, UsageKey, AssetKey
from opaque_keys.edx.locations import Location # For import backwards compatibility
from xblock.runtime import Mixologist
from xblock.core import XBlock
log = logging.getLogger('edx.modulestore')
new_contract('CourseKey', CourseKey)
new_contract('AssetKey', AssetKey)
new_contract('AssetMetadata', AssetMetadata)
new_contract('XBlock', XBlock)
LIBRARY_ROOT = 'library.xml'
COURSE_ROOT = 'course.xml'
class ModuleStoreEnum(object):
"""
A class to encapsulate common constants that are used with the various modulestores.
"""
class Type(object):
"""
The various types of modulestores provided
"""
split = 'split'
mongo = 'mongo'
xml = 'xml'
class RevisionOption(object):
"""
Revision constants to use for Module Store operations
Note: These values are passed into store APIs and only used at run time
"""
# both DRAFT and PUBLISHED versions are queried, with preference to DRAFT versions
draft_preferred = 'rev-opt-draft-preferred'
# only DRAFT versions are queried and no PUBLISHED versions
draft_only = 'rev-opt-draft-only'
        # only PUBLISHED versions are queried and no DRAFT versions
published_only = 'rev-opt-published-only'
# all revisions are queried
all = 'rev-opt-all'
class Branch(object):
"""
Branch constants to use for stores, such as Mongo, that have only 2 branches: DRAFT and PUBLISHED
Note: These values are taken from server configuration settings, so should not be changed without alerting DevOps
"""
draft_preferred = 'draft-preferred'
published_only = 'published-only'
class BranchName(object):
"""
Branch constants to use for stores, such as Split, that have named branches
"""
draft = 'draft-branch'
published = 'published-branch'
library = 'library'
class UserID(object):
"""
Values for user ID defaults
"""
# Note: we use negative values here to (try to) not collide
# with user identifiers provided by actual user services.
# user ID to use for all management commands
mgmt_command = -1
# user ID to use for primitive commands
primitive_command = -2
# user ID to use for tests that do not have a django user available
test = -3
# user ID for automatic update by the system
system = -4
class SortOrder(object):
"""
Values for sorting asset metadata.
"""
ascending = 1
descending = 2
class BulkOpsRecord(object):
"""
For handling nesting of bulk operations
"""
def __init__(self):
self._active_count = 0
self.has_publish_item = False
self.has_library_updated_item = False
@property
def active(self):
"""
Return whether this bulk write is active.
"""
return self._active_count > 0
def nest(self):
"""
Record another level of nesting of this bulk write operation
"""
self._active_count += 1
def unnest(self):
"""
Record the completion of a level of nesting of the bulk write operation
"""
self._active_count -= 1
@property
def is_root(self):
"""
Return whether the bulk write is at the root (first) level of nesting
"""
return self._active_count == 1
class ActiveBulkThread(threading.local):
"""
Add the expected vars to the thread.
"""
def __init__(self, bulk_ops_record_type, **kwargs):
super(ActiveBulkThread, self).__init__(**kwargs)
self.records = defaultdict(bulk_ops_record_type)
class BulkOperationsMixin(object):
"""
This implements the :meth:`bulk_operations` modulestore semantics which handles nested invocations
In particular, it implements :meth:`_begin_bulk_operation` and
:meth:`_end_bulk_operation` to provide the external interface
Internally, this mixin records the set of all active bulk operations (keyed on the active course),
and only writes those values when :meth:`_end_bulk_operation` is called.
If a bulk write operation isn't active, then the changes are immediately written to the underlying
mongo_connection.
"""
def __init__(self, *args, **kwargs):
super(BulkOperationsMixin, self).__init__(*args, **kwargs)
self._active_bulk_ops = ActiveBulkThread(self._bulk_ops_record_type)
self.signal_handler = None
@contextmanager
def bulk_operations(self, course_id, emit_signals=True):
"""
A context manager for notifying the store of bulk operations. This affects only the current thread.
In the case of Mongo, it temporarily disables refreshing the metadata inheritance tree
until the bulk operation is completed.
"""
try:
self._begin_bulk_operation(course_id)
yield
finally:
self._end_bulk_operation(course_id, emit_signals)
# the relevant type of bulk_ops_record for the mixin (overriding classes should override
# this variable)
_bulk_ops_record_type = BulkOpsRecord
def _get_bulk_ops_record(self, course_key, ignore_case=False):
"""
Return the :class:`.BulkOpsRecord` for this course.
"""
if course_key is None:
return self._bulk_ops_record_type()
# Retrieve the bulk record based on matching org/course/run (possibly ignoring case)
if ignore_case:
for key, record in self._active_bulk_ops.records.iteritems():
# Shortcut: check basic equivalence for cases where org/course/run might be None.
if key == course_key or (
key.org.lower() == course_key.org.lower() and
key.course.lower() == course_key.course.lower() and
key.run.lower() == course_key.run.lower()
):
return record
return self._active_bulk_ops.records[course_key.for_branch(None)]
@property
def _active_records(self):
"""
Yield all active (CourseLocator, BulkOpsRecord) tuples.
"""
for course_key, record in self._active_bulk_ops.records.iteritems():
if record.active:
yield (course_key, record)
def _clear_bulk_ops_record(self, course_key):
"""
Clear the record for this course
"""
if course_key.for_branch(None) in self._active_bulk_ops.records:
del self._active_bulk_ops.records[course_key.for_branch(None)]
def _start_outermost_bulk_operation(self, bulk_ops_record, course_key):
"""
The outermost nested bulk_operation call: do the actual begin of the bulk operation.
Implementing classes must override this method; otherwise, the bulk operations are a noop
"""
pass
def _begin_bulk_operation(self, course_key):
"""
Begin a bulk operation on course_key.
"""
bulk_ops_record = self._get_bulk_ops_record(course_key)
# Increment the number of active bulk operations (bulk operations
# on the same course can be nested)
bulk_ops_record.nest()
# If this is the highest level bulk operation, then initialize it
if bulk_ops_record.is_root:
self._start_outermost_bulk_operation(bulk_ops_record, course_key)
def _end_outermost_bulk_operation(self, bulk_ops_record, structure_key):
"""
The outermost nested bulk_operation call: do the actual end of the bulk operation.
Implementing classes must override this method; otherwise, the bulk operations are a noop
"""
pass
def _end_bulk_operation(self, structure_key, emit_signals=True):
"""
End the active bulk operation on structure_key (course or library key).
"""
# If no bulk op is active, return
bulk_ops_record = self._get_bulk_ops_record(structure_key)
if not bulk_ops_record.active:
return
# Send the pre-publish signal within the context of the bulk operation.
# Writes performed by signal handlers will be persisted when the bulk
# operation ends.
if emit_signals and bulk_ops_record.is_root:
self.send_pre_publish_signal(bulk_ops_record, structure_key)
bulk_ops_record.unnest()
# If this wasn't the outermost context, then don't close out the
# bulk operation.
if bulk_ops_record.active:
return
dirty = self._end_outermost_bulk_operation(bulk_ops_record, structure_key)
# The bulk op has ended. However, the signal tasks below still need to use the
# built-up bulk op information (if the signals trigger tasks in the same thread).
# So re-nest until the signals are sent.
bulk_ops_record.nest()
if emit_signals and dirty:
self.send_bulk_published_signal(bulk_ops_record, structure_key)
self.send_bulk_library_updated_signal(bulk_ops_record, structure_key)
# Signals are sent. Now unnest and clear the bulk op for good.
bulk_ops_record.unnest()
self._clear_bulk_ops_record(structure_key)
def _is_in_bulk_operation(self, course_key, ignore_case=False):
"""
Return whether a bulk operation is active on `course_key`.
"""
return self._get_bulk_ops_record(course_key, ignore_case).active
def send_pre_publish_signal(self, bulk_ops_record, course_id):
"""
Send a signal just before items are published in the course.
"""
signal_handler = getattr(self, "signal_handler", None)
if signal_handler and bulk_ops_record.has_publish_item:
signal_handler.send("pre_publish", course_key=course_id)
def send_bulk_published_signal(self, bulk_ops_record, course_id):
"""
Sends out the signal that items have been published from within this course.
"""
if self.signal_handler and bulk_ops_record.has_publish_item:
# We remove the branch, because publishing always means copying from draft to published
self.signal_handler.send("course_published", course_key=course_id.for_branch(None))
bulk_ops_record.has_publish_item = False
def send_bulk_library_updated_signal(self, bulk_ops_record, library_id):
"""
        Sends out the signal that a library has been updated.
"""
if self.signal_handler and bulk_ops_record.has_library_updated_item:
self.signal_handler.send("library_updated", library_key=library_id)
bulk_ops_record.has_library_updated_item = False
class EditInfo(object):
"""
Encapsulates the editing info of a block.
"""
def __init__(self, **kwargs):
self.from_storable(kwargs)
# For details, see caching_descriptor_system.py get_subtree_edited_by/on.
self._subtree_edited_on = kwargs.get('_subtree_edited_on', None)
self._subtree_edited_by = kwargs.get('_subtree_edited_by', None)
def to_storable(self):
"""
Serialize to a Mongo-storable format.
"""
return {
'previous_version': self.previous_version,
'update_version': self.update_version,
'source_version': self.source_version,
'edited_on': self.edited_on,
'edited_by': self.edited_by,
'original_usage': self.original_usage,
'original_usage_version': self.original_usage_version,
}
def from_storable(self, edit_info):
"""
De-serialize from Mongo-storable format to an object.
"""
# Guid for the structure which previously changed this XBlock.
# (Will be the previous value of 'update_version'.)
self.previous_version = edit_info.get('previous_version', None)
# Guid for the structure where this XBlock got its current field values.
# May point to a structure not in this structure's history (e.g., to a draft
# branch from which this version was published).
self.update_version = edit_info.get('update_version', None)
self.source_version = edit_info.get('source_version', None)
# Datetime when this XBlock's fields last changed.
self.edited_on = edit_info.get('edited_on', None)
# User ID which changed this XBlock last.
self.edited_by = edit_info.get('edited_by', None)
# If this block has been copied from a library using copy_from_template,
# these fields point to the original block in the library, for analytics.
self.original_usage = edit_info.get('original_usage', None)
self.original_usage_version = edit_info.get('original_usage_version', None)
def __repr__(self):
# pylint: disable=bad-continuation, redundant-keyword-arg
return ("{classname}(previous_version={self.previous_version}, "
"update_version={self.update_version}, "
"source_version={source_version}, "
"edited_on={self.edited_on}, "
"edited_by={self.edited_by}, "
"original_usage={self.original_usage}, "
"original_usage_version={self.original_usage_version}, "
"_subtree_edited_on={self._subtree_edited_on}, "
"_subtree_edited_by={self._subtree_edited_by})").format(
self=self,
classname=self.__class__.__name__,
source_version="UNSET" if self.source_version is None else self.source_version,
) # pylint: disable=bad-continuation
def __eq__(self, edit_info):
"""
Two EditInfo instances are equal iff their storable representations
are equal.
"""
return self.to_storable() == edit_info.to_storable()
    def __ne__(self, edit_info):
        """
        Two EditInfo instances are not equal iff their storable representations
        are not equal.
        """
        return not self == edit_info
class BlockData(object):
"""
Wrap the block data in an object instead of using a straight Python dictionary.
Allows the storing of meta-information about a structure that doesn't persist along with
the structure itself.
"""
def __init__(self, **kwargs):
# Has the definition been loaded?
self.definition_loaded = False
self.from_storable(kwargs)
def to_storable(self):
"""
Serialize to a Mongo-storable format.
"""
return {
'fields': self.fields,
'block_type': self.block_type,
'definition': self.definition,
'defaults': self.defaults,
'edit_info': self.edit_info.to_storable()
}
def from_storable(self, block_data):
"""
De-serialize from Mongo-storable format to an object.
"""
# Contains the Scope.settings and 'children' field values.
# 'children' are stored as a list of (block_type, block_id) pairs.
self.fields = block_data.get('fields', {})
# XBlock type ID.
self.block_type = block_data.get('block_type', None)
# DB id of the record containing the content of this XBlock.
self.definition = block_data.get('definition', None)
# Scope.settings default values copied from a template block (used e.g. when
# blocks are copied from a library to a course)
self.defaults = block_data.get('defaults', {})
# EditInfo object containing all versioning/editing data.
self.edit_info = EditInfo(**block_data.get('edit_info', {}))
def __repr__(self):
# pylint: disable=bad-continuation, redundant-keyword-arg
return ("{classname}(fields={self.fields}, "
"block_type={self.block_type}, "
"definition={self.definition}, "
"definition_loaded={self.definition_loaded}, "
"defaults={self.defaults}, "
"edit_info={self.edit_info})").format(
self=self,
classname=self.__class__.__name__,
) # pylint: disable=bad-continuation
def __eq__(self, block_data):
"""
Two BlockData objects are equal iff all their attributes are equal.
"""
attrs = ['fields', 'block_type', 'definition', 'defaults', 'edit_info']
return all(getattr(self, attr) == getattr(block_data, attr) for attr in attrs)
    def __ne__(self, block_data):
        """
        Defined as not self.__eq__(block_data).
        """
        return not self == block_data
new_contract('BlockData', BlockData)
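# Hedged usage sketch (not part of the original module): BlockData round-trips
# through its Mongo-storable dict form; the field values below are illustrative.
def _demo_block_data_roundtrip():
    raw = {
        'fields': {'display_name': 'Demo block'},
        'block_type': 'html',
        'definition': None,
        'defaults': {},
        'edit_info': {},
    }
    block = BlockData(**raw)
    assert block.to_storable()['block_type'] == 'html'
    assert block.edit_info == EditInfo()
    return block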
class IncorrectlySortedList(Exception):
"""
Thrown when calling find() on a SortedAssetList not sorted by filename.
"""
pass
class SortedAssetList(SortedListWithKey):
"""
List of assets that is sorted based on an asset attribute.
"""
def __init__(self, **kwargs):
self.filename_sort = False
key_func = kwargs.get('key', None)
if key_func is None:
kwargs['key'] = itemgetter('filename')
self.filename_sort = True
super(SortedAssetList, self).__init__(**kwargs)
@contract(asset_id=AssetKey)
def find(self, asset_id):
"""
Find the index of a particular asset in the list. This method is only functional for lists
        sorted by filename. If the list is sorted on any other key, find() raises an
        IncorrectlySortedList exception.
        Returns: Index of asset, if found. None if not found.
"""
# Don't attempt to find an asset by filename in a list that's not sorted by filename.
if not self.filename_sort:
raise IncorrectlySortedList()
# See if this asset already exists by checking the external_filename.
# Studio doesn't currently support using multiple course assets with the same filename.
# So use the filename as the unique identifier.
idx = None
idx_left = self.bisect_left({'filename': asset_id.path})
idx_right = self.bisect_right({'filename': asset_id.path})
if idx_left != idx_right:
# Asset was found in the list.
idx = idx_left
return idx
@contract(asset_md=AssetMetadata)
def insert_or_update(self, asset_md):
"""
Insert asset metadata if asset is not present. Update asset metadata if asset is already present.
"""
metadata_to_insert = asset_md.to_storable()
asset_idx = self.find(asset_md.asset_id)
if asset_idx is None:
# Add new metadata sorted into the list.
self.add(metadata_to_insert)
else:
# Replace existing metadata.
self[asset_idx] = metadata_to_insert
class ModuleStoreAssetBase(object):
"""
The methods for accessing assets and their metadata
"""
def _find_course_asset(self, asset_key):
"""
        Returns the same as _find_course_assets, plus the index of the given asset (or None). Does not convert
        to AssetMetadata; thus, is internal.
Arguments:
asset_key (AssetKey): what to look for
Returns:
Tuple of:
- AssetMetadata[] for all assets of the given asset_key's type
- the index of asset in list (None if asset does not exist)
"""
course_assets = self._find_course_assets(asset_key.course_key)
all_assets = SortedAssetList(iterable=[])
# Assets should be pre-sorted, so add them efficiently without sorting.
# extend() will raise a ValueError if the passed-in list is not sorted.
all_assets.extend(course_assets.setdefault(asset_key.block_type, []))
idx = all_assets.find(asset_key)
return course_assets, idx
@contract(asset_key='AssetKey')
def find_asset_metadata(self, asset_key, **kwargs):
"""
Find the metadata for a particular course asset.
Arguments:
asset_key (AssetKey): key containing original asset filename
Returns:
asset metadata (AssetMetadata) -or- None if not found
"""
course_assets, asset_idx = self._find_course_asset(asset_key)
if asset_idx is None:
return None
mdata = AssetMetadata(asset_key, asset_key.path, **kwargs)
all_assets = course_assets[asset_key.asset_type]
mdata.from_storable(all_assets[asset_idx])
return mdata
@contract(
course_key='CourseKey', asset_type='None | basestring',
start='int | None', maxresults='int | None', sort='tuple(str,(int,>=1,<=2))|None'
)
def get_all_asset_metadata(self, course_key, asset_type, start=0, maxresults=-1, sort=None, **kwargs):
"""
Returns a list of asset metadata for all assets of the given asset_type in the course.
Args:
course_key (CourseKey): course identifier
asset_type (str): the block_type of the assets to return. If None, return assets of all types.
start (int): optional - start at this asset number. Zero-based!
maxresults (int): optional - return at most this many, -1 means no limit
sort (array): optional - None means no sort
(sort_by (str), sort_order (str))
sort_by - one of 'uploadDate' or 'displayname'
sort_order - one of SortOrder.ascending or SortOrder.descending
Returns:
List of AssetMetadata objects.
"""
course_assets = self._find_course_assets(course_key)
# Determine the proper sort - with defaults of ('displayname', SortOrder.ascending).
key_func = None
sort_order = ModuleStoreEnum.SortOrder.ascending
if sort:
if sort[0] == 'uploadDate':
key_func = lambda x: x['edit_info']['edited_on']
if sort[1] == ModuleStoreEnum.SortOrder.descending:
sort_order = ModuleStoreEnum.SortOrder.descending
if asset_type is None:
# Add assets of all types to the sorted list.
all_assets = SortedAssetList(iterable=[], key=key_func)
for asset_type, val in course_assets.iteritems():
all_assets.update(val)
else:
# Add assets of a single type to the sorted list.
all_assets = SortedAssetList(iterable=course_assets.get(asset_type, []), key=key_func)
num_assets = len(all_assets)
start_idx = start
end_idx = min(num_assets, start + maxresults)
if maxresults < 0:
# No limit on the results.
end_idx = num_assets
step_incr = 1
if sort_order == ModuleStoreEnum.SortOrder.descending:
# Flip the indices and iterate backwards.
step_incr = -1
start_idx = (num_assets - 1) - start_idx
end_idx = (num_assets - 1) - end_idx
ret_assets = []
for idx in xrange(start_idx, end_idx, step_incr):
raw_asset = all_assets[idx]
asset_key = course_key.make_asset_key(raw_asset['asset_type'], raw_asset['filename'])
new_asset = AssetMetadata(asset_key)
new_asset.from_storable(raw_asset)
ret_assets.append(new_asset)
return ret_assets
# pylint: disable=unused-argument
def check_supports(self, course_key, method):
"""
Verifies that a modulestore supports a particular method.
Some modulestores may differ based on the course_key, such
as mixed (since it has to find the underlying modulestore),
so it's required as part of the method signature.
"""
return hasattr(self, method)
class ModuleStoreAssetWriteInterface(ModuleStoreAssetBase):
"""
The write operations for assets and asset metadata
"""
def _save_assets_by_type(self, course_key, asset_metadata_list, course_assets, user_id, import_only):
"""
Common private method that saves/updates asset metadata items in the internal modulestore
structure used to store asset metadata items.
"""
# Lazily create a sorted list if not already created.
assets_by_type = defaultdict(lambda: SortedAssetList(iterable=course_assets.get(asset_type, [])))
for asset_md in asset_metadata_list:
if asset_md.asset_id.course_key != course_key:
# pylint: disable=logging-format-interpolation
log.warning("Asset's course {} does not match other assets for course {} - not saved.".format(
asset_md.asset_id.course_key, course_key
))
continue
if not import_only:
asset_md.update({'edited_by': user_id, 'edited_on': datetime.datetime.now(UTC)})
asset_type = asset_md.asset_id.asset_type
all_assets = assets_by_type[asset_type]
all_assets.insert_or_update(asset_md)
return assets_by_type
@contract(asset_metadata='AssetMetadata')
def save_asset_metadata(self, asset_metadata, user_id, import_only):
"""
Saves the asset metadata for a particular course's asset.
Arguments:
asset_metadata (AssetMetadata): data about the course asset data
user_id (int): user ID saving the asset metadata
import_only (bool): True if importing without editing, False if editing
Returns:
True if metadata save was successful, else False
"""
raise NotImplementedError()
@contract(asset_metadata_list='list(AssetMetadata)')
def save_asset_metadata_list(self, asset_metadata_list, user_id, import_only):
"""
        Saves a list of asset metadata for a particular course's assets.
        Arguments:
            asset_metadata_list (list(AssetMetadata)): list of data about the course assets
user_id (int): user ID saving the asset metadata
import_only (bool): True if importing without editing, False if editing
Returns:
True if metadata save was successful, else False
"""
raise NotImplementedError()
def set_asset_metadata_attrs(self, asset_key, attrs, user_id):
"""
Base method to over-ride in modulestore.
"""
raise NotImplementedError()
def delete_asset_metadata(self, asset_key, user_id):
"""
Base method to over-ride in modulestore.
"""
raise NotImplementedError()
@contract(asset_key='AssetKey', attr=str)
def set_asset_metadata_attr(self, asset_key, attr, value, user_id):
"""
Add/set the given attr on the asset at the given location. Value can be any type which pymongo accepts.
Arguments:
asset_key (AssetKey): asset identifier
attr (str): which attribute to set
value: the value to set it to (any type pymongo accepts such as datetime, number, string)
user_id (int): user ID saving the asset metadata
Raises:
ItemNotFoundError if no such item exists
            AttributeError if attr is one of the built-in attrs.
"""
return self.set_asset_metadata_attrs(asset_key, {attr: value}, user_id)
@contract(source_course_key='CourseKey', dest_course_key='CourseKey')
def copy_all_asset_metadata(self, source_course_key, dest_course_key, user_id):
"""
Copy all the course assets from source_course_key to dest_course_key.
NOTE: unlike get_all_asset_metadata, this does not take an asset type because
this function is intended for things like cloning or exporting courses not for
clients to list assets.
Arguments:
source_course_key (CourseKey): identifier of course to copy from
dest_course_key (CourseKey): identifier of course to copy to
user_id (int): user ID copying the asset metadata
"""
pass
# pylint: disable=abstract-method
class ModuleStoreRead(ModuleStoreAssetBase):
"""
An abstract interface for a database backend that stores XModuleDescriptor
instances and extends read-only functionality
"""
__metaclass__ = ABCMeta
@abstractmethod
def has_item(self, usage_key):
"""
Returns True if usage_key exists in this ModuleStore.
"""
pass
@abstractmethod
def get_item(self, usage_key, depth=0, using_descriptor_system=None, **kwargs):
"""
Returns an XModuleDescriptor instance for the item at location.
If any segment of the location is None except revision, raises
xmodule.modulestore.exceptions.InsufficientSpecificationError
If no object is found at that location, raises
xmodule.modulestore.exceptions.ItemNotFoundError
usage_key: A :class:`.UsageKey` subclass instance
depth (int): An argument that some module stores may use to prefetch
descendents of the queried modules for more efficient results later
in the request. The depth is counted in the number of calls to
get_children() to cache. None indicates to cache all descendents
"""
pass
@abstractmethod
def get_course_errors(self, course_key):
"""
Return a list of (msg, exception-or-None) errors that the modulestore
encountered when loading the course at course_id.
Raises the same exceptions as get_item if the location isn't found or
isn't fully specified.
Args:
course_key (:class:`.CourseKey`): The course to check for errors
"""
pass
@abstractmethod
def get_items(self, course_id, qualifiers=None, **kwargs):
"""
Returns a list of XModuleDescriptor instances for the items
that match location. Any element of location that is None is treated
as a wildcard that matches any value
location: Something that can be passed to Location
"""
pass
@contract(block='XBlock | BlockData | dict', qualifiers=dict)
def _block_matches(self, block, qualifiers):
"""
Return True or False depending on whether the field value (block contents)
matches the qualifiers as per get_items.
NOTE: Method only finds directly set value matches - not inherited nor default value matches.
For substring matching:
pass a regex object.
For arbitrary function comparison such as date time comparison:
pass the function as in start=lambda x: x < datetime.datetime(2014, 1, 1, 0, tzinfo=pytz.UTC)
Args:
block (dict, XBlock, or BlockData): either the BlockData (transformed from the db) -or-
a dict (from BlockData.fields or get_explicitly_set_fields_by_scope) -or-
the xblock.fields() value -or-
the XBlock from which to get the 'fields' value.
qualifiers (dict): {field: value} search pairs.
"""
if isinstance(block, XBlock):
# If an XBlock is passed-in, just match its fields.
xblock, fields = (block, block.fields)
elif isinstance(block, BlockData):
# BlockData is an object - compare its attributes in dict form.
xblock, fields = (None, block.__dict__)
else:
xblock, fields = (None, block)
def _is_set_on(key):
"""
Is this key set in fields? (return tuple of boolean and value). A helper which can
handle fields either being the json doc or xblock fields. Is inner function to restrict
use and to access local vars.
"""
if key not in fields:
return False, None
field = fields[key]
if xblock is not None:
return field.is_set_on(block), getattr(xblock, key)
else:
return True, field
for key, criteria in qualifiers.iteritems():
is_set, value = _is_set_on(key)
if isinstance(criteria, dict) and '$exists' in criteria and criteria['$exists'] == is_set:
continue
if not is_set:
return False
if not self._value_matches(value, criteria):
return False
return True
def _value_matches(self, target, criteria):
"""
helper for _block_matches: does the target (field value) match the criteria?
If target is a list, do any of the list elements meet the criteria
If the criteria is a regex, does the target match it?
If the criteria is a function, does invoking it on the target yield something truthy?
If criteria is a dict {($nin|$in): []}, then do (none|any) of the list elements meet the criteria
Otherwise, is the target == criteria
"""
if isinstance(target, list):
return any(self._value_matches(ele, criteria) for ele in target)
elif isinstance(criteria, re._pattern_type): # pylint: disable=protected-access
return criteria.search(target) is not None
elif callable(criteria):
return criteria(target)
elif isinstance(criteria, dict) and '$in' in criteria:
# note isn't handling any other things in the dict other than in
return any(self._value_matches(target, test_val) for test_val in criteria['$in'])
elif isinstance(criteria, dict) and '$nin' in criteria:
# note isn't handling any other things in the dict other than nin
return not any(self._value_matches(target, test_val) for test_val in criteria['$nin'])
else:
return criteria == target
@abstractmethod
def make_course_key(self, org, course, run):
"""
Return a valid :class:`~opaque_keys.edx.keys.CourseKey` for this modulestore
that matches the supplied `org`, `course`, and `run`.
This key may represent a course that doesn't exist in this modulestore.
"""
pass
@abstractmethod
def make_course_usage_key(self, course_key):
"""
Return a valid :class:`~opaque_keys.edx.keys.UsageKey` for this modulestore
that matches the supplied course_key.
"""
pass
@abstractmethod
def get_courses(self, **kwargs):
'''
Returns a list containing the top level XModuleDescriptors of the courses
in this modulestore. This method can take an optional argument 'org' which
will efficiently apply a filter so that only the courses of the specified
ORG in the CourseKey will be fetched.
'''
pass
@abstractmethod
def get_course(self, course_id, depth=0, **kwargs):
'''
Look for a specific course by its id (:class:`CourseKey`).
Returns the course descriptor, or None if not found.
'''
pass
@abstractmethod
def has_course(self, course_id, ignore_case=False, **kwargs):
'''
Look for a specific course id. Returns whether it exists.
Args:
course_id (CourseKey):
ignore_case (boolean): some modulestores are case-insensitive. Use this flag
to search for whether a potentially conflicting course exists in that case.
'''
pass
@abstractmethod
def get_parent_location(self, location, **kwargs):
'''
Find the location that is the parent of this location in this
course. Needed for path_to_location().
'''
pass
@abstractmethod
def get_orphans(self, course_key, **kwargs):
"""
Get all of the xblocks in the given course which have no parents and are not of types which are
usually orphaned. NOTE: may include xblocks which still have references via xblocks which don't
use children to point to their dependents.
"""
pass
@abstractmethod
def get_errored_courses(self):
"""
Return a dictionary of course_dir -> [(msg, exception_str)], for each
course_dir where course loading failed.
"""
pass
@abstractmethod
def get_modulestore_type(self, course_id):
"""
Returns a type which identifies which modulestore is servicing the given
course_id. The return can be either "xml" (for XML based courses) or "mongo" for MongoDB backed courses
"""
pass
@abstractmethod
def get_courses_for_wiki(self, wiki_slug, **kwargs):
"""
Return the list of courses which use this wiki_slug
:param wiki_slug: the course wiki root slug
:return: list of course keys
"""
pass
@abstractmethod
def has_published_version(self, xblock):
"""
Returns true if this xblock exists in the published course regardless of whether it's up to date
"""
pass
@abstractmethod
def close_connections(self):
"""
Closes any open connections to the underlying databases
"""
pass
@contextmanager
def bulk_operations(self, course_id, emit_signals=True): # pylint: disable=unused-argument
"""
A context manager for notifying the store of bulk operations. This affects only the current thread.
"""
yield
def ensure_indexes(self):
"""
Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
an exception if unable to.
This method is intended for use by tests and administrative commands, and not
to be run during server startup.
"""
pass
# pylint: disable=abstract-method
class ModuleStoreWrite(ModuleStoreRead, ModuleStoreAssetWriteInterface):
"""
An abstract interface for a database backend that stores XModuleDescriptor
instances and extends both read and write functionality
"""
__metaclass__ = ABCMeta
@abstractmethod
def update_item(self, xblock, user_id, allow_not_found=False, force=False, **kwargs):
"""
Update the given xblock's persisted repr. Pass the user's unique id which the persistent store
should save with the update if it has that ability.
:param allow_not_found: whether this method should raise an exception if the given xblock
has not been persisted before.
:param force: fork the structure and don't update the course draftVersion if there's a version
conflict (only applicable to version tracking and conflict detecting persistence stores)
:raises VersionConflictError: if org, course, run, and version_guid given and the current
version head != version_guid and force is not True. (only applicable to version tracking stores)
"""
pass
@abstractmethod
def delete_item(self, location, user_id, **kwargs):
"""
        Delete an item and its subtree from persistence. Remove the item from any parents. (Note: this does
        not affect parents on other branches or logical branches; thus, in old mongo, deleting something
        whose parent cannot be draft deletes it from both, but deleting a component under a draft vertical
        only deletes it from the draft.)
Pass the user's unique id which the persistent store
should save with the update if it has that ability.
:param force: fork the structure and don't update the course draftVersion if there's a version
conflict (only applicable to version tracking and conflict detecting persistence stores)
:raises VersionConflictError: if org, course, run, and version_guid given and the current
version head != version_guid and force is not True. (only applicable to version tracking stores)
"""
pass
@abstractmethod
def create_course(self, org, course, run, user_id, fields=None, **kwargs):
"""
Creates and returns the course.
Args:
org (str): the organization that owns the course
course (str): the name of the course
run (str): the name of the run
user_id: id of the user creating the course
fields (dict): Fields to set on the course at initialization
kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation
Returns: a CourseDescriptor
"""
pass
@abstractmethod
def create_item(self, user_id, course_key, block_type, block_id=None, fields=None, **kwargs):
"""
Creates and saves a new item in a course.
Returns the newly created item.
Args:
user_id: ID of the user creating and saving the xmodule
course_key: A :class:`~opaque_keys.edx.CourseKey` identifying which course to create
this item in
block_type: The type of block to create
block_id: a unique identifier for the new item. If not supplied,
a new identifier will be generated
fields (dict): A dictionary specifying initial values for some or all fields
in the newly created block
"""
pass
@abstractmethod
def clone_course(self, source_course_id, dest_course_id, user_id, fields=None):
"""
        Sets up dest_course_id to point to a course with the same content as source_course_id. This
        operation may be cheap or expensive. It may have to copy all assets and all xblock content, or
        merely set up new pointers.
Backward compatibility: this method used to require in some modulestores that dest_course_id
pointed to an empty but already created course. Implementers should support this or should
enable creating the course from scratch.
Raises:
ItemNotFoundError: if the source course doesn't exist (or any of its xblocks aren't found)
DuplicateItemError: if the destination course already exists (with content in some cases)
"""
pass
@abstractmethod
def delete_course(self, course_key, user_id, **kwargs):
"""
Deletes the course. It may be a soft or hard delete. It may or may not remove the xblock definitions
depending on the persistence layer and how tightly bound the xblocks are to the course.
Args:
course_key (CourseKey): which course to delete
user_id: id of the user deleting the course
"""
pass
@abstractmethod
def _drop_database(self):
"""
A destructive operation to drop the underlying database and close all connections.
Intended to be used by test code for cleanup.
"""
pass
# pylint: disable=abstract-method
class ModuleStoreReadBase(BulkOperationsMixin, ModuleStoreRead):
'''
Implement interface functionality that can be shared.
'''
# pylint: disable=invalid-name
def __init__(
self,
contentstore=None,
doc_store_config=None, # ignore if passed up
metadata_inheritance_cache_subsystem=None, request_cache=None,
xblock_mixins=(), xblock_select=None, disabled_xblock_types=(), # pylint: disable=bad-continuation
# temporary parms to enable backward compatibility. remove once all envs migrated
db=None, collection=None, host=None, port=None, tz_aware=True, user=None, password=None,
# allow lower level init args to pass harmlessly
** kwargs
):
'''
Set up the error-tracking logic.
'''
super(ModuleStoreReadBase, self).__init__(**kwargs)
self._course_errors = defaultdict(make_error_tracker) # location -> ErrorLog
# pylint: disable=fixme
# TODO move the inheritance_cache_subsystem to classes which use it
self.metadata_inheritance_cache_subsystem = metadata_inheritance_cache_subsystem
self.request_cache = request_cache
self.xblock_mixins = xblock_mixins
self.xblock_select = xblock_select
self.disabled_xblock_types = disabled_xblock_types
self.contentstore = contentstore
def get_course_errors(self, course_key):
"""
Return list of errors for this :class:`.CourseKey`, if any. Raise the same
errors as get_item if course_key isn't present.
"""
# check that item is present and raise the promised exceptions if needed
# pylint: disable=fixme
# TODO (vshnayder): post-launch, make errors properties of items
# self.get_item(location)
assert isinstance(course_key, CourseKey)
return self._course_errors[course_key].errors
def get_errored_courses(self):
"""
Returns an empty dict.
It is up to subclasses to extend this method if the concept
of errored courses makes sense for their implementation.
"""
return {}
def get_course(self, course_id, depth=0, **kwargs):
"""
See ModuleStoreRead.get_course
Default impl--linear search through course list
"""
assert isinstance(course_id, CourseKey)
for course in self.get_courses(**kwargs):
if course.id == course_id:
return course
return None
def has_course(self, course_id, ignore_case=False, **kwargs):
"""
Returns the course_id of the course if it was found, else None
Args:
course_id (CourseKey):
ignore_case (boolean): some modulestores are case-insensitive. Use this flag
to search for whether a potentially conflicting course exists in that case.
"""
# linear search through list
assert isinstance(course_id, CourseKey)
if ignore_case:
return next(
(
c.id for c in self.get_courses()
if c.id.org.lower() == course_id.org.lower() and
c.id.course.lower() == course_id.course.lower() and
c.id.run.lower() == course_id.run.lower()
),
None
)
else:
return next(
(c.id for c in self.get_courses() if c.id == course_id),
None
)
def has_published_version(self, xblock):
"""
Returns True since this is a read-only store.
"""
return True
def heartbeat(self):
"""
Is this modulestore ready?
"""
# default is to say yes by not raising an exception
return {'default_impl': True}
def close_connections(self):
"""
Closes any open connections to the underlying databases
"""
if self.contentstore:
self.contentstore.close_connections()
super(ModuleStoreReadBase, self).close_connections()
@contextmanager
def default_store(self, store_type):
"""
A context manager for temporarily changing the default store
"""
if self.get_modulestore_type(None) != store_type:
raise ValueError(u"Cannot set default store to type {}".format(store_type))
yield
# pylint: disable=abstract-method
class ModuleStoreWriteBase(ModuleStoreReadBase, ModuleStoreWrite):
'''
Implement interface functionality that can be shared.
'''
def __init__(self, contentstore, **kwargs):
super(ModuleStoreWriteBase, self).__init__(contentstore=contentstore, **kwargs)
self.mixologist = Mixologist(self.xblock_mixins)
def partition_fields_by_scope(self, category, fields):
"""
Return dictionary of {scope: {field1: val, ..}..} for the fields of this potential xblock
:param category: the xblock category
:param fields: the dictionary of {fieldname: value}
"""
result = collections.defaultdict(dict)
if fields is None:
return result
cls = self.mixologist.mix(XBlock.load_class(category, select=prefer_xmodules))
for field_name, value in fields.iteritems():
field = getattr(cls, field_name)
result[field.scope][field_name] = value
return result
def create_course(self, org, course, run, user_id, fields=None, runtime=None, **kwargs):
"""
Creates any necessary other things for the course as a side effect and doesn't return
anything useful. The real subclass should call this before it returns the course.
"""
# clone a default 'about' overview module as well
about_location = self.make_course_key(org, course, run).make_usage_key('about', 'overview')
about_descriptor = XBlock.load_class('about')
overview_template = about_descriptor.get_template('overview.yaml')
self.create_item(
user_id,
about_location.course_key,
about_location.block_type,
block_id=about_location.block_id,
definition_data={'data': overview_template.get('data')},
metadata=overview_template.get('metadata'),
runtime=runtime,
continue_version=True,
)
def clone_course(self, source_course_id, dest_course_id, user_id, fields=None, **kwargs):
"""
This base method just copies the assets. The lower level impls must do the actual cloning of
content.
"""
with self.bulk_operations(dest_course_id):
# copy the assets
if self.contentstore:
self.contentstore.copy_all_course_assets(source_course_id, dest_course_id)
return dest_course_id
def delete_course(self, course_key, user_id, **kwargs):
"""
This base method just deletes the assets. The lower level impls must do the actual deleting of
content.
"""
# delete the assets
if self.contentstore:
self.contentstore.delete_all_course_assets(course_key)
super(ModuleStoreWriteBase, self).delete_course(course_key, user_id)
def _drop_database(self):
"""
A destructive operation to drop the underlying database and close all connections.
Intended to be used by test code for cleanup.
"""
if self.contentstore:
self.contentstore._drop_database() # pylint: disable=protected-access
super(ModuleStoreWriteBase, self)._drop_database() # pylint: disable=protected-access
def create_child(self, user_id, parent_usage_key, block_type, block_id=None, fields=None, **kwargs):
"""
        Creates and saves a new xblock as a child of the specified block
Returns the newly created item.
Args:
user_id: ID of the user creating and saving the xmodule
            parent_usage_key: a :class:`~opaque_keys.edx.keys.UsageKey` identifying the
block that this item should be parented under
block_type: The type of block to create
block_id: a unique identifier for the new item. If not supplied,
a new identifier will be generated
fields (dict): A dictionary specifying initial values for some or all fields
in the newly created block
"""
item = self.create_item(user_id, parent_usage_key.course_key, block_type, block_id=block_id, fields=fields, **kwargs)
parent = self.get_item(parent_usage_key)
parent.children.append(item.location)
self.update_item(parent, user_id)
def _flag_library_updated_event(self, library_key):
"""
Wrapper around calls to fire the library_updated signal
Unless we're nested in an active bulk operation, this simply fires the signal
otherwise a publish will be signalled at the end of the bulk operation
Arguments:
library_key - library_key to which the signal applies
"""
if self.signal_handler:
bulk_record = self._get_bulk_ops_record(library_key) if isinstance(self, BulkOperationsMixin) else None
if bulk_record and bulk_record.active:
bulk_record.has_library_updated_item = True
else:
self.signal_handler.send("library_updated", library_key=library_key)
def _emit_course_deleted_signal(self, course_key):
"""
Helper method used to emit the course_deleted signal.
"""
if self.signal_handler:
self.signal_handler.send("course_deleted", course_key=course_key)
def only_xmodules(identifier, entry_points):
"""Only use entry_points that are supplied by the xmodule package"""
from_xmodule = [entry_point for entry_point in entry_points if entry_point.dist.key == 'xmodule']
return default_select(identifier, from_xmodule)
def prefer_xmodules(identifier, entry_points):
"""Prefer entry_points from the xmodule package"""
from_xmodule = [entry_point for entry_point in entry_points if entry_point.dist.key == 'xmodule']
if from_xmodule:
return default_select(identifier, from_xmodule)
else:
return default_select(identifier, entry_points)
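# Hedged usage sketch (not part of the original module): both selectors are
# intended to be passed as the `select` argument of XBlock.load_class, as
# partition_fields_by_scope does above; the 'html' identifier is illustrative.
def _demo_prefer_xmodules():
    return XBlock.load_class('html', select=prefer_xmodules)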
class EdxJSONEncoder(json.JSONEncoder):
"""
Custom JSONEncoder that handles `Location` and `datetime.datetime` objects.
`Location`s are encoded as their url string form, and `datetime`s as
ISO date strings
"""
def default(self, obj):
if isinstance(obj, (CourseKey, UsageKey)):
return unicode(obj)
elif isinstance(obj, datetime.datetime):
if obj.tzinfo is not None:
if obj.utcoffset() is None:
return obj.isoformat() + 'Z'
else:
return obj.isoformat()
else:
return obj.isoformat()
else:
return super(EdxJSONEncoder, self).default(obj)
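# Hedged usage sketch (not part of the original module): EdxJSONEncoder is
# passed to json.dumps via `cls` so CourseKey/UsageKey and datetime values
# serialize cleanly; the payload below is illustrative.
def _demo_edx_json_encoder():
    payload = {
        'exported_on': datetime.datetime(2014, 1, 1, tzinfo=UTC),
        'note': 'tz-aware datetimes serialize with their UTC offset',
    }
    return json.dumps(payload, cls=EdxJSONEncoder)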
|
dreamsxin/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/distutils/tests/test_install_headers.py
|
147
|
"""Tests for distutils.command.install_headers."""
import sys
import os
import unittest
import getpass
from distutils.command.install_headers import install_headers
from distutils.tests import support
from test.support import run_unittest
class InstallHeadersTestCase(support.TempdirManager,
support.LoggingSilencer,
support.EnvironGuard,
unittest.TestCase):
def test_simple_run(self):
# we have two headers
header_list = self.mkdtemp()
header1 = os.path.join(header_list, 'header1')
header2 = os.path.join(header_list, 'header2')
self.write_file(header1)
self.write_file(header2)
headers = [header1, header2]
pkg_dir, dist = self.create_dist(headers=headers)
cmd = install_headers(dist)
self.assertEqual(cmd.get_inputs(), headers)
# let's run the command
cmd.install_dir = os.path.join(pkg_dir, 'inst')
cmd.ensure_finalized()
cmd.run()
# let's check the results
self.assertEqual(len(cmd.get_outputs()), 2)
def test_suite():
return unittest.makeSuite(InstallHeadersTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
|
Geheimorganisation/sltv
|
refs/heads/master
|
sltv/ui/encoding/vp8enc.py
|
4
|
# -*- coding: utf-8 -*-
# Copyright (C) 2010 Holoscopio Tecnologia
# Copyright (C) 2010 Gustavo Noronha Silva <gns@gnome.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import gobject
import pygst
pygst.require("0.10")
import gst
from core import EncodingUI
from sltv.settings import UI_DIR
class VP8EncodingUI(EncodingUI):
def get_widget(self):
return None
def get_name(self):
return "vp8"
def get_description(self):
return "VP8 encoding"
|
saurabh6790/omn-app
|
refs/heads/master
|
setup/doctype/backup_manager/backup_googledrive.py
|
30
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# SETUP:
# install: pip install --upgrade google-api-python-client
#
# In Google API
# - create new API project
# - create new oauth2 client (create installed app type as google \
# does not support subdomains)
#
# in conf.py, set oauth2 settings
# gdrive_client_id
# gdrive_client_secret
from __future__ import unicode_literals
import httplib2
import os
import mimetypes
import webnotes
import oauth2client.client
from webnotes.utils import get_base_path, cstr
from webnotes import _, msgprint
from apiclient.discovery import build
from apiclient.http import MediaFileUpload
# define log config for google drive api's log messages
# basicConfig redirects log to stderr
import logging
logging.basicConfig()
@webnotes.whitelist()
def get_gdrive_authorize_url():
flow = get_gdrive_flow()
authorize_url = flow.step1_get_authorize_url()
return {
"authorize_url": authorize_url,
}
def upload_files(name, mimetype, service, folder_id):
if not webnotes.conn:
webnotes.connect()
file_name = os.path.basename(name)
media_body = MediaFileUpload(name, mimetype=mimetype, resumable=True)
body = {
'title': file_name,
'description': 'Backup File',
'mimetype': mimetype,
'parents': [{
'kind': 'drive#filelink',
'id': folder_id
}]
}
request = service.files().insert(body=body, media_body=media_body)
response = None
while response is None:
status, response = request.next_chunk()
def backup_to_gdrive():
from webnotes.utils.backups import new_backup
if not webnotes.conn:
webnotes.connect()
get_gdrive_flow()
credentials_json = webnotes.conn.get_value("Backup Manager", None, "gdrive_credentials")
credentials = oauth2client.client.Credentials.new_from_json(credentials_json)
http = httplib2.Http()
http = credentials.authorize(http)
drive_service = build('drive', 'v2', http=http)
# upload database
backup = new_backup()
path = os.path.join(get_base_path(), "public", "backups")
filename = os.path.join(path, os.path.basename(backup.backup_path_db))
# upload files to database folder
upload_files(filename, 'application/x-gzip', drive_service,
webnotes.conn.get_value("Backup Manager", None, "database_folder_id"))
# upload files to files folder
did_not_upload = []
error_log = []
files_folder_id = webnotes.conn.get_value("Backup Manager", None, "files_folder_id")
webnotes.conn.close()
path = os.path.join(get_base_path(), "public", "files")
for filename in os.listdir(path):
filename = cstr(filename)
found = False
filepath = os.path.join(path, filename)
ext = filename.split('.')[-1]
size = os.path.getsize(filepath)
if ext == 'gz' or ext == 'gzip':
mimetype = 'application/x-gzip'
else:
mimetype = mimetypes.types_map.get("." + ext) or "application/octet-stream"
        # Compare local file with server file
children = drive_service.children().list(folderId=files_folder_id).execute()
for child in children.get('items', []):
file = drive_service.files().get(fileId=child['id']).execute()
if filename == file['title'] and size == int(file['fileSize']):
found = True
break
if not found:
try:
upload_files(filepath, mimetype, drive_service, files_folder_id)
except Exception, e:
did_not_upload.append(filename)
error_log.append(cstr(e))
webnotes.connect()
return did_not_upload, list(set(error_log))
def get_gdrive_flow():
from oauth2client.client import OAuth2WebServerFlow
from webnotes import conf
if not "gdrive_client_id" in conf:
webnotes.msgprint(_("Please set Google Drive access keys in") + " conf.py",
raise_exception=True)
flow = OAuth2WebServerFlow(conf.gdrive_client_id, conf.gdrive_client_secret,
"https://www.googleapis.com/auth/drive", 'urn:ietf:wg:oauth:2.0:oob')
return flow
@webnotes.whitelist()
def gdrive_callback(verification_code = None):
flow = get_gdrive_flow()
if verification_code:
credentials = flow.step2_exchange(verification_code)
allowed = 1
# make folders to save id
http = httplib2.Http()
http = credentials.authorize(http)
drive_service = build('drive', 'v2', http=http)
erpnext_folder_id = create_erpnext_folder(drive_service)
database_folder_id = create_folder('database', drive_service, erpnext_folder_id)
files_folder_id = create_folder('files', drive_service, erpnext_folder_id)
webnotes.conn.set_value("Backup Manager", "Backup Manager", "gdrive_access_allowed", allowed)
webnotes.conn.set_value("Backup Manager", "Backup Manager", "database_folder_id", database_folder_id)
webnotes.conn.set_value("Backup Manager", "Backup Manager", "files_folder_id", files_folder_id)
final_credentials = credentials.to_json()
webnotes.conn.set_value("Backup Manager", "Backup Manager", "gdrive_credentials", final_credentials)
webnotes.msgprint("Updated")
def create_erpnext_folder(service):
if not webnotes.conn:
webnotes.connect()
erpnext = {
'title': 'erpnext',
'mimeType': 'application/vnd.google-apps.folder'
}
erpnext = service.files().insert(body=erpnext).execute()
return erpnext['id']
def create_folder(name, service, folder_id):
database = {
'title': name,
'mimeType': 'application/vnd.google-apps.folder',
'parents': [{
'kind': 'drive#fileLink',
'id': folder_id
}]
}
database = service.files().insert(body=database).execute()
return database['id']
if __name__=="__main__":
backup_to_gdrive()
|
pschmitt/home-assistant
|
refs/heads/dev
|
tests/components/deconz/test_scene.py
|
7
|
"""deCONZ scene platform tests."""
from copy import deepcopy
from homeassistant.components import deconz
import homeassistant.components.scene as scene
from homeassistant.setup import async_setup_component
from .test_gateway import DECONZ_WEB_REQUEST, setup_deconz_integration
from tests.async_mock import patch
GROUPS = {
"1": {
"id": "Light group id",
"name": "Light group",
"type": "LightGroup",
"state": {"all_on": False, "any_on": True},
"action": {},
"scenes": [{"id": "1", "name": "Scene"}],
"lights": [],
}
}
async def test_platform_manually_configured(hass):
"""Test that we do not discover anything or try to set up a gateway."""
assert (
await async_setup_component(
hass, scene.DOMAIN, {"scene": {"platform": deconz.DOMAIN}}
)
is True
)
assert deconz.DOMAIN not in hass.data
async def test_no_scenes(hass):
"""Test that scenes can be loaded without scenes being available."""
gateway = await setup_deconz_integration(hass)
assert len(gateway.deconz_ids) == 0
assert len(hass.states.async_all()) == 0
async def test_scenes(hass):
"""Test that scenes works."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["groups"] = deepcopy(GROUPS)
gateway = await setup_deconz_integration(hass, get_state_response=data)
assert "scene.light_group_scene" in gateway.deconz_ids
assert len(hass.states.async_all()) == 1
light_group_scene = hass.states.get("scene.light_group_scene")
assert light_group_scene
group_scene = gateway.api.groups["1"].scenes["1"]
with patch.object(group_scene, "_request", return_value=True) as set_callback:
await hass.services.async_call(
"scene", "turn_on", {"entity_id": "scene.light_group_scene"}, blocking=True
)
await hass.async_block_till_done()
set_callback.assert_called_with("put", "/groups/1/scenes/1/recall", json={})
await gateway.async_reset()
assert len(hass.states.async_all()) == 0
|
remaudcorentin-dev/python-allrecipes
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import setup
setup(
name='python-allrecipes',
version='0.2.1',
    description='Easy-to-use Python API for the allrecipes.com cooking website.',
packages=['allrecipes'],
url='https://github.com/remaudcorentin-dev/python-allrecipes',
author='Corentin Remaud',
author_email='remaudcorentin.dev@gmail.com',
license='MIT',
zip_safe=False,
install_requires=['bs4'],
)
|
perlygatekeeper/glowing-robot
|
refs/heads/master
|
Little_Alchemy_2/Scraper_python/env/lib/python3.7/site-packages/setuptools/_vendor/packaging/_structures.py
|
1152
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
class Infinity(object):
def __repr__(self):
return "Infinity"
def __hash__(self):
return hash(repr(self))
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not isinstance(other, self.__class__)
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __neg__(self):
return NegativeInfinity
Infinity = Infinity()
class NegativeInfinity(object):
def __repr__(self):
return "-Infinity"
def __hash__(self):
return hash(repr(self))
def __lt__(self, other):
return True
def __le__(self, other):
return True
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not isinstance(other, self.__class__)
def __gt__(self, other):
return False
def __ge__(self, other):
return False
def __neg__(self):
return Infinity
NegativeInfinity = NegativeInfinity()
|
dulems/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/generic_inline_admin/tests.py
|
49
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.sites import AdminSite
from django.contrib.contenttypes.generic import (
generic_inlineformset_factory, GenericTabularInline)
from django.forms.formsets import DEFAULT_MAX_NUM
from django.forms.models import ModelForm
from django.test import TestCase
from django.test.utils import override_settings
# local test models
from .admin import MediaInline, MediaPermanentInline
from .models import (Episode, EpisodeExtra, EpisodeMaxNum, Media,
EpisodePermanent, Category)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class GenericAdminViewTest(TestCase):
urls = "generic_inline_admin.urls"
fixtures = ['users.xml']
def setUp(self):
# set TEMPLATE_DEBUG to True to ensure {% include %} will raise
# exceptions since that is how inlines are rendered and #9498 will
# bubble up if it is an issue.
self.original_template_debug = settings.TEMPLATE_DEBUG
settings.TEMPLATE_DEBUG = True
self.client.login(username='super', password='secret')
# Can't load content via a fixture (since the GenericForeignKey
# relies on content type IDs, which will vary depending on what
# other tests have been run), thus we do it here.
e = Episode.objects.create(name='This Week in Django')
self.episode_pk = e.pk
m = Media(content_object=e, url='http://example.com/podcast.mp3')
m.save()
self.mp3_media_pk = m.pk
m = Media(content_object=e, url='http://example.com/logo.png')
m.save()
self.png_media_pk = m.pk
def tearDown(self):
self.client.logout()
settings.TEMPLATE_DEBUG = self.original_template_debug
def testBasicAddGet(self):
"""
A smoke test to ensure GET on the add_view works.
"""
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episode/add/')
self.assertEqual(response.status_code, 200)
def testBasicEditGet(self):
"""
A smoke test to ensure GET on the change_view works.
"""
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episode/%d/' % self.episode_pk)
self.assertEqual(response.status_code, 200)
def testBasicAddPost(self):
"""
A smoke test to ensure POST on add_view works.
"""
post_data = {
"name": "This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "1",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "0",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
}
response = self.client.post('/generic_inline_admin/admin/generic_inline_admin/episode/add/', post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def testBasicEditPost(self):
"""
A smoke test to ensure POST on edit_view works.
"""
post_data = {
"name": "This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "3",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "2",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
"generic_inline_admin-media-content_type-object_id-0-id": "%d" % self.mp3_media_pk,
"generic_inline_admin-media-content_type-object_id-0-url": "http://example.com/podcast.mp3",
"generic_inline_admin-media-content_type-object_id-1-id": "%d" % self.png_media_pk,
"generic_inline_admin-media-content_type-object_id-1-url": "http://example.com/logo.png",
"generic_inline_admin-media-content_type-object_id-2-id": "",
"generic_inline_admin-media-content_type-object_id-2-url": "",
}
url = '/generic_inline_admin/admin/generic_inline_admin/episode/%d/' % self.episode_pk
response = self.client.post(url, post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def testGenericInlineFormset(self):
EpisodeMediaFormSet = generic_inlineformset_factory(Media, can_delete=False, exclude=['description', 'keywords'], extra=3)
e = Episode.objects.get(name='This Week in Django')
# Works with no queryset
formset = EpisodeMediaFormSet(instance=e)
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.mp3_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="url" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
# A queryset can be used to alter display ordering
formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.order_by('url'))
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.mp3_media_pk)
self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="url" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
# Works with a queryset that omits items
formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.filter(url__endswith=".png"))
self.assertEqual(len(formset.forms), 4)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>')
def testGenericInlineFormsetFactory(self):
# Regression test for #10522.
inline_formset = generic_inlineformset_factory(Media,
exclude=('url',))
# Regression test for #12340.
e = Episode.objects.get(name='This Week in Django')
formset = inline_formset(instance=e)
self.assertTrue(formset.get_queryset().ordered)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class GenericInlineAdminParametersTest(TestCase):
urls = "generic_inline_admin.urls"
fixtures = ['users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def _create_object(self, model):
"""
Create a model with an attached Media object via GFK. We can't
load content via a fixture (since the GenericForeignKey relies on
content type IDs, which will vary depending on what other tests
have been run), thus we do it here.
"""
e = model.objects.create(name='This Week in Django')
Media.objects.create(content_object=e, url='http://example.com/podcast.mp3')
return e
def testNoParam(self):
"""
With one initial form, extra (default) at 3, there should be 4 forms.
"""
e = self._create_object(Episode)
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episode/%s/' % e.pk)
formset = response.context['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 4)
self.assertEqual(formset.initial_form_count(), 1)
def testExtraParam(self):
"""
With extra=0, there should be one form.
"""
e = self._create_object(EpisodeExtra)
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episodeextra/%s/' % e.pk)
formset = response.context['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 1)
self.assertEqual(formset.initial_form_count(), 1)
def testMaxNumParam(self):
"""
With extra=5 and max_num=2, there should be only 2 forms.
"""
e = self._create_object(EpisodeMaxNum)
inline_form_data = '<input type="hidden" name="generic_inline_admin-media-content_type-object_id-TOTAL_FORMS" value="2" id="id_generic_inline_admin-media-content_type-object_id-TOTAL_FORMS" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-INITIAL_FORMS" value="1" id="id_generic_inline_admin-media-content_type-object_id-INITIAL_FORMS" />'
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episodemaxnum/%s/' % e.pk)
formset = response.context['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 2)
self.assertEqual(formset.initial_form_count(), 1)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class GenericInlineAdminWithUniqueTogetherTest(TestCase):
urls = "generic_inline_admin.urls"
fixtures = ['users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def testAdd(self):
category_id = Category.objects.create(name='male').pk
post_data = {
"name": "John Doe",
# inline data
"generic_inline_admin-phonenumber-content_type-object_id-TOTAL_FORMS": "1",
"generic_inline_admin-phonenumber-content_type-object_id-INITIAL_FORMS": "0",
"generic_inline_admin-phonenumber-content_type-object_id-MAX_NUM_FORMS": "0",
"generic_inline_admin-phonenumber-content_type-object_id-0-id": "",
"generic_inline_admin-phonenumber-content_type-object_id-0-phone_number": "555-555-5555",
"generic_inline_admin-phonenumber-content_type-object_id-0-category": "%s" % category_id,
}
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/contact/add/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/generic_inline_admin/admin/generic_inline_admin/contact/add/', post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
class NoInlineDeletionTest(TestCase):
urls = "generic_inline_admin.urls"
def test_no_deletion(self):
fake_site = object()
inline = MediaPermanentInline(EpisodePermanent, fake_site)
fake_request = object()
formset = inline.get_formset(fake_request)
self.assertFalse(formset.can_delete)
class MockRequest(object):
pass
class MockSuperUser(object):
def has_perm(self, perm):
return True
request = MockRequest()
request.user = MockSuperUser()
class GenericInlineModelAdminTest(TestCase):
urls = "generic_inline_admin.urls"
def setUp(self):
self.site = AdminSite()
def test_get_formset_kwargs(self):
media_inline = MediaInline(Media, AdminSite())
# Create a formset with default arguments
formset = media_inline.get_formset(request)
self.assertEqual(formset.max_num, DEFAULT_MAX_NUM)
self.assertEqual(formset.can_order, False)
# Create a formset with custom keyword arguments
formset = media_inline.get_formset(request, max_num=100, can_order=True)
self.assertEqual(formset.max_num, 100)
self.assertEqual(formset.can_order, True)
def test_custom_form_meta_exclude_with_readonly(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected when
used in conjunction with `GenericInlineModelAdmin.readonly_fields`
and when no `ModelAdmin.exclude` is defined.
"""
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
readonly_fields = ['description']
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets(request))[0]().forms[0].fields),
['keywords', 'id', 'DELETE'])
def test_custom_form_meta_exclude(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected by
`GenericInlineModelAdmin.get_formset`, and overridden if
`ModelAdmin.exclude` or `GenericInlineModelAdmin.exclude` are defined.
Refs #15907.
"""
# First with `GenericInlineModelAdmin` -----------------
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
exclude = ['description']
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets(request))[0]().forms[0].fields),
['url', 'keywords', 'id', 'DELETE'])
# Then, only with `ModelForm` -----------------
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets(request))[0]().forms[0].fields),
['description', 'keywords', 'id', 'DELETE'])
def test_get_fieldsets(self):
# Test that get_fieldsets is called when figuring out form fields.
# Refs #18681.
class MediaForm(ModelForm):
class Meta:
model = Media
fields = '__all__'
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
can_delete = False
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['url', 'description']})]
ma = MediaInline(Media, self.site)
form = ma.get_formset(None).form
self.assertEqual(form._meta.fields, ['url', 'description'])
|
LockScreen/Backend
|
refs/heads/master
|
venv/lib/python2.7/site-packages/botocore/loaders.py
|
2
|
# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Module for loading various model files.
This module provides the classes that are used to load models used
by botocore. This can include:
* Service models (e.g. the model for EC2, S3, DynamoDB, etc.)
* Other models associated with a service (pagination, waiters)
* Non service-specific config (Endpoint heuristics, retry config)
Loading a model is broken down into several steps:
* Determining the path to load
* Searching the data_path for files to load
* The mechanics of loading the file
The last step is kept separate so that loading mechanisms faster than the
default JSON loader can be plugged in.
The Search Path
===============
Similar to how the PATH environment variable is to finding executables
and the PYTHONPATH environment variable is to finding python modules
to import, the botocore loaders have the concept of a data path exposed
through AWS_DATA_PATH.
This enables end users to provide additional search paths where we
will attempt to load models outside of the models we ship with
botocore. When you create a ``Loader``, there are two paths
automatically added to the model search path:
* <botocore root>/data/
* ~/.aws/models
The first value is the path where all the model files shipped with
botocore are located.
The second path is so that users can just drop new model files in
``~/.aws/models`` without having to mess around with the AWS_DATA_PATH.
AWS_DATA_PATH uses the platform-specific path separator to separate
entries (typically ``:`` on Linux and ``;`` on Windows).
Directory Layout
================
The Loader expects a particular directory layout. In order for any
directory specified in AWS_DATA_PATH to be considered, it must have
this structure for service models::
<root>
|
|-- servicename1
| |-- 2012-10-25
| |-- service-2.json
|-- ec2
| |-- 2014-01-01
| | |-- paginators-1.json
| | |-- service-2.json
| | |-- waiters-2.json
| |-- 2015-03-01
| |-- paginators-1.json
| |-- service-2.json
| |-- waiters-2.json
That is:
* The root directory contains subdirectories named after the services.
* Within each service directory, there's a subdirectory for each
available API version.
* Within each API version, there are model specific files, including
(but not limited to): service-2.json, waiters-2.json, paginators-1.json
The ``-1`` and ``-2`` suffixes at the end of the model files denote which version
schema is used within the model. Even though this information is available in
the ``version`` key within the model, this version is also part of the filename
so that code does not need to load the JSON model in order to determine which
version to use.
"""
import os
from botocore import BOTOCORE_ROOT
from botocore.compat import json
from botocore.compat import OrderedDict
from botocore.exceptions import DataNotFoundError, ValidationError
def instance_cache(func):
"""Cache the result of a method on a per instance basis.
This is not a general purpose caching decorator. It may only be used
on methods of an instance, and that instance *must* provide a
``self._cache`` dictionary.
"""
def _wrapper(self, *args, **kwargs):
key = (func.__name__,) + args
for pair in sorted(kwargs.items()):
key += pair
if key in self._cache:
return self._cache[key]
data = func(self, *args, **kwargs)
self._cache[key] = data
return data
return _wrapper
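# Illustrative sketch (hypothetical class, not part of botocore): any object
# that provides a ``self._cache`` dict can memoize a method per instance with
# ``instance_cache``.  e.g. ``obj = _InstanceCacheExample(); obj.double(3)``
# computes 6 once; a second ``obj.double(3)`` call is served from ``obj._cache``.
class _InstanceCacheExample(object):
    def __init__(self):
        self._cache = {}

    @instance_cache
    def double(self, value):
        return value * 2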
class JSONFileLoader(object):
"""Loader JSON files.
This class can load the default format of models, which is a JSON file.
"""
def exists(self, file_path):
"""Checks if the file exists.
:type file_path: str
:param file_path: The full path to the file to load without
the '.json' extension.
:return: True if file path exists, False otherwise.
"""
return os.path.isfile(file_path + '.json')
def load_file(self, file_path):
"""Attempt to load the file path.
:type file_path: str
:param file_path: The full path to the file to load without
the '.json' extension.
:return: The loaded data if it exists, otherwise None.
"""
full_path = file_path + '.json'
if not os.path.isfile(full_path):
return
# By default the file will be opened with locale encoding on Python 3.
# We specify "utf8" here to ensure the correct behavior.
with open(full_path, 'rb') as fp:
payload = fp.read().decode('utf-8')
return json.loads(payload, object_pairs_hook=OrderedDict)
def create_loader(search_path_string=None):
"""Create a Loader class.
This factory function creates a loader given a search string path.
:type search_path_string: str
:param search_path_string: The AWS_DATA_PATH value. A string
of data path values separated by the ``os.path.pathsep`` value,
which is typically ``:`` on POSIX platforms and ``;`` on
windows.
:return: A ``Loader`` instance.
"""
if search_path_string is None:
return Loader()
paths = []
extra_paths = search_path_string.split(os.pathsep)
for path in extra_paths:
path = os.path.expanduser(os.path.expandvars(path))
paths.append(path)
return Loader(extra_search_paths=paths)
class Loader(object):
"""Find and load data models.
This class will handle searching for and loading data models.
The main method used here is ``load_service_model``, which is a
convenience method over ``load_data`` and ``determine_latest_version``.
"""
FILE_LOADER_CLASS = JSONFileLoader
# The included models in botocore/data/ that we ship with botocore.
BUILTIN_DATA_PATH = os.path.join(BOTOCORE_ROOT, 'data')
# For convenience we automatically add ~/.aws/models to the data path.
CUSTOMER_DATA_PATH = os.path.join(os.path.expanduser('~'),
'.aws', 'models')
def __init__(self, extra_search_paths=None, file_loader=None,
cache=None, include_default_search_paths=True):
self._cache = {}
if file_loader is None:
file_loader = self.FILE_LOADER_CLASS()
self.file_loader = file_loader
if include_default_search_paths:
self._search_paths = [self.CUSTOMER_DATA_PATH,
self.BUILTIN_DATA_PATH]
else:
self._search_paths = []
if extra_search_paths is not None:
self._search_paths.extend(extra_search_paths)
@property
def search_paths(self):
return self._search_paths
@instance_cache
def list_available_services(self, type_name):
"""List all known services.
This will traverse the search path and look for all known
services.
:type type_name: str
:param type_name: The type of the service (service-2,
paginators-1, waiters-2, etc). This is needed because
the list of available services depends on the service
type. For example, the latest API version available for
a resource-1.json file may not be the latest API version
available for a service-2.json file.
:return: A list of all services. The list of services will
be sorted.
"""
services = set()
for possible_path in self._potential_locations():
# Any directory in the search path is potentially a service.
# We'll collect any initial list of potential services,
# but we'll then need to further process these directories
# by searching for the corresponding type_name in each
# potential directory.
possible_services = [
d for d in os.listdir(possible_path)
if os.path.isdir(os.path.join(possible_path, d))]
for service_name in possible_services:
full_dirname = os.path.join(possible_path, service_name)
api_versions = os.listdir(full_dirname)
for api_version in api_versions:
full_load_path = os.path.join(full_dirname,
api_version,
type_name)
if self.file_loader.exists(full_load_path):
services.add(service_name)
break
return sorted(services)
@instance_cache
def determine_latest_version(self, service_name, type_name):
"""Find the latest API version available for a service.
:type service_name: str
:param service_name: The name of the service.
:type type_name: str
:param type_name: The type of the service (service-2,
paginators-1, waiters-2, etc). This is needed because
the latest API version available can depend on the service
type. For example, the latest API version available for
a resource-1.json file may not be the latest API version
available for a service-2.json file.
:rtype: str
:return: The latest API version. If the service does not exist
or does not have any available API data, then a
``DataNotFoundError`` exception will be raised.
"""
return max(self.list_api_versions(service_name, type_name))
@instance_cache
def list_api_versions(self, service_name, type_name):
"""List all API versions available for a particular service type
:type service_name: str
:param service_name: The name of the service
:type type_name: str
:param type_name: The type name for the service (i.e service-2,
paginators-1, etc.)
:rtype: list
:return: A list of API version strings in sorted order.
"""
known_api_versions = set()
for possible_path in self._potential_locations(service_name,
must_exist=True,
is_dir=True):
for dirname in os.listdir(possible_path):
full_path = os.path.join(possible_path, dirname, type_name)
# Only add to the known_api_versions if the directory
# contains a service-2, paginators-1, etc. file corresponding
# to the type_name passed in.
if self.file_loader.exists(full_path):
known_api_versions.add(dirname)
if not known_api_versions:
raise DataNotFoundError(data_path=service_name)
return sorted(known_api_versions)
@instance_cache
def load_service_model(self, service_name, type_name, api_version=None):
"""Load a botocore service model
This is the main method for loading botocore models (e.g. a service
model, pagination configs, waiter configs, etc.).
:type service_name: str
:param service_name: The name of the service (e.g ``ec2``, ``s3``).
:type type_name: str
:param type_name: The model type. Valid types include, but are not
limited to: ``service-2``, ``paginators-1``, ``waiters-2``.
:type api_version: str
:param api_version: The API version to load. If this is not
provided, then the latest API version will be used.
:return: The loaded data, or a DataNotFoundError if no data
could be found.
"""
# Wrapper around the load_data. This will calculate the path
# to call load_data with.
if service_name not in self.list_available_services('service-2'):
raise ValidationError(value=service_name, param='service_name',
type_name='str')
if api_version is None:
api_version = self.determine_latest_version(
service_name, type_name)
full_path = os.path.join(service_name, api_version, type_name)
return self.load_data(full_path)
@instance_cache
def load_data(self, name):
"""Load data given a data path.
This is a low level method that will search through the various
search paths until it's able to load a value. This is typically
only needed to load *non* model files (such as _endpoints and
_retry). If you need to load model files, you should prefer
``load_service_model``.
:type name: str
:param name: The data path, i.e ``ec2/2015-03-01/service-2``.
:return: The loaded data. If no data could be found then
a DataNotFoundError is raised.
"""
for possible_path in self._potential_locations(name):
found = self.file_loader.load_file(possible_path)
if found is not None:
return found
# We didn't find anything that matched on any path.
raise DataNotFoundError(data_path=name)
def _potential_locations(self, name=None, must_exist=False,
is_dir=False):
# Will give an iterator over the full path of potential locations
# according to the search path.
for path in self.search_paths:
if os.path.isdir(path):
full_path = path
if name is not None:
full_path = os.path.join(path, name)
if not must_exist:
yield full_path
else:
if is_dir and os.path.isdir(full_path):
yield full_path
elif os.path.exists(full_path):
yield full_path
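# Minimal usage sketch (illustrative; assumes botocore is installed with its
# bundled data directory intact, including the ec2 service model).  It mirrors
# the search-path behaviour described in the module docstring: ~/.aws/models
# and <botocore root>/data are consulted, plus anything passed to
# create_loader() via an AWS_DATA_PATH-style string.
if __name__ == '__main__':
    example_loader = create_loader()
    print(example_loader.search_paths)
    print(example_loader.determine_latest_version('ec2', 'service-2'))
    print(example_loader.list_available_services('service-2')[:5])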
|
sjsrey/pysal
|
refs/heads/master
|
pysal/model/spopt/__init__.py
|
2
|
from spopt import region
|
prasadtalasila/INET-Vagrant-Demos
|
refs/heads/master
|
Nonce_Demo/impacket-0.9.12/impacket/testcases/ImpactPacket/test_ICMP6.py
|
1
|
#!/usr/bin/env python
#Impact test version
try:
from impacket import IP6_Address, IP6, ImpactDecoder, ICMP6
except:
pass
#Standalone test version
try:
import sys
sys.path.insert(0,"../..")
import IP6_Address, IP6, ImpactDecoder, ICMP6
except:
pass
import unittest
class TestICMP6(unittest.TestCase):
def setUp(self):
self.packet_list = self.generate_icmp6_constructed_packets()
self.message_description_list = [
"Echo Request",
"Echo Reply",
"Parameter problem - Erroneous header field",
"Parameter problem - Unrecognized Next Header",
"Parameter problem - Unrecognized IP6 Option",
"Destination unreachable - No route to destination",
"Destination unreachable - Administratively prohibited",
"Destination unreachable - Beyond scope of source address",
"Destination unreachable - Address unreachable ",
"Destination unreachable - Port unreachable",
"Destination unreachable - Src addr failed due to policy",
"Destination unreachable - Reject route",
"Time exceeded - Hop limit exceeded in transit",
"Time exceeded - Fragment reassembly time exceeded",
"Packet too big"
]
self.reference_data_list = [
[0x80, 0x00, 0xA2, 0xA6, 0x00, 0x01, 0x00, 0x02, 0xFE, 0x56, 0x88],#Echo Request
[0x81, 0x00, 0xA1, 0xA6, 0x00, 0x01, 0x00, 0x02, 0xFE, 0x56, 0x88],#Echo Reply
[0x04, 0x00, 0x1E, 0xA8, 0x00, 0x00, 0x00, 0x02, 0xFE, 0x56, 0x88],#Parameter problem
[0x04, 0x01, 0x1E, 0xA7, 0x00, 0x00, 0x00, 0x02, 0xFE, 0x56, 0x88],
[0x04, 0x02, 0x1E, 0xA6, 0x00, 0x00, 0x00, 0x02, 0xFE, 0x56, 0x88],
[0x01, 0x00, 0x21, 0xAA, 0x00, 0x00, 0x00, 0x00, 0xFE, 0x56, 0x88],#Dest. unreachable
[0x01, 0x01, 0x21, 0xA9, 0x00, 0x00, 0x00, 0x00, 0xFE, 0x56, 0x88],
[0x01, 0x02, 0x21, 0xA8, 0x00, 0x00, 0x00, 0x00, 0xFE, 0x56, 0x88],
[0x01, 0x03, 0x21, 0xA7, 0x00, 0x00, 0x00, 0x00, 0xFE, 0x56, 0x88],
[0x01, 0x04, 0x21, 0xA6, 0x00, 0x00, 0x00, 0x00, 0xFE, 0x56, 0x88],
[0x01, 0x05, 0x21, 0xA5, 0x00, 0x00, 0x00, 0x00, 0xFE, 0x56, 0x88],
[0x01, 0x06, 0x21, 0xA4, 0x00, 0x00, 0x00, 0x00, 0xFE, 0x56, 0x88],
[0x03, 0x00, 0x1F, 0xAA, 0x00, 0x00, 0x00, 0x00, 0xFE, 0x56, 0x88],#Time exceeded
[0x03, 0x01, 0x1F, 0xA9, 0x00, 0x00, 0x00, 0x00, 0xFE, 0x56, 0x88],
[0x02, 0x00, 0x1B, 0x96, 0x00, 0x00, 0x05, 0x14, 0xFE, 0x56, 0x88]#Packet too big
]
def encapsulate_icmp6_packet_in_ip6_packet(self, icmp6_packet):
#Build IP6 reference packet (which will be used to construct the pseudo-header and checksum)
ip6_packet = IP6.IP6()
ip6_packet.set_traffic_class(0)
ip6_packet.set_flow_label(0)
ip6_packet.set_hop_limit(1)
ip6_packet.set_source_address("FE80::78F8:89D1:30FF:256B")
ip6_packet.set_destination_address("FF02::1")
#Encapsulate ICMP6 packet in IP6 packet, calculate the checksum using the pseudo-header
ip6_packet.contains(icmp6_packet)
ip6_packet.set_next_header(ip6_packet.child().get_ip_protocol_number())
ip6_packet.set_payload_length(ip6_packet.child().get_size())
icmp6_packet.calculate_checksum()
return ip6_packet
def compare_icmp6_packet_with_reference_buffer(self, icmp6_packet, reference_buffer, test_fail_message):
#Encapsulate the packet, in order to compute the checksum
ip6_packet = self.encapsulate_icmp6_packet_in_ip6_packet(icmp6_packet)
#Extract the header and payload bytes
icmp6_header_buffer = ip6_packet.child().get_bytes().tolist()
icmp6_payload_buffer = icmp6_packet.child().get_bytes().tolist()
generated_buffer = icmp6_header_buffer + icmp6_payload_buffer
self.assertEquals(generated_buffer, reference_buffer, test_fail_message)
def generate_icmp6_constructed_packets(self):
packet_list = []
arbitrary_data = [0xFE, 0x56, 0x88]
echo_id = 1
echo_sequence_number = 2
icmp6_packet = ICMP6.ICMP6.Echo_Request(echo_id, echo_sequence_number, arbitrary_data)
packet_list.append(icmp6_packet)
icmp6_packet = ICMP6.ICMP6.Echo_Reply(echo_id, echo_sequence_number, arbitrary_data)
packet_list.append(icmp6_packet)
originating_packet_data = arbitrary_data
for code in range(0, 3):
problem_pointer = 2
icmp6_packet = ICMP6.ICMP6.Parameter_Problem(code, problem_pointer, originating_packet_data)
packet_list.append(icmp6_packet)
for code in range(0, 7):
icmp6_packet = ICMP6.ICMP6.Destination_Unreachable(code, originating_packet_data)
packet_list.append(icmp6_packet)
for code in range(0, 2):
icmp6_packet = ICMP6.ICMP6.Time_Exceeded(code, originating_packet_data)
packet_list.append(icmp6_packet)
icmp6_packet = ICMP6.ICMP6.Packet_Too_Big(1300, originating_packet_data)
packet_list.append(icmp6_packet)
return packet_list
def test_message_construction(self):
for packet, reference, msg in zip(self.packet_list, self.reference_data_list, self.message_description_list):
self.compare_icmp6_packet_with_reference_buffer(packet, reference, "ICMP6 creation of " + msg + " - Buffer mismatch")
def test_message_decoding(self):
d = ImpactDecoder.ICMP6Decoder()
msg_types = [
ICMP6.ICMP6.ECHO_REQUEST,
ICMP6.ICMP6.ECHO_REPLY,
ICMP6.ICMP6.PARAMETER_PROBLEM,
ICMP6.ICMP6.PARAMETER_PROBLEM,
ICMP6.ICMP6.PARAMETER_PROBLEM,
ICMP6.ICMP6.DESTINATION_UNREACHABLE,
ICMP6.ICMP6.DESTINATION_UNREACHABLE,
ICMP6.ICMP6.DESTINATION_UNREACHABLE,
ICMP6.ICMP6.DESTINATION_UNREACHABLE,
ICMP6.ICMP6.DESTINATION_UNREACHABLE,
ICMP6.ICMP6.DESTINATION_UNREACHABLE,
ICMP6.ICMP6.DESTINATION_UNREACHABLE,
ICMP6.ICMP6.TIME_EXCEEDED,
ICMP6.ICMP6.TIME_EXCEEDED,
ICMP6.ICMP6.PACKET_TOO_BIG
]
msg_codes = [
0,
0,
ICMP6.ICMP6.ERRONEOUS_HEADER_FIELD_ENCOUNTERED,
ICMP6.ICMP6.UNRECOGNIZED_NEXT_HEADER_TYPE_ENCOUNTERED,
ICMP6.ICMP6.UNRECOGNIZED_IPV6_OPTION_ENCOUNTERED,
ICMP6.ICMP6.NO_ROUTE_TO_DESTINATION,
ICMP6.ICMP6.ADMINISTRATIVELY_PROHIBITED,
ICMP6.ICMP6.BEYOND_SCOPE_OF_SOURCE_ADDRESS,
ICMP6.ICMP6.ADDRESS_UNREACHABLE,
ICMP6.ICMP6.PORT_UNREACHABLE,
ICMP6.ICMP6.SOURCE_ADDRESS_FAILED_INGRESS_EGRESS_POLICY,
ICMP6.ICMP6.REJECT_ROUTE_TO_DESTINATION,
ICMP6.ICMP6.HOP_LIMIT_EXCEEDED_IN_TRANSIT,
ICMP6.ICMP6.FRAGMENT_REASSEMBLY_TIME_EXCEEDED,
0
]
for i in range (0, len(self.reference_data_list)):
p = d.decode(self.reference_data_list[i])
self.assertEquals(p.get_type(), msg_types[i], self.message_description_list[i] + " - Msg type mismatch")
self.assertEquals(p.get_code(), msg_codes[i], self.message_description_list[i] + " - Msg code mismatch")
if i in range(0, 2):
self.assertEquals(p.get_echo_id(), 1, self.message_description_list[i] + " - ID mismatch")
self.assertEquals(p.get_echo_sequence_number(), 2, self.message_description_list[i] + " - Sequence number mismatch")
self.assertEquals(p.get_echo_arbitrary_data().tolist(), [0xFE, 0x56, 0x88], self.message_description_list[i] + " - Arbitrary data mismatch")
if i in range(2, 5):
self.assertEquals(p.get_parm_problem_pointer(), 2, self.message_description_list[i] + " - Pointer mismatch")
if i in range(5, 15):
self.assertEquals(p.get_originating_packet_data().tolist(), [0xFE, 0x56, 0x88], self.message_description_list[i] + " - Originating packet data mismatch")
if i in range(14, 15):
self.assertEquals(p.get_mtu(), 1300, self.message_description_list[i] + " - MTU mismatch")
suite = unittest.TestLoader().loadTestsFromTestCase(TestICMP6)
unittest.TextTestRunner(verbosity=2).run(suite)
|
arajparaj/pysub
|
refs/heads/master
|
subtitle.py
|
1
|
#!/usr/bin/python -tt
import os
import hashlib
import urllib2
import sys
from PyQt4 import QtGui,QtCore
def get_hash(name):
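    # SubDB hashing scheme: MD5 of the first and last 64 KiB of the video file.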
readsize = 64 * 1024
with open(name, 'rb') as f:
size = os.path.getsize(name)
data = f.read(readsize)
f.seek(-readsize, os.SEEK_END)
data += f.read(readsize)
return hashlib.md5(data).hexdigest()
def sub_downloader(path):
hash = get_hash(path)
replace = ['.avi', '.dat', '.mp4', '.mkv', '.vob',".mpg",".mpeg"]
for content in replace:
path = path.replace(content,"")
if not os.path.exists(path+".srt"):
headers = { 'User-Agent' : 'SubDB/1.0 (subtitle-downloader/1.0; http://google.com)' }
url = "http://api.thesubdb.com/?action=download&hash="+hash+"&language=en"
req = urllib2.Request(url, '', headers)
try:
response = urllib2.urlopen(req).read()
with open (path+".srt","wb") as subtitle:
subtitle.write(response)
except urllib2.HTTPError, e:
print('subtitle not found')
def processFile(currentDir):
currentDir = os.path.abspath(currentDir)
filesInCurDir = os.listdir(currentDir)
for file in filesInCurDir:
curFile = os.path.join(currentDir, file)
if os.path.isfile(curFile):
curFileExtension = curFile[-3:]
if curFileExtension in ['avi', 'dat', 'mp4', 'mkv', 'vob',"mpg","mpeg"]:
print('downloading the subtitle for %s' %curFile)
sub_downloader(curFile)
print('downloading completed')
else:
print('entering to directory %s'%curFile)
processFile(curFile)
if __name__ == "__main__" :
app = QtGui.QApplication(sys.argv)
widget = QtGui.QWidget()
widget.resize(500, 250)
screen = QtGui.QDesktopWidget().screenGeometry()
widget_size = widget.geometry()
widget.move((screen.width()-widget_size.width())/2,(screen.height()-widget_size.height())/2)
widget.setWindowTitle('https://github.com/arajparaj/pysub')
widget.setWindowIcon(QtGui.QIcon('exit.png'))
foldername = QtGui.QFileDialog.getExistingDirectory(widget,'Choose a Video Folder directory')
if foldername:
processFile(str(foldername))
else :
print "please input a valid folder name"
|
fsufitch/dailyprogrammer
|
refs/heads/master
|
ideas/iddqd/inputgen.py
|
1
|
import random, sys
rows = int(sys.argv[1])
cols = int(sys.argv[2])
zombies = int(sys.argv[3])
print(rows, cols)
positions = set()
while len(positions) < zombies:
row = random.randrange(0, rows)
col = random.randrange(0, cols)
positions.add( (row, col) )
for row, col in positions:
print(row, col)
|
Anaphory/p4-phylogeny
|
refs/heads/master
|
share/Examples/L_mcmc/A_simple/sMcmc.py
|
4
|
read("../d.nex")
d = Data()
t = func.randomTree(taxNames=d.taxNames)
t.data = d
t.newComp(free=1, spec='empirical')
t.newRMatrix(free=1, spec='ones')
t.setNGammaCat(nGammaCat=4)
t.newGdasrv(free=1, val=0.5)
t.setPInvar(free=1, val=0.2)
m = Mcmc(t, nChains=4, runNum=0, sampleInterval=10, checkPointInterval=2000)
m.run(4000)
|
thundernet8/WRGameVideos-Server
|
refs/heads/master
|
venv/lib/python2.7/site-packages/sqlalchemy/sql/util.py
|
9
|
# sql/util.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""High level utilities which build upon other modules here.
"""
from .. import exc, util
from .base import _from_objects, ColumnSet
from . import operators, visitors
from itertools import chain
from collections import deque
from .elements import BindParameter, ColumnClause, ColumnElement, \
Null, UnaryExpression, literal_column, Label, _label_reference, \
_textual_label_reference
from .selectable import ScalarSelect, Join, FromClause, FromGrouping
from .schema import Column
join_condition = util.langhelpers.public_factory(
Join._join_condition,
".sql.util.join_condition")
# names that are still being imported from the outside
from .annotation import _shallow_annotate, _deep_annotate, _deep_deannotate
from .elements import _find_columns
from .ddl import sort_tables
def find_join_source(clauses, join_to):
"""Given a list of FROM clauses and a selectable,
return the first index and element from the list of
clauses which can be joined against the selectable. Returns
None, None if no match is found.
e.g.::
clause1 = table1.join(table2)
clause2 = table4.join(table5)
join_to = table2.join(table3)
find_join_source([clause1, clause2], join_to) == clause1
"""
selectables = list(_from_objects(join_to))
for i, f in enumerate(clauses):
for s in selectables:
if f.is_derived_from(s):
return i, f
else:
return None, None
def visit_binary_product(fn, expr):
"""Produce a traversal of the given expression, delivering
column comparisons to the given function.
The function is of the form::
def my_fn(binary, left, right)
For each binary expression located which has a
comparison operator, the product of "left" and
"right" will be delivered to that function,
in terms of that binary.
Hence an expression like::
and_(
(a + b) == q + func.sum(e + f),
j == r
)
would have the traversal::
a <eq> q
a <eq> e
a <eq> f
b <eq> q
b <eq> e
b <eq> f
j <eq> r
That is, every combination of "left" and
"right" that doesn't further contain
a binary comparison is passed as pairs.
"""
stack = []
def visit(element):
if isinstance(element, ScalarSelect):
# we don't want to dig into correlated subqueries,
# those are just column elements by themselves
yield element
elif element.__visit_name__ == 'binary' and \
operators.is_comparison(element.operator):
stack.insert(0, element)
for l in visit(element.left):
for r in visit(element.right):
fn(stack[0], l, r)
stack.pop(0)
for elem in element.get_children():
visit(elem)
else:
if isinstance(element, ColumnClause):
yield element
for elem in element.get_children():
for e in visit(elem):
yield e
list(visit(expr))
def find_tables(clause, check_columns=False,
include_aliases=False, include_joins=False,
include_selects=False, include_crud=False):
"""locate Table objects within the given expression."""
tables = []
_visitors = {}
if include_selects:
_visitors['select'] = _visitors['compound_select'] = tables.append
if include_joins:
_visitors['join'] = tables.append
if include_aliases:
_visitors['alias'] = tables.append
if include_crud:
_visitors['insert'] = _visitors['update'] = \
_visitors['delete'] = lambda ent: tables.append(ent.table)
if check_columns:
def visit_column(column):
tables.append(column.table)
_visitors['column'] = visit_column
_visitors['table'] = tables.append
visitors.traverse(clause, {'column_collections': False}, _visitors)
return tables
def unwrap_order_by(clause):
"""Break up an 'order by' expression into individual column-expressions,
without DESC/ASC/NULLS FIRST/NULLS LAST"""
cols = util.column_set()
stack = deque([clause])
while stack:
t = stack.popleft()
if isinstance(t, ColumnElement) and \
(
not isinstance(t, UnaryExpression) or
not operators.is_ordering_modifier(t.modifier)
):
if isinstance(t, _label_reference):
t = t.element
if isinstance(t, (_textual_label_reference)):
continue
cols.add(t)
else:
for c in t.get_children():
stack.append(c)
return cols
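# Illustration (hypothetical table ``t``): unwrap_order_by(t.c.x.desc())
# yields a column_set containing t.c.x -- the DESC modifier itself is dropped.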
def clause_is_present(clause, search):
"""Given a target clause and a second to search within, return True
if the target is plainly present in the search without any
subqueries or aliases involved.
Basically descends through Joins.
"""
for elem in surface_selectables(search):
if clause == elem: # use == here so that Annotated's compare
return True
else:
return False
def surface_selectables(clause):
stack = [clause]
while stack:
elem = stack.pop()
yield elem
if isinstance(elem, Join):
stack.extend((elem.left, elem.right))
elif isinstance(elem, FromGrouping):
stack.append(elem.element)
def selectables_overlap(left, right):
"""Return True if left/right have some overlapping selectable"""
return bool(
set(surface_selectables(left)).intersection(
surface_selectables(right)
)
)
def bind_values(clause):
"""Return an ordered list of "bound" values in the given clause.
E.g.::
>>> expr = and_(
... table.c.foo==5, table.c.foo==7
... )
>>> bind_values(expr)
[5, 7]
"""
v = []
def visit_bindparam(bind):
v.append(bind.effective_value)
visitors.traverse(clause, {}, {'bindparam': visit_bindparam})
return v
def _quote_ddl_expr(element):
if isinstance(element, util.string_types):
element = element.replace("'", "''")
return "'%s'" % element
else:
return repr(element)
class _repr_params(object):
"""A string view of bound parameters, truncating
display to the given number of 'multi' parameter sets.
"""
def __init__(self, params, batches):
self.params = params
self.batches = batches
def __repr__(self):
if isinstance(self.params, (list, tuple)) and \
len(self.params) > self.batches and \
isinstance(self.params[0], (list, dict, tuple)):
msg = " ... displaying %i of %i total bound parameter sets ... "
return ' '.join((
repr(self.params[:self.batches - 2])[0:-1],
msg % (self.batches, len(self.params)),
repr(self.params[-2:])[1:]
))
else:
return repr(self.params)
def adapt_criterion_to_null(crit, nulls):
"""given criterion containing bind params, convert selected elements
to IS NULL.
"""
def visit_binary(binary):
if isinstance(binary.left, BindParameter) \
and binary.left._identifying_key in nulls:
# reverse order if the NULL is on the left side
binary.left = binary.right
binary.right = Null()
binary.operator = operators.is_
binary.negate = operators.isnot
elif isinstance(binary.right, BindParameter) \
and binary.right._identifying_key in nulls:
binary.right = Null()
binary.operator = operators.is_
binary.negate = operators.isnot
return visitors.cloned_traverse(crit, {}, {'binary': visit_binary})
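# Illustration (hypothetical): given crit = (table.c.x == bindparam('x')) and
# nulls = {'x'}, the returned clause compiles to "table.x IS NULL" instead of
# "table.x = :x".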
def splice_joins(left, right, stop_on=None):
if left is None:
return right
stack = [(right, None)]
adapter = ClauseAdapter(left)
ret = None
while stack:
(right, prevright) = stack.pop()
if isinstance(right, Join) and right is not stop_on:
right = right._clone()
right._reset_exported()
right.onclause = adapter.traverse(right.onclause)
stack.append((right.left, right))
else:
right = adapter.traverse(right)
if prevright is not None:
prevright.left = right
if ret is None:
ret = right
return ret
def reduce_columns(columns, *clauses, **kw):
"""given a list of columns, return a 'reduced' set based on natural
equivalents.
the set is reduced to the smallest list of columns which have no natural
equivalent present in the list. A "natural equivalent" means that two
columns will ultimately represent the same value because they are related
by a foreign key.
\*clauses is an optional list of join clauses which will be traversed
to further identify columns that are "equivalent".
\**kw may specify 'ignore_nonexistent_tables' to ignore foreign keys
whose tables are not yet configured, or columns that aren't yet present.
This function is primarily used to determine the most minimal "primary
key" from a selectable, by reducing the set of primary key columns present
in the selectable to just those that are not repeated.
"""
ignore_nonexistent_tables = kw.pop('ignore_nonexistent_tables', False)
only_synonyms = kw.pop('only_synonyms', False)
columns = util.ordered_column_set(columns)
omit = util.column_set()
for col in columns:
for fk in chain(*[c.foreign_keys for c in col.proxy_set]):
for c in columns:
if c is col:
continue
try:
fk_col = fk.column
except exc.NoReferencedColumnError:
# TODO: add specific coverage here
# to test/sql/test_selectable ReduceTest
if ignore_nonexistent_tables:
continue
else:
raise
except exc.NoReferencedTableError:
# TODO: add specific coverage here
# to test/sql/test_selectable ReduceTest
if ignore_nonexistent_tables:
continue
else:
raise
if fk_col.shares_lineage(c) and \
(not only_synonyms or
c.name == col.name):
omit.add(col)
break
if clauses:
def visit_binary(binary):
if binary.operator == operators.eq:
cols = util.column_set(
chain(*[c.proxy_set for c in columns.difference(omit)]))
if binary.left in cols and binary.right in cols:
for c in reversed(columns):
if c.shares_lineage(binary.right) and \
(not only_synonyms or
c.name == binary.left.name):
omit.add(c)
break
for clause in clauses:
if clause is not None:
visitors.traverse(clause, {}, {'binary': visit_binary})
return ColumnSet(columns.difference(omit))
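# Illustration (hypothetical tables): reduce_columns([parent.c.id, child.c.id]),
# where child.id is a foreign key to parent.id, returns ColumnSet([parent.c.id]);
# the FK-holding column is dropped as a natural equivalent of the column it
# references.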
def criterion_as_pairs(expression, consider_as_foreign_keys=None,
consider_as_referenced_keys=None, any_operator=False):
"""traverse an expression and locate binary criterion pairs."""
if consider_as_foreign_keys and consider_as_referenced_keys:
raise exc.ArgumentError("Can only specify one of "
"'consider_as_foreign_keys' or "
"'consider_as_referenced_keys'")
def col_is(a, b):
# return a is b
return a.compare(b)
def visit_binary(binary):
if not any_operator and binary.operator is not operators.eq:
return
if not isinstance(binary.left, ColumnElement) or \
not isinstance(binary.right, ColumnElement):
return
if consider_as_foreign_keys:
if binary.left in consider_as_foreign_keys and \
(col_is(binary.right, binary.left) or
binary.right not in consider_as_foreign_keys):
pairs.append((binary.right, binary.left))
elif binary.right in consider_as_foreign_keys and \
(col_is(binary.left, binary.right) or
binary.left not in consider_as_foreign_keys):
pairs.append((binary.left, binary.right))
elif consider_as_referenced_keys:
if binary.left in consider_as_referenced_keys and \
(col_is(binary.right, binary.left) or
binary.right not in consider_as_referenced_keys):
pairs.append((binary.left, binary.right))
elif binary.right in consider_as_referenced_keys and \
(col_is(binary.left, binary.right) or
binary.left not in consider_as_referenced_keys):
pairs.append((binary.right, binary.left))
else:
if isinstance(binary.left, Column) and \
isinstance(binary.right, Column):
if binary.left.references(binary.right):
pairs.append((binary.right, binary.left))
elif binary.right.references(binary.left):
pairs.append((binary.left, binary.right))
pairs = []
visitors.traverse(expression, {}, {'binary': visit_binary})
return pairs
class ClauseAdapter(visitors.ReplacingCloningVisitor):
"""Clones and modifies clauses based on column correspondence.
E.g.::
table1 = Table('sometable', metadata,
Column('col1', Integer),
Column('col2', Integer)
)
table2 = Table('someothertable', metadata,
Column('col1', Integer),
Column('col2', Integer)
)
condition = table1.c.col1 == table2.c.col1
make an alias of table1::
s = table1.alias('foo')
calling ``ClauseAdapter(s).traverse(condition)`` converts
condition to read::
s.c.col1 == table2.c.col1
"""
def __init__(self, selectable, equivalents=None,
include_fn=None, exclude_fn=None,
adapt_on_names=False, anonymize_labels=False):
self.__traverse_options__ = {
'stop_on': [selectable],
'anonymize_labels': anonymize_labels}
self.selectable = selectable
self.include_fn = include_fn
self.exclude_fn = exclude_fn
self.equivalents = util.column_dict(equivalents or {})
self.adapt_on_names = adapt_on_names
def _corresponding_column(self, col, require_embedded,
_seen=util.EMPTY_SET):
newcol = self.selectable.corresponding_column(
col,
require_embedded=require_embedded)
if newcol is None and col in self.equivalents and col not in _seen:
for equiv in self.equivalents[col]:
newcol = self._corresponding_column(
equiv, require_embedded=require_embedded,
_seen=_seen.union([col]))
if newcol is not None:
return newcol
if self.adapt_on_names and newcol is None:
newcol = self.selectable.c.get(col.name)
return newcol
def replace(self, col):
if isinstance(col, FromClause) and \
self.selectable.is_derived_from(col):
return self.selectable
elif not isinstance(col, ColumnElement):
return None
elif self.include_fn and not self.include_fn(col):
return None
elif self.exclude_fn and self.exclude_fn(col):
return None
else:
return self._corresponding_column(col, True)
class ColumnAdapter(ClauseAdapter):
"""Extends ClauseAdapter with extra utility functions.
Key aspects of ColumnAdapter include:
* Expressions that are adapted are stored in a persistent
.columns collection; so that an expression E adapted into
an expression E1, will return the same object E1 when adapted
a second time. This is important in particular for things like
Label objects that are anonymized, so that the ColumnAdapter can
be used to present a consistent "adapted" view of things.
* Exclusion of items from the persistent collection based on
include/exclude rules, but also independent of hash identity.
This because "annotated" items all have the same hash identity as their
parent.
* "wrapping" capability is added, so that the replacement of an expression
E can proceed through a series of adapters. This differs from the
visitor's "chaining" feature in that the resulting object is passed
through all replacing functions unconditionally, rather than stopping
at the first one that returns non-None.
* An adapt_required option, used by eager loading to indicate that
we don't trust a result row column that is not translated.
This is to prevent a column from being interpreted as that
of the child row in a self-referential scenario, see
inheritance/test_basic.py->EagerTargetingTest.test_adapt_stringency
"""
def __init__(self, selectable, equivalents=None,
chain_to=None, adapt_required=False,
include_fn=None, exclude_fn=None,
adapt_on_names=False,
allow_label_resolve=True,
anonymize_labels=False):
ClauseAdapter.__init__(self, selectable, equivalents,
include_fn=include_fn, exclude_fn=exclude_fn,
adapt_on_names=adapt_on_names,
anonymize_labels=anonymize_labels)
if chain_to:
self.chain(chain_to)
self.columns = util.populate_column_dict(self._locate_col)
if self.include_fn or self.exclude_fn:
self.columns = self._IncludeExcludeMapping(self, self.columns)
self.adapt_required = adapt_required
self.allow_label_resolve = allow_label_resolve
self._wrap = None
class _IncludeExcludeMapping(object):
def __init__(self, parent, columns):
self.parent = parent
self.columns = columns
def __getitem__(self, key):
if (
self.parent.include_fn and not self.parent.include_fn(key)
) or (
self.parent.exclude_fn and self.parent.exclude_fn(key)
):
if self.parent._wrap:
return self.parent._wrap.columns[key]
else:
return key
return self.columns[key]
def wrap(self, adapter):
ac = self.__class__.__new__(self.__class__)
ac.__dict__.update(self.__dict__)
ac._wrap = adapter
ac.columns = util.populate_column_dict(ac._locate_col)
if ac.include_fn or ac.exclude_fn:
ac.columns = self._IncludeExcludeMapping(ac, ac.columns)
return ac
def traverse(self, obj):
return self.columns[obj]
adapt_clause = traverse
adapt_list = ClauseAdapter.copy_and_process
def _locate_col(self, col):
c = ClauseAdapter.traverse(self, col)
if self._wrap:
c2 = self._wrap._locate_col(c)
if c2 is not None:
c = c2
if self.adapt_required and c is col:
return None
c._allow_label_resolve = self.allow_label_resolve
return c
def __getstate__(self):
d = self.__dict__.copy()
del d['columns']
return d
def __setstate__(self, state):
self.__dict__.update(state)
self.columns = util.PopulateDict(self._locate_col)
|
fuhongliang/odoo
|
refs/heads/8.0
|
addons/hr_timesheet/__init__.py
|
410
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_timesheet
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jnovinger/peewee
|
refs/heads/master
|
playhouse/kv.py
|
15
|
from base64 import b64decode
from base64 import b64encode
import operator
import pickle
try:
import simplejson as json
except ImportError:
import json
from peewee import *
from peewee import Node
try:
from playhouse.apsw_ext import APSWDatabase
def KeyValueDatabase(db_name, **kwargs):
return APSWDatabase(db_name, **kwargs)
except ImportError:
def KeyValueDatabase(db_name, **kwargs):
return SqliteDatabase(db_name, check_same_thread=False, **kwargs)
Sentinel = type('Sentinel', (object,), {})
key_value_db = KeyValueDatabase(':memory:', threadlocals=False)
class PickleField(BlobField):
def db_value(self, value):
return b64encode(pickle.dumps(value))
def python_value(self, value):
return pickle.loads(b64decode(value))
class JSONField(TextField):
def db_value(self, value):
return json.dumps(value)
def python_value(self, value):
if value is not None:
return json.loads(value)
class KeyStore(object):
"""
Rich dictionary with support for storing a wide variety of data types.
:param peewee.Field value_field: Field instance used to store values.
:param boolean ordered: Whether keys should be returned in sorted order.
:param Database database: Database to store keys/values in; defaults to an
    in-memory SQLite database.
"""
def __init__(self, value_field, ordered=False, database=None):
self._value_field = value_field
self._ordered = ordered
self._database = database or key_value_db
self._compiler = self._database.compiler()
self.model = self.create_model()
self.key = self.model.key
self.value = self.model.value
self._database.create_table(self.model, True)
self._native_upsert = isinstance(self._database, SqliteDatabase)
def create_model(self):
class KVModel(Model):
key = CharField(max_length=255, primary_key=True)
value = self._value_field
class Meta:
database = self._database
return KVModel
def query(self, *select):
query = self.model.select(*select).tuples()
if self._ordered:
query = query.order_by(self.key)
return query
def convert_node(self, node):
if not isinstance(node, Node):
return (self.key == node), True
return node, False
def __contains__(self, key):
node, _ = self.convert_node(key)
return self.model.select().where(node).exists()
def __len__(self):
return self.model.select().count()
def __getitem__(self, node):
converted, is_single = self.convert_node(node)
result = self.query(self.value).where(converted)
item_getter = operator.itemgetter(0)
result = [item_getter(val) for val in result]
if len(result) == 0 and is_single:
raise KeyError(node)
elif is_single:
return result[0]
return result
def _upsert(self, key, value):
self.model.insert(**{
self.key.name: key,
self.value.name: value}).upsert().execute()
def __setitem__(self, node, value):
if isinstance(node, Node):
update = {self.value.name: value}
self.model.update(**update).where(node).execute()
elif self._native_upsert:
self._upsert(node, value)
else:
            try:
                self.model.create(key=node, value=value)
            except:
                # The INSERT fails when the key already exists; roll back and
                # fall back to an UPDATE of the existing row.
                self._database.rollback()
(self.model
.update(**{self.value.name: value})
.where(self.key == node)
.execute())
def __delitem__(self, node):
converted, _ = self.convert_node(node)
self.model.delete().where(converted).execute()
def __iter__(self):
return self.query().execute()
def keys(self):
return map(operator.itemgetter(0), self.query(self.key))
def values(self):
return map(operator.itemgetter(0), self.query(self.value))
def items(self):
return iter(self)
def get(self, k, default=None):
try:
return self[k]
except KeyError:
return default
def pop(self, k, default=Sentinel):
with self._database.transaction():
node, is_single = self.convert_node(k)
try:
res = self[k]
except KeyError:
if default is Sentinel:
raise
return default
del(self[node])
return res
def clear(self):
self.model.delete().execute()
class PickledKeyStore(KeyStore):
def __init__(self, ordered=False, database=None):
super(PickledKeyStore, self).__init__(PickleField(), ordered, database)
class JSONKeyStore(KeyStore):
def __init__(self, ordered=False, database=None):
field = JSONField(null=True)
super(JSONKeyStore, self).__init__(field, ordered, database)
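# A minimal usage sketch of the dictionary-style API above (keys and values
# are illustrative only; assumes the default in-memory SQLite database
# created at import time).
if __name__ == '__main__':
    kv = JSONKeyStore(ordered=True)
    kv['greeting'] = {'text': 'hello', 'lang': 'en'}  # insert (or upsert)
    kv['answer'] = 42
    print(kv['greeting'])             # {'text': 'hello', 'lang': 'en'}
    print('answer' in kv)             # True
    print(list(kv.keys()))            # ['answer', 'greeting'] (sorted)
    del kv['answer']
    print(kv.get('answer', 'gone'))   # 'gone'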
|
mbayon/TFG-MachineLearning
|
refs/heads/master
|
venv/lib/python3.6/site-packages/django/core/servers/basehttp.py
|
44
|
"""
HTTP server that implements the Python WSGI protocol (PEP 333, rev 1.21).
Based on wsgiref.simple_server which is part of the standard library since 2.5.
This is a simple server for use in testing or debugging Django apps. It hasn't
been reviewed for security issues. DON'T USE IT FOR PRODUCTION USE!
"""
from __future__ import unicode_literals
import logging
import socket
import sys
from wsgiref import simple_server
from django.core.exceptions import ImproperlyConfigured
from django.core.wsgi import get_wsgi_application
from django.utils import six
from django.utils.module_loading import import_string
from django.utils.six.moves import socketserver
__all__ = ('WSGIServer', 'WSGIRequestHandler')
logger = logging.getLogger('django.server')
def get_internal_wsgi_application():
"""
Loads and returns the WSGI application as configured by the user in
``settings.WSGI_APPLICATION``. With the default ``startproject`` layout,
this will be the ``application`` object in ``projectname/wsgi.py``.
This function, and the ``WSGI_APPLICATION`` setting itself, are only useful
for Django's internal server (runserver); external WSGI servers should just
be configured to point to the correct application object directly.
If settings.WSGI_APPLICATION is not set (is ``None``), we just return
whatever ``django.core.wsgi.get_wsgi_application`` returns.
"""
from django.conf import settings
app_path = getattr(settings, 'WSGI_APPLICATION')
if app_path is None:
return get_wsgi_application()
try:
return import_string(app_path)
except ImportError as e:
msg = (
"WSGI application '%(app_path)s' could not be loaded; "
"Error importing module: '%(exception)s'" % ({
'app_path': app_path,
'exception': e,
})
)
six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg),
sys.exc_info()[2])
def is_broken_pipe_error():
    exc_type, exc_value = sys.exc_info()[:2]
    # errno 32 is EPIPE ("Broken pipe"): the client closed the connection
    # before the server finished writing its response.
    return issubclass(exc_type, socket.error) and exc_value.args[0] == 32
class WSGIServer(simple_server.WSGIServer, object):
"""BaseHTTPServer that implements the Python WSGI protocol"""
request_queue_size = 10
def __init__(self, *args, **kwargs):
if kwargs.pop('ipv6', False):
self.address_family = socket.AF_INET6
self.allow_reuse_address = kwargs.pop('allow_reuse_address', True)
super(WSGIServer, self).__init__(*args, **kwargs)
def handle_error(self, request, client_address):
if is_broken_pipe_error():
logger.info("- Broken pipe from %s\n", client_address)
else:
super(WSGIServer, self).handle_error(request, client_address)
# Inheriting from object required on Python 2.
class ServerHandler(simple_server.ServerHandler, object):
def handle_error(self):
# Ignore broken pipe errors, otherwise pass on
if not is_broken_pipe_error():
super(ServerHandler, self).handle_error()
class WSGIRequestHandler(simple_server.WSGIRequestHandler, object):
def address_string(self):
# Short-circuit parent method to not call socket.getfqdn
return self.client_address[0]
def log_message(self, format, *args):
extra = {
'request': self.request,
'server_time': self.log_date_time_string(),
}
if args[1][0] == '4':
# 0x16 = Handshake, 0x03 = SSL 3.0 or TLS 1.x
if args[0].startswith(str('\x16\x03')):
extra['status_code'] = 500
logger.error(
"You're accessing the development server over HTTPS, but "
"it only supports HTTP.\n", extra=extra,
)
return
if args[1].isdigit() and len(args[1]) == 3:
status_code = int(args[1])
extra['status_code'] = status_code
if status_code >= 500:
level = logger.error
elif status_code >= 400:
level = logger.warning
else:
level = logger.info
else:
level = logger.info
level(format, *args, extra=extra)
def get_environ(self):
# Strip all headers with underscores in the name before constructing
# the WSGI environ. This prevents header-spoofing based on ambiguity
# between underscores and dashes both normalized to underscores in WSGI
# env vars. Nginx and Apache 2.4+ both do this as well.
for k, v in self.headers.items():
if '_' in k:
del self.headers[k]
return super(WSGIRequestHandler, self).get_environ()
def handle(self):
"""Copy of WSGIRequestHandler, but with different ServerHandler"""
self.raw_requestline = self.rfile.readline(65537)
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
self.send_error(414)
return
if not self.parse_request(): # An error code has been sent, just exit
return
handler = ServerHandler(
self.rfile, self.wfile, self.get_stderr(), self.get_environ()
)
handler.request_handler = self # backpointer for logging
handler.run(self.server.get_app())
def run(addr, port, wsgi_handler, ipv6=False, threading=False, server_cls=WSGIServer):
server_address = (addr, port)
if threading:
httpd_cls = type(str('WSGIServer'), (socketserver.ThreadingMixIn, server_cls), {})
else:
httpd_cls = server_cls
httpd = httpd_cls(server_address, WSGIRequestHandler, ipv6=ipv6)
if threading:
# ThreadingMixIn.daemon_threads indicates how threads will behave on an
# abrupt shutdown; like quitting the server by the user or restarting
# by the auto-reloader. True means the server will not wait for thread
# termination before it quits. This will make auto-reloader faster
# and will prevent the need to kill the server manually if a thread
# isn't terminating correctly.
httpd.daemon_threads = True
httpd.set_app(wsgi_handler)
httpd.serve_forever()
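# A minimal usage sketch of the helpers above, wired together the way
# runserver does: resolve the WSGI application from settings, then serve it.
# The address, port and threading flag are illustrative; a configured
# DJANGO_SETTINGS_MODULE is assumed.
if __name__ == '__main__':
    application = get_internal_wsgi_application()
    run('127.0.0.1', 8000, application, ipv6=False, threading=True)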
|
sparky-making-code-fun/DjangoBoots
|
refs/heads/master
|
boots/fields/tests/tests.py
|
1
|
# coding=utf-8
"""
Widget Tests
"""
import django.test
from boots.fields import widgets
from boots.fields import fields as boot_fields
class TestWidgets(django.test.TestCase):
def test_calendar_widget_render(self):
"""
does our calendar widget have an ID?
"""
cal_widg = widgets.CalendarWidget()
result = cal_widg.render('testname', '12-12-2015')
needle = """<input type="text" date="12-12-2015"
class="CalendarWidget" name="testname" >"""
self.assertInHTML(needle, result, 1)
def test_date_range_widget(self):
dr_widg = widgets.DateRangeWidget()
result = dr_widg.render('testwidget', '12-12-2015')
needle1 = """<input class="DateRangeWidget"
name="testwidget_0" type="text" value="2015-12-12" />"""
needle2 = """<input class="DateRangeWidget"
name="testwidget_1" type="text" value="2015-12-12" />"""
self.assertInHTML(needle1, result, count=1)
self.assertInHTML(needle2, result, count=1)
def test_right_side_add_on(self):
widge = widgets.RightSideAddOnWidget()
expected = "anystringyouwant"
result = widge.render('bubba', '@home',
attrs={"add_on_text": expected})
self.assertIn(expected, result)
def test_dropdown_widget(self):
data = [{'href': 'http://www.google.com', 'label': 'Google Thing'},
{'href': 'http://fark.com', 'label': 'Fark Thing'}]
dd_widg = widgets.DropDownWidget(dropdown={'actions': data},
attrs={'id': 'testid'})
result = dd_widg.render('fake_name', 'fake_value', attrs=dd_widg.attrs)
needle = '<li><a href="http://fark.com">Fark Thing</a></li>'
self.assertInHTML(needle, result)
class TestFields(django.test.TestCase):
def test_dropdown_field(self):
data = {'actions': [{'href': 'http://www.google.com',
'label': 'Google Thing'},
{'href': 'http://fark.com', 'label': 'Fark Thing'}]
}
ddf = boot_fields.DropDownField(data, max_length=2)
needle = '<li><a href="http://fark.com">Fark Thing</a></li>'
result = ddf.widget.render('tstname', 'testvalue',
attrs={'id': 'testid'})
self.assertInHTML(needle, result)
def test_change_at_symbol_field(self):
expected = "somestring"
f = boot_fields.AtSymbolInputField(expected, symbol='!')
result = f.widget.render("name", "value")
needle = """<span class="input-group-addon"
id="basic-addon2">!{0}</span>""".format(expected)
self.assertInHTML(needle, result)
def test_at_symbol_field(self):
expected = "somestring"
f = boot_fields.AtSymbolInputField(expected)
result = f.widget.render("name", "value")
needle = """<span class="input-group-addon"
id="basic-addon2">@{0}</span>""".format(expected)
self.assertInHTML(needle, result)
|
philanthropy-u/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/oauth_dispatch/tests/test_dop_adapter.py
|
3
|
"""
Tests for DOP Adapter
"""
from datetime import timedelta
import ddt
from django.test import TestCase
from django.utils.timezone import now
from provider.oauth2 import models
from provider import constants
from student.tests.factories import UserFactory
from ..adapters import DOPAdapter
from .constants import DUMMY_REDIRECT_URL
@ddt.ddt
class DOPAdapterTestCase(TestCase):
"""
Test class for DOPAdapter.
"""
adapter = DOPAdapter()
def setUp(self):
super(DOPAdapterTestCase, self).setUp()
self.user = UserFactory()
self.public_client = self.adapter.create_public_client(
name='public client',
user=self.user,
redirect_uri=DUMMY_REDIRECT_URL,
client_id='public-client-id',
)
self.confidential_client = self.adapter.create_confidential_client(
name='confidential client',
user=self.user,
redirect_uri=DUMMY_REDIRECT_URL,
client_id='confidential-client-id',
)
@ddt.data(
('confidential', constants.CONFIDENTIAL),
('public', constants.PUBLIC),
)
@ddt.unpack
def test_create_client(self, client_name, client_type):
client = getattr(self, '{}_client'.format(client_name))
self.assertIsInstance(client, models.Client)
self.assertEqual(client.client_id, '{}-client-id'.format(client_name))
self.assertEqual(client.client_type, client_type)
def test_get_client(self):
client = self.adapter.get_client(client_type=constants.CONFIDENTIAL)
self.assertIsInstance(client, models.Client)
self.assertEqual(client.client_type, constants.CONFIDENTIAL)
def test_get_client_not_found(self):
with self.assertRaises(models.Client.DoesNotExist):
self.adapter.get_client(client_id='not-found')
def test_get_client_for_token(self):
token = models.AccessToken(
user=self.user,
client=self.public_client,
)
self.assertEqual(self.adapter.get_client_for_token(token), self.public_client)
def test_get_access_token(self):
token = self.adapter.create_access_token_for_test(
'token-id',
client=self.public_client,
user=self.user,
expires=now() + timedelta(days=30),
)
self.assertEqual(self.adapter.get_access_token(token_string='token-id'), token)
|
msincenselee/vnpy
|
refs/heads/vnpy2
|
vnpy/chart/base.py
|
1
|
from vnpy.trader.ui import QtGui
WHITE_COLOR = (255, 255, 255)
BLACK_COLOR = (0, 0, 0)
GREY_COLOR = (100, 100, 100)
UP_COLOR = (255, 0, 0)
DOWN_COLOR = (0, 255, 50)
CURSOR_COLOR = (255, 245, 162)
PEN_WIDTH = 1
BAR_WIDTH = 0.4
AXIS_WIDTH = 0.8
NORMAL_FONT = QtGui.QFont("Arial", 9)
def to_int(value: float) -> int:
""""""
return int(round(value, 0))
|
ollej/piapi
|
refs/heads/master
|
pijobs/buildsjob.py
|
1
|
import scrollphat
from pijobs.scrollphatjob import ScrollphatJob
from pyteamcity import TeamCity
class BuildsJob(ScrollphatJob):
def __init__(self, options):
self.tc = TeamCity('dashing', 'l,sA-j2s9a', 'https://ci.avidity.se/httpAuth/app/rest/')
def run(self):
print(self.tc.get_projects())
BuildsJob({}).run()
|
yodalee/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/localpaths.py
|
40
|
import os
import sys
here = os.path.abspath(os.path.split(__file__)[0])
repo_root = os.path.abspath(os.path.join(here, os.pardir))
sys.path.insert(0, os.path.join(repo_root, "tools"))
sys.path.insert(0, os.path.join(repo_root, "tools", "six"))
sys.path.insert(0, os.path.join(repo_root, "tools", "html5lib"))
sys.path.insert(0, os.path.join(repo_root, "tools", "wptserve"))
sys.path.insert(0, os.path.join(repo_root, "tools", "pywebsocket", "src"))
sys.path.insert(0, os.path.join(repo_root, "tools", "py"))
sys.path.insert(0, os.path.join(repo_root, "tools", "pytest"))
sys.path.insert(0, os.path.join(repo_root, "tools", "webdriver"))
|
andrewsmedina/django
|
refs/heads/master
|
tests/contenttypes_tests/models.py
|
58
|
from __future__ import absolute_import, unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
def get_absolute_url(self):
return '/views/authors/%s/' % self.id
@python_2_unicode_compatible
class Article(models.Model):
title = models.CharField(max_length=100)
slug = models.SlugField()
author = models.ForeignKey(Author)
date_created = models.DateTimeField()
def __str__(self):
return self.title
|
sahmed95/sympy
|
refs/heads/master
|
sympy/series/residues.py
|
84
|
"""
This module implements the Residue function and related tools for working
with residues.
"""
from __future__ import print_function, division
from sympy import sympify
from sympy.utilities.timeutils import timethis
@timethis('residue')
def residue(expr, x, x0):
"""
Finds the residue of ``expr`` at the point x=x0.
The residue is defined as the coefficient of 1/(x-x0) in the power series
expansion about x=x0.
Examples
========
>>> from sympy import Symbol, residue, sin
>>> x = Symbol("x")
>>> residue(1/x, x, 0)
1
>>> residue(1/x**2, x, 0)
0
>>> residue(2/sin(x), x, 0)
2
This function is essential for the Residue Theorem [1].
References
==========
1. http://en.wikipedia.org/wiki/Residue_theorem
"""
# The current implementation uses series expansion to
# calculate it. A more general implementation is explained in
# the section 5.6 of the Bronstein's book {M. Bronstein:
# Symbolic Integration I, Springer Verlag (2005)}. For purely
# rational functions, the algorithm is much easier. See
# sections 2.4, 2.5, and 2.7 (this section actually gives an
# algorithm for computing any Laurent series coefficient for
# a rational function). The theory in section 2.4 will help to
# understand why the resultant works in the general algorithm.
# For the definition of a resultant, see section 1.4 (and any
# previous sections for more review).
from sympy import collect, Mul, Order, S
expr = sympify(expr)
if x0 != 0:
expr = expr.subs(x, x + x0)
for n in [0, 1, 2, 4, 8, 16, 32]:
if n == 0:
s = expr.series(x, n=0)
else:
s = expr.nseries(x, n=n)
if s.has(Order) and s.removeO() == 0:
# bug in nseries
continue
if not s.has(Order) or s.getn() >= 0:
break
if s.has(Order) and s.getn() < 0:
raise NotImplementedError('Bug in nseries?')
s = collect(s.removeO(), x)
if s.is_Add:
args = s.args
else:
args = [s]
res = S(0)
for arg in args:
c, m = arg.as_coeff_mul(x)
m = Mul(*m)
if not (m == 1 or m == x or (m.is_Pow and m.exp.is_Integer)):
raise NotImplementedError('term of unexpected form: %s' % m)
if m == 1/x:
res += c
return res
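# A small worked sketch of the residue-theorem use case referenced in the
# docstring: a contour integral around an enclosed pole equals 2*pi*I times
# the residue there. The function exp(z)/z**3 is illustrative only.
if __name__ == '__main__':
    from sympy import Symbol, exp, pi, I
    z = Symbol('z')
    # exp(z)/z**3 = 1/z**3 + 1/z**2 + 1/(2*z) + ..., so the residue at 0 is 1/2
    r = residue(exp(z) / z**3, z, 0)
    print(r)               # 1/2
    print(2 * pi * I * r)  # contour integral around the origin: I*pi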
|
FaustoMora/Proyecto1_Problemas
|
refs/heads/master
|
airbnb_mexico/airbnb_mexico/pipelines.py
|
1
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class AirbnbMexicoPipeline(object):
def process_item(self, item, spider):
return item
|
schatt/zulip
|
refs/heads/master
|
zerver/test_events.py
|
116
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from zerver.models import (
get_client, get_realm, get_stream, get_user_profile_by_email,
Message, Recipient,
)
from zerver.lib.actions import (
apply_events,
create_stream_if_needed,
do_add_alert_words,
do_add_realm_emoji,
do_add_realm_filter,
do_change_avatar_source,
do_change_default_all_public_streams,
do_change_default_events_register_stream,
do_change_default_sending_stream,
do_change_full_name,
do_change_is_admin,
do_change_stream_description,
do_create_user,
do_deactivate_user,
do_regenerate_api_key,
do_remove_alert_words,
do_remove_realm_emoji,
do_remove_realm_filter,
do_remove_subscription,
do_rename_stream,
do_set_muted_topics,
do_set_realm_name,
do_set_realm_restricted_to_domain,
do_set_realm_invite_required,
do_set_realm_invite_by_admins_only,
do_update_message,
do_update_pointer,
do_change_twenty_four_hour_time,
do_change_left_side_userlist,
fetch_initial_state_data,
)
from zerver.lib.event_queue import allocate_client_descriptor
from zerver.lib.test_helpers import AuthedTestCase, POSTRequestMock
from zerver.lib.validator import (
check_bool, check_dict, check_int, check_list, check_string,
equals, check_none_or
)
from zerver.views import _default_all_public_streams, _default_narrow
from zerver.tornadoviews import get_events_backend
from collections import OrderedDict
import ujson
class GetEventsTest(AuthedTestCase):
def tornado_call(self, view_func, user_profile, post_data,
callback=None):
request = POSTRequestMock(post_data, user_profile, callback)
return view_func(request, user_profile)
def test_get_events(self):
email = "hamlet@zulip.com"
recipient_email = "othello@zulip.com"
user_profile = get_user_profile_by_email(email)
recipient_user_profile = get_user_profile_by_email(recipient_email)
self.login(email)
result = self.tornado_call(get_events_backend, user_profile,
{"apply_markdown": ujson.dumps(True),
"event_types": ujson.dumps(["message"]),
"user_client": "website",
"dont_block": ujson.dumps(True),
})
self.assert_json_success(result)
queue_id = ujson.loads(result.content)["queue_id"]
recipient_result = self.tornado_call(get_events_backend, recipient_user_profile,
{"apply_markdown": ujson.dumps(True),
"event_types": ujson.dumps(["message"]),
"user_client": "website",
"dont_block": ujson.dumps(True),
})
self.assert_json_success(recipient_result)
recipient_queue_id = ujson.loads(recipient_result.content)["queue_id"]
result = self.tornado_call(get_events_backend, user_profile,
{"queue_id": queue_id,
"user_client": "website",
"last_event_id": -1,
"dont_block": ujson.dumps(True),
})
events = ujson.loads(result.content)["events"]
self.assert_json_success(result)
self.assert_length(events, 0, True)
local_id = 10.01
self.send_message(email, recipient_email, Recipient.PERSONAL, "hello", local_id=local_id, sender_queue_id=queue_id)
result = self.tornado_call(get_events_backend, user_profile,
{"queue_id": queue_id,
"user_client": "website",
"last_event_id": -1,
"dont_block": ujson.dumps(True),
})
events = ujson.loads(result.content)["events"]
self.assert_json_success(result)
self.assert_length(events, 1, True)
self.assertEqual(events[0]["type"], "message")
self.assertEqual(events[0]["message"]["sender_email"], email)
self.assertEqual(events[0]["local_message_id"], local_id)
self.assertEqual(events[0]["message"]["display_recipient"][0]["is_mirror_dummy"], False)
self.assertEqual(events[0]["message"]["display_recipient"][1]["is_mirror_dummy"], False)
last_event_id = events[0]["id"]
local_id += 0.01
self.send_message(email, recipient_email, Recipient.PERSONAL, "hello", local_id=local_id, sender_queue_id=queue_id)
result = self.tornado_call(get_events_backend, user_profile,
{"queue_id": queue_id,
"user_client": "website",
"last_event_id": last_event_id,
"dont_block": ujson.dumps(True),
})
events = ujson.loads(result.content)["events"]
self.assert_json_success(result)
self.assert_length(events, 1, True)
self.assertEqual(events[0]["type"], "message")
self.assertEqual(events[0]["message"]["sender_email"], email)
self.assertEqual(events[0]["local_message_id"], local_id)
# Test that the received message in the receiver's event queue
# exists and does not contain a local id
recipient_result = self.tornado_call(get_events_backend, recipient_user_profile,
{"queue_id": recipient_queue_id,
"user_client": "website",
"last_event_id": -1,
"dont_block": ujson.dumps(True),
})
recipient_events = ujson.loads(recipient_result.content)["events"]
self.assert_json_success(recipient_result)
self.assertEqual(len(recipient_events), 2)
self.assertEqual(recipient_events[0]["type"], "message")
self.assertEqual(recipient_events[0]["message"]["sender_email"], email)
self.assertTrue("local_message_id" not in recipient_events[0])
self.assertEqual(recipient_events[1]["type"], "message")
self.assertEqual(recipient_events[1]["message"]["sender_email"], email)
self.assertTrue("local_message_id" not in recipient_events[1])
def test_get_events_narrow(self):
email = "hamlet@zulip.com"
user_profile = get_user_profile_by_email(email)
self.login(email)
result = self.tornado_call(get_events_backend, user_profile,
{"apply_markdown": ujson.dumps(True),
"event_types": ujson.dumps(["message"]),
"narrow": ujson.dumps([["stream", "denmark"]]),
"user_client": "website",
"dont_block": ujson.dumps(True),
})
self.assert_json_success(result)
queue_id = ujson.loads(result.content)["queue_id"]
result = self.tornado_call(get_events_backend, user_profile,
{"queue_id": queue_id,
"user_client": "website",
"last_event_id": -1,
"dont_block": ujson.dumps(True),
})
events = ujson.loads(result.content)["events"]
self.assert_json_success(result)
self.assert_length(events, 0, True)
self.send_message(email, "othello@zulip.com", Recipient.PERSONAL, "hello")
self.send_message(email, "Denmark", Recipient.STREAM, "hello")
result = self.tornado_call(get_events_backend, user_profile,
{"queue_id": queue_id,
"user_client": "website",
"last_event_id": -1,
"dont_block": ujson.dumps(True),
})
events = ujson.loads(result.content)["events"]
self.assert_json_success(result)
self.assert_length(events, 1, True)
self.assertEqual(events[0]["type"], "message")
self.assertEqual(events[0]["message"]["display_recipient"], "Denmark")
class EventsRegisterTest(AuthedTestCase):
maxDiff = None
user_profile = get_user_profile_by_email("hamlet@zulip.com")
bot = get_user_profile_by_email("welcome-bot@zulip.com")
def create_bot(self, email):
return do_create_user(email, '123',
get_realm('zulip.com'), 'Test Bot', 'test',
bot=True, bot_owner=self.user_profile)
def realm_bot_schema(self, field_name, check):
return check_dict([
('type', equals('realm_bot')),
('op', equals('update')),
('bot', check_dict([
('email', check_string),
(field_name, check),
])),
])
def do_test(self, action, event_types=None):
client = allocate_client_descriptor(self.user_profile.id, self.user_profile.realm.id,
event_types,
get_client("website"), True, False, 600, [])
# hybrid_state = initial fetch state + re-applying events triggered by our action
# normal_state = do action then fetch at the end (the "normal" code path)
hybrid_state = fetch_initial_state_data(self.user_profile, event_types, "")
action()
events = client.event_queue.contents()
self.assertTrue(len(events) > 0)
apply_events(hybrid_state, events, self.user_profile)
normal_state = fetch_initial_state_data(self.user_profile, event_types, "")
self.match_states(hybrid_state, normal_state)
return events
def assert_on_error(self, error):
if error:
raise AssertionError(error)
def match_states(self, state1, state2):
def normalize(state):
state['realm_users'] = {u['email']: u for u in state['realm_users']}
state['subscriptions'] = {u['name']: u for u in state['subscriptions']}
state['unsubscribed'] = {u['name']: u for u in state['unsubscribed']}
if 'realm_bots' in state:
state['realm_bots'] = {u['email']: u for u in state['realm_bots']}
normalize(state1)
normalize(state2)
self.assertEqual(state1, state2)
def test_send_message_events(self):
schema_checker = check_dict([
('type', equals('message')),
('flags', check_list(None)),
('message', check_dict([
('avatar_url', check_string),
('client', check_string),
('content', check_string),
('content_type', equals('text/html')),
('display_recipient', check_string),
('gravatar_hash', check_string),
('id', check_int),
('recipient_id', check_int),
('sender_domain', check_string),
('sender_email', check_string),
('sender_full_name', check_string),
('sender_id', check_int),
('sender_short_name', check_string),
('subject', check_string),
('subject_links', check_list(None)),
('timestamp', check_int),
('type', check_string),
])),
])
events = self.do_test(lambda: self.send_message("hamlet@zulip.com", "Verona", Recipient.STREAM, "hello"))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
schema_checker = check_dict([
('type', equals('update_message')),
('flags', check_list(None)),
('content', check_string),
('edit_timestamp', check_int),
('flags', check_list(None)),
('message_id', check_int),
('message_ids', check_list(check_int)),
('orig_content', check_string),
('orig_rendered_content', check_string),
('orig_subject', check_string),
('propagate_mode', check_string),
('rendered_content', check_string),
('sender', check_string),
('stream_id', check_int),
('subject', check_string),
('subject_links', check_list(None)),
# There is also a timestamp field in the event, but we ignore it, as
# it's kind of an unwanted but harmless side effect of calling log_event.
])
message_id = Message.objects.order_by('-id')[0].id
topic = 'new_topic'
propagate_mode = 'change_all'
content = 'new content'
events = self.do_test(lambda: do_update_message(self.user_profile, message_id, topic, propagate_mode, content))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_pointer_events(self):
schema_checker = check_dict([
('type', equals('pointer')),
('pointer', check_int)
])
events = self.do_test(lambda: do_update_pointer(self.user_profile, 1500))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_register_events(self):
realm_user_add_checker = check_dict([
('type', equals('realm_user')),
('op', equals('add')),
('person', check_dict([
('email', check_string),
('full_name', check_string),
('is_admin', check_bool),
('is_bot', check_bool),
])),
])
stream_create_checker = check_dict([
('type', equals('stream')),
('op', equals('create')),
('streams', check_list(check_dict([
('description', check_string),
('invite_only', check_bool),
('name', check_string),
('stream_id', check_int),
])))
])
events = self.do_test(lambda: self.register("test1", "test1"))
error = realm_user_add_checker('events[0]', events[0])
self.assert_on_error(error)
error = stream_create_checker('events[1]', events[1])
self.assert_on_error(error)
def test_alert_words_events(self):
alert_words_checker = check_dict([
('type', equals('alert_words')),
('alert_words', check_list(check_string)),
])
events = self.do_test(lambda: do_add_alert_words(self.user_profile, ["alert_word"]))
error = alert_words_checker('events[0]', events[0])
self.assert_on_error(error)
events = self.do_test(lambda: do_remove_alert_words(self.user_profile, ["alert_word"]))
error = alert_words_checker('events[0]', events[0])
self.assert_on_error(error)
def test_muted_topics_events(self):
muted_topics_checker = check_dict([
('type', equals('muted_topics')),
('muted_topics', check_list(check_list(check_string, 2))),
])
events = self.do_test(lambda: do_set_muted_topics(self.user_profile, [["Denmark", "topic"]]))
error = muted_topics_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_full_name(self):
schema_checker = check_dict([
('type', equals('realm_user')),
('op', equals('update')),
('person', check_dict([
('email', check_string),
('full_name', check_string),
])),
])
events = self.do_test(lambda: do_change_full_name(self.user_profile, 'Sir Hamlet'))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_realm_name(self):
schema_checker = check_dict([
('type', equals('realm')),
('op', equals('update')),
('property', equals('name')),
('value', check_string),
])
events = self.do_test(lambda: do_set_realm_name(self.user_profile.realm, 'New Realm Name'))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_realm_restricted_to_domain(self):
schema_checker = check_dict([
('type', equals('realm')),
('op', equals('update')),
('property', equals('restricted_to_domain')),
('value', check_bool),
])
# The first True is probably a noop, then we get transitions in both directions.
for restricted_to_domain in (True, False, True):
events = self.do_test(lambda: do_set_realm_restricted_to_domain(self.user_profile.realm, restricted_to_domain))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_realm_invite_required(self):
schema_checker = check_dict([
('type', equals('realm')),
('op', equals('update')),
('property', equals('invite_required')),
('value', check_bool),
])
# The first False is probably a noop, then we get transitions in both directions.
for invite_required in (False, True, False):
events = self.do_test(lambda: do_set_realm_invite_required(self.user_profile.realm, invite_required))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_realm_invite_by_admins_only(self):
schema_checker = check_dict([
('type', equals('realm')),
('op', equals('update')),
('property', equals('invite_by_admins_only')),
('value', check_bool),
])
# The first False is probably a noop, then we get transitions in both directions.
for invite_by_admins_only in (False, True, False):
events = self.do_test(lambda: do_set_realm_invite_by_admins_only(self.user_profile.realm, invite_by_admins_only))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_is_admin(self):
schema_checker = check_dict([
('type', equals('realm_user')),
('op', equals('update')),
('person', check_dict([
('email', check_string),
('is_admin', check_bool),
])),
])
# The first False is probably a noop, then we get transitions in both directions.
for is_admin in [False, True, False]:
events = self.do_test(lambda: do_change_is_admin(self.user_profile, is_admin))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_twenty_four_hour_time(self):
schema_checker = check_dict([
('type', equals('update_display_settings')),
('setting_name', equals('twenty_four_hour_time')),
('user', check_string),
('setting', check_bool),
])
# The first False is probably a noop, then we get transitions in both directions.
for setting_value in [False, True, False]:
events = self.do_test(lambda: do_change_twenty_four_hour_time(self.user_profile, setting_value))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_left_side_userlist(self):
schema_checker = check_dict([
('type', equals('update_display_settings')),
('setting_name', equals('left_side_userlist')),
('user', check_string),
('setting', check_bool),
])
# The first False is probably a noop, then we get transitions in both directions.
for setting_value in [False, True, False]:
events = self.do_test(lambda: do_change_left_side_userlist(self.user_profile, setting_value))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_realm_emoji_events(self):
schema_checker = check_dict([
('type', equals('realm_emoji')),
('op', equals('update')),
('realm_emoji', check_dict([])),
])
events = self.do_test(lambda: do_add_realm_emoji(get_realm("zulip.com"), "my_emoji",
"https://realm.com/my_emoji"))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
events = self.do_test(lambda: do_remove_realm_emoji(get_realm("zulip.com"), "my_emoji"))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_realm_filter_events(self):
schema_checker = check_dict([
('type', equals('realm_filters')),
('realm_filters', check_list(None)), # TODO: validate tuples in the list
])
events = self.do_test(lambda: do_add_realm_filter(get_realm("zulip.com"), "#[123]",
"https://realm.com/my_realm_filter/%(id)s"))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
self.do_test(lambda: do_remove_realm_filter(get_realm("zulip.com"), "#[123]"))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_create_bot(self):
bot_created_checker = check_dict([
('type', equals('realm_bot')),
('op', equals('add')),
('bot', check_dict([
('email', check_string),
('full_name', check_string),
('api_key', check_string),
('default_sending_stream', check_none_or(check_string)),
('default_events_register_stream', check_none_or(check_string)),
('default_all_public_streams', check_bool),
('avatar_url', check_string),
])),
])
action = lambda: self.create_bot('test-bot@zulip.com')
events = self.do_test(action)
error = bot_created_checker('events[1]', events[1])
self.assert_on_error(error)
def test_change_bot_full_name(self):
action = lambda: do_change_full_name(self.bot, 'New Bot Name')
events = self.do_test(action)
error = self.realm_bot_schema('full_name', check_string)('events[1]', events[1])
self.assert_on_error(error)
def test_regenerate_bot_api_key(self):
action = lambda: do_regenerate_api_key(self.bot)
events = self.do_test(action)
error = self.realm_bot_schema('api_key', check_string)('events[0]', events[0])
self.assert_on_error(error)
def test_change_bot_avatar_source(self):
action = lambda: do_change_avatar_source(self.bot, self.bot.AVATAR_FROM_USER)
events = self.do_test(action)
error = self.realm_bot_schema('avatar_url', check_string)('events[0]', events[0])
self.assert_on_error(error)
def test_change_bot_default_all_public_streams(self):
action = lambda: do_change_default_all_public_streams(self.bot, True)
events = self.do_test(action)
error = self.realm_bot_schema('default_all_public_streams', check_bool)('events[0]', events[0])
self.assert_on_error(error)
def test_change_bot_default_sending_stream(self):
stream = get_stream("Rome", self.bot.realm)
action = lambda: do_change_default_sending_stream(self.bot, stream)
events = self.do_test(action)
error = self.realm_bot_schema('default_sending_stream', check_string)('events[0]', events[0])
self.assert_on_error(error)
def test_change_bot_default_events_register_stream(self):
stream = get_stream("Rome", self.bot.realm)
action = lambda: do_change_default_events_register_stream(self.bot, stream)
events = self.do_test(action)
error = self.realm_bot_schema('default_events_register_stream', check_string)('events[0]', events[0])
self.assert_on_error(error)
def test_do_deactivate_user(self):
bot_deactivate_checker = check_dict([
('type', equals('realm_bot')),
('op', equals('remove')),
('bot', check_dict([
('email', check_string),
('full_name', check_string),
])),
])
bot = self.create_bot('foo-bot@zulip.com')
action = lambda: do_deactivate_user(bot)
events = self.do_test(action)
error = bot_deactivate_checker('events[1]', events[1])
self.assert_on_error(error)
def test_rename_stream(self):
realm = get_realm('zulip.com')
stream, _ = create_stream_if_needed(realm, 'old_name')
new_name = u'stream with a brand new name'
self.subscribe_to_stream(self.user_profile.email, stream.name)
action = lambda: do_rename_stream(realm, stream.name, new_name)
events = self.do_test(action)
schema_checker = check_dict([
('type', equals('stream')),
('op', equals('update')),
('property', equals('email_address')),
('value', check_string),
('name', equals('old_name')),
])
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
schema_checker = check_dict([
('type', equals('stream')),
('op', equals('update')),
('property', equals('name')),
('value', equals(new_name)),
('name', equals('old_name')),
])
error = schema_checker('events[1]', events[1])
self.assert_on_error(error)
def test_subscribe_events(self):
subscription_schema_checker = check_list(
check_dict([
('color', check_string),
('description', check_string),
('email_address', check_string),
('invite_only', check_bool),
('in_home_view', check_bool),
('name', check_string),
('desktop_notifications', check_bool),
('audible_notifications', check_bool),
('stream_id', check_int),
('subscribers', check_list(check_int)),
])
)
add_schema_checker = check_dict([
('type', equals('subscription')),
('op', equals('add')),
('subscriptions', subscription_schema_checker),
])
remove_schema_checker = check_dict([
('type', equals('subscription')),
('op', equals('remove')),
('subscriptions', check_list(
check_dict([
('name', equals('test_stream')),
('stream_id', check_int),
]),
)),
])
peer_add_schema_checker = check_dict([
('type', equals('subscription')),
('op', equals('peer_add')),
('user_email', check_string),
('subscriptions', check_list(check_string)),
])
peer_remove_schema_checker = check_dict([
('type', equals('subscription')),
('op', equals('peer_remove')),
('user_email', check_string),
('subscriptions', check_list(check_string)),
])
stream_update_schema_checker = check_dict([
('type', equals('stream')),
('op', equals('update')),
('property', equals('description')),
('value', check_string),
('name', check_string),
])
action = lambda: self.subscribe_to_stream("hamlet@zulip.com", "test_stream")
events = self.do_test(action, event_types=["subscription", "realm_user"])
error = add_schema_checker('events[0]', events[0])
self.assert_on_error(error)
action = lambda: self.subscribe_to_stream("othello@zulip.com", "test_stream")
events = self.do_test(action)
error = peer_add_schema_checker('events[0]', events[0])
self.assert_on_error(error)
stream = get_stream("test_stream", self.user_profile.realm)
action = lambda: do_remove_subscription(get_user_profile_by_email("othello@zulip.com"), stream)
events = self.do_test(action)
error = peer_remove_schema_checker('events[0]', events[0])
self.assert_on_error(error)
action = lambda: do_remove_subscription(get_user_profile_by_email("hamlet@zulip.com"), stream)
events = self.do_test(action)
error = remove_schema_checker('events[1]', events[1])
self.assert_on_error(error)
action = lambda: self.subscribe_to_stream("hamlet@zulip.com", "test_stream")
events = self.do_test(action)
error = add_schema_checker('events[1]', events[1])
self.assert_on_error(error)
action = lambda: do_change_stream_description(get_realm('zulip.com'), 'test_stream', u'new description')
events = self.do_test(action)
error = stream_update_schema_checker('events[0]', events[0])
self.assert_on_error(error)
from zerver.lib.event_queue import EventQueue
class EventQueueTest(TestCase):
def test_one_event(self):
queue = EventQueue("1")
queue.push({"type": "pointer",
"pointer": 1,
"timestamp": "1"})
self.assertFalse(queue.empty())
self.assertEqual(queue.contents(),
[{'id': 0,
'type': 'pointer',
"pointer": 1,
"timestamp": "1"}])
def test_event_collapsing(self):
queue = EventQueue("1")
for pointer_val in xrange(1, 10):
queue.push({"type": "pointer",
"pointer": pointer_val,
"timestamp": str(pointer_val)})
self.assertEqual(queue.contents(),
[{'id': 8,
'type': 'pointer',
"pointer": 9,
"timestamp": "9"}])
queue = EventQueue("2")
for pointer_val in xrange(1, 10):
queue.push({"type": "pointer",
"pointer": pointer_val,
"timestamp": str(pointer_val)})
queue.push({"type": "unknown"})
queue.push({"type": "restart", "server_generation": "1"})
for pointer_val in xrange(11, 20):
queue.push({"type": "pointer",
"pointer": pointer_val,
"timestamp": str(pointer_val)})
queue.push({"type": "restart", "server_generation": "2"})
self.assertEqual(queue.contents(),
[{"type": "unknown",
"id": 9,},
{'id': 19,
'type': 'pointer',
"pointer": 19,
"timestamp": "19"},
{"id": 20,
"type": "restart",
"server_generation": "2"}])
for pointer_val in xrange(21, 23):
queue.push({"type": "pointer",
"pointer": pointer_val,
"timestamp": str(pointer_val)})
self.assertEqual(queue.contents(),
[{"type": "unknown",
"id": 9,},
{'id': 19,
'type': 'pointer',
"pointer": 19,
"timestamp": "19"},
{"id": 20,
"type": "restart",
"server_generation": "2"},
{'id': 22,
'type': 'pointer',
"pointer": 22,
"timestamp": "22"},
])
def test_flag_add_collapsing(self):
queue = EventQueue("1")
queue.push({"type": "update_message_flags",
"flag": "read",
"operation": "add",
"all": False,
"messages": [1, 2, 3, 4],
"timestamp": "1"})
queue.push({"type": "update_message_flags",
"flag": "read",
"all": False,
"operation": "add",
"messages": [5, 6],
"timestamp": "1"})
self.assertEqual(queue.contents(),
[{'id': 1,
'type': 'update_message_flags',
"all": False,
"flag": "read",
"operation": "add",
"messages": [1,2,3,4,5,6],
"timestamp": "1"}])
def test_flag_remove_collapsing(self):
queue = EventQueue("1")
queue.push({"type": "update_message_flags",
"flag": "collapsed",
"operation": "remove",
"all": False,
"messages": [1, 2, 3, 4],
"timestamp": "1"})
queue.push({"type": "update_message_flags",
"flag": "collapsed",
"all": False,
"operation": "remove",
"messages": [5, 6],
"timestamp": "1"})
self.assertEqual(queue.contents(),
[{'id': 1,
'type': 'update_message_flags',
"all": False,
"flag": "collapsed",
"operation": "remove",
"messages": [1,2,3,4,5,6],
"timestamp": "1"}])
def test_collapse_event(self):
queue = EventQueue("1")
queue.push({"type": "pointer",
"pointer": 1,
"timestamp": "1"})
queue.push({"type": "unknown",
"timestamp": "1"})
self.assertEqual(queue.contents(),
[{'id': 0,
'type': 'pointer',
"pointer": 1,
"timestamp": "1"},
{'id': 1,
'type': 'unknown',
"timestamp": "1"}])
class TestEventsRegisterAllPublicStreamsDefaults(TestCase):
def setUp(self):
self.email = 'hamlet@zulip.com'
self.user_profile = get_user_profile_by_email(self.email)
def test_use_passed_all_public_true_default_false(self):
self.user_profile.default_all_public_streams = False
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, True)
self.assertTrue(result)
def test_use_passed_all_public_true_default(self):
self.user_profile.default_all_public_streams = True
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, True)
self.assertTrue(result)
def test_use_passed_all_public_false_default_false(self):
self.user_profile.default_all_public_streams = False
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, False)
self.assertFalse(result)
def test_use_passed_all_public_false_default_true(self):
self.user_profile.default_all_public_streams = True
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, False)
self.assertFalse(result)
def test_use_true_default_for_none(self):
self.user_profile.default_all_public_streams = True
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, None)
self.assertTrue(result)
def test_use_false_default_for_none(self):
self.user_profile.default_all_public_streams = False
self.user_profile.save()
result = _default_all_public_streams(self.user_profile, None)
self.assertFalse(result)
class TestEventsRegisterNarrowDefaults(TestCase):
def setUp(self):
self.email = 'hamlet@zulip.com'
self.user_profile = get_user_profile_by_email(self.email)
self.stream = get_stream('Verona', self.user_profile.realm)
def test_use_passed_narrow_no_default(self):
self.user_profile.default_events_register_stream_id = None
self.user_profile.save()
result = _default_narrow(self.user_profile, [('stream', 'my_stream')])
self.assertEqual(result, [('stream', 'my_stream')])
def test_use_passed_narrow_with_default(self):
self.user_profile.default_events_register_stream_id = self.stream.id
self.user_profile.save()
result = _default_narrow(self.user_profile, [('stream', 'my_stream')])
self.assertEqual(result, [('stream', 'my_stream')])
def test_use_default_if_narrow_is_empty(self):
self.user_profile.default_events_register_stream_id = self.stream.id
self.user_profile.save()
result = _default_narrow(self.user_profile, [])
self.assertEqual(result, [('stream', 'Verona')])
def test_use_narrow_if_default_is_none(self):
self.user_profile.default_events_register_stream_id = None
self.user_profile.save()
result = _default_narrow(self.user_profile, [])
self.assertEqual(result, [])
|
mmgen/mmgen
|
refs/heads/master
|
mmgen/color.py
|
1
|
#!/usr/bin/env python3
#
# mmgen = Multi-Mode GENerator, command-line Bitcoin cold storage solution
# Copyright (C)2013-2021 The MMGen Project <mmgen@tuta.io>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
color.py: color handling for the MMGen suite
"""
_colors = {
'black': ( 232, (30,0) ),
'red': ( 210, (31,1) ),
'green': ( 121, (32,1) ),
'yellow': ( 229, (33,1) ),
'blue': ( 75, (34,1) ),
'magenta': ( 205, (35,1) ),
'cyan': ( 122, (36,1) ),
'pink': ( 218, (35,1) ),
'orange': ( 216, (31,1) ),
'gray': ( 246, (30,1) ),
'purple': ( 141, (35,1) ),
'brown': ( 208, (33,0) ),
'grndim': ( 108, (32,0) ),
'redbg': ( (232,210), (30,101) ),
'grnbg': ( (232,121), (30,102) ),
'blubg': ( (232,75), (30,104) ),
'yelbg': ( (232,229), (30,103) ),
}
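# For each name in _colors, the loop below precomputes a 256-color escape
# ('_256_<name>') and a 16-color escape ('_16_<name>'), starts the active
# escape ('_clr_<name>') out empty, and defines a module-level helper
# <name>(s) that wraps s in the active escape plus _reset. init_color()
# later points the '_clr_*' globals at whichever escape set the terminal
# supports.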
for _c in _colors:
_e = _colors[_c]
globals()['_256_'+_c] = '\033[38;5;{};1m'.format(_e[0]) if type(_e[0]) == int \
else '\033[38;5;{};48;5;{};1m'.format(*_e[0])
globals()['_16_'+_c] = '\033[{}m'.format(_e[1][0]) if _e[1][1] == 0 \
else '\033[{};{}m'.format(*_e[1])
globals()['_clr_'+_c] = ''; _reset = ''
exec('def {c}(s): return _clr_{c}+s+_reset'.format(c=_c))
def nocolor(s): return s
def get_terminfo_colors(term=None):
from subprocess import run,PIPE
cmd = ['infocmp','-0']
if term:
cmd.append(term)
def is_hex_str(s):
from string import hexdigits
return set(list(s)) <= set(list(hexdigits))
try:
cmdout = run(cmd,stdout=PIPE,check=True).stdout.decode()
except:
return None
else:
s = [e.split('#')[1] for e in cmdout.split(',') if e.startswith('colors')][0]
if s.isdecimal():
return int(s)
elif s.startswith('0x') and is_hex_str(s[2:]):
return int(s[2:],16)
else:
return None
def init_color(num_colors='auto'):
assert num_colors in ('auto',8,16,256)
globals()['_reset'] = '\033[0m'
import os
t = os.getenv('TERM')
if num_colors in (8,16):
pfx = '_16_'
elif num_colors == 256 or (t and t.endswith('256color')) or get_terminfo_colors() == 256:
pfx = '_256_'
else:
pfx = '_16_'
for c in _colors:
globals()['_clr_'+c] = globals()[pfx+c]
def start_mscolor():
import sys
from .globalvars import g
try:
import colorama
colorama.init(strip=True,convert=True)
except:
from .util import msg
msg('Import of colorama module failed')
else:
g.stdout = sys.stdout
g.stderr = sys.stderr
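# A minimal usage sketch of the module above: the color helpers are no-ops
# until init_color() installs escape codes matching the terminal, as detected
# from $TERM / terminfo. The strings printed here are illustrative only.
if __name__ == '__main__':
    print(red('prints uncolored: init_color() has not been called yet'))
    init_color()
    print(red('error text'))
    print(green('ok text'))
    print(nocolor('plain text'))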
|
seize-the-dave/XlsxWriter
|
refs/heads/master
|
xlsxwriter/test/comparison/test_chart_bar19.py
|
8
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_bar19.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'bar'})
chart.axis_ids = [66558592, 66569344]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_x_axis({'name': '=Sheet1!$A$2'})
chart.set_y_axis({'name': '=Sheet1!$A$3'})
chart.set_title({'name': '=Sheet1!$A$1'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
saturn597/stem
|
refs/heads/master
|
test/integ/__init__.py
|
8
|
"""
Integration tests for the stem library.
"""
__all__ = [
"connection",
"control",
"descriptor",
"response",
"socket",
"util",
"process",
"version",
]
|
stuarth/pixie
|
refs/heads/master
|
pixie/vm/libs/string.py
|
10
|
import pixie.vm.rt as rt
from pixie.vm.string import String
from pixie.vm.code import as_var, intern_var, wrap_fn, MultiArityFn
from pixie.vm.object import affirm, runtime_error
from pixie.vm.numbers import Integer
from rpython.rlib.unicodedata import unicodedb_6_2_0 as unicodedb
import rpython.rlib.rstring as rstring
import pixie.vm.rt as rt
@as_var("pixie.string.internal", "starts-with")
def startswith(a, b):
return rt.wrap(rt.name(a).startswith(rt.name(b)))
@as_var("pixie.string.internal", "ends-with")
def endswith(a, b):
return rt.wrap(rt.name(a).endswith(rt.name(b)))
@as_var("pixie.string.internal", "split")
def split(a, b):
affirm(rt.count(b) > 0, u"separator can't be empty")
v = rt.vector()
for s in rstring.split(rt.name(a), rt.name(b)):
v = rt.conj(v, rt.wrap(s))
return v
def index_of2(a, sep):
return rt.wrap(rt.name(a).find(rt.name(sep)))
def index_of3(a, sep, start):
affirm(isinstance(start, Integer), u"Third argument must be an integer")
start = start.int_val()
if start >= 0:
return rt.wrap(rt.name(a).find(rt.name(sep), start))
else:
runtime_error(u"Third argument must be a non-negative integer")
def index_of4(a, sep, start, end):
affirm(isinstance(start, Integer) and isinstance(end, Integer), u"Third and fourth argument must be integers")
start = start.int_val()
end = end.int_val()
if start >= 0 and end >= 0:
return rt.wrap(rt.name(a).find(rt.name(sep), start, end))
else:
runtime_error(u"Third and fourth argument must be non-negative integers")
index_of = intern_var(u"pixie.string.internal", u"index-of")
index_of.set_root(MultiArityFn(u"index-of", {2: wrap_fn(index_of2), 3: wrap_fn(index_of3), 4: wrap_fn(index_of4)},
required_arity = 2))
def substring2(a, start):
return substring3(a, start, rt._count(a))
def substring3(a, start, end):
affirm(isinstance(a, String), u"First argument must be a string")
affirm(isinstance(start, Integer) and isinstance(end, Integer), u"Second and third argument must be integers")
start = start.int_val()
end = end.int_val()
if start >= 0 and end >= 0:
return rt.wrap(rt.name(a)[start:end])
else:
runtime_error(u"Second and third argument must be non-negative integers")
substring = intern_var(u"pixie.string.internal", u"substring")
substring.set_root(MultiArityFn(u"substring", {2: wrap_fn(substring2), 3: wrap_fn(substring3)},
required_arity = 2))
@as_var("pixie.string.internal", "upper-case")
def upper_case(a):
a = rt.name(a)
res = ""
for ch in a:
res += chr(unicodedb.toupper(ord(ch)))
return rt.wrap(res)
@as_var("pixie.string.internal", "lower-case")
def lower_case(a):
a = rt.name(a)
res = ""
for ch in a:
res += chr(unicodedb.tolower(ord(ch)))
return rt.wrap(res)
@as_var("pixie.string.internal", "capitalize")
def capitalize(a):
a = rt.name(a)
res = u""
res += unichr(unicodedb.toupper(ord(a[0])))
res += a[1:]
return rt.wrap(res)
@as_var("pixie.string.internal", "trim")
def trim(a):
a = rt.name(a)
i = 0
while i < len(a) and unicodedb.isspace(ord(a[i])):
i += 1
j = len(a)
while j > 0 and unicodedb.isspace(ord(a[j - 1])):
j -= 1
if j <= i:
return rt.wrap(u"")
return rt.wrap(a[i:j])
@as_var("pixie.string.internal", "triml")
def triml(a):
a = rt.name(a)
i = 0
while i < len(a) and unicodedb.isspace(ord(a[i])):
i += 1
return rt.wrap(a[i:len(a)])
@as_var("pixie.string.internal", "trimr")
def trimr(a):
a = rt.name(a)
j = len(a)
while j > 0 and unicodedb.isspace(ord(a[j - 1])):
j -= 1
if j <= 0:
return rt.wrap(u"")
return rt.wrap(a[0:j])
|
shaded-enmity/dnf
|
refs/heads/master
|
tests/test_cli_progress.py
|
12
|
# Copyright (C) 2012-2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
from __future__ import absolute_import
from __future__ import unicode_literals
from tests.support import mock
import dnf.callback
import dnf.cli.progress
import dnf.pycomp
import tests.support
import time
class MockStdout(dnf.pycomp.StringIO):
def visible_lines(self):
lines = self.lines()
last = len(lines) - 1
return [l[:-1] for (i, l) in enumerate(lines)
if l.endswith('\n') or i == last]
def lines(self):
return self.getvalue().splitlines(True)
class FakePayload(object):
def __init__(self, string, size):
self.string = string
self.size = size
def __str__(self):
return self.string
@property
def download_size(self):
return self.size
class ProgressTest(tests.support.TestCase):
def test_single(self):
now = 1379406823.9
fo = MockStdout()
with mock.patch('dnf.cli.progress._term_width', return_value=60), \
mock.patch('dnf.cli.progress.time', lambda: now):
p = dnf.cli.progress.MultiFileProgressMeter(fo)
pload = FakePayload('dummy-text', 5)
p.start(1, 1)
for i in range(6):
now += 1.0
p.progress(pload, i)
p.end(pload, None, None)
self.assertEquals(fo.lines(), [
'dummy-text 0% [ ] --- B/s | 0 B --:-- ETA\r',
'dummy-text 20% [== ] 1.0 B/s | 1 B 00:04 ETA\r',
'dummy-text 40% [==== ] 1.0 B/s | 2 B 00:03 ETA\r',
'dummy-text 60% [====== ] 1.0 B/s | 3 B 00:02 ETA\r',
'dummy-text 80% [======== ] 1.0 B/s | 4 B 00:01 ETA\r',
'dummy-text100% [==========] 1.0 B/s | 5 B 00:00 ETA\r',
'dummy-text 1.0 B/s | 5 B 00:05 \n'])
def test_mirror(self):
fo = MockStdout()
p = dnf.cli.progress.MultiFileProgressMeter(fo, update_period=-1)
p.start(1, 5)
pload = FakePayload('foo', 5.0)
now = 1379406823.9
with mock.patch('dnf.cli.progress._term_width', return_value=60), \
mock.patch('dnf.cli.progress.time', lambda: now):
p.progress(pload, 3)
p.end(pload, dnf.callback.STATUS_MIRROR, 'Timeout.')
p.progress(pload, 4)
self.assertEqual(fo.visible_lines(), [
'[MIRROR] foo: Timeout. ',
'foo 80% [======== ] --- B/s | 4 B --:-- ETA'])
_REFERENCE_TAB = [
['(1-2/2): f 0% [ ] --- B/s | 0 B --:-- ETA'],
['(1-2/2): b 10% [= ] 2.2 B/s | 3 B 00:12 ETA'],
['(1-2/2): f 20% [== ] 2.4 B/s | 6 B 00:10 ETA'],
['(1-2/2): b 30% [=== ] 2.5 B/s | 9 B 00:08 ETA'],
['(1-2/2): f 40% [==== ] 2.6 B/s | 12 B 00:06 ETA'],
['(1-2/2): b 50% [===== ] 2.7 B/s | 15 B 00:05 ETA'],
['(1-2/2): f 60% [====== ] 2.8 B/s | 18 B 00:04 ETA'],
['(1-2/2): b 70% [======= ] 2.8 B/s | 21 B 00:03 ETA'],
['(1-2/2): f 80% [======== ] 2.9 B/s | 24 B 00:02 ETA'],
['(1-2/2): b 90% [========= ] 2.9 B/s | 27 B 00:01 ETA'],
['(1/2): foo 1.0 B/s | 10 B 00:10 ',
'(2/2): bar100% [==========] 2.9 B/s | 30 B 00:00 ETA']]
def test_multi(self):
now = 1379406823.9
fo = MockStdout()
with mock.patch('dnf.cli.progress._term_width', return_value=60), \
mock.patch('dnf.cli.progress.time', lambda: now):
p = dnf.cli.progress.MultiFileProgressMeter(fo)
p.start(2, 30)
pload1 = FakePayload('foo', 10.0)
pload2 = FakePayload('bar', 20.0)
for i in range(11):
p.progress(pload1, float(i))
if i == 10:
p.end(pload1, None, None)
now += 0.5
p.progress(pload2, float(i*2))
self.assertEquals(self._REFERENCE_TAB[i], fo.visible_lines())
if i == 10:
p.end(pload2, dnf.callback.STATUS_FAILED, 'some error')
now += 0.5
# check "end" events
self.assertEqual(fo.visible_lines(), [
'(1/2): foo 1.0 B/s | 10 B 00:10 ',
'[FAILED] bar: some error '])
self.assertTrue(2.0 < p.rate < 4.0)
@mock.patch('dnf.cli.progress._term_width', return_value=40)
def test_skip(self, mock_term_width):
fo = MockStdout()
p = dnf.cli.progress.MultiFileProgressMeter(fo)
p.start(2, 30)
pload1 = FakePayload('club', 20.0)
p.end(pload1, dnf.callback.STATUS_ALREADY_EXISTS, 'already got')
self.assertEqual(p.done_files, 1)
self.assertEqual(p.done_size, pload1.size)
self.assertEqual(fo.getvalue(),
'[SKIPPED] club: already got \n')
|
donfucius/pyew
|
refs/heads/master
|
plugins/packer.py
|
4
|
#!/usr/bin/env python
"""
Pyew! A Python Tool like the popular *iew
Copyright (C) 2009,2010 Joxean Koret
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import pefile
import peutils
def checkPacker(pyew, doprint=True):
""" Check if the PE file is packed """
if pyew.pe is None:
return
sig = peutils.SignatureDatabase(os.path.join(os.path.dirname(__file__), "UserDB.TXT"))
matches = sig.match_all(pyew.pe, ep_only = True)
if not matches:
if doprint:
print "***No match"
return
if doprint:
for match in matches:
print "".join(match)
if len(matches) == 0:
if doprint:
print "***No match"
return
return matches
functions = {"packer":checkPacker}
|
ojengwa/oh-mainline
|
refs/heads/master
|
vendor/packages/celery/contrib/release/sphinx-to-rst.py
|
44
|
#!/usr/bin/env python
import os
import re
import sys
dirname = ""
RE_CODE_BLOCK = re.compile(r'.. code-block:: (.+?)\s*$')
RE_INCLUDE = re.compile(r'.. include:: (.+?)\s*$')
RE_REFERENCE = re.compile(r':(.+?):`(.+?)`')
def include_file(lines, pos, match):
global dirname
orig_filename = match.groups()[0]
filename = os.path.join(dirname, orig_filename)
fh = open(filename)
try:
old_dirname = dirname
dirname = os.path.dirname(orig_filename)
try:
lines[pos] = sphinx_to_rst(fh)
finally:
dirname = old_dirname
finally:
fh.close()
def replace_code_block(lines, pos, match):
lines[pos] = ""
curpos = pos - 1
# Find the first previous line with text to append "::" to it.
while True:
prev_line = lines[curpos]
if not prev_line.isspace():
prev_line_with_text = curpos
break
curpos -= 1
if lines[prev_line_with_text].endswith(":"):
lines[prev_line_with_text] += ":"
else:
lines[prev_line_with_text] += "::"
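# Note: replace_code_block drops the ".. code-block:: <lang>" directive line itself
# and appends "::" to the nearest preceding non-blank line, so the indented block
# that follows renders as a plain reST literal block.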
TO_RST_MAP = {RE_CODE_BLOCK: replace_code_block,
RE_REFERENCE: r'``\2``',
RE_INCLUDE: include_file}
def _process(lines):
lines = list(lines) # non-destructive
for i, line in enumerate(lines):
for regex, alt in TO_RST_MAP.items():
if callable(alt):
match = regex.match(line)
if match:
alt(lines, i, match)
line = lines[i]
else:
lines[i] = regex.sub(alt, line)
return lines
def sphinx_to_rst(fh):
return "".join(_process(fh))
if __name__ == "__main__":
global dirname
dirname = os.path.dirname(sys.argv[1])
fh = open(sys.argv[1])
try:
print(sphinx_to_rst(fh))
finally:
fh.close()
|
fabian4/ceilometer
|
refs/heads/master
|
ceilometer/storage/models.py
|
13
|
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Model classes for use in the storage API.
"""
from ceilometer.storage import base
class Resource(base.Model):
"""Something for which sample data has been collected."""
def __init__(self, resource_id, project_id,
first_sample_timestamp,
last_sample_timestamp,
source, user_id, metadata):
"""Create a new resource.
:param resource_id: UUID of the resource
:param project_id: UUID of project owning the resource
:param first_sample_timestamp: first sample timestamp captured
:param last_sample_timestamp: last sample timestamp captured
:param source: the identifier for the user/project id definition
:param user_id: UUID of user owning the resource
:param metadata: most current metadata for the resource (a dict)
"""
base.Model.__init__(self,
resource_id=resource_id,
first_sample_timestamp=first_sample_timestamp,
last_sample_timestamp=last_sample_timestamp,
project_id=project_id,
source=source,
user_id=user_id,
metadata=metadata,
)
class Meter(base.Model):
"""Definition of a meter for which sample data has been collected."""
def __init__(self, name, type, unit, resource_id, project_id, source,
user_id):
"""Create a new meter.
:param name: name of the meter
:param type: type of the meter (gauge, delta, cumulative)
:param unit: unit of the meter
:param resource_id: UUID of the resource
:param project_id: UUID of project owning the resource
:param source: the identifier for the user/project id definition
:param user_id: UUID of user owning the resource
"""
base.Model.__init__(self,
name=name,
type=type,
unit=unit,
resource_id=resource_id,
project_id=project_id,
source=source,
user_id=user_id,
)
class Sample(base.Model):
"""One collected data point."""
def __init__(self,
source,
counter_name, counter_type, counter_unit, counter_volume,
user_id, project_id, resource_id,
timestamp, resource_metadata,
message_id,
message_signature,
recorded_at,
):
"""Create a new sample.
:param source: the identifier for the user/project id definition
:param counter_name: the name of the measurement being taken
:param counter_type: the type of the measurement
:param counter_unit: the units for the measurement
:param counter_volume: the measured value
:param user_id: the user that triggered the measurement
:param project_id: the project that owns the resource
:param resource_id: the thing on which the measurement was taken
:param timestamp: the time of the measurement
:param resource_metadata: extra details about the resource
:param message_id: a message identifier
:param recorded_at: sample record timestamp
:param message_signature: a hash created from the rest of the
message data
"""
base.Model.__init__(self,
source=source,
counter_name=counter_name,
counter_type=counter_type,
counter_unit=counter_unit,
counter_volume=counter_volume,
user_id=user_id,
project_id=project_id,
resource_id=resource_id,
timestamp=timestamp,
resource_metadata=resource_metadata,
message_id=message_id,
message_signature=message_signature,
recorded_at=recorded_at)
class Statistics(base.Model):
"""Computed statistics based on a set of sample data."""
def __init__(self, unit,
period, period_start, period_end,
duration, duration_start, duration_end,
groupby, **data):
"""Create a new statistics object.
:param unit: The unit type of the data set
:param period: The length of the time range covered by these stats
:param period_start: The timestamp for the start of the period
:param period_end: The timestamp for the end of the period
:param duration: The total time for the matching samples
:param duration_start: The earliest time for the matching samples
:param duration_end: The latest time for the matching samples
:param groupby: The fields used to group the samples.
:param data: some or all of the following aggregates
min: The smallest volume found
max: The largest volume found
avg: The average of all volumes found
sum: The total of all volumes found
count: The number of samples found
aggregate: name-value pairs for selectable aggregates
"""
base.Model.__init__(self, unit=unit,
period=period, period_start=period_start,
period_end=period_end, duration=duration,
duration_start=duration_start,
duration_end=duration_end,
groupby=groupby,
**data)
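# Illustrative sketch; the field values below are hypothetical and only show how
# the keyword-based constructors are meant to be fed:
#   meter = Meter(name='cpu', type='cumulative', unit='ns',
#                 resource_id='res-uuid', project_id='proj-uuid',
#                 source='openstack', user_id='user-uuid')
# Each model simply forwards its keyword arguments to base.Model.__init__.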
|
koharjidan/litecoin
|
refs/heads/master
|
share/qt/clean_mac_info_plist.py
|
229
|
#!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the Litecoin-Qt.app contains the right plist (including the right version)
# fix made because of several bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "Litecoin-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
lineArr = line.replace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist fresh created"
|
knowledgepoint-devs/askbot-devel
|
refs/heads/merge1
|
askbot/admin.py
|
1
|
# -*- coding: utf-8 -*-
"""
:synopsis: connector to the standard Django admin interface
To make more models accessible in the Django admin interface, add more classes subclassing ``django.contrib.admin.ModelAdmin``
Names of the classes must be like `SomeModelAdmin`, where `SomeModel` must
exactly match the name of the model used in the project
"""
from django.conf import settings as django_settings
from django.contrib import admin
from django.contrib.admin import SimpleListFilter
from askbot import models
from askbot import const
from askbot.deps.django_authopenid.models import UserEmailVerifier
TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
admin.site.register(models.Vote)
admin.site.register(models.FavoriteQuestion)
admin.site.register(models.Award)
admin.site.register(models.Repute)
admin.site.register(models.BulkTagSubscription)
class UserEmailVerifierAdmin(admin.ModelAdmin):
list_display = ('key', 'verified', 'expires_on')
admin.site.register(UserEmailVerifier, UserEmailVerifierAdmin)
class InSite(SimpleListFilter):
title = 'site membership'
parameter_name = 'name'
def lookups(self, request, model_admin):
return tuple([(s.id, 'in site \'%s\''%s.name) for s in Site.objects.all()])
def queryset(self, request, queryset):
if self.value():
return queryset.filter(sites__id=self.value())
else:
return queryset
class TagAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'language_code', 'created_by', 'deleted', 'status', 'in_sites', 'used_count')
list_filter = ('deleted', 'status', 'language_code', InSite)
search_fields = ('name',)
def in_sites(self, obj):
return ', '.join(obj.sites.all().values_list('name', flat=True))
admin.site.register(models.Tag, TagAdmin)
class SpaceAdmin(admin.ModelAdmin):
list_display = ('id', 'name')
admin.site.register(models.Space, SpaceAdmin)
class FeedAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'default_space', 'redirect', 'site')
admin.site.register(models.Feed, FeedAdmin)
class FeedToSpaceAdmin(admin.ModelAdmin):
list_display = ('feed', 'space')
list_filter = ('feed', 'space')
search_fields = ('feed__name', 'space__name')
admin.site.register(models.FeedToSpace, FeedToSpaceAdmin)
class ActivityAdmin(admin.ModelAdmin):
list_display = ('user', 'active_at', 'activity_type', 'question', 'content_type', 'object_id', 'content_object', 'recipients_list', 'receiving_users_list')
list_filter = ('activity_type', 'content_type')
search_fields = ('user__username', 'object_id', 'question__id', 'question__thread__id', 'question__thread__title')
def recipients_list(self, obj):
return ', '.join(obj.recipients.all().values_list('username', flat=True))
def receiving_users_list(self, obj):
return ', '.join(obj.receiving_users.all().values_list('username', flat=True))
admin.site.register(models.Activity, ActivityAdmin)
class IsPersonal(SimpleListFilter):
title = 'is personal group'
parameter_name = 'is_personal'
def lookups(self, request, model_admin):
return (('1', 'Yes'), ('0', 'No'))
def queryset(self, request, queryset):
if self.value() == '1':
return queryset.filter(name__contains=models.user.PERSONAL_GROUP_NAME_PREFIX)
elif self.value() == '0':
return queryset.exclude(name__contains=models.user.PERSONAL_GROUP_NAME_PREFIX)
else:
return queryset
class GroupAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'logo_url', 'description', 'moderate_email', 'moderate_answers_to_enquirers', 'openness', 'is_vip', 'read_only')
list_display_links = ('id', 'name')
list_filter = (IsPersonal, 'moderate_email', 'moderate_answers_to_enquirers', 'openness', 'is_vip', 'read_only')
search_fields = ('name', 'logo_url')
admin.site.register(models.Group, GroupAdmin)
class GroupMembershipAdmin(admin.ModelAdmin):
list_display = ('group', 'user', 'level')
list_filter = ('level',)
search_fields = ('user__username',)
admin.site.register(models.GroupMembership, GroupMembershipAdmin)
class EmailFeedSettingAdmin(admin.ModelAdmin):
list_display = ('id', 'subscriber', 'email_tag_filter_strategy', 'feed_type', 'frequency', 'added_at', 'reported_at' )
list_filter = ('frequency', 'feed_type')
search_fields = ('subscriber__username',)
def email_tag_filter_strategy(self, obj):
if obj.feed_type == 'q_all':
strategy = obj.subscriber.email_tag_filter_strategy
if strategy == const.INCLUDE_ALL:
return 'all tags'
elif strategy == const.EXCLUDE_IGNORED:
return 'exclude ignored tags'
elif strategy == const.INCLUDE_INTERESTING:
return 'only interesting tags'
elif strategy == const.INCLUDE_SUBSCRIBED:
return 'include subscribed'
else:
return 'invalid'
else:
return 'n/a'
admin.site.register(models.EmailFeedSetting, EmailFeedSettingAdmin)
class QuestionViewAdmin(admin.ModelAdmin):
list_display = ('who', 'question', 'when')
search_fields = ('who__username',)
admin.site.register(models.QuestionView, QuestionViewAdmin)
class PostToGroupInline(admin.TabularInline):
model = models.PostToGroup
extra = 1
class IsPrivate(SimpleListFilter):
title = 'is private'
parameter_name = 'is_private'
def lookups(self, request, model_admin):
return (('1', 'Yes'), ('0', 'No'))
def queryset(self, request, queryset):
global_group = models.Group.objects.get_global_group()
if self.value() == '1':
return queryset.exclude(groups__id=global_group.id)
elif self.value() == '0':
return queryset.filter(groups__id=global_group.id)
else:
return queryset
class PostAdmin(admin.ModelAdmin):
list_display = ('id', 'post_type', 'thread', 'author', 'text_30', 'added_at_with_seconds', 'deleted', 'in_groups', 'is_published', 'is_private', 'vote_up_count', 'language_code')
list_filter = ('deleted', IsPrivate, 'post_type', 'language_code', 'vote_up_count')
search_fields = ('id', 'thread__title', 'text', 'author__username')
inlines = (PostToGroupInline,)
def text_30(self, obj):
return obj.text[:30]
def added_at_with_seconds(self, obj):
return obj.added_at.strftime(TIME_FORMAT)
added_at_with_seconds.admin_order_field = 'added_at'
def in_groups(self, obj):
return ', '.join(obj.groups.exclude(name__startswith=models.user.PERSONAL_GROUP_NAME_PREFIX).values_list('name', flat=True))
def is_published(self, obj):
return obj.thread._question_post().author.get_personal_group() in obj.groups.all()
admin.site.register(models.Post, PostAdmin)
class PostRevisionAdmin(admin.ModelAdmin):
list_display = ('id', 'post_id', 'post_type', 'thread_name', 'revision', 'revised_at_with_seconds', 'author', 'approved', 'is_minor', 'text_start')
list_filter = ('approved', 'is_minor', 'revision')
search_fields = ('text', 'author__username', 'post__id', 'post__thread__title')
ordering = ('-id',)
def post_id(self, obj):
return obj.post.id
def post_type(self, obj):
return obj.post.post_type
def thread_name(self, obj):
return obj.post.thread.title
def revised_at_with_seconds(self, obj):
return obj.revised_at.strftime(TIME_FORMAT)
revised_at_with_seconds.admin_order_field = 'revised_at'
def text_start(self, obj):
return obj.text[:30]
admin.site.register(models.PostRevision, PostRevisionAdmin)
class ThreadToGroupInline(admin.TabularInline):
model = models.ThreadToGroup
extra = 1
class SpacesInline(admin.TabularInline):
model = models.Space.questions.through
extra = 1
class TagsInline(admin.TabularInline):
model = models.Thread.tags.through
extra = 1
class ThreadAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'added_at_with_seconds', 'last_activity_at_with_seconds', 'last_activity_by', 'answer_count', 'deleted', 'closed', 'site', 'in_spaces', 'in_groups', 'has_tags', 'is_private', 'language_code')
list_filter = ('deleted', 'closed', 'language_code', 'site')
search_fields = ('last_activity_by__username', 'title', 'tags__name')
inlines = (ThreadToGroupInline, SpacesInline, TagsInline)
def added_at_with_seconds(self, obj):
return obj.added_at.strftime(TIME_FORMAT)
added_at_with_seconds.admin_order_field = 'added_at'
def last_activity_at_with_seconds(self, obj):
return obj.last_activity_at.strftime(TIME_FORMAT)
last_activity_at_with_seconds.admin_order_field = 'last_activity_at'
def in_groups(self, obj):
return ', '.join(obj.groups.exclude(name__startswith=models.user.PERSONAL_GROUP_NAME_PREFIX).values_list('name', flat=True))
def in_spaces(self, obj):
return ', '.join(obj.spaces.all().values_list('name', flat=True))
def has_tags(self, obj):
return ', '.join(obj.tags.all().values_list('name', flat=True))
admin.site.register(models.Thread, ThreadAdmin)
class NonPersonalGroupFilter(SimpleListFilter):
title = 'non-personal group'
parameter_name = 'non_personal_group'
def lookups(self, request, model_admin):
return tuple([(group.id, "%s group"%group.name) for group in models.Group.objects.exclude(name__contains=models.user.PERSONAL_GROUP_NAME_PREFIX)])
def queryset(self, request, queryset):
if self.value():
return queryset.filter(group__id=self.value())
else:
return queryset
class AskWidgetAdmin(admin.ModelAdmin):
list_display = ('id', 'site', 'title', 'group', 'tag', 'include_text_field', 'has_inner_style', 'has_outer_style')
list_filter = ('include_text_field', NonPersonalGroupFilter)
search_fields = ('title', 'tag')
def has_inner_style(self, obj):
return obj.inner_style.strip() != u''
def has_outer_style(self, obj):
return obj.outer_style.strip() != u''
admin.site.register(models.AskWidget, AskWidgetAdmin)
class QuestionWidgetAdmin(admin.ModelAdmin):
list_display = ('id', 'site', 'title', 'question_number', 'tagnames', 'group', 'has_search_query', 'order_by', 'has_style')
list_filter = (NonPersonalGroupFilter,)
search_fields = ('title', 'tagnames')
def has_style(self, obj):
return obj.style.strip() != u''
def has_search_query(self, obj):
return obj.search_query.strip() != u''
admin.site.register(models.QuestionWidget, QuestionWidgetAdmin)
class DraftQuestionAdmin(admin.ModelAdmin):
list_display = ('id', 'author', 'title', 'tagnames')
search_fields = ('author__username', 'title', 'tagnames')
admin.site.register(models.DraftQuestion, DraftQuestionAdmin)
class ReplyAddressAdmin(admin.ModelAdmin):
list_display = ('id', 'address', 'user', 'reply_action')
list_display_links = ('id', 'address')
search_fields = ('address', 'user__username')
list_filter = ('reply_action',)
admin.site.register(models.ReplyAddress, ReplyAddressAdmin)
from django.contrib.sites.models import Site
try:
admin.site.unregister(Site)
finally:
from django.contrib.sites.admin import SiteAdmin as OrigSiteAdmin
class SiteAdmin(OrigSiteAdmin):
list_display = ('id',) + OrigSiteAdmin.list_display
admin.site.register(Site, SiteAdmin)
class SubscribedToSite(InSite):
title = 'subscribed to site'
def queryset(self, request, queryset):
if self.value():
return queryset.filter(subscribed_sites__id=self.value())
else:
return queryset
class UserProfileAdmin(admin.ModelAdmin):
list_display = ('auth_user', 'default_site', 'subs_sites', 'primary_group')
list_filter = ('default_site', SubscribedToSite)
search_fields = ('auth_user__username',)
filter_horizontal = ('subscribed_sites',)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == 'primary_group':
global_group = models.Group.objects.get_global_group()
groups = models.Group.objects.exclude(pk=global_group.id)
groups = groups.exclude_personal()
kwargs['queryset'] = groups
return super(UserProfileAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def subs_sites(self, obj):
return ', '.join(obj.subscribed_sites.all().values_list('name', flat=True))
admin.site.register(models.UserProfile, UserProfileAdmin)
from django.contrib.auth.models import User
try:
admin.site.unregister(User)
finally:
class InGroup(SimpleListFilter):
title = 'group membership'
parameter_name = 'in_group'
def lookups(self, request, model_admin):
return tuple([(g.id, 'in group \'%s\''%g.name) for g in models.Group.objects.exclude(name__startswith=models.user.PERSONAL_GROUP_NAME_PREFIX)])
def queryset(self, request, queryset):
if self.value():
return queryset.filter(groups__id=self.value())
else:
return queryset
class _UserBooleanMethodListFilter(SimpleListFilter):
def lookups(self, request, model_admin):
return (('true', 'yes'), ('false', 'no'))
def queryset(self, request, queryset):
if self.value():
target_boolean = self.value() == 'true'
admin_ids = []
for user in User.objects.all():
if bool(getattr(user, self.method)()) == target_boolean:
admin_ids.append(user.id)
return queryset.filter(id__in=admin_ids)
else:
return queryset
class IsAdministrator(_UserBooleanMethodListFilter):
method = 'is_administrator'
title = 'is administrator'
parameter_name = 'is_administrator'
class IsModerator(_UserBooleanMethodListFilter):
method = 'is_moderator'
title = 'is moderator'
parameter_name = 'is_moderator'
class SeesThreadsInLanguage(SimpleListFilter):
title = "sees Threads in language"
parameter_name = "sees_threads_in_lang"
def lookups(self, request, model_admin):
return django_settings.LANGUAGES
def queryset(self, request, queryset):
if self.value():
return queryset.filter(languages__icontains=self.value())
return queryset
from django.contrib.auth.admin import UserAdmin as OrigUserAdmin
class UserAdmin(OrigUserAdmin):
list_display = OrigUserAdmin.list_display + ('languages', 'country',
'date_joined', 'reputation',
'is_administrator', 'status', 'is_moderator', 'is_fake', 'email_isvalid',
'my_interesting_tags', 'interesting_tag_wildcards',
'my_ignored_tags', 'ignored_tag_wildcards',
'my_subscribed_tags', 'subscribed_tag_wildcards',
'email_tag_filter_strategy', 'display_tag_filter_strategy',
'get_groups', 'get_primary_group', 'get_default_site')
list_filter = OrigUserAdmin.list_filter + (IsAdministrator, 'status', IsModerator, 'is_fake', 'email_isvalid', 'email_tag_filter_strategy', 'display_tag_filter_strategy', SeesThreadsInLanguage, InGroup)
search_fields = OrigUserAdmin.search_fields + ('country',)
def interesting_tag_wildcards(self, obj):
return ', '.join(obj.interesting_tags.strip().split())
def my_interesting_tags(self, obj):
return ', '.join(obj.get_marked_tags('good').values_list('name', flat=True))
def ignored_tag_wildcards(self, obj):
return ', '.join(obj.ignored_tags.strip().split())
def my_ignored_tags(self, obj):
return ', '.join(obj.get_marked_tags('bad').values_list('name', flat=True))
def subscribed_tag_wildcards(self, obj):
return ', '.join(obj.subscribed_tags.strip().split())
def my_subscribed_tags(self, obj):
return ', '.join(obj.get_marked_tags('subscribed').values_list('name', flat=True))
admin.site.register(User, UserAdmin)
try:
from avatar.models import Avatar
except ImportError:
pass # avatar not installed, so no matter
else:
from django.contrib.admin.sites import NotRegistered
try:
admin.site.unregister(Avatar)
except NotRegistered:
print u"Move 'avatar' above 'askbot' in INSTALLED_APPS to get a more useful admin view for Avatar model"
else:
class AvatarAdmin(admin.ModelAdmin):
list_display = ('id', 'user', 'avatar', 'primary', 'date_uploaded')
list_filter = ('primary', 'date_uploaded')
search_fields = ('user__username', 'user__email', 'avatar')
admin.site.register(Avatar, AvatarAdmin)
|
cmoutard/mne-python
|
refs/heads/master
|
mne/simulation/tests/test_evoked.py
|
9
|
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_true, assert_raises
import warnings
from mne.datasets import testing
from mne import read_forward_solution
from mne.simulation import simulate_sparse_stc, simulate_evoked
from mne import read_cov
from mne.io import Raw
from mne import pick_types_forward, read_evokeds
from mne.utils import run_tests_if_main
warnings.simplefilter('always')
data_path = testing.data_path(download=False)
fwd_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test_raw.fif')
ave_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-ave.fif')
cov_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-cov.fif')
@testing.requires_testing_data
def test_simulate_evoked():
""" Test simulation of evoked data """
raw = Raw(raw_fname)
fwd = read_forward_solution(fwd_fname, force_fixed=True)
fwd = pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])
cov = read_cov(cov_fname)
evoked_template = read_evokeds(ave_fname, condition=0, baseline=None)
evoked_template.pick_types(meg=True, eeg=True, exclude=raw.info['bads'])
snr = 6 # dB
tmin = -0.1
sfreq = 1000. # Hz
tstep = 1. / sfreq
n_samples = 600
times = np.linspace(tmin, tmin + n_samples * tstep, n_samples)
# Generate times series for 2 dipoles
stc = simulate_sparse_stc(fwd['src'], n_dipoles=2, times=times)
stc._data *= 1e-9
# Generate noisy evoked data
iir_filter = [1, -0.9]
evoked = simulate_evoked(fwd, stc, evoked_template.info, cov, snr,
tmin=0.0, tmax=0.2, iir_filter=iir_filter)
assert_array_almost_equal(evoked.times, stc.times)
assert_true(len(evoked.data) == len(fwd['sol']['data']))
# make a vertex that doesn't exist in fwd, should throw error
stc_bad = stc.copy()
mv = np.max(fwd['src'][0]['vertno'][fwd['src'][0]['inuse']])
stc_bad.vertices[0][0] = mv + 1
assert_raises(RuntimeError, simulate_evoked, fwd, stc_bad,
evoked_template.info, cov, snr, tmin=0.0, tmax=0.2)
evoked_1 = simulate_evoked(fwd, stc, evoked_template.info, cov, np.inf,
tmin=0.0, tmax=0.2)
evoked_2 = simulate_evoked(fwd, stc, evoked_template.info, cov, np.inf,
tmin=0.0, tmax=0.2)
assert_array_equal(evoked_1.data, evoked_2.data)
run_tests_if_main()
|
tensorflow/tfx
|
refs/heads/master
|
tfx/utils/io_utils.py
|
1
|
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility class for I/O."""
import os
import tempfile
from typing import List, TypeVar
from tfx.dsl.io import fileio
from google.protobuf import json_format
from google.protobuf import text_format
from google.protobuf.message import Message
try:
from tensorflow_metadata.proto.v0.schema_pb2 import Schema as schema_pb2_Schema # pylint: disable=g-import-not-at-top,g-importing-member
except ModuleNotFoundError as e:
schema_pb2_Schema = None # pylint: disable=invalid-name
# Nanoseconds per second.
NANO_PER_SEC = 1000 * 1000 * 1000
# If path starts with one of those, consider files are in remote filesystem.
_REMOTE_FS_PREFIX = ['gs://', 'hdfs://', 's3://']
def ensure_local(file_path: str) -> str:
"""Ensures that the given file path is made available locally."""
if not any([file_path.startswith(prefix) for prefix in _REMOTE_FS_PREFIX]):
return file_path
temp_dir = tempfile.mkdtemp()
local_path = os.path.join(temp_dir, os.path.basename(file_path))
copy_file(file_path, local_path, True)
return local_path
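# Illustrative usage (hypothetical URI):
#   local = ensure_local('gs://some-bucket/data/schema.pbtxt')
# Remote inputs are copied into a fresh temporary directory; plain local paths are
# returned unchanged.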
def copy_file(src: str, dst: str, overwrite: bool = False):
"""Copies a single file from source to destination."""
if overwrite and fileio.exists(dst):
fileio.remove(dst)
dst_dir = os.path.dirname(dst)
fileio.makedirs(dst_dir)
fileio.copy(src, dst, overwrite=overwrite)
def copy_dir(src: str, dst: str) -> None:
"""Copies the whole directory recursively from source to destination."""
src = src.rstrip('/')
dst = dst.rstrip('/')
if fileio.exists(dst):
fileio.rmtree(dst)
fileio.makedirs(dst)
for dir_name, sub_dirs, leaf_files in fileio.walk(src):
for leaf_file in leaf_files:
leaf_file_path = os.path.join(dir_name, leaf_file)
new_file_path = os.path.join(dir_name.replace(src, dst, 1), leaf_file)
fileio.copy(leaf_file_path, new_file_path)
for sub_dir in sub_dirs:
fileio.makedirs(os.path.join(dir_name.replace(src, dst, 1), sub_dir))
def get_only_uri_in_dir(dir_path: str) -> str:
"""Gets the only uri from given directory."""
files = fileio.listdir(dir_path)
if len(files) != 1:
raise RuntimeError(
'Only one file per dir is supported: {}.'.format(dir_path))
filename = os.path.dirname(os.path.join(files[0], ''))
return os.path.join(dir_path, filename)
def delete_dir(path: str) -> None:
"""Deletes a directory if exists."""
if fileio.isdir(path):
fileio.rmtree(path)
def write_string_file(file_name: str, string_value: str) -> None:
"""Writes a string to file."""
fileio.makedirs(os.path.dirname(file_name))
with fileio.open(file_name, 'w') as f:
f.write(string_value)
def write_bytes_file(file_name: str, content: bytes) -> None:
"""Writes bytes to file."""
fileio.makedirs(os.path.dirname(file_name))
with fileio.open(file_name, 'wb') as f:
f.write(content)
def write_pbtxt_file(file_name: str, proto: Message) -> None:
"""Writes a text protobuf to file."""
write_string_file(file_name, text_format.MessageToString(proto))
def write_tfrecord_file(file_name: str, *proto: Message) -> None:
"""Writes a serialized tfrecord to file."""
try:
import tensorflow as tf # pylint: disable=g-import-not-at-top
except ModuleNotFoundError as e:
raise Exception(
'TensorFlow must be installed to use this functionality.') from e
fileio.makedirs(os.path.dirname(file_name))
with tf.io.TFRecordWriter(file_name) as writer:
for message in proto:
writer.write(message.SerializeToString())
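# Illustrative usage (file name and messages are hypothetical):
#   write_tfrecord_file('/tmp/out.tfrecord', example_one, example_two)
# Every positional proto is serialized with SerializeToString() and written as one
# record; TensorFlow is only imported when the function is actually called.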
# Type for a subclass of message.Message which will be used as a return type.
ProtoMessage = TypeVar('ProtoMessage', bound=Message)
def parse_pbtxt_file(file_name: str, message: ProtoMessage) -> ProtoMessage:
"""Parses a protobuf message from a text file and return message itself."""
contents = fileio.open(file_name).read()
text_format.Parse(contents, message)
return message
def parse_json_file(file_name: str, message: ProtoMessage) -> ProtoMessage:
"""Parses a protobuf message from a JSON file and return itself."""
contents = fileio.open(file_name).read()
json_format.Parse(contents, message)
return message
def load_csv_column_names(csv_file: str) -> List[str]:
"""Parse the first line of a csv file as column names."""
with fileio.open(csv_file) as f:
return f.readline().strip().split(',')
def all_files_pattern(file_pattern: str) -> str:
"""Returns file pattern suitable for Beam to locate multiple files."""
return os.path.join(file_pattern, '*')
def generate_fingerprint(split_name: str, file_pattern: str) -> str:
"""Generates a fingerprint for all files that match the pattern."""
files = fileio.glob(file_pattern)
total_bytes = 0
# Checksum used here is based on timestamp (mtime).
# Checksums are xor'ed and sum'ed over the files so that they are order-
# independent.
xor_checksum = 0
sum_checksum = 0
for f in files:
stat = fileio.stat(f)
total_bytes += stat.length
# Take mtime only up to second-granularity.
mtime = int(stat.mtime_nsec / NANO_PER_SEC)
xor_checksum ^= mtime
sum_checksum += mtime
return 'split:%s,num_files:%d,total_bytes:%d,xor_checksum:%d,sum_checksum:%d' % (
split_name, len(files), total_bytes, xor_checksum, sum_checksum)
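# Note: because per-file mtimes are combined with XOR and addition, the fingerprint
# is independent of the order in which fileio.glob returns the files; for mtimes
# [a, b], a ^ b and a + b are the same as for [b, a].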
def read_string_file(file_name: str) -> str:
"""Reads a string from a file."""
if not fileio.exists(file_name):
msg = '{} does not exist'.format(file_name)
raise FileNotFoundError(msg)
return fileio.open(file_name).read()
def read_bytes_file(file_name: str) -> bytes:
"""Reads bytes from a file."""
if not fileio.exists(file_name):
msg = '{} does not exist'.format(file_name)
raise FileNotFoundError(msg)
return fileio.open(file_name, 'rb').read()
class SchemaReader:
"""Schema reader."""
def read(self, schema_path: str) -> schema_pb2_Schema: # pytype: disable=invalid-annotation
"""Gets a tf.metadata schema.
Args:
schema_path: Path to schema file.
Returns:
A tf.metadata schema.
"""
try:
from tensorflow_metadata.proto.v0 import schema_pb2 # pylint: disable=g-import-not-at-top
except ModuleNotFoundError as e:
raise Exception('The full "tfx" package must be installed to use this '
'functionality.') from e
result = schema_pb2.Schema()
contents = fileio.open(schema_path).read()
text_format.Parse(contents, result)
return result
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
refs/heads/master
|
galaxy-dist/scripts/extract_dataset_part.py
|
1
|
"""
Reads a JSON file and uses it to call into a datatype class to extract
a subset of a dataset for processing.
Used by jobs that split large files into pieces to be processed concurrently
on a grid in a scatter-gather mode. This does part of the scatter.
"""
import json
import logging
import os
import sys
logging.basicConfig()
log = logging.getLogger( __name__ )
new_path = [ os.path.join( os.getcwd(), "lib" ) ]
new_path.extend( sys.path[1:] ) # remove scripts/ from the path
sys.path = new_path
# This junk is here to prevent loading errors
import galaxy.model.mapping #need to load this before we unpickle, in order to setup properties assigned by the mappers
galaxy.model.Job() #this looks REAL stupid, but it is REQUIRED in order for SA to insert parameters into the classes defined by the mappers --> it appears that instantiating ANY mapper'ed class would suffice here
def __main__():
"""
Argument: a JSON file
"""
file_path = sys.argv.pop( 1 )
if not os.path.isfile(file_path):
#Nothing to do - some splitters don't write a JSON file
sys.exit(0)
data = json.load(open(file_path, 'r'))
try:
class_name_parts = data['class_name'].split('.')
module_name = '.'.join(class_name_parts[:-1])
class_name = class_name_parts[-1]
mod = __import__(module_name, globals(), locals(), [class_name])
cls = getattr(mod, class_name)
if not cls.process_split_file(data):
sys.stderr.write('Writing split file failed\n')
sys.exit(1)
except Exception, e:
sys.stderr.write(str(e))
sys.exit(1)
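# Note: the JSON file handed to this script is expected to carry at least a dotted
# 'class_name', e.g. (hypothetical) {"class_name": "some.module.SomeDatatype", ...};
# the remaining keys depend on the splitter that wrote the file. The named class's
# process_split_file(data) then performs the actual extraction.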
__main__()
|