| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable ⌀) |
|---|---|---|---|---|
EntilZha/ScalaFunctional | refs/heads/master | functional/lineage.py | 2 |
from functional.execution import ExecutionEngine
from functional.transformations import CACHE_T
class Lineage(object):
"""
Class for tracking the lineage of transformations, and applying them to a given sequence.
"""
def __init__(self, prior_lineage=None, engine=None):
"""
Construct an empty lineage if prior_lineage is None; otherwise inherit its list of
transformations and its execution engine.
:param prior_lineage: Lineage object to inherit
:param engine: ExecutionEngine to use when no prior lineage is given
:return: new Lineage object
"""
self.transformations = (
[] if prior_lineage is None else list(prior_lineage.transformations)
)
self.engine = (
(engine or ExecutionEngine())
if prior_lineage is None
else prior_lineage.engine
)
def __repr__(self):
"""
Returns readable representation of Lineage
:return: readable Lineage
"""
return "Lineage: " + " -> ".join(
["sequence"] + [transform.name for transform in self.transformations]
)
def __len__(self):
"""
Number of transformations in lineage
:return: number of transformations
"""
return len(self.transformations)
def __getitem__(self, item):
"""
Return specific transformation in lineage.
:param item: Transformation to retrieve
:return: Requested transformation
"""
return self.transformations[item]
def apply(self, transform):
"""
Add the transformation to the lineage
:param transform: Transformation to apply
"""
self.transformations.append(transform)
def evaluate(self, sequence):
"""
Compute the lineage on the sequence.
:param sequence: Sequence to compute
:return: Evaluated sequence
"""
last_cache_index = self.cache_scan()
transformations = self.transformations[last_cache_index:]
return self.engine.evaluate(sequence, transformations)
def cache_scan(self):
"""
Scan the lineage for the index of the most recent cache.
:return: Index of most recent cache
"""
try:
return len(self.transformations) - self.transformations[::-1].index(CACHE_T)
except ValueError:
return 0
|
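The Lineage class above records transformations lazily and, at evaluation time, replays only the portion after the most recent cache marker. Here is a minimal self-contained sketch of that pattern; the Transformation namedtuple and the plain list-based evaluation loop are stand-ins for the package's real Transformation, ExecutionEngine and CACHE_T, which are not shown in this file:

```python
from collections import namedtuple

# Stand-ins for functional.transformations / functional.execution (assumed shapes).
Transformation = namedtuple('Transformation', ['name', 'function'])
CACHE_T = Transformation('cache', None)


class MiniLineage(object):
    """Toy version of Lineage: record transformations, replay them on demand."""

    def __init__(self, prior=None):
        self.transformations = [] if prior is None else list(prior.transformations)

    def apply(self, transform):
        self.transformations.append(transform)

    def cache_scan(self):
        # Index just past the most recent cache marker, or 0 if there is none.
        try:
            return len(self.transformations) - self.transformations[::-1].index(CACHE_T)
        except ValueError:
            return 0

    def evaluate(self, sequence):
        result = list(sequence)
        for transform in self.transformations[self.cache_scan():]:
            result = list(transform.function(result))
        return result


lineage = MiniLineage()
lineage.apply(Transformation('map', lambda seq: [x * 2 for x in seq]))
lineage.apply(Transformation('filter', lambda seq: [x for x in seq if x > 2]))
print(lineage.evaluate([1, 2, 3]))  # [4, 6]
```

As in the real class, copying the prior lineage's transformation list keeps derived sequences independent of one another.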
rananda/cfme_tests | refs/heads/master | sprout/appliances/migrations/0018_mismatchversionmailer.py | 17 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('appliances', '0017_appliancepool_finished'),
]
operations = [
migrations.CreateModel(
name='MismatchVersionMailer',
fields=[
('id', models.AutoField(
verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('template_name', models.CharField(max_length=64)),
('supposed_version', models.CharField(max_length=32)),
('actual_version', models.CharField(max_length=32)),
('sent', models.BooleanField(default=False)),
('provider', models.ForeignKey(to='appliances.Provider', on_delete=models.CASCADE)),
],
),
]
|
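For orientation, this is roughly the model declaration that the CreateModel operation above encodes, reconstructed field for field as an illustration (the project's actual models.py is not shown here; Django adds the `id` AutoField automatically):

```python
from django.db import models


class MismatchVersionMailer(models.Model):
    template_name = models.CharField(max_length=64)
    supposed_version = models.CharField(max_length=32)
    actual_version = models.CharField(max_length=32)
    sent = models.BooleanField(default=False)
    provider = models.ForeignKey('appliances.Provider', on_delete=models.CASCADE)
```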
40023256/2015cd_midterm- | refs/heads/master | static/Brython3.1.0-20150301-090019/Lib/token.py | 743 |
"""Token constants (from "token.h")."""
__all__ = ['tok_name', 'ISTERMINAL', 'ISNONTERMINAL', 'ISEOF']
# This file is automatically generated; please don't muck it up!
#
# To update the symbols in this file, 'cd' to the top directory of
# the python source tree after building the interpreter and run:
#
# ./python Lib/token.py
#--start constants--
ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
LPAR = 7
RPAR = 8
LSQB = 9
RSQB = 10
COLON = 11
COMMA = 12
SEMI = 13
PLUS = 14
MINUS = 15
STAR = 16
SLASH = 17
VBAR = 18
AMPER = 19
LESS = 20
GREATER = 21
EQUAL = 22
DOT = 23
PERCENT = 24
LBRACE = 25
RBRACE = 26
EQEQUAL = 27
NOTEQUAL = 28
LESSEQUAL = 29
GREATEREQUAL = 30
TILDE = 31
CIRCUMFLEX = 32
LEFTSHIFT = 33
RIGHTSHIFT = 34
DOUBLESTAR = 35
PLUSEQUAL = 36
MINEQUAL = 37
STAREQUAL = 38
SLASHEQUAL = 39
PERCENTEQUAL = 40
AMPEREQUAL = 41
VBAREQUAL = 42
CIRCUMFLEXEQUAL = 43
LEFTSHIFTEQUAL = 44
RIGHTSHIFTEQUAL = 45
DOUBLESTAREQUAL = 46
DOUBLESLASH = 47
DOUBLESLASHEQUAL = 48
AT = 49
RARROW = 50
ELLIPSIS = 51
OP = 52
ERRORTOKEN = 53
N_TOKENS = 54
NT_OFFSET = 256
#--end constants--
tok_name = {value: name
for name, value in globals().items()
if isinstance(value, int) and not name.startswith('_')}
__all__.extend(tok_name.values())
def ISTERMINAL(x):
return x < NT_OFFSET
def ISNONTERMINAL(x):
return x >= NT_OFFSET
def ISEOF(x):
return x == ENDMARKER
def _main():
import re
import sys
args = sys.argv[1:]
inFileName = args and args[0] or "Include/token.h"
outFileName = "Lib/token.py"
if len(args) > 1:
outFileName = args[1]
try:
fp = open(inFileName)
except IOError as err:
sys.stdout.write("I/O error: %s\n" % str(err))
sys.exit(1)
lines = fp.read().split("\n")
fp.close()
prog = re.compile(
"#define[ \t][ \t]*([A-Z0-9][A-Z0-9_]*)[ \t][ \t]*([0-9][0-9]*)",
re.IGNORECASE)
tokens = {}
for line in lines:
match = prog.match(line)
if match:
name, val = match.group(1, 2)
val = int(val)
tokens[val] = name # reverse so we can sort them...
keys = sorted(tokens.keys())
# load the output skeleton from the target:
try:
fp = open(outFileName)
except IOError as err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(2)
format = fp.read().split("\n")
fp.close()
try:
start = format.index("#--start constants--") + 1
end = format.index("#--end constants--")
except ValueError:
sys.stderr.write("target does not contain format markers")
sys.exit(3)
lines = []
for val in keys:
lines.append("%s = %d" % (tokens[val], val))
format[start:end] = lines
try:
fp = open(outFileName, 'w')
except IOError as err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(4)
fp.write("\n".join(format))
fp.close()
if __name__ == "__main__":
_main()
|
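token.py is essentially a table of integer token codes plus three predicates, with tok_name inverting the table so a code can be printed by name. A quick sketch against the standard-library token module (numeric values differ between Python versions, so only the names and predicates are relied on here):

```python
import token

# Reverse mapping from numeric code to symbolic name.
print(token.tok_name[token.NAME])            # 'NAME'

# Terminal tokens sit below NT_OFFSET (256); grammar non-terminals start there.
print(token.ISTERMINAL(token.NAME))          # True
print(token.ISNONTERMINAL(token.NT_OFFSET))  # True

# ENDMARKER marks end of input.
print(token.ISEOF(token.ENDMARKER))          # True
```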
idumpling/robotx | refs/heads/master | robotx/core/generator.py | 4 |
"""RobotX Generator handler"""
import os
import re
import sys
import shutil
from os.path import join
from os.path import exists
from robot.libdoc import LibDoc
from robot.testdoc import TestDoc
import robotx
from robotx.utils import copy
TEMPLATES_PATH = join(robotx.__path__[0], 'template', 'project_template')
RES_TEMPLATE = join(TEMPLATES_PATH, 'resources', 'home_page.txt')
SUITE_TEMPLATE = join(TEMPLATES_PATH,
'cases', '01__webui', '01__wiki_test.txt')
class MyLibDoc(LibDoc):
def _exit(self, rc):
"""By default _exit is run for exiting system.
Disable it."""
pass
class MyTestDoc(TestDoc):
def _exit(self, rc):
"""By default _exit is run for exiting system.
Disable it."""
pass
def generate_project(project):
"""for generating a format project"""
directory = os.getcwd()
template_dir = TEMPLATES_PATH
# validate the project name
if not re.search(r'^[A-Z]\w*$', project):
print 'Error: Project names must begin with a capital letter. \
\nand contain only letters, numbers and underscores.'
sys.exit(1)
elif exists(project):
print "Error: project %r already exists" % project
sys.exit(1)
copy.copy_helper(project, directory, template_dir)
def generate_suite(suite):
"""for generating a format Robot Framework test suite file"""
directory = os.getcwd()
suite_template = SUITE_TEMPLATE
new_suite_path = join(directory, suite)
# validate the suite file name
if not re.search(r'^[\w]\w*\.txt$', suite):
print 'Error: suite name must begin with letter or number. \
\nand must end with .txt \
\nand contain only letters, numbers and underscores.'
sys.exit(1)
elif exists(suite):
print "Error: suite %r already exists" % suite
sys.exit(1)
shutil.copy(suite_template, new_suite_path)
def generate_res(res):
"""for generating a format Robot Framework resource file"""
directory = os.getcwd()
res_template = RES_TEMPLATE
new_res_path = join(directory, res)
# validate the resource file name
if not re.search(r'^[\w]\w*\.txt$', res):
print 'Error: res name must begin with letter or number. \
\nand must end with .txt \
\nand contain only letters, numbers and underscores.'
sys.exit(1)
elif exists(res):
print "Error: resource %r already exists" % res
sys.exit(1)
shutil.copy(res_template, new_res_path)
def generate_docs(doc_project):
"""for generating all cases doc and resources doc"""
if doc_project.endswith('/'):
doc_project = doc_project[:-1]
doc_project = doc_project.capitalize()
project_path = join(os.path.curdir, doc_project)
case_path = join(project_path, 'cases')
res_path = join(project_path, 'resources')
doc_path = join(project_path, 'doc')
# delete all old docs
delete_file_folder(doc_path)
# test case document
print 'Generate %s automated test cases doc ...' % doc_project
case_doc_name = '%s_cases_doc.html' % doc_project.lower()
the_case_doc = join(doc_path, case_doc_name)
MyTestDoc().execute_cli(['--title', '%s Automated Cases Document'
% doc_project,
'--name', 'All automated cases document',
case_path, the_case_doc])
# resources and libs document
print '\nGenerate %s resources, libs docs ...' % doc_project
# get all resources files
res_files = walk_dir(res_path)
res_doc_dir = join(doc_path, 'resource_docs')
if not os.path.exists(res_doc_dir):
os.makedirs(res_doc_dir)
for res in res_files:
if res.find('.py') >= 0:
# temporarily ignore lib files
continue
the_res_path = join(res_path, res)
res_doc_name = res.split('.')[0] + '.html'
the_res_doc = join(res_doc_dir, res_doc_name)
MyLibDoc().execute_cli([the_res_path, the_res_doc])
def walk_dir(dir, topdown=True):
"""getting all files under dirs"""
all_files = []
for root, dirs, files in os.walk(dir, topdown):
for name in files:
all_files.append(os.path.join(name))
return all_files
def delete_file_folder(src):
"""delete files and folders"""
if os.path.isfile(src):
try:
os.remove(src)
except:
pass
elif os.path.isdir(src):
for item in os.listdir(src):
itemsrc = os.path.join(src, item)
delete_file_folder(itemsrc)
|
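Note that walk_dir above collects only bare file names, not full paths, which is why generate_docs re-joins each name with res_path before passing it to LibDoc. A standalone sketch of that behavior on a throwaway directory (the file names used here are made up for illustration):

```python
import os
import tempfile


def walk_dir(directory, topdown=True):
    """Collect the bare file names under directory (mirrors the helper above)."""
    all_files = []
    for _root, _dirs, files in os.walk(directory, topdown):
        for name in files:
            all_files.append(name)
    return all_files


base = tempfile.mkdtemp()
os.makedirs(os.path.join(base, 'libs'))
for rel in ('home_page.txt', os.path.join('libs', 'helper.py')):
    with open(os.path.join(base, rel), 'w') as handle:
        handle.write('dummy')

print(sorted(walk_dir(base)))  # ['helper.py', 'home_page.txt']
```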
schlueter/ansible | refs/heads/devel | lib/ansible/modules/packaging/os/zypper_repository.py | 7 |
#!/usr/bin/python
# encoding: utf-8
# (c) 2013, Matthias Vogelgesang <matthias.vogelgesang@gmail.com>
# (c) 2014, Justin Lecher <jlec@gentoo.org>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zypper_repository
author: "Matthias Vogelgesang (@matze)"
version_added: "1.4"
short_description: Add and remove Zypper repositories
description:
- Add or remove Zypper repositories on SUSE and openSUSE
options:
name:
required: false
default: none
description:
- A name for the repository. Not required when adding repofiles.
repo:
required: false
default: none
description:
- URI of the repository or .repo file. Required when state=present.
state:
required: false
choices: [ "absent", "present" ]
default: "present"
description:
- Whether the repository (or .repo file) should be added or removed.
description:
required: false
default: none
description:
- A description of the repository
disable_gpg_check:
description:
- Whether to disable GPG signature checking of
all packages. Has an effect only if state is
I(present).
- Needs zypper version >= 1.6.2.
required: false
default: "no"
choices: [ "yes", "no" ]
autorefresh:
description:
- Enable autorefresh of the repository.
required: false
default: "yes"
choices: [ "yes", "no" ]
aliases: [ "refresh" ]
priority:
description:
- Set priority of repository. Packages will always be installed
from the repository with the smallest priority number.
- Needs zypper version >= 1.12.25.
required: false
version_added: "2.1"
overwrite_multiple:
description:
- Overwrite multiple repository entries, if repositories with both name and
URL already exist.
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "2.1"
auto_import_keys:
description:
- Automatically import the gpg signing key of the new or changed repository.
- Has an effect only if state is I(present). Has no effect on existing (unchanged) repositories or in combination with I(absent).
- Implies runrefresh.
- Only works with C(.repo) files if `name` is given explicitly.
required: false
default: "no"
choices: ["yes", "no"]
version_added: "2.2"
runrefresh:
description:
- Refresh the package list of the given repository.
- Can be used with repo=* to refresh all repositories.
required: false
default: "no"
choices: ["yes", "no"]
version_added: "2.2"
enabled:
description:
- Set repository to enabled (or disabled).
required: false
default: "yes"
choices: ["yes", "no"]
version_added: "2.2"
requirements:
- "zypper >= 1.0 # included in openSuSE >= 11.1 or SuSE Linux Enterprise Server/Desktop >= 11.0"
- python-xml
'''
EXAMPLES = '''
# Add NVIDIA repository for graphics drivers
- zypper_repository:
name: nvidia-repo
repo: 'ftp://download.nvidia.com/opensuse/12.2'
state: present
# Remove NVIDIA repository
- zypper_repository:
name: nvidia-repo
repo: 'ftp://download.nvidia.com/opensuse/12.2'
state: absent
# Add python development repository
- zypper_repository:
repo: 'http://download.opensuse.org/repositories/devel:/languages:/python/SLE_11_SP3/devel:languages:python.repo'
# Refresh all repos
- zypper_repository:
repo: '*'
runrefresh: yes
# Add a repo and import its GPG key
- zypper_repository:
repo: 'http://download.opensuse.org/repositories/systemsmanagement/openSUSE_Leap_42.1/'
auto_import_keys: yes
# Force refresh of a repository
- zypper_repository:
repo: 'http://my_internal_ci_repo/repo'
name: my_ci_repo
state: present
runrefresh: yes
'''
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck']
def _get_cmd(*args):
"""Combines the non-interactive zypper command with arguments/subcommands"""
cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive']
cmd.extend(args)
return cmd
def _parse_repos(module):
"""parses the output of zypper --xmlout repos and return a parse repo dictionary"""
cmd = _get_cmd('--xmlout', 'repos')
from xml.dom.minidom import parseString as parseXML
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
repos = []
dom = parseXML(stdout)
repo_list = dom.getElementsByTagName('repo')
for repo in repo_list:
opts = {}
for o in REPO_OPTS:
opts[o] = repo.getAttribute(o)
opts['url'] = repo.getElementsByTagName('url')[0].firstChild.data
# A repo can be uniquely identified by an alias + url
repos.append(opts)
return repos
# exit code 6 is ZYPPER_EXIT_NO_REPOS (no repositories defined)
elif rc == 6:
return []
else:
module.fail_json(msg='Failed to execute "%s"' % " ".join(cmd), rc=rc, stdout=stdout, stderr=stderr)
def _repo_changes(realrepo, repocmp):
"Check whether the 2 given repos have different settings."
for k in repocmp:
if repocmp[k] and k not in realrepo:
return True
for k, v in realrepo.items():
if k in repocmp and repocmp[k]:
valold = str(repocmp[k] or "")
valnew = v or ""
if k == "url":
valold, valnew = valold.rstrip("/"), valnew.rstrip("/")
if valold != valnew:
return True
return False
def repo_exists(module, repodata, overwrite_multiple):
"""Check whether the repository already exists.
returns (exists, mod, old_repos)
exists: whether a matching (name, URL) repo exists
mod: whether there are changes compared to the existing repo
old_repos: list of matching repos
"""
existing_repos = _parse_repos(module)
# look for repos that have matching alias or url to the one searched
repos = []
for kw in ['alias', 'url']:
name = repodata[kw]
for oldr in existing_repos:
if repodata[kw] == oldr[kw] and oldr not in repos:
repos.append(oldr)
if len(repos) == 0:
# Repo does not exist yet
return (False, False, None)
elif len(repos) == 1:
# Found an existing repo, look for changes
has_changes = _repo_changes(repos[0], repodata)
return (True, has_changes, repos)
elif len(repos) >= 2:
if overwrite_multiple:
# Found two repos and want to overwrite_multiple
return (True, True, repos)
else:
errmsg = 'More than one repo matched "%s": "%s".' % (name, repos)
errmsg += ' Use overwrite_multiple to allow more than one repo to be overwritten'
module.fail_json(msg=errmsg)
def addmodify_repo(module, repodata, old_repos, zypper_version, warnings):
"Adds the repo, removes old repos before, that would conflict."
repo = repodata['url']
cmd = _get_cmd('addrepo', '--check')
if repodata['name']:
cmd.extend(['--name', repodata['name']])
# priority on addrepo available since 1.12.25
# https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L327-L336
if repodata['priority']:
if zypper_version >= LooseVersion('1.12.25'):
cmd.extend(['--priority', str(repodata['priority'])])
else:
warnings.append("Setting priority only available for zypper >= 1.12.25. Ignoring priority argument.")
if repodata['enabled'] == '0':
cmd.append('--disable')
# gpgcheck available since 1.6.2
# https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L2446-L2449
# the default changed in the past, so don't assume a default here and show warning for old zypper versions
if zypper_version >= LooseVersion('1.6.2'):
if repodata['gpgcheck'] == '1':
cmd.append('--gpgcheck')
else:
cmd.append('--no-gpgcheck')
else:
warnings.append("Enabling/disabling gpgcheck only available for zypper >= 1.6.2. Using zypper default value.")
if repodata['autorefresh'] == '1':
cmd.append('--refresh')
cmd.append(repo)
if not repo.endswith('.repo'):
cmd.append(repodata['alias'])
if old_repos is not None:
for oldrepo in old_repos:
remove_repo(module, oldrepo['url'])
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
return rc, stdout, stderr
def remove_repo(module, repo):
"Removes the repo."
cmd = _get_cmd('removerepo', repo)
rc, stdout, stderr = module.run_command(cmd, check_rc=True)
return rc, stdout, stderr
def get_zypper_version(module):
rc, stdout, stderr = module.run_command(['/usr/bin/zypper', '--version'])
if rc != 0 or not stdout.startswith('zypper '):
return LooseVersion('1.0')
return LooseVersion(stdout.split()[1])
def runrefreshrepo(module, auto_import_keys=False, shortname=None):
"Forces zypper to refresh repo metadata."
if auto_import_keys:
cmd = _get_cmd('--gpg-auto-import-keys', 'refresh', '--force')
else:
cmd = _get_cmd('refresh', '--force')
if shortname is not None:
cmd.extend(['-r', shortname])
rc, stdout, stderr = module.run_command(cmd, check_rc=True)
return rc, stdout, stderr
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=False),
repo=dict(required=False),
state=dict(choices=['present', 'absent'], default='present'),
runrefresh=dict(required=False, default='no', type='bool'),
description=dict(required=False),
disable_gpg_check=dict(required=False, default=False, type='bool'),
autorefresh=dict(required=False, default=True, type='bool', aliases=['refresh']),
priority=dict(required=False, type='int'),
enabled=dict(required=False, default=True, type='bool'),
overwrite_multiple=dict(required=False, default=False, type='bool'),
auto_import_keys=dict(required=False, default=False, type='bool'),
),
supports_check_mode=False,
required_one_of=[['state', 'runrefresh']],
)
repo = module.params['repo']
alias = module.params['name']
state = module.params['state']
overwrite_multiple = module.params['overwrite_multiple']
auto_import_keys = module.params['auto_import_keys']
runrefresh = module.params['runrefresh']
zypper_version = get_zypper_version(module)
warnings = [] # collect warning messages for final output
repodata = {
'url': repo,
'alias': alias,
'name': module.params['description'],
'priority': module.params['priority'],
}
# rewrite bools in the language that zypper lr -x provides for easier comparison
if module.params['enabled']:
repodata['enabled'] = '1'
else:
repodata['enabled'] = '0'
if module.params['disable_gpg_check']:
repodata['gpgcheck'] = '0'
else:
repodata['gpgcheck'] = '1'
if module.params['autorefresh']:
repodata['autorefresh'] = '1'
else:
repodata['autorefresh'] = '0'
def exit_unchanged():
module.exit_json(changed=False, repodata=repodata, state=state)
# Check run-time module parameters
if repo == '*' or alias == '*':
if runrefresh:
runrefreshrepo(module, auto_import_keys)
module.exit_json(changed=False, runrefresh=True)
else:
module.fail_json(msg='repo=* can only be used with the runrefresh option.')
if state == 'present' and not repo:
module.fail_json(msg='Module option state=present requires repo')
if state == 'absent' and not repo and not alias:
module.fail_json(msg='Alias or repo parameter required when state=absent')
if repo and repo.endswith('.repo'):
if alias:
module.fail_json(msg='Incompatible option: \'name\'. Do not use name when adding .repo files')
else:
if not alias and state == "present":
module.fail_json(msg='Name required when adding non-repo files.')
exists, mod, old_repos = repo_exists(module, repodata, overwrite_multiple)
if repo:
shortname = repo
else:
shortname = alias
if state == 'present':
if exists and not mod:
if runrefresh:
runrefreshrepo(module, auto_import_keys, shortname)
exit_unchanged()
rc, stdout, stderr = addmodify_repo(module, repodata, old_repos, zypper_version, warnings)
if rc == 0 and (runrefresh or auto_import_keys):
runrefreshrepo(module, auto_import_keys, shortname)
elif state == 'absent':
if not exists:
exit_unchanged()
rc, stdout, stderr = remove_repo(module, shortname)
if rc == 0:
module.exit_json(changed=True, repodata=repodata, state=state, warnings=warnings)
else:
module.fail_json(msg="Zypper failed with rc %s" % rc, rc=rc, stdout=stdout, stderr=stderr, repodata=repodata, state=state, warnings=warnings)
if __name__ == '__main__':
main()
|
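The module decides whether an existing repository must be re-added by normalizing the requested settings into the same '0'/'1' string form that `zypper lr -x` reports and diffing them field by field, ignoring trailing slashes on URLs. A standalone sketch of that comparison using plain dicts (the repo URL is taken from the EXAMPLES block above; the dict contents are otherwise illustrative):

```python
def repo_changes(realrepo, repocmp):
    """Return True if the desired settings (repocmp) differ from the existing repo."""
    for key in repocmp:
        if repocmp[key] and key not in realrepo:
            return True
    for key, value in realrepo.items():
        if key in repocmp and repocmp[key]:
            valold = str(repocmp[key] or "")
            valnew = value or ""
            if key == "url":
                valold, valnew = valold.rstrip("/"), valnew.rstrip("/")
            if valold != valnew:
                return True
    return False


existing = {'alias': 'my_ci_repo', 'url': 'http://my_internal_ci_repo/repo/',
            'enabled': '1', 'autorefresh': '1', 'gpgcheck': '1'}
wanted = {'alias': 'my_ci_repo', 'url': 'http://my_internal_ci_repo/repo',
          'enabled': '1', 'autorefresh': '1', 'gpgcheck': '1'}

print(repo_changes(existing, wanted))                     # False: only a trailing slash differs
print(repo_changes(existing, dict(wanted, enabled='0')))  # True: the enabled flag changed
```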
nityas/6869-finalproject | refs/heads/master | src/lenet_old.py | 1 |
"""This tutorial introduces the LeNet5 neural network architecture
using Theano. LeNet5 is a convolutional neural network, good for
classifying images. This tutorial shows how to build the architecture,
and comes with all the hyper-parameters you need to reproduce the
paper's MNIST results.
This implementation simplifies the model in the following ways:
- LeNetConvPool doesn't implement location-specific gain and bias parameters
- LeNetConvPool doesn't implement pooling by average, it implements pooling
by max.
- Digit classification is implemented with a logistic regression rather than
an RBF network
- LeNet5's second convolutional layer was not fully connected to all input feature maps; this implementation uses full connections
References:
- Y. LeCun, L. Bottou, Y. Bengio and P. Haffner:
Gradient-Based Learning Applied to Document
Recognition, Proceedings of the IEEE, 86(11):2278-2324, November 1998.
http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf
"""
import os
import sys
import time
import gzip
import numpy
import itertools
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
import lenet
from lenet.LogisticRegression import *
from lenet.LeNetConvPoolLayer import *
from lenet.HiddenLayer import *
from logreg import cnn_training_set
from logreg import cnn_testing_set
HOG_TRAINING_DATA = 'data/hog_training_data.npy'
HOG_TRAINING_LABELS = 'data/hog_training_labels.npy'
HOG_TESTING_DATA = 'data/hog_testing_data.npy'
HOG_TESTING_LABELS = 'data/hog_testing_labels.npy'
IMG2D_TRAINING_DATA = 'data/img2d_training_data.npy'
IMG2D_TRAINING_LABELS = 'data/img2d_training_labels.npy'
IMG2D_TESTING_DATA = 'data/img2d_testing_data.npy'
IMG2D_TESTING_LABELS = 'data/img2d_testing_labels.npy'
def evaluate_lenet(learning_rate=0.1, n_epochs=200,
dataset='res/mnist.pkl',
nkerns=[20, 50], batch_size=500):
""" Demonstrates lenet on MNIST dataset
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
gradient)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type dataset: string
:param dataset: path to the dataset used for training /testing (MNIST here)
:type nkerns: list of ints
:param nkerns: number of kernels on each layer
"""
rng = numpy.random.RandomState(23455)
train_set_y_np = numpy.load(IMG2D_TRAINING_LABELS)
train_set_x_np = numpy.load(IMG2D_TRAINING_DATA)
train_set_x = theano.shared(value=train_set_x_np, name='train_set_x')
test_set_y_np = numpy.load(IMG2D_TESTING_LABELS)
test_set_y = theano.shared(value=test_set_y_np, name='test_set_y')
test_set_x_np = numpy.load(IMG2D_TESTING_DATA)
test_set_x = theano.shared(value=test_set_x_np, name='test_set_x')
# F = len(numpy.unique(train_set_y_np))
# N = len(train_set_x_np)
# tr_y = numpy.zeros((N, F))
# tr_y[(range(N), train_set_y_np-1)] = 1.0
train_set_y = theano.shared(value=train_set_y_np, name='train_set_y', broadcastable=(False, True))
# compute number of minibatches for training, validation and testing
n_train_batches = len(train_set_x_np)
n_test_batches = len(test_set_x_np)
n_train_batches /= batch_size
n_test_batches /= batch_size
# allocate symbolic variables for the data
index = T.lscalar('index') # index to a [mini]batch
# start-snippet-1
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# Reshape matrix of rasterized images of shape (batch_size, 28 * 28)
# to a 4D tensor, compatible with our LeNetConvPoolLayer
# (28, 28) is the size of MNIST images.
layer0_input = x.reshape((batch_size, 1, 28, 28))
# Construct the first convolutional pooling layer:
# filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)
# maxpooling reduces this further to (24/2, 24/2) = (12, 12)
# 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12)
layer0 = LeNetConvPoolLayer(
rng,
input=layer0_input,
image_shape=(batch_size, 1, 28, 28),
filter_shape=(nkerns[0], 1, 5, 5),
poolsize=(2, 2)
)
# Construct the second convolutional pooling layer
# filtering reduces the image size to (12-5+1, 12-5+1) = (8, 8)
# maxpooling reduces this further to (8/2, 8/2) = (4, 4)
# 4D output tensor is thus of shape (nkerns[0], nkerns[1], 4, 4)
layer1 = LeNetConvPoolLayer(
rng,
input=layer0.output,
image_shape=(batch_size, nkerns[0], 12, 12),
filter_shape=(nkerns[1], nkerns[0], 5, 5),
poolsize=(2, 2)
)
# the HiddenLayer being fully-connected, it operates on 2D matrices of
# shape (batch_size, num_pixels) (i.e matrix of rasterized images).
# This will generate a matrix of shape (batch_size, nkerns[1] * 4 * 4),
# or (500, 50 * 4 * 4) = (500, 800) with the default values.
layer2_input = layer1.output.flatten(2)
# construct a fully-connected sigmoidal layer
layer2 = HiddenLayer(
rng,
input=layer2_input,
n_in=nkerns[1] * 4 * 4,
n_out=500,
activation=T.tanh
)
# classify the values of the fully-connected sigmoidal layer
layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
# the cost we minimize during training is the NLL of the model
cost = layer3.negative_log_likelihood(y)
params = layer3.params + layer2.params + layer1.params + layer0.params
# create a list of gradients for all model parameters
grads = T.grad(cost, params)
updates = [
(param_i, param_i - learning_rate * grad_i)
for param_i, grad_i in zip(params, grads)
]
train_model = theano.function(
[index],
cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# create a function to compute the mistakes that are made by the model
test_model = theano.function(
[index],
layer3.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
get_errors = theano.function(
[index],
layer3.errors(y,frac=True),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
###############
# TRAIN MODEL #
###############
print '... training'
# early-stopping parameters
patience = 10000 # look at this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
# validation_frequency = min(n_train_batches, patience / 2)
# go through this many
# minibatche before checking the network
# on the validation set; in this case we
# check every epoch
best_iter = 0
test_score = 0.
start_time = time.clock()
maxiter = 1000
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
print "epoch ", epoch
old_impr = test_score
for minibatch_index in xrange(n_train_batches):
iter = (epoch - 1) * n_train_batches + minibatch_index
test_scores = [get_errors(i) for i in xrange(n_test_batches)]
pred = test_scores[0][0]
actual = test_scores[0][1]
n = len(numpy.unique(actual))
# mean accuracy over the test batches (the original referenced an undefined 'test_losses')
test_score = float(numpy.mean([numpy.mean(numpy.asarray(p) == numpy.asarray(a)) for p, a in test_scores]))
print 'iter ', iter, ': accuracy= ', test_score
print "Confusion Matrix:"
print numpy.array([zip(actual,pred).count(x) for x in itertools.product(list(set(actual)),repeat=2)]).reshape(n,n)
if test_score-old_impr < 0.01:
done_looping = True
break
end_time = time.clock()
print('Optimization complete.')
print('with test performance %f %%' % (test_score * 100.))
if __name__ == '__main__':
evaluate_lenet(dataset='English')
|
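The `updates` list in evaluate_lenet is plain stochastic gradient descent: every parameter is replaced by `param - learning_rate * grad`. A NumPy-only sketch of that update rule on a least-squares toy problem (no Theano involved; the data and learning rate are illustrative):

```python
import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(100, 3)
true_w = np.array([1.0, -2.0, 0.5])
y = X.dot(true_w)

w = np.zeros(3)
learning_rate = 0.1
for _ in range(200):
    grad = X.T.dot(X.dot(w) - y) / len(X)  # gradient of 0.5 * mean squared error
    w = w - learning_rate * grad           # the same rule the Theano 'updates' list applies
print(np.round(w, 3))  # approximately [ 1.  -2.   0.5]
```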
friendzj/GoTest | refs/heads/master | src/py/SerialOpt.py | 1 |
# -*- coding: UTF-8 -*-
import time
import threading
import serial
import logging
class SerOpt:
ser = 0
comInfo = ''
BandRate = 0
__LastRecv = 0
__PackDat = []
__recvStatus = 0
__status = {'SOP':0,'LEN':1,'DAT':2}
__packetLen = 0
__ParseDatFun = 0
def __init__(self,comInfo,Bandrate):
self.comInfo = comInfo;
self.Bandrate = Bandrate;
self.__recvStatus = self.__status['SOP']
def SetCallBack(self,ParseDatFun):
self.__ParseDatFun = ParseDatFun
def OpenSer(self):
if self.ser and self.ser.isOpen():
self.ser.close();
self.ser = serial.Serial(self.comInfo,self.Bandrate,timeout=3)
if self.ser:
t1 = threading.Thread(target=self.recvThreadFun)
t1.daemon = True
t1.start()
logging.info("open "+self.comInfo+" success")
return 1;
else:
logging.warning("open "+self.comInfo+" failure")
return 0;
def getDat(self):
if self.ser == 0:
return ''
def Send(self,dat):
#print ('send'+str(self.ser))
if self.ser:
SendLen = self.ser.write(dat)
return SendLen
return 0
def CleanDat(self):
self.__PackDat = [0]*64
self.__recvStatus = self.__status['SOP']
def recvThreadFun(self):
while 1:
if self.ser:
dat = self.ser.read(1)
if len(dat) == 0:
continue
dat = ord(dat)
print hex(dat)
self.__LastRecv = time.clock()
if self.__recvStatus == self.__status['SOP']:
print('1')
if dat == 0xFE:
print('2')
self.__recvStatus = self.__status['LEN']
elif self.__recvStatus == self.__status['LEN']:
print('3')
self.__recvStatus = self.__status['DAT']
self.__PackDat.append(dat)
self.__packetLen = dat + 5
elif self.__recvStatus == self.__status['DAT']:
self.__PackDat.append(dat)
self.__packetLen -= 1
if self.__packetLen == 0:
print('data len'+str(len(self.__PackDat)))
self.__ParseDatFun(self.__PackDat)
self.CleanDat();
print self.__recvStatus,self.__packetLen
def Close(self):
self.ser.close()
self.ser = 0;
def PrintDat(Dat):
print 'data:'
print Dat
if __name__=='__main__':
opt = SerOpt('com7',115200)
time.clock()
opt.SetCallBack(PrintDat)
if not opt.OpenSer():
print 'open failure'
else:
while 1:
time.sleep(0.1)
|
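recvThreadFun above is a three-state framing parser: wait for the 0xFE start byte, read one length byte, then accumulate length + 5 further bytes before handing the packet to the callback. The sketch below mirrors that state machine but is fed a byte string instead of a live serial port; the 0xFE marker and the length + 5 payload size are taken from the code above, and whether they match your device's actual protocol is an assumption:

```python
class FrameParser(object):
    """Mirror of the SOP -> LEN -> DAT state machine in SerOpt.recvThreadFun."""

    SOP, LEN, DAT = range(3)

    def __init__(self, on_packet):
        self.on_packet = on_packet
        self.reset()

    def reset(self):
        self.state = self.SOP
        self.packet = []
        self.remaining = 0

    def feed(self, data):
        for byte in bytearray(data):
            if self.state == self.SOP:
                if byte == 0xFE:               # start-of-packet marker
                    self.state = self.LEN
            elif self.state == self.LEN:
                self.packet.append(byte)
                self.remaining = byte + 5      # length byte is followed by len + 5 bytes
                self.state = self.DAT
            else:                              # DAT: accumulate until the frame is complete
                self.packet.append(byte)
                self.remaining -= 1
                if self.remaining == 0:
                    self.on_packet(self.packet)
                    self.reset()


def show(packet):
    print('packet: %s' % packet)


parser = FrameParser(show)
# Length byte 0x02 means 2 + 5 = 7 more bytes complete the frame.
parser.feed(b'\xfe\x02\x01\x02\x03\x04\x05\x06\x07')
```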
ryfeus/lambda-packs | refs/heads/master | Keras_tensorflow_nightly/source2.7/google/protobuf/internal/text_format_test.py | 19 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for google.protobuf.text_format."""
__author__ = 'kenton@google.com (Kenton Varda)'
import math
import re
import six
import string
try:
import unittest2 as unittest # PY26, pylint: disable=g-import-not-at-top
except ImportError:
import unittest # pylint: disable=g-import-not-at-top
from google.protobuf.internal import _parameterized
from google.protobuf import any_test_pb2
from google.protobuf import map_unittest_pb2
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_pb2
from google.protobuf import unittest_proto3_arena_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import any_test_pb2 as test_extend_any
from google.protobuf.internal import message_set_extensions_pb2
from google.protobuf.internal import test_util
from google.protobuf import descriptor_pool
from google.protobuf import text_format
# Low-level nuts-n-bolts tests.
class SimpleTextFormatTests(unittest.TestCase):
# The members of _QUOTES are formatted into a regexp template that
# expects single characters. Therefore it's an error (in addition to being
# non-sensical in the first place) to try to specify a "quote mark" that is
# more than one character.
def testQuoteMarksAreSingleChars(self):
for quote in text_format._QUOTES:
self.assertEqual(1, len(quote))
# Base class with some common functionality.
class TextFormatBase(unittest.TestCase):
def ReadGolden(self, golden_filename):
with test_util.GoldenFile(golden_filename) as f:
return (f.readlines() if str is bytes else # PY3
[golden_line.decode('utf-8') for golden_line in f])
def CompareToGoldenFile(self, text, golden_filename):
golden_lines = self.ReadGolden(golden_filename)
self.assertMultiLineEqual(text, ''.join(golden_lines))
def CompareToGoldenText(self, text, golden_text):
self.assertEqual(text, golden_text)
def RemoveRedundantZeros(self, text):
# Some platforms print 1e+5 as 1e+005. This is fine, but we need to remove
# these zeros in order to match the golden file.
text = text.replace('e+0','e+').replace('e+0','e+') \
.replace('e-0','e-').replace('e-0','e-')
# Floating point fields are printed with .0 suffix even if they are
# actually integer numbers.
text = re.compile(r'\.0$', re.MULTILINE).sub('', text)
return text
@_parameterized.Parameters((unittest_pb2), (unittest_proto3_arena_pb2))
class TextFormatTest(TextFormatBase):
def testPrintExotic(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'repeated_int64: -9223372036854775808\n'
'repeated_uint64: 18446744073709551615\n'
'repeated_double: 123.456\n'
'repeated_double: 1.23e+22\n'
'repeated_double: 1.23e-18\n'
'repeated_string:'
' "\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n'
'repeated_string: "\\303\\274\\352\\234\\237"\n')
def testPrintExoticUnicodeSubclass(self, message_module):
class UnicodeSub(six.text_type):
pass
message = message_module.TestAllTypes()
message.repeated_string.append(UnicodeSub(u'\u00fc\ua71f'))
self.CompareToGoldenText(
text_format.MessageToString(message),
'repeated_string: "\\303\\274\\352\\234\\237"\n')
def testPrintNestedMessageAsOneLine(self, message_module):
message = message_module.TestAllTypes()
msg = message.repeated_nested_message.add()
msg.bb = 42
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'repeated_nested_message { bb: 42 }')
def testPrintRepeatedFieldsAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int32.append(1)
message.repeated_int32.append(1)
message.repeated_int32.append(3)
message.repeated_string.append('Google')
message.repeated_string.append('Zurich')
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'repeated_int32: 1 repeated_int32: 1 repeated_int32: 3 '
'repeated_string: "Google" repeated_string: "Zurich"')
def testPrintNestedNewLineInStringAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.optional_string = 'a\nnew\nline'
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'optional_string: "a\\nnew\\nline"')
def testPrintExoticAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(
message, as_one_line=True)),
'repeated_int64: -9223372036854775808'
' repeated_uint64: 18446744073709551615'
' repeated_double: 123.456'
' repeated_double: 1.23e+22'
' repeated_double: 1.23e-18'
' repeated_string: '
'"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""'
' repeated_string: "\\303\\274\\352\\234\\237"')
def testRoundTripExoticAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
# Test as_utf8 = False.
wire_text = text_format.MessageToString(message,
as_one_line=True,
as_utf8=False)
parsed_message = message_module.TestAllTypes()
r = text_format.Parse(wire_text, parsed_message)
self.assertIs(r, parsed_message)
self.assertEqual(message, parsed_message)
# Test as_utf8 = True.
wire_text = text_format.MessageToString(message,
as_one_line=True,
as_utf8=True)
parsed_message = message_module.TestAllTypes()
r = text_format.Parse(wire_text, parsed_message)
self.assertIs(r, parsed_message)
self.assertEqual(message, parsed_message,
'\n%s != %s' % (message, parsed_message))
def testPrintRawUtf8String(self, message_module):
message = message_module.TestAllTypes()
message.repeated_string.append(u'\u00fc\ua71f')
text = text_format.MessageToString(message, as_utf8=True)
self.CompareToGoldenText(text, 'repeated_string: "\303\274\352\234\237"\n')
parsed_message = message_module.TestAllTypes()
text_format.Parse(text, parsed_message)
self.assertEqual(message, parsed_message,
'\n%s != %s' % (message, parsed_message))
def testPrintFloatFormat(self, message_module):
# Check that float_format argument is passed to sub-message formatting.
message = message_module.NestedTestAllTypes()
# We use 1.25 as it is a round number in binary. The proto 32-bit float
# will not gain additional imprecise digits as a 64-bit Python float and
# show up in its str. 32-bit 1.2 is noisy when extended to 64-bit:
# >>> struct.unpack('f', struct.pack('f', 1.2))[0]
# 1.2000000476837158
# >>> struct.unpack('f', struct.pack('f', 1.25))[0]
# 1.25
message.payload.optional_float = 1.25
# Check rounding at 15 significant digits
message.payload.optional_double = -.000003456789012345678
# Check no decimal point.
message.payload.repeated_float.append(-5642)
# Check no trailing zeros.
message.payload.repeated_double.append(.000078900)
formatted_fields = ['optional_float: 1.25',
'optional_double: -3.45678901234568e-6',
'repeated_float: -5642', 'repeated_double: 7.89e-5']
text_message = text_format.MessageToString(message, float_format='.15g')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_message),
'payload {{\n {0}\n {1}\n {2}\n {3}\n}}\n'.format(
*formatted_fields))
# as_one_line=True is a separate code branch where float_format is passed.
text_message = text_format.MessageToString(message,
as_one_line=True,
float_format='.15g')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_message),
'payload {{ {0} {1} {2} {3} }}'.format(*formatted_fields))
def testMessageToString(self, message_module):
message = message_module.ForeignMessage()
message.c = 123
self.assertEqual('c: 123\n', str(message))
def testPrintField(self, message_module):
message = message_module.TestAllTypes()
field = message.DESCRIPTOR.fields_by_name['optional_float']
value = message.optional_float
out = text_format.TextWriter(False)
text_format.PrintField(field, value, out)
self.assertEqual('optional_float: 0.0\n', out.getvalue())
out.close()
# Test Printer
out = text_format.TextWriter(False)
printer = text_format._Printer(out)
printer.PrintField(field, value)
self.assertEqual('optional_float: 0.0\n', out.getvalue())
out.close()
def testPrintFieldValue(self, message_module):
message = message_module.TestAllTypes()
field = message.DESCRIPTOR.fields_by_name['optional_float']
value = message.optional_float
out = text_format.TextWriter(False)
text_format.PrintFieldValue(field, value, out)
self.assertEqual('0.0', out.getvalue())
out.close()
# Test Printer
out = text_format.TextWriter(False)
printer = text_format._Printer(out)
printer.PrintFieldValue(field, value)
self.assertEqual('0.0', out.getvalue())
out.close()
def testParseAllFields(self, message_module):
message = message_module.TestAllTypes()
test_util.SetAllFields(message)
ascii_text = text_format.MessageToString(message)
parsed_message = message_module.TestAllTypes()
text_format.Parse(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
if message_module is unittest_pb2:
test_util.ExpectAllFieldsSet(self, message)
def testParseAndMergeUtf8(self, message_module):
message = message_module.TestAllTypes()
test_util.SetAllFields(message)
ascii_text = text_format.MessageToString(message)
ascii_text = ascii_text.encode('utf-8')
parsed_message = message_module.TestAllTypes()
text_format.Parse(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
if message_module is unittest_pb2:
test_util.ExpectAllFieldsSet(self, message)
parsed_message.Clear()
text_format.Merge(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
if message_module is unittest_pb2:
test_util.ExpectAllFieldsSet(self, message)
if six.PY2:
msg2 = message_module.TestAllTypes()
text = (u'optional_string: "café"')
text_format.Merge(text, msg2)
self.assertEqual(msg2.optional_string, u'café')
msg2.Clear()
text_format.Parse(text, msg2)
self.assertEqual(msg2.optional_string, u'café')
def testParseExotic(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_int64: -9223372036854775808\n'
'repeated_uint64: 18446744073709551615\n'
'repeated_double: 123.456\n'
'repeated_double: 1.23e+22\n'
'repeated_double: 1.23e-18\n'
'repeated_string: \n'
'"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n'
'repeated_string: "foo" \'corge\' "grault"\n'
'repeated_string: "\\303\\274\\352\\234\\237"\n'
'repeated_string: "\\xc3\\xbc"\n'
'repeated_string: "\xc3\xbc"\n')
text_format.Parse(text, message)
self.assertEqual(-9223372036854775808, message.repeated_int64[0])
self.assertEqual(18446744073709551615, message.repeated_uint64[0])
self.assertEqual(123.456, message.repeated_double[0])
self.assertEqual(1.23e22, message.repeated_double[1])
self.assertEqual(1.23e-18, message.repeated_double[2])
self.assertEqual('\000\001\a\b\f\n\r\t\v\\\'"', message.repeated_string[0])
self.assertEqual('foocorgegrault', message.repeated_string[1])
self.assertEqual(u'\u00fc\ua71f', message.repeated_string[2])
self.assertEqual(u'\u00fc', message.repeated_string[3])
def testParseTrailingCommas(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_int64: 100;\n'
'repeated_int64: 200;\n'
'repeated_int64: 300,\n'
'repeated_string: "one",\n'
'repeated_string: "two";\n')
text_format.Parse(text, message)
self.assertEqual(100, message.repeated_int64[0])
self.assertEqual(200, message.repeated_int64[1])
self.assertEqual(300, message.repeated_int64[2])
self.assertEqual(u'one', message.repeated_string[0])
self.assertEqual(u'two', message.repeated_string[1])
def testParseRepeatedScalarShortFormat(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_int64: [100, 200];\n'
'repeated_int64: 300,\n'
'repeated_string: ["one", "two"];\n')
text_format.Parse(text, message)
self.assertEqual(100, message.repeated_int64[0])
self.assertEqual(200, message.repeated_int64[1])
self.assertEqual(300, message.repeated_int64[2])
self.assertEqual(u'one', message.repeated_string[0])
self.assertEqual(u'two', message.repeated_string[1])
def testParseRepeatedMessageShortFormat(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_nested_message: [{bb: 100}, {bb: 200}],\n'
'repeated_nested_message: {bb: 300}\n'
'repeated_nested_message [{bb: 400}];\n')
text_format.Parse(text, message)
self.assertEqual(100, message.repeated_nested_message[0].bb)
self.assertEqual(200, message.repeated_nested_message[1].bb)
self.assertEqual(300, message.repeated_nested_message[2].bb)
self.assertEqual(400, message.repeated_nested_message[3].bb)
def testParseEmptyText(self, message_module):
message = message_module.TestAllTypes()
text = ''
text_format.Parse(text, message)
self.assertEqual(message_module.TestAllTypes(), message)
def testParseInvalidUtf8(self, message_module):
message = message_module.TestAllTypes()
text = 'repeated_string: "\\xc3\\xc3"'
with self.assertRaises(text_format.ParseError) as e:
text_format.Parse(text, message)
self.assertEqual(e.exception.GetLine(), 1)
self.assertEqual(e.exception.GetColumn(), 28)
def testParseSingleWord(self, message_module):
message = message_module.TestAllTypes()
text = 'foo'
six.assertRaisesRegex(self, text_format.ParseError, (
r'1:1 : Message type "\w+.TestAllTypes" has no field named '
r'"foo".'), text_format.Parse, text, message)
def testParseUnknownField(self, message_module):
message = message_module.TestAllTypes()
text = 'unknown_field: 8\n'
six.assertRaisesRegex(self, text_format.ParseError, (
r'1:1 : Message type "\w+.TestAllTypes" has no field named '
r'"unknown_field".'), text_format.Parse, text, message)
def testParseBadEnumValue(self, message_module):
message = message_module.TestAllTypes()
text = 'optional_nested_enum: BARR'
six.assertRaisesRegex(self, text_format.ParseError,
(r'1:23 : Enum type "\w+.TestAllTypes.NestedEnum" '
r'has no value named BARR.'), text_format.Parse,
text, message)
def testParseBadIntValue(self, message_module):
message = message_module.TestAllTypes()
text = 'optional_int32: bork'
six.assertRaisesRegex(self, text_format.ParseError,
('1:17 : Couldn\'t parse integer: bork'),
text_format.Parse, text, message)
def testParseStringFieldUnescape(self, message_module):
message = message_module.TestAllTypes()
text = r'''repeated_string: "\xf\x62"
repeated_string: "\\xf\\x62"
repeated_string: "\\\xf\\\x62"
repeated_string: "\\\\xf\\\\x62"
repeated_string: "\\\\\xf\\\\\x62"
repeated_string: "\x5cx20"'''
text_format.Parse(text, message)
SLASH = '\\'
self.assertEqual('\x0fb', message.repeated_string[0])
self.assertEqual(SLASH + 'xf' + SLASH + 'x62', message.repeated_string[1])
self.assertEqual(SLASH + '\x0f' + SLASH + 'b', message.repeated_string[2])
self.assertEqual(SLASH + SLASH + 'xf' + SLASH + SLASH + 'x62',
message.repeated_string[3])
self.assertEqual(SLASH + SLASH + '\x0f' + SLASH + SLASH + 'b',
message.repeated_string[4])
self.assertEqual(SLASH + 'x20', message.repeated_string[5])
def testMergeDuplicateScalars(self, message_module):
message = message_module.TestAllTypes()
text = ('optional_int32: 42 ' 'optional_int32: 67')
r = text_format.Merge(text, message)
self.assertIs(r, message)
self.assertEqual(67, message.optional_int32)
def testMergeDuplicateNestedMessageScalars(self, message_module):
message = message_module.TestAllTypes()
text = ('optional_nested_message { bb: 1 } '
'optional_nested_message { bb: 2 }')
r = text_format.Merge(text, message)
self.assertTrue(r is message)
self.assertEqual(2, message.optional_nested_message.bb)
def testParseOneof(self, message_module):
m = message_module.TestAllTypes()
m.oneof_uint32 = 11
m2 = message_module.TestAllTypes()
text_format.Parse(text_format.MessageToString(m), m2)
self.assertEqual('oneof_uint32', m2.WhichOneof('oneof_field'))
def testMergeMultipleOneof(self, message_module):
m_string = '\n'.join(['oneof_uint32: 11', 'oneof_string: "foo"'])
m2 = message_module.TestAllTypes()
text_format.Merge(m_string, m2)
self.assertEqual('oneof_string', m2.WhichOneof('oneof_field'))
def testParseMultipleOneof(self, message_module):
m_string = '\n'.join(['oneof_uint32: 11', 'oneof_string: "foo"'])
m2 = message_module.TestAllTypes()
with self.assertRaisesRegexp(text_format.ParseError,
' is specified along with field '):
text_format.Parse(m_string, m2)
# These are tests that aren't fundamentally specific to proto2, but are at
# the moment because of differences between the proto2 and proto3 test schemas.
# Ideally the schemas would be made more similar so these tests could pass.
class OnlyWorksWithProto2RightNowTests(TextFormatBase):
def testPrintAllFieldsPointy(self):
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(
message, pointy_brackets=True)),
'text_format_unittest_data_pointy_oneof.txt')
def testParseGolden(self):
golden_text = '\n'.join(self.ReadGolden(
'text_format_unittest_data_oneof_implemented.txt'))
parsed_message = unittest_pb2.TestAllTypes()
r = text_format.Parse(golden_text, parsed_message)
self.assertIs(r, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEqual(message, parsed_message)
def testPrintAllFields(self):
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'text_format_unittest_data_oneof_implemented.txt')
def testPrintInIndexOrder(self):
message = unittest_pb2.TestFieldOrderings()
message.my_string = '115'
message.my_int = 101
message.my_float = 111
message.optional_nested_message.oo = 0
message.optional_nested_message.bb = 1
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(
message, use_index_order=True)),
'my_string: \"115\"\nmy_int: 101\nmy_float: 111\n'
'optional_nested_message {\n oo: 0\n bb: 1\n}\n')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'my_int: 101\nmy_string: \"115\"\nmy_float: 111\n'
'optional_nested_message {\n bb: 1\n oo: 0\n}\n')
def testMergeLinesGolden(self):
opened = self.ReadGolden('text_format_unittest_data_oneof_implemented.txt')
parsed_message = unittest_pb2.TestAllTypes()
r = text_format.MergeLines(opened, parsed_message)
self.assertIs(r, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEqual(message, parsed_message)
def testParseLinesGolden(self):
opened = self.ReadGolden('text_format_unittest_data_oneof_implemented.txt')
parsed_message = unittest_pb2.TestAllTypes()
r = text_format.ParseLines(opened, parsed_message)
self.assertIs(r, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEqual(message, parsed_message)
def testPrintMap(self):
message = map_unittest_pb2.TestMap()
message.map_int32_int32[-123] = -456
message.map_int64_int64[-2**33] = -2**34
message.map_uint32_uint32[123] = 456
message.map_uint64_uint64[2**33] = 2**34
message.map_string_string['abc'] = '123'
message.map_int32_foreign_message[111].c = 5
# Maps are serialized to text format using their underlying repeated
# representation.
self.CompareToGoldenText(
text_format.MessageToString(message), 'map_int32_int32 {\n'
' key: -123\n'
' value: -456\n'
'}\n'
'map_int64_int64 {\n'
' key: -8589934592\n'
' value: -17179869184\n'
'}\n'
'map_uint32_uint32 {\n'
' key: 123\n'
' value: 456\n'
'}\n'
'map_uint64_uint64 {\n'
' key: 8589934592\n'
' value: 17179869184\n'
'}\n'
'map_string_string {\n'
' key: "abc"\n'
' value: "123"\n'
'}\n'
'map_int32_foreign_message {\n'
' key: 111\n'
' value {\n'
' c: 5\n'
' }\n'
'}\n')
def testMapOrderEnforcement(self):
message = map_unittest_pb2.TestMap()
for letter in string.ascii_uppercase[13:26]:
message.map_string_string[letter] = 'dummy'
for letter in reversed(string.ascii_uppercase[0:13]):
message.map_string_string[letter] = 'dummy'
golden = ''.join(('map_string_string {\n key: "%c"\n value: "dummy"\n}\n'
% (letter,) for letter in string.ascii_uppercase))
self.CompareToGoldenText(text_format.MessageToString(message), golden)
# TODO(teboring): In c/137553523, not serializing default value for map entry
# message has been fixed. This test needs to be disabled in order to submit
# that cl. Add this back when c/137553523 has been submitted.
# def testMapOrderSemantics(self):
# golden_lines = self.ReadGolden('map_test_data.txt')
# message = map_unittest_pb2.TestMap()
# text_format.ParseLines(golden_lines, message)
# candidate = text_format.MessageToString(message)
# # The Python implementation emits "1.0" for the double value that the C++
# # implementation emits as "1".
# candidate = candidate.replace('1.0', '1', 2)
# candidate = candidate.replace('0.0', '0', 2)
# self.assertMultiLineEqual(candidate, ''.join(golden_lines))
# Tests of proto2-only features (MessageSet, extensions, etc.).
class Proto2Tests(TextFormatBase):
def testPrintMessageSet(self):
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
self.CompareToGoldenText(
text_format.MessageToString(message), 'message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
message = message_set_extensions_pb2.TestMessageSet()
ext = message_set_extensions_pb2.message_set_extension3
message.Extensions[ext].text = 'bar'
self.CompareToGoldenText(
text_format.MessageToString(message),
'[google.protobuf.internal.TestMessageSetExtension3] {\n'
' text: \"bar\"\n'
'}\n')
def testPrintMessageSetByFieldNumber(self):
out = text_format.TextWriter(False)
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
text_format.PrintMessage(message, out, use_field_number=True)
self.CompareToGoldenText(out.getvalue(), '1 {\n'
' 1545008 {\n'
' 15: 23\n'
' }\n'
' 1547769 {\n'
' 25: \"foo\"\n'
' }\n'
'}\n')
out.close()
def testPrintMessageSetAsOneLine(self):
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'message_set {'
' [protobuf_unittest.TestMessageSetExtension1] {'
' i: 23'
' }'
' [protobuf_unittest.TestMessageSetExtension2] {'
' str: \"foo\"'
' }'
' }')
def testParseMessageSet(self):
message = unittest_pb2.TestAllTypes()
text = ('repeated_uint64: 1\n' 'repeated_uint64: 2\n')
text_format.Parse(text, message)
self.assertEqual(1, message.repeated_uint64[0])
self.assertEqual(2, message.repeated_uint64[1])
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
text_format.Parse(text, message)
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
self.assertEqual(23, message.message_set.Extensions[ext1].i)
self.assertEqual('foo', message.message_set.Extensions[ext2].str)
def testExtensionInsideAnyMessage(self):
message = test_extend_any.TestAny()
text = ('value {\n'
' [type.googleapis.com/google.protobuf.internal.TestAny] {\n'
' [google.protobuf.internal.TestAnyExtension1.extension1] {\n'
' i: 10\n'
' }\n'
' }\n'
'}\n')
text_format.Merge(text, message, descriptor_pool=descriptor_pool.Default())
self.CompareToGoldenText(
text_format.MessageToString(
message, descriptor_pool=descriptor_pool.Default()),
text)
def testParseMessageByFieldNumber(self):
message = unittest_pb2.TestAllTypes()
text = ('34: 1\n' 'repeated_uint64: 2\n')
text_format.Parse(text, message, allow_field_number=True)
self.assertEqual(1, message.repeated_uint64[0])
self.assertEqual(2, message.repeated_uint64[1])
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('1 {\n'
' 1545008 {\n'
' 15: 23\n'
' }\n'
' 1547769 {\n'
' 25: \"foo\"\n'
' }\n'
'}\n')
text_format.Parse(text, message, allow_field_number=True)
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
self.assertEqual(23, message.message_set.Extensions[ext1].i)
self.assertEqual('foo', message.message_set.Extensions[ext2].str)
# Can't parse a field number without setting allow_field_number=True.
message = unittest_pb2.TestAllTypes()
text = '34:1\n'
six.assertRaisesRegex(self, text_format.ParseError, (
r'1:1 : Message type "\w+.TestAllTypes" has no field named '
r'"34".'), text_format.Parse, text, message)
# Can't parse if field number is not found.
text = '1234:1\n'
six.assertRaisesRegex(
self,
text_format.ParseError,
(r'1:1 : Message type "\w+.TestAllTypes" has no field named '
r'"1234".'),
text_format.Parse,
text,
message,
allow_field_number=True)
def testPrintAllExtensions(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'text_format_unittest_extensions_data.txt')
def testPrintAllExtensionsPointy(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(
message, pointy_brackets=True)),
'text_format_unittest_extensions_data_pointy.txt')
def testParseGoldenExtensions(self):
golden_text = '\n'.join(self.ReadGolden(
'text_format_unittest_extensions_data.txt'))
parsed_message = unittest_pb2.TestAllExtensions()
text_format.Parse(golden_text, parsed_message)
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.assertEqual(message, parsed_message)
def testParseAllExtensions(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
ascii_text = text_format.MessageToString(message)
parsed_message = unittest_pb2.TestAllExtensions()
text_format.Parse(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
def testParseAllowedUnknownExtension(self):
# Skip over unknown extension correctly.
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [unknown_extension] {\n'
' i: 23\n'
' bin: "\xe0"'
' [nested_unknown_ext]: {\n'
' i: 23\n'
' x: x\n'
' test: "test_string"\n'
' floaty_float: -0.315\n'
' num: -inf\n'
' multiline_str: "abc"\n'
' "def"\n'
' "xyz."\n'
' [nested_unknown_ext.ext]: <\n'
' i: 23\n'
' i: 24\n'
' pointfloat: .3\n'
' test: "test_string"\n'
' floaty_float: -0.315\n'
' num: -inf\n'
' long_string: "test" "test2" \n'
' >\n'
' }\n'
' }\n'
' [unknown_extension]: 5\n'
'}\n')
text_format.Parse(text, message, allow_unknown_extension=True)
golden = 'message_set {\n}\n'
self.CompareToGoldenText(text_format.MessageToString(message), golden)
# Catch parse errors in unknown extension.
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [unknown_extension] {\n'
' i:\n' # Missing value.
' }\n'
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'Invalid field value: }',
text_format.Parse,
malformed,
message,
allow_unknown_extension=True)
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [unknown_extension] {\n'
' str: "malformed string\n' # Missing closing quote.
' }\n'
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'Invalid field value: "',
text_format.Parse,
malformed,
message,
allow_unknown_extension=True)
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [unknown_extension] {\n'
' str: "malformed\n multiline\n string\n'
' }\n'
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'Invalid field value: "',
text_format.Parse,
malformed,
message,
allow_unknown_extension=True)
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [malformed_extension] <\n'
' i: -5\n'
' \n' # Missing '>' here.
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'5:1 : Expected ">".',
text_format.Parse,
malformed,
message,
allow_unknown_extension=True)
# Don't allow unknown fields with allow_unknown_extension=True.
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' unknown_field: true\n'
' \n' # Missing '>' here.
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
('2:3 : Message type '
'"proto2_wireformat_unittest.TestMessageSet" has no'
' field named "unknown_field".'),
text_format.Parse,
malformed,
message,
allow_unknown_extension=True)
# Parse known extension correctly.
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
text_format.Parse(text, message, allow_unknown_extension=True)
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
self.assertEqual(23, message.message_set.Extensions[ext1].i)
self.assertEqual('foo', message.message_set.Extensions[ext2].str)
def testParseBadIdentifier(self):
message = unittest_pb2.TestAllTypes()
text = ('optional_nested_message { "bb": 1 }')
with self.assertRaises(text_format.ParseError) as e:
text_format.Parse(text, message)
self.assertEqual(str(e.exception),
'1:27 : Expected identifier or number, got "bb".')
def testParseBadExtension(self):
message = unittest_pb2.TestAllExtensions()
text = '[unknown_extension]: 8\n'
six.assertRaisesRegex(self, text_format.ParseError,
'1:2 : Extension "unknown_extension" not registered.',
text_format.Parse, text, message)
message = unittest_pb2.TestAllTypes()
six.assertRaisesRegex(self, text_format.ParseError, (
'1:2 : Message type "protobuf_unittest.TestAllTypes" does not have '
'extensions.'), text_format.Parse, text, message)
def testParseNumericUnknownEnum(self):
message = unittest_pb2.TestAllTypes()
text = 'optional_nested_enum: 100'
six.assertRaisesRegex(self, text_format.ParseError,
(r'1:23 : Enum type "\w+.TestAllTypes.NestedEnum" '
r'has no value with number 100.'), text_format.Parse,
text, message)
def testMergeDuplicateExtensionScalars(self):
message = unittest_pb2.TestAllExtensions()
text = ('[protobuf_unittest.optional_int32_extension]: 42 '
'[protobuf_unittest.optional_int32_extension]: 67')
text_format.Merge(text, message)
self.assertEqual(67,
message.Extensions[unittest_pb2.optional_int32_extension])
def testParseDuplicateExtensionScalars(self):
message = unittest_pb2.TestAllExtensions()
text = ('[protobuf_unittest.optional_int32_extension]: 42 '
'[protobuf_unittest.optional_int32_extension]: 67')
six.assertRaisesRegex(self, text_format.ParseError, (
'1:96 : Message type "protobuf_unittest.TestAllExtensions" '
'should not have multiple '
'"protobuf_unittest.optional_int32_extension" extensions.'),
text_format.Parse, text, message)
def testParseDuplicateNestedMessageScalars(self):
message = unittest_pb2.TestAllTypes()
text = ('optional_nested_message { bb: 1 } '
'optional_nested_message { bb: 2 }')
six.assertRaisesRegex(self, text_format.ParseError, (
'1:65 : Message type "protobuf_unittest.TestAllTypes.NestedMessage" '
'should not have multiple "bb" fields.'), text_format.Parse, text,
message)
def testParseDuplicateScalars(self):
message = unittest_pb2.TestAllTypes()
text = ('optional_int32: 42 ' 'optional_int32: 67')
six.assertRaisesRegex(self, text_format.ParseError, (
'1:36 : Message type "protobuf_unittest.TestAllTypes" should not '
'have multiple "optional_int32" fields.'), text_format.Parse, text,
message)
def testParseGroupNotClosed(self):
message = unittest_pb2.TestAllTypes()
text = 'RepeatedGroup: <'
six.assertRaisesRegex(self, text_format.ParseError, '1:16 : Expected ">".',
text_format.Parse, text, message)
text = 'RepeatedGroup: {'
six.assertRaisesRegex(self, text_format.ParseError, '1:16 : Expected "}".',
text_format.Parse, text, message)
def testParseEmptyGroup(self):
message = unittest_pb2.TestAllTypes()
text = 'OptionalGroup: {}'
text_format.Parse(text, message)
self.assertTrue(message.HasField('optionalgroup'))
message.Clear()
message = unittest_pb2.TestAllTypes()
text = 'OptionalGroup: <>'
text_format.Parse(text, message)
self.assertTrue(message.HasField('optionalgroup'))
# Maps aren't really proto2-only, but our test schema only has maps for
# proto2.
def testParseMap(self):
text = ('map_int32_int32 {\n'
' key: -123\n'
' value: -456\n'
'}\n'
'map_int64_int64 {\n'
' key: -8589934592\n'
' value: -17179869184\n'
'}\n'
'map_uint32_uint32 {\n'
' key: 123\n'
' value: 456\n'
'}\n'
'map_uint64_uint64 {\n'
' key: 8589934592\n'
' value: 17179869184\n'
'}\n'
'map_string_string {\n'
' key: "abc"\n'
' value: "123"\n'
'}\n'
'map_int32_foreign_message {\n'
' key: 111\n'
' value {\n'
' c: 5\n'
' }\n'
'}\n')
message = map_unittest_pb2.TestMap()
text_format.Parse(text, message)
self.assertEqual(-456, message.map_int32_int32[-123])
self.assertEqual(-2**34, message.map_int64_int64[-2**33])
self.assertEqual(456, message.map_uint32_uint32[123])
self.assertEqual(2**34, message.map_uint64_uint64[2**33])
self.assertEqual('123', message.map_string_string['abc'])
self.assertEqual(5, message.map_int32_foreign_message[111].c)
class Proto3Tests(unittest.TestCase):
def testPrintMessageExpandAny(self):
packed_message = unittest_pb2.OneString()
packed_message.data = 'string'
message = any_test_pb2.TestAny()
message.any_value.Pack(packed_message)
self.assertEqual(
text_format.MessageToString(message,
descriptor_pool=descriptor_pool.Default()),
'any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string"\n'
' }\n'
'}\n')
def testPrintMessageExpandAnyRepeated(self):
packed_message = unittest_pb2.OneString()
message = any_test_pb2.TestAny()
packed_message.data = 'string0'
message.repeated_any_value.add().Pack(packed_message)
packed_message.data = 'string1'
message.repeated_any_value.add().Pack(packed_message)
self.assertEqual(
text_format.MessageToString(message),
'repeated_any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string0"\n'
' }\n'
'}\n'
'repeated_any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string1"\n'
' }\n'
'}\n')
def testPrintMessageExpandAnyDescriptorPoolMissingType(self):
packed_message = unittest_pb2.OneString()
packed_message.data = 'string'
message = any_test_pb2.TestAny()
message.any_value.Pack(packed_message)
empty_pool = descriptor_pool.DescriptorPool()
self.assertEqual(
text_format.MessageToString(message, descriptor_pool=empty_pool),
'any_value {\n'
' type_url: "type.googleapis.com/protobuf_unittest.OneString"\n'
' value: "\\n\\006string"\n'
'}\n')
def testPrintMessageExpandAnyPointyBrackets(self):
packed_message = unittest_pb2.OneString()
packed_message.data = 'string'
message = any_test_pb2.TestAny()
message.any_value.Pack(packed_message)
self.assertEqual(
text_format.MessageToString(message,
pointy_brackets=True),
'any_value <\n'
' [type.googleapis.com/protobuf_unittest.OneString] <\n'
' data: "string"\n'
' >\n'
'>\n')
def testPrintMessageExpandAnyAsOneLine(self):
packed_message = unittest_pb2.OneString()
packed_message.data = 'string'
message = any_test_pb2.TestAny()
message.any_value.Pack(packed_message)
self.assertEqual(
text_format.MessageToString(message,
as_one_line=True),
'any_value {'
' [type.googleapis.com/protobuf_unittest.OneString]'
' { data: "string" } '
'}')
def testPrintMessageExpandAnyAsOneLinePointyBrackets(self):
packed_message = unittest_pb2.OneString()
packed_message.data = 'string'
message = any_test_pb2.TestAny()
message.any_value.Pack(packed_message)
self.assertEqual(
text_format.MessageToString(message,
as_one_line=True,
pointy_brackets=True,
descriptor_pool=descriptor_pool.Default()),
'any_value <'
' [type.googleapis.com/protobuf_unittest.OneString]'
' < data: "string" > '
'>')
def testUnknownEnums(self):
message = unittest_proto3_arena_pb2.TestAllTypes()
message2 = unittest_proto3_arena_pb2.TestAllTypes()
message.optional_nested_enum = 999
text_string = text_format.MessageToString(message)
text_format.Parse(text_string, message2)
self.assertEqual(999, message2.optional_nested_enum)
def testMergeExpandedAny(self):
message = any_test_pb2.TestAny()
text = ('any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string"\n'
' }\n'
'}\n')
text_format.Merge(text, message)
packed_message = unittest_pb2.OneString()
message.any_value.Unpack(packed_message)
self.assertEqual('string', packed_message.data)
message.Clear()
text_format.Parse(text, message)
packed_message = unittest_pb2.OneString()
message.any_value.Unpack(packed_message)
self.assertEqual('string', packed_message.data)
def testMergeExpandedAnyRepeated(self):
message = any_test_pb2.TestAny()
text = ('repeated_any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string0"\n'
' }\n'
'}\n'
'repeated_any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string1"\n'
' }\n'
'}\n')
text_format.Merge(text, message)
packed_message = unittest_pb2.OneString()
message.repeated_any_value[0].Unpack(packed_message)
self.assertEqual('string0', packed_message.data)
message.repeated_any_value[1].Unpack(packed_message)
self.assertEqual('string1', packed_message.data)
def testMergeExpandedAnyPointyBrackets(self):
message = any_test_pb2.TestAny()
text = ('any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] <\n'
' data: "string"\n'
' >\n'
'}\n')
text_format.Merge(text, message)
packed_message = unittest_pb2.OneString()
message.any_value.Unpack(packed_message)
self.assertEqual('string', packed_message.data)
def testMergeAlternativeUrl(self):
message = any_test_pb2.TestAny()
text = ('any_value {\n'
' [type.otherapi.com/protobuf_unittest.OneString] {\n'
' data: "string"\n'
' }\n'
'}\n')
text_format.Merge(text, message)
packed_message = unittest_pb2.OneString()
self.assertEqual('type.otherapi.com/protobuf_unittest.OneString',
message.any_value.type_url)
def testMergeExpandedAnyDescriptorPoolMissingType(self):
message = any_test_pb2.TestAny()
text = ('any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string"\n'
' }\n'
'}\n')
with self.assertRaises(text_format.ParseError) as e:
empty_pool = descriptor_pool.DescriptorPool()
text_format.Merge(text, message, descriptor_pool=empty_pool)
self.assertEqual(
str(e.exception),
'Type protobuf_unittest.OneString not found in descriptor pool')
def testMergeUnexpandedAny(self):
text = ('any_value {\n'
' type_url: "type.googleapis.com/protobuf_unittest.OneString"\n'
' value: "\\n\\006string"\n'
'}\n')
message = any_test_pb2.TestAny()
text_format.Merge(text, message)
packed_message = unittest_pb2.OneString()
message.any_value.Unpack(packed_message)
self.assertEqual('string', packed_message.data)
def testMergeMissingAnyEndToken(self):
message = any_test_pb2.TestAny()
text = ('any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string"\n')
with self.assertRaises(text_format.ParseError) as e:
text_format.Merge(text, message)
self.assertEqual(str(e.exception), '3:11 : Expected "}".')
class TokenizerTest(unittest.TestCase):
def testSimpleTokenCases(self):
text = ('identifier1:"string1"\n \n\n'
'identifier2 : \n \n123 \n identifier3 :\'string\'\n'
'identifiER_4 : 1.1e+2 ID5:-0.23 ID6:\'aaaa\\\'bbbb\'\n'
'ID7 : "aa\\"bb"\n\n\n\n ID8: {A:inf B:-inf C:true D:false}\n'
'ID9: 22 ID10: -111111111111111111 ID11: -22\n'
'ID12: 2222222222222222222 ID13: 1.23456f ID14: 1.2e+2f '
'false_bool: 0 true_BOOL:t \n true_bool1: 1 false_BOOL1:f '
'False_bool: False True_bool: True X:iNf Y:-inF Z:nAN')
tokenizer = text_format.Tokenizer(text.splitlines())
methods = [(tokenizer.ConsumeIdentifier, 'identifier1'), ':',
(tokenizer.ConsumeString, 'string1'),
(tokenizer.ConsumeIdentifier, 'identifier2'), ':',
(tokenizer.ConsumeInteger, 123),
(tokenizer.ConsumeIdentifier, 'identifier3'), ':',
(tokenizer.ConsumeString, 'string'),
(tokenizer.ConsumeIdentifier, 'identifiER_4'), ':',
(tokenizer.ConsumeFloat, 1.1e+2),
(tokenizer.ConsumeIdentifier, 'ID5'), ':',
(tokenizer.ConsumeFloat, -0.23),
(tokenizer.ConsumeIdentifier, 'ID6'), ':',
(tokenizer.ConsumeString, 'aaaa\'bbbb'),
(tokenizer.ConsumeIdentifier, 'ID7'), ':',
(tokenizer.ConsumeString, 'aa\"bb'),
(tokenizer.ConsumeIdentifier, 'ID8'), ':', '{',
(tokenizer.ConsumeIdentifier, 'A'), ':',
(tokenizer.ConsumeFloat, float('inf')),
(tokenizer.ConsumeIdentifier, 'B'), ':',
(tokenizer.ConsumeFloat, -float('inf')),
(tokenizer.ConsumeIdentifier, 'C'), ':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'D'), ':',
(tokenizer.ConsumeBool, False), '}',
(tokenizer.ConsumeIdentifier, 'ID9'), ':',
(tokenizer.ConsumeInteger, 22),
(tokenizer.ConsumeIdentifier, 'ID10'), ':',
(tokenizer.ConsumeInteger, -111111111111111111),
(tokenizer.ConsumeIdentifier, 'ID11'), ':',
(tokenizer.ConsumeInteger, -22),
(tokenizer.ConsumeIdentifier, 'ID12'), ':',
(tokenizer.ConsumeInteger, 2222222222222222222),
(tokenizer.ConsumeIdentifier, 'ID13'), ':',
(tokenizer.ConsumeFloat, 1.23456),
(tokenizer.ConsumeIdentifier, 'ID14'), ':',
(tokenizer.ConsumeFloat, 1.2e+2),
(tokenizer.ConsumeIdentifier, 'false_bool'), ':',
(tokenizer.ConsumeBool, False),
(tokenizer.ConsumeIdentifier, 'true_BOOL'), ':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'true_bool1'), ':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'false_BOOL1'), ':',
(tokenizer.ConsumeBool, False),
(tokenizer.ConsumeIdentifier, 'False_bool'), ':',
(tokenizer.ConsumeBool, False),
(tokenizer.ConsumeIdentifier, 'True_bool'), ':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'X'), ':',
(tokenizer.ConsumeFloat, float('inf')),
(tokenizer.ConsumeIdentifier, 'Y'), ':',
(tokenizer.ConsumeFloat, float('-inf')),
(tokenizer.ConsumeIdentifier, 'Z'), ':',
(tokenizer.ConsumeFloat, float('nan'))]
i = 0
while not tokenizer.AtEnd():
m = methods[i]
if isinstance(m, str):
token = tokenizer.token
self.assertEqual(token, m)
tokenizer.NextToken()
elif isinstance(m[1], float) and math.isnan(m[1]):
self.assertTrue(math.isnan(m[0]()))
else:
self.assertEqual(m[1], m[0]())
i += 1
def testConsumeAbstractIntegers(self):
# This test only tests the failures in the integer parsing methods as well
# as the '0' special cases.
int64_max = (1 << 63) - 1
uint32_max = (1 << 32) - 1
text = '-1 %d %d' % (uint32_max + 1, int64_max + 1)
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertEqual(-1, tokenizer.ConsumeInteger())
self.assertEqual(uint32_max + 1, tokenizer.ConsumeInteger())
self.assertEqual(int64_max + 1, tokenizer.ConsumeInteger())
self.assertTrue(tokenizer.AtEnd())
text = '-0 0 0 1.2'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertEqual(0, tokenizer.ConsumeInteger())
self.assertEqual(0, tokenizer.ConsumeInteger())
self.assertEqual(True, tokenizer.TryConsumeInteger())
self.assertEqual(False, tokenizer.TryConsumeInteger())
with self.assertRaises(text_format.ParseError):
tokenizer.ConsumeInteger()
self.assertEqual(1.2, tokenizer.ConsumeFloat())
self.assertTrue(tokenizer.AtEnd())
def testConsumeIntegers(self):
# This test only tests the failures in the integer parsing methods as well
# as the '0' special cases.
int64_max = (1 << 63) - 1
uint32_max = (1 << 32) - 1
text = '-1 %d %d' % (uint32_max + 1, int64_max + 1)
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError,
text_format._ConsumeUint32, tokenizer)
self.assertRaises(text_format.ParseError,
text_format._ConsumeUint64, tokenizer)
self.assertEqual(-1, text_format._ConsumeInt32(tokenizer))
self.assertRaises(text_format.ParseError,
text_format._ConsumeUint32, tokenizer)
self.assertRaises(text_format.ParseError,
text_format._ConsumeInt32, tokenizer)
self.assertEqual(uint32_max + 1, text_format._ConsumeInt64(tokenizer))
self.assertRaises(text_format.ParseError,
text_format._ConsumeInt64, tokenizer)
self.assertEqual(int64_max + 1, text_format._ConsumeUint64(tokenizer))
self.assertTrue(tokenizer.AtEnd())
text = '-0 -0 0 0'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertEqual(0, text_format._ConsumeUint32(tokenizer))
self.assertEqual(0, text_format._ConsumeUint64(tokenizer))
self.assertEqual(0, text_format._ConsumeUint32(tokenizer))
self.assertEqual(0, text_format._ConsumeUint64(tokenizer))
self.assertTrue(tokenizer.AtEnd())
def testConsumeByteString(self):
text = '"string1\''
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = 'string1"'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\xt"'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\"'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\x"'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
def testConsumeBool(self):
text = 'not-a-bool'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeBool)
def testSkipComment(self):
tokenizer = text_format.Tokenizer('# some comment'.splitlines())
self.assertTrue(tokenizer.AtEnd())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeComment)
def testConsumeComment(self):
tokenizer = text_format.Tokenizer('# some comment'.splitlines(),
skip_comments=False)
self.assertFalse(tokenizer.AtEnd())
self.assertEqual('# some comment', tokenizer.ConsumeComment())
self.assertTrue(tokenizer.AtEnd())
def testConsumeTwoComments(self):
text = '# some comment\n# another comment'
tokenizer = text_format.Tokenizer(text.splitlines(), skip_comments=False)
self.assertEqual('# some comment', tokenizer.ConsumeComment())
self.assertFalse(tokenizer.AtEnd())
self.assertEqual('# another comment', tokenizer.ConsumeComment())
self.assertTrue(tokenizer.AtEnd())
def testConsumeTrailingComment(self):
text = 'some_number: 4\n# some comment'
tokenizer = text_format.Tokenizer(text.splitlines(), skip_comments=False)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeComment)
self.assertEqual('some_number', tokenizer.ConsumeIdentifier())
self.assertEqual(tokenizer.token, ':')
tokenizer.NextToken()
self.assertRaises(text_format.ParseError, tokenizer.ConsumeComment)
self.assertEqual(4, tokenizer.ConsumeInteger())
self.assertFalse(tokenizer.AtEnd())
self.assertEqual('# some comment', tokenizer.ConsumeComment())
self.assertTrue(tokenizer.AtEnd())
def testConsumeLineComment(self):
tokenizer = text_format.Tokenizer('# some comment'.splitlines(),
skip_comments=False)
self.assertFalse(tokenizer.AtEnd())
self.assertEqual((False, '# some comment'),
tokenizer.ConsumeCommentOrTrailingComment())
self.assertTrue(tokenizer.AtEnd())
def testConsumeTwoLineComments(self):
text = '# some comment\n# another comment'
tokenizer = text_format.Tokenizer(text.splitlines(), skip_comments=False)
self.assertEqual((False, '# some comment'),
tokenizer.ConsumeCommentOrTrailingComment())
self.assertFalse(tokenizer.AtEnd())
self.assertEqual((False, '# another comment'),
tokenizer.ConsumeCommentOrTrailingComment())
self.assertTrue(tokenizer.AtEnd())
def testConsumeAndCheckTrailingComment(self):
text = 'some_number: 4 # some comment' # trailing comment on the same line
tokenizer = text_format.Tokenizer(text.splitlines(), skip_comments=False)
self.assertRaises(text_format.ParseError,
tokenizer.ConsumeCommentOrTrailingComment)
self.assertEqual('some_number', tokenizer.ConsumeIdentifier())
self.assertEqual(tokenizer.token, ':')
tokenizer.NextToken()
self.assertRaises(text_format.ParseError,
tokenizer.ConsumeCommentOrTrailingComment)
self.assertEqual(4, tokenizer.ConsumeInteger())
self.assertFalse(tokenizer.AtEnd())
self.assertEqual((True, '# some comment'),
tokenizer.ConsumeCommentOrTrailingComment())
self.assertTrue(tokenizer.AtEnd())
def testHashinComment(self):
text = 'some_number: 4 # some comment # not a new comment'
tokenizer = text_format.Tokenizer(text.splitlines(), skip_comments=False)
self.assertEqual('some_number', tokenizer.ConsumeIdentifier())
self.assertEqual(tokenizer.token, ':')
tokenizer.NextToken()
self.assertEqual(4, tokenizer.ConsumeInteger())
self.assertEqual((True, '# some comment # not a new comment'),
tokenizer.ConsumeCommentOrTrailingComment())
self.assertTrue(tokenizer.AtEnd())
# Tests for pretty printer functionality.
@_parameterized.Parameters((unittest_pb2), (unittest_proto3_arena_pb2))
class PrettyPrinterTest(TextFormatBase):
def testPrettyPrintNoMatch(self, message_module):
def printer(message, indent, as_one_line):
del message, indent, as_one_line
return None
message = message_module.TestAllTypes()
msg = message.repeated_nested_message.add()
msg.bb = 42
self.CompareToGoldenText(
text_format.MessageToString(
message, as_one_line=True, message_formatter=printer),
'repeated_nested_message { bb: 42 }')
def testPrettyPrintOneLine(self, message_module):
def printer(m, indent, as_one_line):
del indent, as_one_line
if m.DESCRIPTOR == message_module.TestAllTypes.NestedMessage.DESCRIPTOR:
return 'My lucky number is %s' % m.bb
message = message_module.TestAllTypes()
msg = message.repeated_nested_message.add()
msg.bb = 42
self.CompareToGoldenText(
text_format.MessageToString(
message, as_one_line=True, message_formatter=printer),
'repeated_nested_message { My lucky number is 42 }')
def testPrettyPrintMultiLine(self, message_module):
def printer(m, indent, as_one_line):
if m.DESCRIPTOR == message_module.TestAllTypes.NestedMessage.DESCRIPTOR:
line_deliminator = (' ' if as_one_line else '\n') + ' ' * indent
return 'My lucky number is:%s%s' % (line_deliminator, m.bb)
return None
message = message_module.TestAllTypes()
msg = message.repeated_nested_message.add()
msg.bb = 42
self.CompareToGoldenText(
text_format.MessageToString(
message, as_one_line=True, message_formatter=printer),
'repeated_nested_message { My lucky number is: 42 }')
self.CompareToGoldenText(
text_format.MessageToString(
message, as_one_line=False, message_formatter=printer),
'repeated_nested_message {\n My lucky number is:\n 42\n}\n')
def testPrettyPrintEntireMessage(self, message_module):
def printer(m, indent, as_one_line):
del indent, as_one_line
if m.DESCRIPTOR == message_module.TestAllTypes.DESCRIPTOR:
return 'The is the message!'
return None
message = message_module.TestAllTypes()
self.CompareToGoldenText(
text_format.MessageToString(
message, as_one_line=False, message_formatter=printer),
'The is the message!\n')
self.CompareToGoldenText(
text_format.MessageToString(
message, as_one_line=True, message_formatter=printer),
'The is the message!')
def testPrettyPrintMultipleParts(self, message_module):
def printer(m, indent, as_one_line):
del indent, as_one_line
if m.DESCRIPTOR == message_module.TestAllTypes.NestedMessage.DESCRIPTOR:
return 'My lucky number is %s' % m.bb
return None
message = message_module.TestAllTypes()
message.optional_int32 = 61
msg = message.repeated_nested_message.add()
msg.bb = 42
msg = message.repeated_nested_message.add()
msg.bb = 99
msg = message.optional_nested_message
msg.bb = 1
self.CompareToGoldenText(
text_format.MessageToString(
message, as_one_line=True, message_formatter=printer),
('optional_int32: 61 '
'optional_nested_message { My lucky number is 1 } '
'repeated_nested_message { My lucky number is 42 } '
'repeated_nested_message { My lucky number is 99 }'))
if __name__ == '__main__':
unittest.main()
|
Mirantis/openstack-dashboard
|
refs/heads/master
|
django-openstack/src/django_openstack/templatetags/templatetags/branding.py
|
5
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Template tags for working with django_openstack.
"""
from django import template
from django.conf import settings
register = template.Library()
class SiteBrandingNode(template.Node):
def render(self, context):
return settings.SITE_BRANDING
@register.tag
def site_branding(parser, token):
return SiteBrandingNode()
# TODO(jeffjapan): This is just an assignment-tag-style version of the above;
#                  replace it when the dashboard is upgraded to a django
#                  version that supports the @assignment_tag decorator syntax
#                  (a hedged sketch of that form appears at the end of this
#                  module).
class SaveBrandingNode(template.Node):
def __init__(self, var_name):
self.var_name = var_name
def render(self, context):
context[self.var_name] = settings.SITE_BRANDING
return ""
@register.tag
def save_site_branding(parser, token):
tagname = token.contents.split()
return SaveBrandingNode(tagname[-1])
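# Hedged sketch (not part of the original module): roughly what the TODO above
# could become on a Django version that provides Library.assignment_tag
# (Django >= 1.4). The tag name below is hypothetical, and the hasattr() guard
# keeps this module importable on older Django versions.
if hasattr(register, 'assignment_tag'):
    @register.assignment_tag(name='site_branding_value')
    def _site_branding_value():
        # Usage in a template: {% site_branding_value as brand %}
        return settings.SITE_BRANDING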
|
40223209/test111
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/gc.py
|
743
|
"""This module provides access to the garbage collector for reference cycles.
enable() -- Enable automatic garbage collection.
disable() -- Disable automatic garbage collection.
isenabled() -- Returns true if automatic collection is enabled.
collect() -- Do a full collection right now.
get_count() -- Return the current collection counts.
set_debug() -- Set debugging flags.
get_debug() -- Get debugging flags.
set_threshold() -- Set the collection thresholds.
get_threshold() -- Return the current collection thresholds.
get_objects() -- Return a list of all objects tracked by the collector.
is_tracked() -- Returns true if a given object is tracked.
get_referrers() -- Return the list of objects that refer to an object.
get_referents() -- Return the list of objects that an object refers to.
"""
DEBUG_COLLECTABLE = 2
DEBUG_LEAK = 38
DEBUG_SAVEALL = 32
DEBUG_STATS = 1
DEBUG_UNCOLLECTABLE = 4
class __loader__:
pass
callbacks = []
def collect(*args,**kw):
"""collect([generation]) -> n
With no arguments, run a full collection. The optional argument
may be an integer specifying which generation to collect. A ValueError
is raised if the generation number is invalid.
The number of unreachable objects is returned.
"""
pass
def disable(*args,**kw):
"""disable() -> None
Disable automatic garbage collection.
"""
pass
def enable(*args,**kw):
"""enable() -> None
Enable automatic garbage collection.
"""
pass
garbage = []
def get_count(*args,**kw):
"""get_count() -> (count0, count1, count2)
Return the current collection counts
"""
pass
def get_debug(*args,**kw):
"""get_debug() -> flags
Get the garbage collection debugging flags.
"""
pass
def get_objects(*args,**kw):
"""get_objects() -> [...]
Return a list of objects tracked by the collector (excluding the list
returned).
"""
pass
def get_referents(*args,**kw):
"""get_referents(*objs) -> list Return the list of objects that are directly referred to by objs."""
pass
def get_referrers(*args,**kw):
"""get_referrers(*objs) -> list Return the list of objects that directly refer to any of objs."""
pass
def get_threshold(*args,**kw):
"""get_threshold() -> (threshold0, threshold1, threshold2)
Return the current collection thresholds
"""
pass
def is_tracked(*args,**kw):
"""is_tracked(obj) -> bool
Returns true if the object is tracked by the garbage collector.
Simple atomic objects will return false.
"""
pass
def isenabled(*args,**kw):
"""isenabled() -> status
Returns true if automatic garbage collection is enabled.
"""
pass
def set_debug(*args,**kw):
"""set_debug(flags) -> None
Set the garbage collection debugging flags. Debugging information is
written to sys.stderr.
flags is an integer and can have the following bits turned on:
DEBUG_STATS - Print statistics during collection.
DEBUG_COLLECTABLE - Print collectable objects found.
DEBUG_UNCOLLECTABLE - Print unreachable but uncollectable objects found.
DEBUG_SAVEALL - Save objects to gc.garbage rather than freeing them.
DEBUG_LEAK - Debug leaking programs (everything but STATS).
"""
pass
def set_threshold(*args,**kw):
"""set_threshold(threshold0, [threshold1, threshold2]) -> None
Sets the collection thresholds. Setting threshold0 to zero disables
collection.
"""
pass
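# Hedged usage sketch (not part of the stub above): how the API documented in
# the module docstring is normally driven. Every function in this Brython stub
# is a no-op returning None, so the guard below only does anything meaningful
# when the file is executed directly.
if __name__ == '__main__':
    enable()                        # turn automatic collection on
    set_debug(DEBUG_STATS)          # print collection statistics to stderr
    print(get_threshold())          # CPython would report e.g. (700, 10, 10)
    unreachable = collect()         # force a full collection right now
    print('unreachable objects:', unreachable)
    set_debug(0)
    disable()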
|
ncdesouza/bookworm
|
refs/heads/master
|
env/lib/python2.7/site-packages/sqlalchemy/ext/instrumentation.py
|
56
|
"""Extensible class instrumentation.
The :mod:`sqlalchemy.ext.instrumentation` package provides for alternate
systems of class instrumentation within the ORM. Class instrumentation
refers to how the ORM places attributes on the class which maintain
data and track changes to that data, as well as event hooks installed
on the class.
.. note::
The extension package is provided for the benefit of integration
with other object management packages, which already perform
their own instrumentation. It is not intended for general use.
For examples of how the instrumentation extension is used,
see the example :ref:`examples_instrumentation`.
.. versionchanged:: 0.8
The :mod:`sqlalchemy.orm.instrumentation` was split out so
that all functionality having to do with non-standard
instrumentation was moved out to :mod:`sqlalchemy.ext.instrumentation`.
When imported, the module installs itself within
:mod:`sqlalchemy.orm.instrumentation` so that it
takes effect, including recognition of
``__sa_instrumentation_manager__`` on mapped classes, as
well :data:`.instrumentation_finders`
being used to determine class instrumentation resolution.
"""
from ..orm import instrumentation as orm_instrumentation
from ..orm.instrumentation import (
ClassManager, InstrumentationFactory, _default_state_getter,
_default_dict_getter, _default_manager_getter
)
from ..orm import attributes, collections, base as orm_base
from .. import util
from ..orm import exc as orm_exc
import weakref
INSTRUMENTATION_MANAGER = '__sa_instrumentation_manager__'
"""Attribute, elects custom instrumentation when present on a mapped class.
Allows a class to specify a slightly or wildly different technique for
tracking changes made to mapped attributes and collections.
Only one instrumentation implementation is allowed in a given object
inheritance hierarchy.
The value of this attribute must be a callable and will be passed a class
object. The callable must return one of:
- An instance of an InstrumentationManager or subclass
- An object implementing all or some of InstrumentationManager (TODO)
- A dictionary of callables, implementing all or some of the above (TODO)
- An instance of a ClassManager or subclass
This attribute is consulted by SQLAlchemy instrumentation
resolution, once the :mod:`sqlalchemy.ext.instrumentation` module
has been imported. If custom finders are installed in the global
instrumentation_finders list, they may or may not choose to honor this
attribute.
"""
def find_native_user_instrumentation_hook(cls):
"""Find user-specified instrumentation management for a class."""
return getattr(cls, INSTRUMENTATION_MANAGER, None)
instrumentation_finders = [find_native_user_instrumentation_hook]
"""An extensible sequence of callables which return instrumentation
implementations
When a class is registered, each callable will be passed a class object.
If None is returned, the
next finder in the sequence is consulted. Otherwise the return must be an
instrumentation factory that follows the same guidelines as
sqlalchemy.ext.instrumentation.INSTRUMENTATION_MANAGER.
By default, the only finder is find_native_user_instrumentation_hook, which
searches for INSTRUMENTATION_MANAGER. If all finders return None, standard
ClassManager instrumentation is used.
"""
class ExtendedInstrumentationRegistry(InstrumentationFactory):
"""Extends :class:`.InstrumentationFactory` with additional
bookkeeping, to accommodate multiple types of
class managers.
"""
_manager_finders = weakref.WeakKeyDictionary()
_state_finders = weakref.WeakKeyDictionary()
_dict_finders = weakref.WeakKeyDictionary()
_extended = False
def _locate_extended_factory(self, class_):
for finder in instrumentation_finders:
factory = finder(class_)
if factory is not None:
manager = self._extended_class_manager(class_, factory)
return manager, factory
else:
return None, None
def _check_conflicts(self, class_, factory):
existing_factories = self._collect_management_factories_for(class_).\
difference([factory])
if existing_factories:
raise TypeError(
"multiple instrumentation implementations specified "
"in %s inheritance hierarchy: %r" % (
class_.__name__, list(existing_factories)))
def _extended_class_manager(self, class_, factory):
manager = factory(class_)
if not isinstance(manager, ClassManager):
manager = _ClassInstrumentationAdapter(class_, manager)
if factory != ClassManager and not self._extended:
# somebody invoked a custom ClassManager.
# reinstall global "getter" functions with the more
# expensive ones.
self._extended = True
_install_instrumented_lookups()
self._manager_finders[class_] = manager.manager_getter()
self._state_finders[class_] = manager.state_getter()
self._dict_finders[class_] = manager.dict_getter()
return manager
def _collect_management_factories_for(self, cls):
"""Return a collection of factories in play or specified for a
hierarchy.
Traverses the entire inheritance graph of a cls and returns a
collection of instrumentation factories for those classes. Factories
are extracted from active ClassManagers, if available, otherwise
instrumentation_finders is consulted.
"""
hierarchy = util.class_hierarchy(cls)
factories = set()
for member in hierarchy:
manager = self.manager_of_class(member)
if manager is not None:
factories.add(manager.factory)
else:
for finder in instrumentation_finders:
factory = finder(member)
if factory is not None:
break
else:
factory = None
factories.add(factory)
factories.discard(None)
return factories
def unregister(self, class_):
if class_ in self._manager_finders:
del self._manager_finders[class_]
del self._state_finders[class_]
del self._dict_finders[class_]
super(ExtendedInstrumentationRegistry, self).unregister(class_)
def manager_of_class(self, cls):
if cls is None:
return None
return self._manager_finders.get(cls, _default_manager_getter)(cls)
def state_of(self, instance):
if instance is None:
raise AttributeError("None has no persistent state.")
return self._state_finders.get(
instance.__class__, _default_state_getter)(instance)
def dict_of(self, instance):
if instance is None:
raise AttributeError("None has no persistent state.")
return self._dict_finders.get(
instance.__class__, _default_dict_getter)(instance)
orm_instrumentation._instrumentation_factory = \
_instrumentation_factory = ExtendedInstrumentationRegistry()
orm_instrumentation.instrumentation_finders = instrumentation_finders
class InstrumentationManager(object):
"""User-defined class instrumentation extension.
:class:`.InstrumentationManager` can be subclassed in order
to change
how class instrumentation proceeds. This class exists for
the purposes of integration with other object management
frameworks which would like to entirely modify the
instrumentation methodology of the ORM, and is not intended
for regular usage. For interception of class instrumentation
events, see :class:`.InstrumentationEvents`.
The API for this class should be considered as semi-stable,
and may change slightly with new releases.
.. versionchanged:: 0.8
:class:`.InstrumentationManager` was moved from
:mod:`sqlalchemy.orm.instrumentation` to
:mod:`sqlalchemy.ext.instrumentation`.
"""
# r4361 added a mandatory (cls) constructor to this interface.
# given that, perhaps class_ should be dropped from all of these
# signatures.
def __init__(self, class_):
pass
def manage(self, class_, manager):
setattr(class_, '_default_class_manager', manager)
def dispose(self, class_, manager):
delattr(class_, '_default_class_manager')
def manager_getter(self, class_):
def get(cls):
return cls._default_class_manager
return get
def instrument_attribute(self, class_, key, inst):
pass
def post_configure_attribute(self, class_, key, inst):
pass
def install_descriptor(self, class_, key, inst):
setattr(class_, key, inst)
def uninstall_descriptor(self, class_, key):
delattr(class_, key)
def install_member(self, class_, key, implementation):
setattr(class_, key, implementation)
def uninstall_member(self, class_, key):
delattr(class_, key)
def instrument_collection_class(self, class_, key, collection_class):
return collections.prepare_instrumentation(collection_class)
def get_instance_dict(self, class_, instance):
return instance.__dict__
def initialize_instance_dict(self, class_, instance):
pass
def install_state(self, class_, instance, state):
setattr(instance, '_default_state', state)
def remove_state(self, class_, instance):
delattr(instance, '_default_state')
def state_getter(self, class_):
return lambda instance: getattr(instance, '_default_state')
def dict_getter(self, class_):
return lambda inst: self.get_instance_dict(class_, inst)
class _ClassInstrumentationAdapter(ClassManager):
"""Adapts a user-defined InstrumentationManager to a ClassManager."""
def __init__(self, class_, override):
self._adapted = override
self._get_state = self._adapted.state_getter(class_)
self._get_dict = self._adapted.dict_getter(class_)
ClassManager.__init__(self, class_)
def manage(self):
self._adapted.manage(self.class_, self)
def dispose(self):
self._adapted.dispose(self.class_)
def manager_getter(self):
return self._adapted.manager_getter(self.class_)
def instrument_attribute(self, key, inst, propagated=False):
ClassManager.instrument_attribute(self, key, inst, propagated)
if not propagated:
self._adapted.instrument_attribute(self.class_, key, inst)
def post_configure_attribute(self, key):
super(_ClassInstrumentationAdapter, self).post_configure_attribute(key)
self._adapted.post_configure_attribute(self.class_, key, self[key])
def install_descriptor(self, key, inst):
self._adapted.install_descriptor(self.class_, key, inst)
def uninstall_descriptor(self, key):
self._adapted.uninstall_descriptor(self.class_, key)
def install_member(self, key, implementation):
self._adapted.install_member(self.class_, key, implementation)
def uninstall_member(self, key):
self._adapted.uninstall_member(self.class_, key)
def instrument_collection_class(self, key, collection_class):
return self._adapted.instrument_collection_class(
self.class_, key, collection_class)
def initialize_collection(self, key, state, factory):
delegate = getattr(self._adapted, 'initialize_collection', None)
if delegate:
return delegate(key, state, factory)
else:
return ClassManager.initialize_collection(self, key,
state, factory)
def new_instance(self, state=None):
instance = self.class_.__new__(self.class_)
self.setup_instance(instance, state)
return instance
def _new_state_if_none(self, instance):
"""Install a default InstanceState if none is present.
A private convenience method used by the __init__ decorator.
"""
if self.has_state(instance):
return False
else:
return self.setup_instance(instance)
def setup_instance(self, instance, state=None):
self._adapted.initialize_instance_dict(self.class_, instance)
if state is None:
state = self._state_constructor(instance, self)
# the given instance is assumed to have no state
self._adapted.install_state(self.class_, instance, state)
return state
def teardown_instance(self, instance):
self._adapted.remove_state(self.class_, instance)
def has_state(self, instance):
try:
self._get_state(instance)
except orm_exc.NO_STATE:
return False
else:
return True
def state_getter(self):
return self._get_state
def dict_getter(self):
return self._get_dict
def _install_instrumented_lookups():
"""Replace global class/object management functions
with ExtendedInstrumentationRegistry implementations, which
allow multiple types of class managers to be present,
at the cost of performance.
This function is called only by ExtendedInstrumentationRegistry
and unit tests specific to this behavior.
The _reinstall_default_lookups() function can be called
after this one to re-establish the default functions.
"""
_install_lookups(
dict(
instance_state=_instrumentation_factory.state_of,
instance_dict=_instrumentation_factory.dict_of,
manager_of_class=_instrumentation_factory.manager_of_class
)
)
def _reinstall_default_lookups():
"""Restore simplified lookups."""
_install_lookups(
dict(
instance_state=_default_state_getter,
instance_dict=_default_dict_getter,
manager_of_class=_default_manager_getter
)
)
def _install_lookups(lookups):
global instance_state, instance_dict, manager_of_class
instance_state = lookups['instance_state']
instance_dict = lookups['instance_dict']
manager_of_class = lookups['manager_of_class']
orm_base.instance_state = attributes.instance_state = \
orm_instrumentation.instance_state = instance_state
orm_base.instance_dict = attributes.instance_dict = \
orm_instrumentation.instance_dict = instance_dict
orm_base.manager_of_class = attributes.manager_of_class = \
orm_instrumentation.manager_of_class = manager_of_class
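# Hedged illustration (not part of the library module above): a minimal sketch
# of the __sa_instrumentation_manager__ hook described in the
# INSTRUMENTATION_MANAGER docstring. The class and attribute names below are
# invented for the example; the __main__ guard keeps the module's import-time
# behaviour unchanged.
if __name__ == '__main__':
    class MyInstrumentationManager(InstrumentationManager):
        """Stores the ClassManager under a custom attribute name."""
        def manage(self, class_, manager):
            setattr(class_, '_my_class_manager', manager)
        def dispose(self, class_, manager):
            delattr(class_, '_my_class_manager')
        def manager_getter(self, class_):
            return lambda cls: cls._my_class_manager
    class Widget(object):
        # Electing custom instrumentation, as the docstring above describes.
        __sa_instrumentation_manager__ = MyInstrumentationManager
    # The default finder picks the hook up from the class attribute.
    print(find_native_user_instrumentation_hook(Widget))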
|
vitaly4uk/django
|
refs/heads/master
|
tests/sitemaps_tests/urls/https.py
|
397
|
from django.conf.urls import url
from django.contrib.sitemaps import views
from .http import SimpleSitemap
class HTTPSSitemap(SimpleSitemap):
protocol = 'https'
secure_sitemaps = {
'simple': HTTPSSitemap,
}
urlpatterns = [
url(r'^secure/index\.xml$', views.index, {'sitemaps': secure_sitemaps}),
url(r'^secure/sitemap-(?P<section>.+)\.xml$', views.sitemap,
{'sitemaps': secure_sitemaps},
name='django.contrib.sitemaps.views.sitemap'),
]
|
randybias/beets
|
refs/heads/master
|
beets/util/bluelet.py
|
25
|
"""Extremely simple pure-Python implementation of coroutine-style
asynchronous socket I/O. Inspired by, but inferior to, Eventlet.
Bluelet can also be thought of as a less-terrible replacement for
asyncore.
Bluelet: easy concurrency without all the messy parallelism.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import socket
import select
import sys
import types
import errno
import traceback
import time
import collections
# A little bit of "six" (Python 2/3 compatibility): cope with PEP 3109 syntax
# changes.
PY3 = sys.version_info[0] == 3
if PY3:
def _reraise(typ, exc, tb):
raise exc.with_traceback(tb)
else:
exec("""
def _reraise(typ, exc, tb):
raise typ, exc, tb
""")
# Basic events used for thread scheduling.
class Event(object):
"""Just a base class identifying Bluelet events. An event is an
object yielded from a Bluelet thread coroutine to suspend operation
and communicate with the scheduler.
"""
pass
class WaitableEvent(Event):
"""A waitable event is one encapsulating an action that can be
waited for using a select() call. That is, it's an event with an
associated file descriptor.
"""
def waitables(self):
"""Return "waitable" objects to pass to select(). Should return
three iterables for input readiness, output readiness, and
exceptional conditions (i.e., the three lists passed to
select()).
"""
return (), (), ()
def fire(self):
"""Called when an associated file descriptor becomes ready
(i.e., is returned from a select() call).
"""
pass
class ValueEvent(Event):
"""An event that does nothing but return a fixed value."""
def __init__(self, value):
self.value = value
class ExceptionEvent(Event):
"""Raise an exception at the yield point. Used internally."""
def __init__(self, exc_info):
self.exc_info = exc_info
class SpawnEvent(Event):
"""Add a new coroutine thread to the scheduler."""
def __init__(self, coro):
self.spawned = coro
class JoinEvent(Event):
"""Suspend the thread until the specified child thread has
completed.
"""
def __init__(self, child):
self.child = child
class KillEvent(Event):
"""Unschedule a child thread."""
def __init__(self, child):
self.child = child
class DelegationEvent(Event):
"""Suspend execution of the current thread, start a new thread and,
once the child thread has finished, return control to the parent
thread.
"""
def __init__(self, coro):
self.spawned = coro
class ReturnEvent(Event):
"""Return a value the current thread's delegator at the point of
delegation. Ends the current (delegate) thread.
"""
def __init__(self, value):
self.value = value
class SleepEvent(WaitableEvent):
"""Suspend the thread for a given duration.
"""
def __init__(self, duration):
self.wakeup_time = time.time() + duration
def time_left(self):
return max(self.wakeup_time - time.time(), 0.0)
class ReadEvent(WaitableEvent):
"""Reads from a file-like object."""
def __init__(self, fd, bufsize):
self.fd = fd
self.bufsize = bufsize
def waitables(self):
return (self.fd,), (), ()
def fire(self):
return self.fd.read(self.bufsize)
class WriteEvent(WaitableEvent):
"""Writes to a file-like object."""
def __init__(self, fd, data):
self.fd = fd
self.data = data
def waitables(self):
return (), (self.fd,), ()
def fire(self):
self.fd.write(self.data)
# Core logic for executing and scheduling threads.
def _event_select(events):
"""Perform a select() over all the Events provided, returning the
ones ready to be fired. Only WaitableEvents (including SleepEvents)
matter here; all other events are ignored (and thus postponed).
"""
# Gather waitables and wakeup times.
waitable_to_event = {}
rlist, wlist, xlist = [], [], []
earliest_wakeup = None
for event in events:
if isinstance(event, SleepEvent):
if not earliest_wakeup:
earliest_wakeup = event.wakeup_time
else:
earliest_wakeup = min(earliest_wakeup, event.wakeup_time)
elif isinstance(event, WaitableEvent):
r, w, x = event.waitables()
rlist += r
wlist += w
xlist += x
for waitable in r:
waitable_to_event[('r', waitable)] = event
for waitable in w:
waitable_to_event[('w', waitable)] = event
for waitable in x:
waitable_to_event[('x', waitable)] = event
# If we have any sleeping threads, determine how long to sleep.
if earliest_wakeup:
timeout = max(earliest_wakeup - time.time(), 0.0)
else:
timeout = None
# Perform select() if we have any waitables.
if rlist or wlist or xlist:
rready, wready, xready = select.select(rlist, wlist, xlist, timeout)
else:
rready, wready, xready = (), (), ()
if timeout:
time.sleep(timeout)
# Gather ready events corresponding to the ready waitables.
ready_events = set()
for ready in rready:
ready_events.add(waitable_to_event[('r', ready)])
for ready in wready:
ready_events.add(waitable_to_event[('w', ready)])
for ready in xready:
ready_events.add(waitable_to_event[('x', ready)])
# Gather any finished sleeps.
for event in events:
if isinstance(event, SleepEvent) and event.time_left() == 0.0:
ready_events.add(event)
return ready_events
class ThreadException(Exception):
def __init__(self, coro, exc_info):
self.coro = coro
self.exc_info = exc_info
def reraise(self):
_reraise(self.exc_info[0], self.exc_info[1], self.exc_info[2])
SUSPENDED = Event() # Special sentinel placeholder for suspended threads.
class Delegated(Event):
"""Placeholder indicating that a thread has delegated execution to a
different thread.
"""
def __init__(self, child):
self.child = child
def run(root_coro):
"""Schedules a coroutine, running it to completion. This
encapsulates the Bluelet scheduler, which the root coroutine can
add to by spawning new coroutines.
"""
# The "threads" dictionary keeps track of all the currently-
# executing and suspended coroutines. It maps coroutines to their
# currently "blocking" event. The event value may be SUSPENDED if
# the coroutine is waiting on some other condition: namely, a
# delegated coroutine or a joined coroutine. In this case, the
# coroutine should *also* appear as a value in one of the below
# dictionaries `delegators` or `joiners`.
threads = {root_coro: ValueEvent(None)}
# Maps child coroutines to delegating parents.
delegators = {}
# Maps child coroutines to joining (exit-waiting) parents.
joiners = collections.defaultdict(list)
def complete_thread(coro, return_value):
"""Remove a coroutine from the scheduling pool, awaking
delegators and joiners as necessary and returning the specified
value to any delegating parent.
"""
del threads[coro]
# Resume delegator.
if coro in delegators:
threads[delegators[coro]] = ValueEvent(return_value)
del delegators[coro]
# Resume joiners.
if coro in joiners:
for parent in joiners[coro]:
threads[parent] = ValueEvent(None)
del joiners[coro]
def advance_thread(coro, value, is_exc=False):
"""After an event is fired, run a given coroutine associated with
it in the threads dict until it yields again. If the coroutine
exits, then the thread is removed from the pool. If the coroutine
raises an exception, it is reraised in a ThreadException. If
is_exc is True, then the value must be an exc_info tuple and the
exception is thrown into the coroutine.
"""
try:
if is_exc:
next_event = coro.throw(*value)
else:
next_event = coro.send(value)
except StopIteration:
# Thread is done.
complete_thread(coro, None)
except:
# Thread raised some other exception.
del threads[coro]
raise ThreadException(coro, sys.exc_info())
else:
if isinstance(next_event, types.GeneratorType):
# Automatically invoke sub-coroutines. (Shorthand for
# explicit bluelet.call().)
next_event = DelegationEvent(next_event)
threads[coro] = next_event
def kill_thread(coro):
"""Unschedule this thread and its (recursive) delegates.
"""
# Collect all coroutines in the delegation stack.
coros = [coro]
while isinstance(threads[coro], Delegated):
coro = threads[coro].child
coros.append(coro)
# Complete each coroutine from the top to the bottom of the
# stack.
for coro in reversed(coros):
complete_thread(coro, None)
# Continue advancing threads until root thread exits.
exit_te = None
while threads:
try:
# Look for events that can be run immediately. Continue
# running immediate events until nothing is ready.
while True:
have_ready = False
for coro, event in list(threads.items()):
if isinstance(event, SpawnEvent):
threads[event.spawned] = ValueEvent(None) # Spawn.
advance_thread(coro, None)
have_ready = True
elif isinstance(event, ValueEvent):
advance_thread(coro, event.value)
have_ready = True
elif isinstance(event, ExceptionEvent):
advance_thread(coro, event.exc_info, True)
have_ready = True
elif isinstance(event, DelegationEvent):
threads[coro] = Delegated(event.spawned) # Suspend.
threads[event.spawned] = ValueEvent(None) # Spawn.
delegators[event.spawned] = coro
have_ready = True
elif isinstance(event, ReturnEvent):
# Thread is done.
complete_thread(coro, event.value)
have_ready = True
elif isinstance(event, JoinEvent):
threads[coro] = SUSPENDED # Suspend.
joiners[event.child].append(coro)
have_ready = True
elif isinstance(event, KillEvent):
threads[coro] = ValueEvent(None)
kill_thread(event.child)
have_ready = True
# Only start the select when nothing else is ready.
if not have_ready:
break
# Wait and fire.
event2coro = dict((v, k) for k, v in threads.items())
for event in _event_select(threads.values()):
# Run the IO operation, but catch socket errors.
try:
value = event.fire()
except socket.error as exc:
if isinstance(exc.args, tuple) and \
exc.args[0] == errno.EPIPE:
# Broken pipe. Remote host disconnected.
pass
else:
traceback.print_exc()
# Abort the coroutine.
threads[event2coro[event]] = ReturnEvent(None)
else:
advance_thread(event2coro[event], value)
except ThreadException as te:
# Exception raised from inside a thread.
event = ExceptionEvent(te.exc_info)
if te.coro in delegators:
# The thread is a delegate. Raise exception in its
# delegator.
threads[delegators[te.coro]] = event
del delegators[te.coro]
else:
# The thread is root-level. Raise in client code.
exit_te = te
break
except:
# For instance, KeyboardInterrupt during select(). Raise
# into root thread and terminate others.
threads = {root_coro: ExceptionEvent(sys.exc_info())}
# If any threads still remain, kill them.
for coro in threads:
coro.close()
# If we're exiting with an exception, raise it in the client.
if exit_te:
exit_te.reraise()
# Sockets and their associated events.
class SocketClosedError(Exception):
pass
class Listener(object):
"""A socket wrapper object for listening sockets.
"""
def __init__(self, host, port):
"""Create a listening socket on the given hostname and port.
"""
self._closed = False
self.host = host
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((host, port))
self.sock.listen(5)
def accept(self):
"""An event that waits for a connection on the listening socket.
When a connection is made, the event returns a Connection
object.
"""
if self._closed:
raise SocketClosedError()
return AcceptEvent(self)
def close(self):
"""Immediately close the listening socket. (Not an event.)
"""
self._closed = True
self.sock.close()
class Connection(object):
"""A socket wrapper object for connected sockets.
"""
def __init__(self, sock, addr):
self.sock = sock
self.addr = addr
self._buf = b''
self._closed = False
def close(self):
"""Close the connection."""
self._closed = True
self.sock.close()
def recv(self, size):
"""Read at most size bytes of data from the socket."""
if self._closed:
raise SocketClosedError()
if self._buf:
# We already have data read previously.
out = self._buf[:size]
self._buf = self._buf[size:]
return ValueEvent(out)
else:
return ReceiveEvent(self, size)
def send(self, data):
"""Sends data on the socket, returning the number of bytes
successfully sent.
"""
if self._closed:
raise SocketClosedError()
return SendEvent(self, data)
def sendall(self, data):
"""Send all of data on the socket."""
if self._closed:
raise SocketClosedError()
return SendEvent(self, data, True)
def readline(self, terminator=b"\n", bufsize=1024):
"""Reads a line (delimited by terminator) from the socket."""
if self._closed:
raise SocketClosedError()
while True:
if terminator in self._buf:
line, self._buf = self._buf.split(terminator, 1)
line += terminator
yield ReturnEvent(line)
break
data = yield ReceiveEvent(self, bufsize)
if data:
self._buf += data
else:
line = self._buf
self._buf = b''
yield ReturnEvent(line)
break
class AcceptEvent(WaitableEvent):
"""An event for Listener objects (listening sockets) that suspends
execution until the socket gets a connection.
"""
def __init__(self, listener):
self.listener = listener
def waitables(self):
return (self.listener.sock,), (), ()
def fire(self):
sock, addr = self.listener.sock.accept()
return Connection(sock, addr)
class ReceiveEvent(WaitableEvent):
"""An event for Connection objects (connected sockets) for
asynchronously reading data.
"""
def __init__(self, conn, bufsize):
self.conn = conn
self.bufsize = bufsize
def waitables(self):
return (self.conn.sock,), (), ()
def fire(self):
return self.conn.sock.recv(self.bufsize)
class SendEvent(WaitableEvent):
"""An event for Connection objects (connected sockets) for
asynchronously writing data.
"""
def __init__(self, conn, data, sendall=False):
self.conn = conn
self.data = data
self.sendall = sendall
def waitables(self):
return (), (self.conn.sock,), ()
def fire(self):
if self.sendall:
return self.conn.sock.sendall(self.data)
else:
return self.conn.sock.send(self.data)
# Public interface for threads; each returns an event object that
# can immediately be "yield"ed.
def null():
"""Event: yield to the scheduler without doing anything special.
"""
return ValueEvent(None)
def spawn(coro):
"""Event: add another coroutine to the scheduler. Both the parent
and child coroutines run concurrently.
"""
if not isinstance(coro, types.GeneratorType):
raise ValueError('%s is not a coroutine' % coro)
return SpawnEvent(coro)
def call(coro):
"""Event: delegate to another coroutine. The current coroutine
is resumed once the sub-coroutine finishes. If the sub-coroutine
returns a value using end(), then this event returns that value.
"""
if not isinstance(coro, types.GeneratorType):
raise ValueError('%s is not a coroutine' % coro)
return DelegationEvent(coro)
def end(value=None):
"""Event: ends the coroutine and returns a value to its
delegator.
"""
return ReturnEvent(value)
def read(fd, bufsize=None):
"""Event: read from a file descriptor asynchronously."""
if bufsize is None:
# Read all.
def reader():
buf = []
while True:
data = yield read(fd, 1024)
if not data:
break
buf.append(data)
yield ReturnEvent(''.join(buf))
return DelegationEvent(reader())
else:
return ReadEvent(fd, bufsize)
def write(fd, data):
"""Event: write to a file descriptor asynchronously."""
return WriteEvent(fd, data)
def connect(host, port):
"""Event: connect to a network address and return a Connection
object for communicating on the socket.
"""
addr = (host, port)
sock = socket.create_connection(addr)
return ValueEvent(Connection(sock, addr))
def sleep(duration):
"""Event: suspend the thread for ``duration`` seconds.
"""
return SleepEvent(duration)
def join(coro):
"""Suspend the thread until another, previously `spawn`ed thread
completes.
"""
return JoinEvent(coro)
def kill(coro):
"""Halt the execution of a different `spawn`ed thread.
"""
return KillEvent(coro)
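# Illustrative sketch (not part of the original module): how the thread
# primitives above compose. The helper names below are hypothetical, and the
# commented call assumes the module-level run() scheduler defined earlier in
# this file.
def _demo_child(n):
    yield sleep(0.1)            # suspend this thread for 100 ms
    yield end(n * 2)            # return a value to the delegating parent
def _demo_parent():
    doubled = yield call(_demo_child(21))  # delegate and collect the end() value
    yield spawn(_demo_child(doubled))      # run another child concurrently
    yield null()                           # yield to the scheduler once
# run(_demo_parent())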
# Convenience function for running socket servers.
def server(host, port, func):
"""A coroutine that runs a network server. Host and port specify the
listening address. func should be a coroutine that takes a single
parameter, a Connection object. The coroutine is invoked for every
incoming connection on the listening socket.
"""
def handler(conn):
try:
yield func(conn)
finally:
conn.close()
listener = Listener(host, port)
try:
while True:
conn = yield listener.accept()
yield spawn(handler(conn))
except KeyboardInterrupt:
pass
finally:
listener.close()
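# Minimal usage sketch (not part of the original module): an echo server built
# from server() above. The host and port are placeholders, and the commented
# call assumes the module-level run() scheduler defined earlier in this file.
def _echo(conn):
    while True:
        line = yield conn.readline()
        if not line:
            break
        yield conn.sendall(line)
# run(server('127.0.0.1', 4915, _echo))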
|
sonuyos/couchpotato
|
refs/heads/master
|
libs/pyasn1/type/constraint.py
|
382
|
#
# ASN.1 subtype constraints classes.
#
# Constraints are relatively rare, but every ASN.1 object
# checks all the time whether it has any constraints and
# whether they are applicable to the object.
#
# What we're going to do is define objects/functions that
# can be called unconditionally if they are present, and that
# are simply not present if there are no constraints.
#
# Original concept and code by Mike C. Fletcher.
#
import sys
from pyasn1.type import error
class AbstractConstraint:
"""Abstract base-class for constraint objects
Constraints should be stored in a simple sequence in the
namespace of their client Asn1Item sub-classes.
"""
def __init__(self, *values):
self._valueMap = {}
self._setValues(values)
self.__hashedValues = None
def __call__(self, value, idx=None):
try:
self._testValue(value, idx)
except error.ValueConstraintError:
raise error.ValueConstraintError(
'%s failed at: \"%s\"' % (self, sys.exc_info()[1])
)
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join([repr(x) for x in self._values])
)
def __eq__(self, other):
return self is other and True or self._values == other
def __ne__(self, other): return self._values != other
def __lt__(self, other): return self._values < other
def __le__(self, other): return self._values <= other
def __gt__(self, other): return self._values > other
def __ge__(self, other): return self._values >= other
if sys.version_info[0] <= 2:
def __nonzero__(self): return bool(self._values)
else:
def __bool__(self): return bool(self._values)
def __hash__(self):
if self.__hashedValues is None:
self.__hashedValues = hash((self.__class__.__name__, self._values))
return self.__hashedValues
def _setValues(self, values): self._values = values
def _testValue(self, value, idx):
raise error.ValueConstraintError(value)
# Constraints derivation logic
def getValueMap(self): return self._valueMap
def isSuperTypeOf(self, otherConstraint):
return self in otherConstraint.getValueMap() or \
otherConstraint is self or otherConstraint == self
def isSubTypeOf(self, otherConstraint):
return otherConstraint in self._valueMap or \
otherConstraint is self or otherConstraint == self
class SingleValueConstraint(AbstractConstraint):
"""Value must be part of defined values constraint"""
def _testValue(self, value, idx):
# XXX index vals for performance?
if value not in self._values:
raise error.ValueConstraintError(value)
class ContainedSubtypeConstraint(AbstractConstraint):
"""Value must satisfy all of defined set of constraints"""
def _testValue(self, value, idx):
for c in self._values:
c(value, idx)
class ValueRangeConstraint(AbstractConstraint):
"""Value must be within start and stop values (inclusive)"""
def _testValue(self, value, idx):
if value < self.start or value > self.stop:
raise error.ValueConstraintError(value)
def _setValues(self, values):
if len(values) != 2:
raise error.PyAsn1Error(
'%s: bad constraint values' % (self.__class__.__name__,)
)
self.start, self.stop = values
if self.start > self.stop:
raise error.PyAsn1Error(
'%s: screwed constraint values (start > stop): %s > %s' % (
self.__class__.__name__,
self.start, self.stop
)
)
AbstractConstraint._setValues(self, values)
class ValueSizeConstraint(ValueRangeConstraint):
"""len(value) must be within start and stop values (inclusive)"""
def _testValue(self, value, idx):
l = len(value)
if l < self.start or l > self.stop:
raise error.ValueConstraintError(value)
class PermittedAlphabetConstraint(SingleValueConstraint):
def _setValues(self, values):
self._values = ()
for v in values:
self._values = self._values + tuple(v)
def _testValue(self, value, idx):
for v in value:
if v not in self._values:
raise error.ValueConstraintError(value)
# This is a bit kludgy, meaning two op modes within a single constraint
class InnerTypeConstraint(AbstractConstraint):
    """Value must satisfy type and presence constraints"""
def _testValue(self, value, idx):
if self.__singleTypeConstraint:
self.__singleTypeConstraint(value)
elif self.__multipleTypeConstraint:
if idx not in self.__multipleTypeConstraint:
raise error.ValueConstraintError(value)
constraint, status = self.__multipleTypeConstraint[idx]
            if status == 'ABSENT': # XXX presence is not checked!
raise error.ValueConstraintError(value)
constraint(value)
def _setValues(self, values):
self.__multipleTypeConstraint = {}
self.__singleTypeConstraint = None
for v in values:
if isinstance(v, tuple):
self.__multipleTypeConstraint[v[0]] = v[1], v[2]
else:
self.__singleTypeConstraint = v
AbstractConstraint._setValues(self, values)
# Boolean ops on constraints
class ConstraintsExclusion(AbstractConstraint):
"""Value must not fit the single constraint"""
def _testValue(self, value, idx):
try:
self._values[0](value, idx)
except error.ValueConstraintError:
return
else:
raise error.ValueConstraintError(value)
def _setValues(self, values):
if len(values) != 1:
raise error.PyAsn1Error('Single constraint expected')
AbstractConstraint._setValues(self, values)
class AbstractConstraintSet(AbstractConstraint):
    """Abstract base class for sets of constraints"""
def __getitem__(self, idx): return self._values[idx]
def __add__(self, value): return self.__class__(self, value)
def __radd__(self, value): return self.__class__(self, value)
def __len__(self): return len(self._values)
# Constraints inclusion in sets
def _setValues(self, values):
self._values = values
for v in values:
self._valueMap[v] = 1
self._valueMap.update(v.getValueMap())
class ConstraintsIntersection(AbstractConstraintSet):
"""Value must satisfy all constraints"""
def _testValue(self, value, idx):
for v in self._values:
v(value, idx)
class ConstraintsUnion(AbstractConstraintSet):
"""Value must satisfy at least one constraint"""
def _testValue(self, value, idx):
for v in self._values:
try:
v(value, idx)
except error.ValueConstraintError:
pass
else:
return
raise error.ValueConstraintError(
'all of %s failed for \"%s\"' % (self._values, value)
)
# XXX
# add tests for type check
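# Illustrative usage sketch (not part of the original module): constraint
# objects are callables that raise error.ValueConstraintError on violation.
if __name__ == '__main__':
    size = ValueSizeConstraint(1, 8)
    alphabet = PermittedAlphabetConstraint('abcdef')
    both = ConstraintsIntersection(size, alphabet)
    both('abc')  # passes silently
    try:
        both('abcxyz')
    except error.ValueConstraintError as exc:
        print('rejected: %s' % (exc,))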
|
ronq/geomesa
|
refs/heads/master
|
geomesa-spark/geomesa_pyspark/src/main/python/geomesa_pyspark/__init__.py
|
7
|
import glob
import os.path
import pkgutil
import sys
import tempfile
import zipfile
__version__ = '${python.version}'
PACKAGE_EXTENSIONS = {'.zip', '.egg', '.jar'}
def configure(jars=[], packages=[], files=[], spark_home=None, spark_master='yarn', tmp_path=None):
os.environ['PYSPARK_PYTHON'] = sys.executable
spark_home = process_spark_home(spark_home)
pyspark_dir = os.path.join(spark_home, 'python')
pyspark_lib_dir = os.path.join(pyspark_dir, 'lib')
pyspark_lib_zips = glob.glob(os.path.join(pyspark_lib_dir, '*.zip'))
sys_path_set = {path for path in sys.path}
for pyspark_lib_zip in pyspark_lib_zips:
if pyspark_lib_zip not in sys_path_set and os.path.basename(pyspark_lib_zip) != 'pyspark.zip':
sys.path.insert(1, pyspark_lib_zip)
if pyspark_dir not in sys_path_set:
sys.path.insert(1, pyspark_dir)
py_files = pyspark_lib_zips + process_executor_packages(packages, tmp_path)
    assert spark_master == 'yarn', 'only yarn master is supported with this release'
import pyspark
    # Need differential behavior for <= Spark 2.0.x; Spark 2.1.0
    # is the first release to provide the module __version__ attribute
pyspark_pre21 = getattr(pyspark, '__version__', None) is None
if pyspark_pre21 and len(jars) > 0:
os.environ['PYSPARK_SUBMIT_ARGS'] = ' '.join(['--driver-class-path', ','.join(jars), 'pyspark-shell'])
conf = (
pyspark.SparkConf()
.setMaster(spark_master)
.set('spark.yarn.dist.jars', ','.join(jars))
.set('spark.yarn.dist.files', ','.join(py_files + files))
.setExecutorEnv('PYTHONPATH', ":".join(map(os.path.basename, py_files)))
.setExecutorEnv('PYSPARK_PYTHON', sys.executable)
)
if not pyspark_pre21 and len(jars):
conf.set('spark.driver.extraClassPath', ','.join(jars))
return conf
def process_spark_home(spark_home):
if spark_home is None:
spark_home = os.environ.get('SPARK_HOME', None)
assert spark_home is not None, 'unable to resolve SPARK_HOME'
assert os.path.isdir(spark_home), '%s is not a directory' % spark_home
os.environ['SPARK_HOME'] = spark_home
return spark_home
def process_executor_packages(executor_packages, tmp_path=None):
if tmp_path is None:
version_info = sys.version_info
tmp_path = os.path.join(tempfile.gettempdir(), 'spark-python-%s.%s' % (version_info.major, version_info.minor))
if not os.path.isdir(tmp_path):
os.makedirs(tmp_path)
driver_packages = {module for _, module, package in pkgutil.iter_modules() if package is True}
executor_files = []
for executor_package in executor_packages:
if executor_package not in driver_packages:
raise ImportError('unable to locate ' + executor_package + ' installed in driver')
package = sys.modules.get(executor_package, None)
if package is None:
package = pkgutil.get_loader(executor_package).load_module(executor_package)
package_path = os.path.dirname(package.__file__)
package_root = os.path.dirname(package_path)
if package_root[-4:].lower() in PACKAGE_EXTENSIONS:
executor_files.append(package_root)
elif os.path.isdir(package_root):
package_version = getattr(package, '__version__', getattr(package, 'VERSION', None))
zip_name = "%s.zip" % executor_package if package_version is None\
else "%s-%s.zip" % (executor_package, package_version)
zip_path = os.path.join(tmp_path, zip_name)
if not os.path.isfile(zip_path):
zip_package(package_path, zip_path)
executor_files.append(zip_path)
return executor_files
def zip_package(package_path, zip_path):
path_offset = len(os.path.dirname(package_path)) + 1
with zipfile.PyZipFile(zip_path, 'w') as writer:
for root, _, files in os.walk(package_path):
for file in files:
full_path = os.path.join(root, file)
archive_path = full_path[path_offset:]
writer.write(full_path, archive_path)
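# Hypothetical usage sketch (not part of the original module); the jar path
# below is a placeholder, not a real artifact:
#
#   import geomesa_pyspark, pyspark.sql
#   conf = geomesa_pyspark.configure(
#       jars=['/path/to/geomesa-spark-runtime.jar'],
#       packages=['geomesa_pyspark'],
#       spark_master='yarn')
#   spark = pyspark.sql.SparkSession.builder.config(conf=conf).getOrCreate()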
|
DataDog/integrations-core
|
refs/heads/master
|
nfsstat/datadog_checks/nfsstat/__init__.py
|
1
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from .__about__ import __version__
from .nfsstat import NfsStatCheck
__all__ = ['__version__', 'NfsStatCheck']
|
6809/DwLoadServer
|
refs/heads/master
|
dwload_server/constants.py
|
1
|
# encoding:utf-8
"""
DwLoadServer - A DWLOAD server written in Python
================================================
:created: 2014 by Jens Diemer - www.jensdiemer.de
:copyleft: 2014 by the DwLoadServer team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
# http://sourceforge.net/p/drivewireserver/wiki/DriveWire_Specification/
OP_NAMEOBJ_MOUNT = 0x01
OP_NAMEOBJ_CREATE = 0x02
OP_READ_EXTENDED = 0xd2
OP_WRITE = 0x57
CODE2NAME = dict( # A little hackish?
[
(v, "%s $%02x" % (k,v))
for k,v in locals().copy().items()
if k.startswith("OP")
]
)
if __name__ == '__main__':
print(CODE2NAME)
|
jcdubacq/pelican-plugins
|
refs/heads/master
|
thumbnailer/thumbnailer.py
|
8
|
import os
import os.path as path
import re
from pelican import signals
import logging
logger = logging.getLogger(__name__)
try:
from PIL import Image, ImageOps
enabled = True
except ImportError:
logging.warning("Unable to load PIL, disabling thumbnailer")
enabled = False
DEFAULT_IMAGE_DIR = "pictures"
DEFAULT_THUMBNAIL_DIR = "thumbnails"
DEFAULT_THUMBNAIL_SIZES = {
'thumbnail_square': '150',
'thumbnail_wide': '150x?',
'thumbnail_tall': '?x150',
}
DEFAULT_TEMPLATE = """<a href="{url}" rel="shadowbox" title="{filename}"><img src="{thumbnail}" alt="{filename}"></a>"""
DEFAULT_GALLERY_THUMB = "thumbnail_square"
class _resizer(object):
""" Resizes based on a text specification, see readme """
REGEX = re.compile(r'(\d+|\?)x(\d+|\?)')
def __init__(self, name, spec, root):
self._name = name
self._spec = spec
# The location of input images from _image_path.
self._root = root
def _null_resize(self, w, h, image):
return image
def _exact_resize(self, w, h, image):
retval = ImageOps.fit(image, (w,h), Image.BICUBIC)
return retval
def _aspect_resize(self, w, h, image):
retval = image.copy()
retval.thumbnail((w, h), Image.ANTIALIAS)
return retval
def resize(self, image):
resizer = self._null_resize
# Square resize and crop
if 'x' not in self._spec:
resizer = self._exact_resize
targetw = int(self._spec)
targeth = targetw
else:
matches = self.REGEX.search(self._spec)
tmpw = matches.group(1)
tmph = matches.group(2)
# Full Size
if tmpw == '?' and tmph == '?':
targetw = image.size[0]
targeth = image.size[1]
resizer = self._null_resize
# Set Height Size
if tmpw == '?':
targetw = image.size[0]
targeth = int(tmph)
resizer = self._aspect_resize
# Set Width Size
elif tmph == '?':
targetw = int(tmpw)
targeth = image.size[1]
resizer = self._aspect_resize
# Scale and Crop
else:
targetw = int(tmpw)
targeth = int(tmph)
resizer = self._exact_resize
logging.debug("Using resizer {0}".format(resizer.__name__))
return resizer(targetw, targeth, image)
def get_thumbnail_name(self, in_path):
# Find the partial path + filename beyond the input image directory.
prefix = path.commonprefix([in_path, self._root])
new_filename = in_path[len(prefix) + 1:]
# Generate the new filename.
(basename, ext) = path.splitext(new_filename)
return "{0}_{1}{2}".format(basename, self._name, ext)
def resize_file_to(self, in_path, out_path, keep_filename=False):
""" Given a filename, resize and save the image per the specification into out_path
:param in_path: path to image file to save. Must be supported by PIL
:param out_path: path to the directory root for the outputted thumbnails to be stored
:return: None
"""
if keep_filename:
filename = path.join(out_path, path.basename(in_path))
else:
filename = path.join(out_path, self.get_thumbnail_name(in_path))
out_path = path.dirname(filename)
if not path.exists(out_path):
os.makedirs(out_path)
if not path.exists(filename):
try:
image = Image.open(in_path)
thumbnail = self.resize(image)
thumbnail.save(filename)
logger.info("Generated Thumbnail {0}".format(path.basename(filename)))
except IOError:
logger.info("Generating Thumbnail for {0} skipped".format(path.basename(filename)))
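# Illustrative examples (not part of the plugin) of the size specification
# that resize() above interprets; the keys mirror DEFAULT_THUMBNAIL_SIZES:
#   '150'   -> exact 150x150, cropped to fit
#   '150x?' -> width 150, height scaled to keep the aspect ratio
#   '?x150' -> height 150, width scaled to keep the aspect ratio
#   '?x?'   -> original size, no resizing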
def resize_thumbnails(pelican):
""" Resize a directory tree full of images into thumbnails
:param pelican: The pelican instance
:return: None
"""
global enabled
if not enabled:
return
in_path = _image_path(pelican)
out_path = path.join(pelican.settings['OUTPUT_PATH'],
pelican.settings.get('THUMBNAIL_DIR', DEFAULT_THUMBNAIL_DIR))
sizes = pelican.settings.get('THUMBNAIL_SIZES', DEFAULT_THUMBNAIL_SIZES)
resizers = dict((k, _resizer(k, v, in_path)) for k,v in sizes.items())
logger.debug("Thumbnailer Started")
for dirpath, _, filenames in os.walk(in_path):
for filename in filenames:
if not filename.startswith('.'):
for name, resizer in resizers.items():
in_filename = path.join(dirpath, filename)
logger.debug("Processing thumbnail {0}=>{1}".format(filename, name))
if pelican.settings.get('THUMBNAIL_KEEP_NAME', False):
resizer.resize_file_to(in_filename, path.join(out_path, name), True)
else:
resizer.resize_file_to(in_filename, out_path)
def _image_path(pelican):
return path.join(pelican.settings['PATH'],
pelican.settings.get("IMAGE_PATH", DEFAULT_IMAGE_DIR))
def expand_gallery(generator, metadata):
""" Expand a gallery tag to include all of the files in a specific directory under IMAGE_PATH
:param pelican: The pelican instance
:return: None
"""
if "gallery" not in metadata or metadata['gallery'] is None:
return # If no gallery specified, we do nothing
lines = [ ]
base_path = _image_path(generator)
in_path = path.join(base_path, metadata['gallery'])
template = generator.settings.get('GALLERY_TEMPLATE', DEFAULT_TEMPLATE)
thumbnail_name = generator.settings.get("GALLERY_THUMBNAIL", DEFAULT_GALLERY_THUMB)
thumbnail_prefix = generator.settings.get("")
    resizer = _resizer(thumbnail_name, '?x?', base_path)
for dirpath, _, filenames in os.walk(in_path):
for filename in filenames:
if not filename.startswith('.'):
url = path.join(dirpath, filename).replace(base_path, "")[1:]
url = path.join('/static', generator.settings.get('IMAGE_PATH', DEFAULT_IMAGE_DIR), url).replace('\\', '/')
logger.debug("GALLERY: {0}".format(url))
thumbnail = resizer.get_thumbnail_name(filename)
thumbnail = path.join('/', generator.settings.get('THUMBNAIL_DIR', DEFAULT_THUMBNAIL_DIR), thumbnail).replace('\\', '/')
lines.append(template.format(
filename=filename,
url=url,
thumbnail=thumbnail,
))
metadata['gallery_content'] = "\n".join(lines)
def register():
signals.finalized.connect(resize_thumbnails)
signals.article_generator_context.connect(expand_gallery)
|
166MMX/openjdk.java.net-openjfx-8u40-rt
|
refs/heads/master
|
modules/web/src/main/native/Tools/Scripts/webkitpy/performance_tests/__init__.py
|
6014
|
# Required for Python to search this directory for module files
|
daweiwu/meta-iotqa-1
|
refs/heads/master
|
lib/oeqa/runtime/iotivity/iotvt_integration_mnode.py
|
1
|
"""
@file iotvt_integration_mnode.py
"""
##
# @addtogroup iotivity iotivity
# @brief This is iotivity component
# @{
# @addtogroup iotvt_integration iotvt_integration
# @brief This is iotvt_integration module
# @{
##
import os
import time
import string
import subprocess
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.helper import get_files_dir
from oeqa.utils.helper import shell_cmd_timeout
from oeqa.utils.helper import run_as, add_group, add_user, remove_user
from oeqa.utils.decorators import tag
@tag(TestType="EFT", FeatureID="IOTOS-754,IOTOS-1019,IOTOS-1004")
class IOtvtIntegrationMNode(oeRuntimeTest):
"""
@class IOtvtIntegrationMNode
"""
@classmethod
def setUpClass(cls):
        '''Clean up all servers and clients first
@fn setUpClass
@param cls
@return
'''
# Init main target
run_as("root", "killall presenceserver presenceclient devicediscoveryserver devicediscoveryclient", target=cls.tc.targets[0])
run_as("root", "killall fridgeserver fridgeclient garageserver garageclient groupserver groupclient", target=cls.tc.targets[0])
run_as("root", "killall roomserver roomclient simpleserver simpleclient simpleserverHQ simpleclientHQ", target=cls.tc.targets[0])
run_as("root", "killall simpleclientserver threadingsample", target=cls.tc.targets[0])
# Init second target
run_as("root", "killall presenceserver presenceclient devicediscoveryserver devicediscoveryclient", target=cls.tc.targets[1])
run_as("root", "killall fridgeserver fridgeclient garageserver garageclient groupserver groupclient", target=cls.tc.targets[1])
run_as("root", "killall roomserver roomclient simpleserver simpleclient simpleserverHQ simpleclientHQ", target=cls.tc.targets[1])
run_as("root", "killall simpleclientserver threadingsample", target=cls.tc.targets[1])
# Clean output file on two targets, main is client part and second is server part
run_as("root", "rm -f /tmp/svr_output", target=cls.tc.targets[1])
run_as("root", "rm -f /tmp/output", target=cls.tc.targets[0])
# add group and non-root user on both sides
add_group("tester", target=cls.tc.targets[0])
add_user("iotivity-tester", "tester", target=cls.tc.targets[0])
add_group("tester", target=cls.tc.targets[1])
add_user("iotivity-tester", "tester", target=cls.tc.targets[1])
# Setup firewall accept for multicast, on both sides
run_as("root", "/usr/sbin/iptables -w -A INPUT -p udp --dport 5683 -j ACCEPT", target=cls.tc.targets[0])
run_as("root", "/usr/sbin/iptables -w -A INPUT -p udp --dport 5684 -j ACCEPT", target=cls.tc.targets[0])
run_as("root", "/usr/sbin/iptables -w -A INPUT -p udp --dport 5683 -j ACCEPT", target=cls.tc.targets[1])
run_as("root", "/usr/sbin/iptables -w -A INPUT -p udp --dport 5684 -j ACCEPT", target=cls.tc.targets[1])
# check if image contains iotivity example applications
(status, output) = run_as("root", "ls /opt/iotivity/examples/resource/", target=cls.tc.targets[0])
if "cpp" in output:
pass
else:
            assert 1 == 0, 'There is no iotivity example app installed in target0'
(status, output) = run_as("root", "ls /opt/iotivity/examples/resource/", target=cls.tc.targets[1])
if "cpp" in output:
pass
else:
            assert 1 == 0, 'There is no iotivity example app installed in target1'
@classmethod
def tearDownClass(cls):
        '''Clean up users
        @fn tearDownClass
@param cls
@return
'''
remove_user("iotivity-tester", target=cls.tc.targets[0])
remove_user("iotivity-tester", target=cls.tc.targets[1])
def get_server_ipv6(self):
"""
@fn get_server_ipv6
@param self
@return
"""
time.sleep(1)
# Check ip address by ifconfig command
interface = "nothing"
(status, interface) = run_as("root", "ifconfig | grep '^enp'", target=self.targets[1])
(status, output) = run_as("root", "ifconfig %s | grep 'inet6 addr:'" % interface.split()[0], target=self.targets[1])
return output.split('%')[0].split()[-1]
def presence_check(self, para):
        '''Helper function used by the presence tests.
@fn presence_check
@param self
@return
'''
# start server
server_cmd = "/opt/iotivity/examples/resource/cpp/presenceserver > /tmp/svr_output &"
(status, output) = run_as("iotivity-tester", server_cmd, target=self.targets[1])
time.sleep(1)
# start client to get info
client_cmd = "/opt/iotivity/examples/resource/cpp/presenceclient -t %d > /tmp/output &" % para
run_as("iotivity-tester", client_cmd, target=self.targets[0])
        # Some platforms are slow and need a longer sleep, e.g. MinnowMax.
time.sleep(60)
(status, output) = run_as("iotivity-tester", "cat /tmp/output", target=self.targets[0])
run_as("root", "killall presenceserver presenceclient", target=self.targets[0])
run_as("root", "killall presenceserver presenceclient", target=self.targets[1])
time.sleep(3)
return output.count("Received presence notification from : %s" % self.targets[1].ip) + \
output.count("Received presence notification from : %s" % self.get_server_ipv6())
def test_mnode_fridge(self):
'''
Test fridgeserver and fridgeclient.
        The server registers a resource with 2 doors and 1 light; the client connects to the
        server and fetches the information to print out.
@fn test_fridge
@param self
@return
'''
# ensure env is clean
# start server
server_cmd = "/opt/iotivity/examples/resource/cpp/fridgeserver > /tmp/svr_output &"
(status, output) = run_as("iotivity-tester", server_cmd, target=self.targets[1])
time.sleep(1)
# start client to get info
client_cmd = "/opt/iotivity/examples/resource/cpp/fridgeclient > /tmp/output &"
run_as("iotivity-tester", client_cmd, target=self.targets[0])
time.sleep(5)
(status, output) = run_as("iotivity-tester", 'cat /tmp/output', target=self.targets[0])
# judge if the values are correct
ret = 0
if "Name of device: Intel Powered 2 door, 1 light refrigerator" in output and \
"Delete ID is 0 and resource URI is /device" in output:
pass
else:
ret = 1
# kill server and client
run_as("root", "killall fridgeserver fridgeclient", target=self.targets[0])
run_as("root", "killall fridgeserver fridgeclient", target=self.targets[1])
time.sleep(3)
##
# TESTPOINT: #1, test_fridge
#
self.assertEqual(ret, 0, msg="Error messages: %s" % output)
def test_mnode_garage(self):
'''
Test garageserver and garageclient.
        While the server and client communicate, remove the Name attribute from the
        OCRepresentation. The attribute count of the OCRepresentation should then
        decrease by 1.
@fn test_garage
@param self
@return
'''
# start server
server_cmd = "/opt/iotivity/examples/resource/cpp/garageserver > /tmp/svr_output &"
(status, output) = run_as("iotivity-tester", server_cmd, target=self.targets[1])
time.sleep(1)
# start client to get info
client_cmd = "/opt/iotivity/examples/resource/cpp/garageclient > /tmp/output &"
run_as("iotivity-tester", client_cmd, target=self.targets[0])
time.sleep(5)
(status, output) = run_as("iotivity-tester", 'cat /tmp/output', target=self.targets[0])
# judge if the values are correct
ret = 0
if "GET request was successful" in output and \
"attribute: name, was removed successfully from rep2." in output and \
"Number of attributes in rep2: 6" in output and \
"PUT request was successful" in output:
pass
else:
ret = 1
# kill server and client
run_as("root", "killall garageserver garageclient", target=self.targets[0])
run_as("root", "killall garageserver garageclient", target=self.targets[1])
time.sleep(3)
##
# TESTPOINT: #1, test_garage
#
self.assertEqual(ret, 0, msg="Error messages: %s" % output)
def test_mnode_group(self):
'''
        groupclient has 4 main operations; only option 1 is exercised here.
        In option 1 (the user inputs 1), it sets the ActionSet value of the
        representation. This case checks that the set operation succeeds.
@fn test_group
@param self
@return
'''
# start light server and group server
lightserver_cmd = "/opt/iotivity/examples/resource/cpp/lightserver > /tmp/svr_output &"
(status, output) = run_as("root", lightserver_cmd, target=self.targets[1])
time.sleep(2)
ssh_cmd = "ssh root@%s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=ERROR" % self.targets[1].ip
groupserver_cmd = "/opt/iotivity/examples/resource/cpp/groupserver > /dev/null 2>&1"
subprocess.Popen("%s %s" % (ssh_cmd, groupserver_cmd), shell=True)
time.sleep(3)
# start client to get info, here needs user input. So use expect
exp_cmd = os.path.join(os.path.dirname(__file__), "files/group_client.exp")
status, output = shell_cmd_timeout("expect %s %s" % (exp_cmd, self.target.ip), timeout=200)
# kill server and client
run_as("root", "killall lightserver groupserver groupclient", target=self.targets[0])
run_as("root", "killall lightserver groupserver groupclient", target=self.targets[1])
time.sleep(3)
##
# TESTPOINT: #1, test_group
#
        self.assertEqual(status, 2, msg="expect execution failed\n %s" % output)
def test_mnode_presence_unicast(self):
'''
        The presence test is complex and contains 6 sub-tests.
        The client's main goal is to observe the server resource presence status (present/stop).
        Every change triggers a 'Received presence notification' on the client side.
        For each client observation mode:
-t 1 Unicast --- it will receive 7 notifications
-t 2 Unicast with one filter --- it will receive 5 notifications
-t 3 Unicast with two filters --- it will receive 6 notifications
-t 4 Multicast --- it will receive 7 notifications
-t 5 Multicast with one filter --- it will receive 5 notifications
-t 6 Multicast with two filters --- it will receive 6 notifications
@fn test_presence_unicast
@param self
@return
'''
number = self.presence_check(1)
##
# TESTPOINT: #1, test_presence_unicast
#
        assert number > 0, "type 1 received no presence notifications"
def test_mnode_presence_unicast_one_filter(self):
''' See instruction in test_presence_unicast.
@fn test_presence_unicast_one_filter
@param self
@return
'''
number = self.presence_check(2)
##
# TESTPOINT: #1, test_presence_unicast_one_filter
#
        assert number > 0, "type 2 received no presence notifications"
def test_mnode_presence_unicast_two_filters(self):
''' See instruction in test_presence_unicast.
@fn test_presence_unicast_two_filters
@param self
@return
'''
number = self.presence_check(3)
##
# TESTPOINT: #1, test_presence_unicast_two_filters
#
        assert number > 0, "type 3 received no presence notifications"
def test_mnode_presence_multicast(self):
''' See instruction in test_presence_unicast.
@fn test_presence_multicast
@param self
@return
'''
number = self.presence_check(4)
##
# TESTPOINT: #1, test_presence_multicast
#
        assert number > 0, "type 4 received no presence notifications"
def test_mnode_presence_multicast_one_filter(self):
''' See instruction in test_presence_unicast.
@fn test_presence_multicast_one_filter
@param self
@return
'''
number = self.presence_check(5)
##
# TESTPOINT: #1, test_presence_multicast_one_filter
#
        assert number > 0, "type 5 received no presence notifications"
def test_mnode_presence_multicast_two_filters(self):
''' See instruction in test_presence_unicast.
@fn test_presence_multicast_two_filters
@param self
@return
'''
number = self.presence_check(6)
##
# TESTPOINT: #1, test_presence_multicast_two_filters
#
        assert number > 0, "type 6 received no presence notifications"
def test_mnode_room_default_collection(self):
'''
        When the number is 1 and the request is PUT, the light and fan respond individually.
        So there is no 'In Server CPP entity handler' output; each response is given by
        the light or the fan.
@fn test_room_default_collection
@param self
@return
'''
# start server
server_cmd = "/opt/iotivity/examples/resource/cpp/roomserver 1 > /tmp/svr_output &"
(status, output) = run_as("iotivity-tester", server_cmd, target=self.targets[1])
time.sleep(1)
# start client to get info
client_cmd = "/opt/iotivity/examples/resource/cpp/roomclient > /tmp/output &"
run_as("iotivity-tester", client_cmd, target=self.targets[0])
time.sleep(5)
(status, output) = run_as("iotivity-tester", "cat /tmp/svr_output", target=self.targets[1])
# kill server and client
run_as("root", "killall roomserver roomclient", target=self.targets[0])
run_as("root", "killall roomserver roomclient", target=self.targets[1])
time.sleep(3)
##
# TESTPOINT: #1, test_room_default_collection
#
self.assertEqual(output.count("In Server CPP entity handler"), 0, msg="CPP entity handler is: %s" % output)
def test_mnode_room_application_collection(self):
'''
        When the number is 2 and the request is PUT, the room entity handler responds
        for the light and the fan. So 'In Server CPP entity handler' is printed 3 times;
        in the middle one it handles the light and the fan.
@fn test_room_application_collection
@param self
@return
'''
# start server
server_cmd = "/opt/iotivity/examples/resource/cpp/roomserver 2 > /tmp/svr_output &"
run_as("iotivity-tester", server_cmd, target=self.targets[1])
time.sleep(1)
# start client to get info
client_cmd = "/opt/iotivity/examples/resource/cpp/roomclient > /tmp/output &"
run_as("iotivity-tester", client_cmd, target=self.targets[0])
time.sleep(6)
(status, output) = run_as("iotivity-tester", "cat /tmp/svr_output", target=self.targets[1])
# kill server and client
run_as("root", "killall roomserver roomclient", target=self.targets[0])
run_as("root", "killall roomserver roomclient", target=self.targets[1])
time.sleep(3)
##
# TESTPOINT: #1, test_room_application_collection
#
self.assertEqual(output.count("In Server CPP entity handler"), 3, msg="CPP entity handler is: %s" % output)
def test_mnode_simple(self):
'''
Test simpleserver and simpleclient.
        After finding the resource, simpleclient performs GET, PUT, POST and Observe sequentially.
@fn test_simple
@param self
@return
'''
for i in range(3):
# start server
server_cmd = "/opt/iotivity/examples/resource/cpp/simpleserver > /tmp/svr_output &"
(status, output) = run_as("iotivity-tester", server_cmd, target=self.targets[1])
time.sleep(1)
# start client to get info
client_cmd = "/opt/iotivity/examples/resource/cpp/simpleclient > /tmp/output &"
run_as("iotivity-tester", client_cmd, target=self.targets[0])
            print "\npatience... simpleclient needs a long time for its observation"
time.sleep(70)
(status, output) = run_as("iotivity-tester", 'cat /tmp/output', target=self.targets[0])
# kill server and client
run_as("root", "killall simpleserver simpleclient", target=self.targets[0])
run_as("root", "killall simpleserver simpleclient", target=self.targets[1])
time.sleep(3)
# judge if the values are correct
ret = 0
if "DISCOVERED Resource" in output and \
"GET request was successful" in output and \
"PUT request was successful" in output and \
"POST request was successful" in output and \
"Observe is used." in output:
break
else:
ret = 1
##
# TESTPOINT: #1, test_simple
#
self.assertEqual(ret, 0, msg="Error messages: %s" % output)
def test_mnode_simpleHQ(self):
'''
Test simpleserverHQ and simpleclientHQ.
        Compared to simpleserver, simpleserverHQ removes SlowResponse and gives
        sendResponse (on PUT) / sendPostResponse (on POST). Basically, they
        are the same.
@fn test_simpleHQ
@param self
@return
'''
for i in range(3):
# start server
server_cmd = "/opt/iotivity/examples/resource/cpp/simpleserverHQ > /tmp/svr_output &"
run_as("iotivity-tester", server_cmd, target=self.targets[1])
time.sleep(1)
# start client to get info
client_cmd = "/opt/iotivity/examples/resource/cpp/simpleclientHQ > /tmp/output &"
run_as("iotivity-tester", client_cmd, target=self.targets[0])
            print "\npatience... simpleclientHQ needs a long time for its observation"
time.sleep(70)
(status, output) = run_as("iotivity-tester", 'cat /tmp/output', target=self.targets[0])
# kill server and client
run_as("root", "killall simpleserverHQ simpleclientHQ", target=self.targets[0])
run_as("root", "killall simpleserverHQ simpleclientHQ", target=self.targets[1])
time.sleep(3)
# judge if the values are correct
ret = 0
if "DISCOVERED Resource" in output and \
"GET request was successful" in output and \
"PUT request was successful" in output and \
"POST request was successful" in output and \
"Observe is used." in output:
break
else:
ret = 1
##
# TESTPOINT: #1, test_simpleHQ
#
self.assertEqual(ret, 0, msg="Error messages: %s" % output)
##
# @}
# @}
##
|
johnnycastilho/brotli
|
refs/heads/master
|
tools/rfc-format.py
|
99
|
#!/usr/bin/python
#
# Takes an .nroff source file and prints a text file in RFC format.
#
# Usage: rfc-format.py <source file>
import re
import sys
from subprocess import Popen, PIPE
def Readfile(fn):
f = open(fn, "r")
return f.read()
def FixNroffOutput(buf):
p = re.compile(r'(.*)FORMFEED(\[Page\s+\d+\])$')
strip_empty = False
out = ""
for line in buf.split("\n"):
line = line.replace("\xe2\x80\x99", "'")
line = line.replace("\xe2\x80\x90", "-")
for i in range(len(line)):
if ord(line[i]) > 128:
print >>sys.stderr, "Invalid character %d\n" % ord(line[i])
m = p.search(line)
if strip_empty and len(line) == 0:
continue
if m:
out += p.sub(r'\1 \2\n\f', line)
out += "\n"
strip_empty = True
else:
out += "%s\n" % line
strip_empty = False
return out.rstrip("\n")
def Nroff(buf):
p = Popen(["nroff", "-ms"], stdin=PIPE, stdout=PIPE)
out, err = p.communicate(input=buf)
return FixNroffOutput(out)
def FormatTocLine(section, title, page):
line = ""
level = 1
if section:
level = section.count(".")
for i in range(level):
line += " "
if section:
line += "%s " % section
line += "%s " % title
pagenum = "%d" % page
nspace = 72 - len(line) - len(pagenum)
if nspace % 2:
line += " "
for i in range(nspace / 2):
line += ". "
line += "%d\n" % page
return line
def CreateToc(buf):
p1 = re.compile(r'^((\d+\.)+)\s+(.*)$')
p2 = re.compile(r'^(Appendix [A-Z].)\s+(.*)$')
p3 = re.compile(r'\[Page (\d+)\]$')
found = 0
page = 1
out = ""
for line in buf.split("\n"):
m1 = p1.search(line)
m2 = p2.search(line)
m3 = p3.search(line)
if m1:
out += FormatTocLine(m1.group(1), m1.group(3), page)
elif m2:
out += FormatTocLine(m2.group(1), m2.group(2), page)
elif line.startswith("Authors"):
out += FormatTocLine(None, line, page)
elif m3:
page = int(m3.group(1)) + 1
return out
src = Readfile(sys.argv[1])
out = Nroff(src)
toc = CreateToc(out)
src = src.replace("INSERT_TOC_HERE", toc)
print Nroff(src)
|
drpeteb/scipy
|
refs/heads/master
|
scipy/weave/_dumb_shelve.py
|
100
|
from __future__ import division, print_function, absolute_import
from shelve import Shelf
try:
import zlib
except ImportError:
# Some python installations don't have zlib.
pass
import pickle
class DbfilenameShelf(Shelf):
"""Shelf implementation using the "anydbm" generic dbm interface.
This is initialized with the filename for the dbm database.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, filename, flag='c'):
from . import _dumbdbm_patched
Shelf.__init__(self, _dumbdbm_patched.open(filename, flag))
def __getitem__(self, key):
compressed = self.dict[key]
try:
r = zlib.decompress(compressed)
except zlib.error:
r = compressed
except NameError:
r = compressed
return pickle.loads(r)
def __setitem__(self, key, value):
s = pickle.dumps(value,1)
try:
self.dict[key] = zlib.compress(s)
except NameError:
            # zlib doesn't exist, leave it uncompressed.
self.dict[key] = s
def open(filename, flag='c'):
"""Open a persistent dictionary for reading and writing.
Argument is the filename for the dbm database.
See the module's __doc__ string for an overview of the interface.
"""
return DbfilenameShelf(filename, flag)
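# Hypothetical usage sketch (not part of the original module): values are
# pickled and, when zlib is available, transparently compressed on disk.
# db = open('/tmp/example_shelf')
# db['key'] = {'a': 1}
# print(db['key'])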
|
nvoron23/arangodb
|
refs/heads/devel
|
3rdParty/V8-4.3.61/build/gyp/test/mac/gyptest-app-error.py
|
164
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that invalid strings files cause the build to fail.
"""
import TestCmd
import TestGyp
import sys
if sys.platform == 'darwin':
expected_error = 'Old-style plist parser: missing semicolon in dictionary'
saw_expected_error = [False] # Python2 has no "nonlocal" keyword.
def match(a, b):
if a == b:
return True
if not TestCmd.is_List(a):
a = a.split('\n')
if not TestCmd.is_List(b):
b = b.split('\n')
if expected_error in '\n'.join(a) + '\n'.join(b):
saw_expected_error[0] = True
return True
return False
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'], match=match)
test.run_gyp('test-error.gyp', chdir='app-bundle')
test.build('test-error.gyp', test.ALL, chdir='app-bundle')
# Ninja pipes stderr of subprocesses to stdout.
if test.format == 'ninja' and expected_error in test.stdout():
saw_expected_error[0] = True
if saw_expected_error[0]:
test.pass_test()
else:
test.fail_test()
|
kashif/chainer
|
refs/heads/master
|
tests/chainer_tests/functions_tests/math_tests/test_sum.py
|
1
|
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(*testing.product({
'axis': [None, 0, 1, 2, -1, (0, 1), (1, 0), (0, -1), (-2, 0)],
'keepdims': [True, False],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestSum(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(self.dtype)
g_shape = self.x.sum(axis=self.axis, keepdims=self.keepdims).shape
self.gy = numpy.random.uniform(-1, 1, g_shape).astype(self.dtype)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = functions.sum(x, axis=self.axis, keepdims=self.keepdims)
self.assertEqual(y.data.dtype, self.dtype)
y_expect = self.x.sum(axis=self.axis, keepdims=self.keepdims)
if self.dtype == numpy.float16:
options = {'atol': 1e-3, 'rtol': 1e-3}
else:
options = {}
testing.assert_allclose(y_expect, y.data, **options)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
lambda x: functions.sum(x, self.axis, self.keepdims),
x_data, y_grad, atol=1e-4, dtype=numpy.float64)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_axis_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestSumError(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(self.dtype)
def test_invalid_axis_type(self):
with self.assertRaises(TypeError):
functions.Sum([0])
def test_invalid_axis_type_in_tuple(self):
with self.assertRaises(TypeError):
functions.Sum((1, 'x'))
def test_duplicate_axis(self):
with self.assertRaises(ValueError):
functions.Sum((0, 0))
def test_pos_neg_duplicate_axis(self):
with self.assertRaises(ValueError):
self.x.sum(axis=(1, -2))
testing.run_module(__name__, __file__)
|
hlt-mt/tensorflow
|
refs/heads/master
|
tensorflow/python/kernel_tests/sparsemask_op_test.py
|
10
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
class SparseMaskTest(tf.test.TestCase):
def testBasic(self):
values = np.random.rand(4, 4).astype(np.single)
indices = np.array([0, 2, 3, 4], dtype=np.int32)
mask_indices = np.array([0], dtype=np.int32)
out_values = values[1:, :]
out_indices = np.array([2, 3, 4], dtype=np.int32)
with self.test_session() as sess:
values_tensor = tf.convert_to_tensor(values)
indices_tensor = tf.convert_to_tensor(indices)
mask_indices_tensor = tf.convert_to_tensor(mask_indices)
t = tf.IndexedSlices(values_tensor, indices_tensor)
masked_t = tf.sparse_mask(t, mask_indices_tensor)
tf_out_values, tf_out_indices = sess.run([masked_t.values,
masked_t.indices])
self.assertAllEqual(tf_out_values, out_values)
self.assertAllEqual(tf_out_indices, out_indices)
if __name__ == "__main__":
tf.test.main()
|
waytai/odoo
|
refs/heads/8.0
|
addons/sale_order_dates/__init__.py
|
441
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_order_dates
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
deka108/meas_deka
|
refs/heads/release-deka
|
api/urls.py
|
2
|
"""
# Name: cms/urls.py
# Description:
# Created by: Phuc Le-Sanh
# Date Created: Nov 23, 2016
# Last Modified: Nov 23, 2016
# Modified by: Phuc Le-Sanh
"""
from django.conf.urls import url, include
# from rest_framework import routers
from rest_framework.authtoken import views as rest_views
from rest_framework.urlpatterns import format_suffix_patterns
from . import views
# router = routers.SimpleRouter()
# router.register("question/search", QuestionSearchView, base_name="question-search")
urlpatterns = [
# url(r'^', include(router.urls)),
url(r'^topics/$', views.TopicList.as_view(), name='topic-list'),
url(r'^topics/(?P<pk>[0-9]+)/$', views.TopicDetail.as_view(),
name='topic-detail'),
url(r'^concepts/$', views.ConceptList.as_view(), name='concept-list'),
url(r'^concepts/(?P<pk>[0-9]+)/$', views.ConceptDetail.as_view(),
name='concept-detail'),
url(r'^papers/$', views.PaperList.as_view(), name='paper-list'),
url(r'^papers/(?P<pk>[0-9]+)/$', views.PaperDetail.as_view(),
name='paper-detail'),
url(r'^questions/$', views.QuestionList.as_view(), name='question-list'),
url(r'^questions/(?P<pk>[0-9]+)/$', views.QuestionDetail.as_view(),
name='question-detail'),
url(r'^answerparts/$', views.AnswerPartList.as_view(),
name='answerpart-list'),
url(r'^answerparts/(?P<pk>[0-9]+)/$', views.AnswerPartDetail.as_view(),
name='answerpart-detail'),
# education levels
url(r'^subjects/$', views.SubjectList.as_view(), name='subject-list'),
url(r'^subjects/(?P<pk>[0-9]+)/$', views.SubjectDetail.as_view(),
name='subject-detail'),
# topics
url(r'^(?P<subj_id>[0-9]+)/topics/$', views.TopicList.as_view(),
name='subj-topic-list'),
# Concepts
url(r'^(?P<subj_id>[0-9]+)/concepts/$', views.ConceptList.as_view(),
name='subj-concept-list'),
url(r'^topics/(?P<topic_id>[0-9]+)/concepts/$',
views.ConceptList.as_view(), name='topic-concept-list'),
# Questions
url(r'^(?P<subj_id>[0-9]+)/questions/$', views.QuestionList.as_view(),
name='subj-question-list'),
url(r'^topics/(?P<topic_id>[0-9]+)/questions/$',
views.QuestionList.as_view(), name='topic-question-list'),
url(r'^concepts/(?P<concept_id>[0-9]+)/questions/$',
views.QuestionList.as_view(), name='concept-question-list'),
# Keypoints
url(r'^keypoints/$', views.KeyPointList.as_view(), name='keypoint-list'),
url(r'^keypoints/(?P<pk>[0-9]+)/$', views.KeyPointDetail.as_view(),
name='keypoint-detail'),
url(r'^concepts/(?P<concept_id>[0-9]+)/keypoints/$',
views.KeyPointList.as_view(), name='concept-keypoint-list'),
# Sample Questions
url(r'^samplequestions/$', views.QuestionList.as_view(),
name='samplequestion-list'),
url(r'^concepts/(?P<concept_id>[0-9]+)/samplequestions/$',
views.QuestionList.as_view(), name='concept-samplequestion-list'),
    # Real Questions
url(r'^realquestions/$', views.QuestionList.as_view(),
name='realquestion-list'),
url(r'^concepts/(?P<concept_id>[0-9]+)/realquestions/$',
views.QuestionList.as_view(), name='concept-realquestion-list'),
# Formulas
url(r'^formulas/$', views.FormulaList.as_view(), name="formula-list"),
url(r'^formulas/(?P<pk>[0-9]+)/$', views.FormulaDetail.as_view(),
name="formula-detail"),
url(r'^formulas/reindex/all', views.reindex_all_formula,
name="formula-reindex-all"),
# FormulaIndex
url(r'^formulaindexes/$', views.FormulaIndexList.as_view(),
name="formula-index-list"),
# Search
url(r'^search/db$', views.search_text_db, name="search_db_text"),
url(r'^fsearch/$', views.search_formula, name="search_formula"),
url(r'^csearch/$', views.search_formula_cluster,
name="search_formula_cluster"),
# url(r'^searchf$', ),
# account
# url(r'^register/$', ),
# url(r'^logout/$', ),
]
urlpatterns += [
url(r'^api-auth/',
include('rest_framework.urls', namespace='rest_framework')),
url(r'^api-token-auth/', rest_views.obtain_auth_token),
]
# For assessment
urlpatterns += [
url(r'^check_answer/$', views.check_answer, name='check_answer'),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
isandlaTech/cohorte-runtime
|
refs/heads/master
|
python/src/lib/python/unidecode/x08e.py
|
252
|
data = (
'Chu ', # 0x00
'Jing ', # 0x01
'Nie ', # 0x02
'Xiao ', # 0x03
'Bo ', # 0x04
'Chi ', # 0x05
'Qun ', # 0x06
'Mou ', # 0x07
'Shu ', # 0x08
'Lang ', # 0x09
'Yong ', # 0x0a
'Jiao ', # 0x0b
'Chou ', # 0x0c
'Qiao ', # 0x0d
'[?] ', # 0x0e
'Ta ', # 0x0f
'Jian ', # 0x10
'Qi ', # 0x11
'Wo ', # 0x12
'Wei ', # 0x13
'Zhuo ', # 0x14
'Jie ', # 0x15
'Ji ', # 0x16
'Nie ', # 0x17
'Ju ', # 0x18
'Ju ', # 0x19
'Lun ', # 0x1a
'Lu ', # 0x1b
'Leng ', # 0x1c
'Huai ', # 0x1d
'Ju ', # 0x1e
'Chi ', # 0x1f
'Wan ', # 0x20
'Quan ', # 0x21
'Ti ', # 0x22
'Bo ', # 0x23
'Zu ', # 0x24
'Qie ', # 0x25
'Ji ', # 0x26
'Cu ', # 0x27
'Zong ', # 0x28
'Cai ', # 0x29
'Zong ', # 0x2a
'Peng ', # 0x2b
'Zhi ', # 0x2c
'Zheng ', # 0x2d
'Dian ', # 0x2e
'Zhi ', # 0x2f
'Yu ', # 0x30
'Duo ', # 0x31
'Dun ', # 0x32
'Chun ', # 0x33
'Yong ', # 0x34
'Zhong ', # 0x35
'Di ', # 0x36
'Zhe ', # 0x37
'Chen ', # 0x38
'Chuai ', # 0x39
'Jian ', # 0x3a
'Gua ', # 0x3b
'Tang ', # 0x3c
'Ju ', # 0x3d
'Fu ', # 0x3e
'Zu ', # 0x3f
'Die ', # 0x40
'Pian ', # 0x41
'Rou ', # 0x42
'Nuo ', # 0x43
'Ti ', # 0x44
'Cha ', # 0x45
'Tui ', # 0x46
'Jian ', # 0x47
'Dao ', # 0x48
'Cuo ', # 0x49
'Xi ', # 0x4a
'Ta ', # 0x4b
'Qiang ', # 0x4c
'Zhan ', # 0x4d
'Dian ', # 0x4e
'Ti ', # 0x4f
'Ji ', # 0x50
'Nie ', # 0x51
'Man ', # 0x52
'Liu ', # 0x53
'Zhan ', # 0x54
'Bi ', # 0x55
'Chong ', # 0x56
'Lu ', # 0x57
'Liao ', # 0x58
'Cu ', # 0x59
'Tang ', # 0x5a
'Dai ', # 0x5b
'Suo ', # 0x5c
'Xi ', # 0x5d
'Kui ', # 0x5e
'Ji ', # 0x5f
'Zhi ', # 0x60
'Qiang ', # 0x61
'Di ', # 0x62
'Man ', # 0x63
'Zong ', # 0x64
'Lian ', # 0x65
'Beng ', # 0x66
'Zao ', # 0x67
'Nian ', # 0x68
'Bie ', # 0x69
'Tui ', # 0x6a
'Ju ', # 0x6b
'Deng ', # 0x6c
'Ceng ', # 0x6d
'Xian ', # 0x6e
'Fan ', # 0x6f
'Chu ', # 0x70
'Zhong ', # 0x71
'Dun ', # 0x72
'Bo ', # 0x73
'Cu ', # 0x74
'Zu ', # 0x75
'Jue ', # 0x76
'Jue ', # 0x77
'Lin ', # 0x78
'Ta ', # 0x79
'Qiao ', # 0x7a
'Qiao ', # 0x7b
'Pu ', # 0x7c
'Liao ', # 0x7d
'Dun ', # 0x7e
'Cuan ', # 0x7f
'Kuang ', # 0x80
'Zao ', # 0x81
'Ta ', # 0x82
'Bi ', # 0x83
'Bi ', # 0x84
'Zhu ', # 0x85
'Ju ', # 0x86
'Chu ', # 0x87
'Qiao ', # 0x88
'Dun ', # 0x89
'Chou ', # 0x8a
'Ji ', # 0x8b
'Wu ', # 0x8c
'Yue ', # 0x8d
'Nian ', # 0x8e
'Lin ', # 0x8f
'Lie ', # 0x90
'Zhi ', # 0x91
'Li ', # 0x92
'Zhi ', # 0x93
'Chan ', # 0x94
'Chu ', # 0x95
'Duan ', # 0x96
'Wei ', # 0x97
'Long ', # 0x98
'Lin ', # 0x99
'Xian ', # 0x9a
'Wei ', # 0x9b
'Zuan ', # 0x9c
'Lan ', # 0x9d
'Xie ', # 0x9e
'Rang ', # 0x9f
'Xie ', # 0xa0
'Nie ', # 0xa1
'Ta ', # 0xa2
'Qu ', # 0xa3
'Jie ', # 0xa4
'Cuan ', # 0xa5
'Zuan ', # 0xa6
'Xi ', # 0xa7
'Kui ', # 0xa8
'Jue ', # 0xa9
'Lin ', # 0xaa
'Shen ', # 0xab
'Gong ', # 0xac
'Dan ', # 0xad
'Segare ', # 0xae
'Qu ', # 0xaf
'Ti ', # 0xb0
'Duo ', # 0xb1
'Duo ', # 0xb2
'Gong ', # 0xb3
'Lang ', # 0xb4
'Nerau ', # 0xb5
'Luo ', # 0xb6
'Ai ', # 0xb7
'Ji ', # 0xb8
'Ju ', # 0xb9
'Tang ', # 0xba
'Utsuke ', # 0xbb
'[?] ', # 0xbc
'Yan ', # 0xbd
'Shitsuke ', # 0xbe
'Kang ', # 0xbf
'Qu ', # 0xc0
'Lou ', # 0xc1
'Lao ', # 0xc2
'Tuo ', # 0xc3
'Zhi ', # 0xc4
'Yagate ', # 0xc5
'Ti ', # 0xc6
'Dao ', # 0xc7
'Yagate ', # 0xc8
'Yu ', # 0xc9
'Che ', # 0xca
'Ya ', # 0xcb
'Gui ', # 0xcc
'Jun ', # 0xcd
'Wei ', # 0xce
'Yue ', # 0xcf
'Xin ', # 0xd0
'Di ', # 0xd1
'Xuan ', # 0xd2
'Fan ', # 0xd3
'Ren ', # 0xd4
'Shan ', # 0xd5
'Qiang ', # 0xd6
'Shu ', # 0xd7
'Tun ', # 0xd8
'Chen ', # 0xd9
'Dai ', # 0xda
'E ', # 0xdb
'Na ', # 0xdc
'Qi ', # 0xdd
'Mao ', # 0xde
'Ruan ', # 0xdf
'Ren ', # 0xe0
'Fan ', # 0xe1
'Zhuan ', # 0xe2
'Hong ', # 0xe3
'Hu ', # 0xe4
'Qu ', # 0xe5
'Huang ', # 0xe6
'Di ', # 0xe7
'Ling ', # 0xe8
'Dai ', # 0xe9
'Ao ', # 0xea
'Zhen ', # 0xeb
'Fan ', # 0xec
'Kuang ', # 0xed
'Ang ', # 0xee
'Peng ', # 0xef
'Bei ', # 0xf0
'Gu ', # 0xf1
'Ku ', # 0xf2
'Pao ', # 0xf3
'Zhu ', # 0xf4
'Rong ', # 0xf5
'E ', # 0xf6
'Ba ', # 0xf7
'Zhou ', # 0xf8
'Zhi ', # 0xf9
'Yao ', # 0xfa
'Ke ', # 0xfb
'Yi ', # 0xfc
'Qing ', # 0xfd
'Shi ', # 0xfe
'Ping ', # 0xff
)
|
tjth/lotterycoin
|
refs/heads/master
|
qa/rpc-tests/test_framework/comptool.py
|
7
|
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from mininode import *
from blockstore import BlockStore, TxStore
from util import p2p_port
'''
This is a tool for comparing two or more bitcoinds to each other
using a script provided.
To use, create a class that implements get_tests(), and pass it in
as the test generator to TestManager. get_tests() should be a python
generator that returns TestInstance objects. See below for definition.
'''
# TestNode behaves as follows:
# Configure with a BlockStore and TxStore
# on_inv: log the message but don't request
# on_headers: log the chain tip
# on_pong: update ping response map (for synchronization)
# on_getheaders: provide headers via BlockStore
# on_getdata: provide blocks via BlockStore
global mininode_lock
def wait_until(predicate, attempts=float('inf'), timeout=float('inf')):
attempt = 0
elapsed = 0
while attempt < attempts and elapsed < timeout:
with mininode_lock:
if predicate():
return True
attempt += 1
elapsed += 0.05
time.sleep(0.05)
return False
class RejectResult(object):
'''
Outcome that expects rejection of a transaction or block.
'''
def __init__(self, code, reason=''):
self.code = code
self.reason = reason
def match(self, other):
if self.code != other.code:
return False
return other.reason.startswith(self.reason)
def __repr__(self):
return '%i:%s' % (self.code,self.reason or '*')
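# Note (added for clarity, not part of the original file): match() compares by
# prefix, so e.g. RejectResult(16, 'bad-txns') accepts any reject whose code is
# 16 and whose reason starts with 'bad-txns'.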
class TestNode(NodeConnCB):
def __init__(self, block_store, tx_store):
NodeConnCB.__init__(self)
self.conn = None
self.bestblockhash = None
self.block_store = block_store
self.block_request_map = {}
self.tx_store = tx_store
self.tx_request_map = {}
self.block_reject_map = {}
self.tx_reject_map = {}
# When the pingmap is non-empty we're waiting for
# a response
self.pingMap = {}
self.lastInv = []
self.closed = False
def on_close(self, conn):
self.closed = True
def add_connection(self, conn):
self.conn = conn
def on_headers(self, conn, message):
if len(message.headers) > 0:
best_header = message.headers[-1]
best_header.calc_sha256()
self.bestblockhash = best_header.sha256
def on_getheaders(self, conn, message):
response = self.block_store.headers_for(message.locator, message.hashstop)
if response is not None:
conn.send_message(response)
def on_getdata(self, conn, message):
[conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
[conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
for i in message.inv:
if i.type == 1:
self.tx_request_map[i.hash] = True
elif i.type == 2:
self.block_request_map[i.hash] = True
def on_inv(self, conn, message):
self.lastInv = [x.hash for x in message.inv]
def on_pong(self, conn, message):
try:
del self.pingMap[message.nonce]
except KeyError:
raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
def on_reject(self, conn, message):
if message.message == 'tx':
self.tx_reject_map[message.data] = RejectResult(message.code, message.reason)
if message.message == 'block':
self.block_reject_map[message.data] = RejectResult(message.code, message.reason)
def send_inv(self, obj):
mtype = 2 if isinstance(obj, CBlock) else 1
self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
def send_getheaders(self):
# We ask for headers from their last tip.
m = msg_getheaders()
m.locator = self.block_store.get_locator(self.bestblockhash)
self.conn.send_message(m)
# This assumes BIP31
def send_ping(self, nonce):
self.pingMap[nonce] = True
self.conn.send_message(msg_ping(nonce))
def received_ping_response(self, nonce):
return nonce not in self.pingMap
def send_mempool(self):
self.lastInv = []
self.conn.send_message(msg_mempool())
# TestInstance:
#
# Instances of these are generated by the test generator, and fed into the
# comptool.
#
# "blocks_and_transactions" should be an array of
# [obj, True/False/None, hash/None]:
# - obj is either a CBlock, CBlockHeader, or a CTransaction, and
# - the second value indicates whether the object should be accepted
# into the blockchain or mempool (for tests where we expect a certain
# answer), or "None" if we don't expect a certain answer and are just
# comparing the behavior of the nodes being tested.
# - the third value is the hash to test the tip against (if None or omitted,
# use the hash of the block)
# - NOTE: if a block header, no test is performed; instead the header is
# just added to the block_store. This is to facilitate block delivery
# when communicating with headers-first clients (when withholding an
# intermediate block).
# sync_every_block: if True, then each block will be inv'ed, synced, and
# nodes will be tested based on the outcome for the block. If False,
# then inv's accumulate until all blocks are processed (or max inv size
# is reached) and then sent out in one inv message. Then the final block
# will be synced across all connections, and the outcome of the final
# block will be tested.
# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
# on the final tx is None, then contents of entire mempool are compared
# across all connections. (If outcome of final tx is specified as true
# or false, then only the last tx is tested against outcome.)
class TestInstance(object):
def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False):
self.blocks_and_transactions = objects if objects else []
self.sync_every_block = sync_every_block
self.sync_every_tx = sync_every_tx
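# Illustrative examples (not part of the original file):
#   TestInstance([[block, True]])    -- expect the block to become the new tip
#   TestInstance([[block, False]])   -- expect the block not to become the tip
#   TestInstance([[tx, RejectResult(16)]], sync_every_tx=True)
#                                    -- expect the tx to be rejected with code 16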
class TestManager(object):
def __init__(self, testgen, datadir):
self.test_generator = testgen
self.connections = []
self.test_nodes = []
self.block_store = BlockStore(datadir)
self.tx_store = TxStore(datadir)
self.ping_counter = 1
def add_all_connections(self, nodes):
for i in range(len(nodes)):
# Create a p2p connection to each node
test_node = TestNode(self.block_store, self.tx_store)
self.test_nodes.append(test_node)
self.connections.append(NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node))
# Make sure the TestNode (callback class) has a reference to its
# associated NodeConn
test_node.add_connection(self.connections[-1])
def wait_for_disconnections(self):
def disconnected():
return all(node.closed for node in self.test_nodes)
return wait_until(disconnected, timeout=10)
def wait_for_verack(self):
def veracked():
return all(node.verack_received for node in self.test_nodes)
return wait_until(veracked, timeout=10)
def wait_for_pings(self, counter):
def received_pongs():
return all(node.received_ping_response(counter) for node in self.test_nodes)
return wait_until(received_pongs)
# sync_blocks: Wait for all connections to request the blockhash given
# then send get_headers to find out the tip of each node, and synchronize
# the response by using a ping (and waiting for pong with same nonce).
def sync_blocks(self, blockhash, num_blocks):
def blocks_requested():
return all(
blockhash in node.block_request_map and node.block_request_map[blockhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(blocks_requested, attempts=20*num_blocks):
# print [ c.cb.block_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested block")
# Send getheaders message
[ c.cb.send_getheaders() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Analogous to sync_blocks (see above)
def sync_transaction(self, txhash, num_events):
# Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
def transaction_requested():
return all(
txhash in node.tx_request_map and node.tx_request_map[txhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(transaction_requested, attempts=20*num_events):
# print [ c.cb.tx_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested transaction")
# Get the mempool
[ c.cb.send_mempool() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Sort inv responses from each node
with mininode_lock:
[ c.cb.lastInv.sort() for c in self.connections ]
# Verify that the tip of each connection all agree with each other, and
# with the expected outcome (if given)
def check_results(self, blockhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
return False
elif isinstance(outcome, RejectResult): # Check that block was rejected w/ code
if c.cb.bestblockhash == blockhash:
return False
if blockhash not in c.cb.block_reject_map:
print 'Block not in reject map: %064x' % (blockhash)
return False
if not outcome.match(c.cb.block_reject_map[blockhash]):
print 'Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash)
return False
elif ((c.cb.bestblockhash == blockhash) != outcome):
# print c.cb.bestblockhash, blockhash, outcome
return False
return True
# Either check that the mempools all agree with each other, or that
# txhash's presence in the mempool matches the outcome specified.
# This is somewhat of a strange comparison, in that we're either comparing
# a particular tx to an outcome, or the entire mempools altogether;
# perhaps it would be useful to add the ability to check explicitly that
# a particular tx's existence in the mempool is the same across all nodes.
def check_mempool(self, txhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
# Make sure the mempools agree with each other
if c.cb.lastInv != self.connections[0].cb.lastInv:
# print c.rpc.getrawmempool()
return False
elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code
if txhash in c.cb.lastInv:
return False
if txhash not in c.cb.tx_reject_map:
print 'Tx not in reject map: %064x' % (txhash)
return False
if not outcome.match(c.cb.tx_reject_map[txhash]):
print 'Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash)
return False
elif ((txhash in c.cb.lastInv) != outcome):
# print c.rpc.getrawmempool(), c.cb.lastInv
return False
return True
def run(self):
# Wait until verack is received
self.wait_for_verack()
test_number = 1
for test_instance in self.test_generator.get_tests():
# We use these variables to keep track of the last block
# and last transaction in the tests, which are used
# if we're not syncing on every block or every tx.
[ block, block_outcome, tip ] = [ None, None, None ]
[ tx, tx_outcome ] = [ None, None ]
invqueue = []
for test_obj in test_instance.blocks_and_transactions:
b_or_t = test_obj[0]
outcome = test_obj[1]
# Determine if we're dealing with a block or tx
if isinstance(b_or_t, CBlock): # Block test runner
block = b_or_t
block_outcome = outcome
tip = block.sha256
# each test_obj can have an optional third argument
# to specify the tip we should compare with
# (default is to use the block being tested)
if len(test_obj) >= 3:
tip = test_obj[2]
# Add to shared block_store, set as current block
# If there was an open getdata request for the block
# previously, and we didn't have an entry in the
# block_store, then immediately deliver, because the
# node wouldn't send another getdata request while
# the earlier one is outstanding.
first_block_with_hash = True
if self.block_store.get(block.sha256) is not None:
first_block_with_hash = False
with mininode_lock:
self.block_store.add_block(block)
for c in self.connections:
if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True:
# There was a previous request for this block hash
# Most likely, we delivered a header for this block
# but never had the block to respond to the getdata
c.send_message(msg_block(block))
else:
c.cb.block_request_map[block.sha256] = False
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
[ c.cb.send_inv(block) for c in self.connections ]
self.sync_blocks(block.sha256, 1)
if (not self.check_results(tip, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(2, block.sha256))
elif isinstance(b_or_t, CBlockHeader):
block_header = b_or_t
self.block_store.add_header(block_header)
else: # Tx test runner
assert(isinstance(b_or_t, CTransaction))
tx = b_or_t
tx_outcome = outcome
# Add to shared tx store and clear map entry
with mininode_lock:
self.tx_store.add_transaction(tx)
for c in self.connections:
c.cb.tx_request_map[tx.sha256] = False
# Again, either inv to all nodes or save for later
if (test_instance.sync_every_tx):
[ c.cb.send_inv(tx) for c in self.connections ]
self.sync_transaction(tx.sha256, 1)
if (not self.check_mempool(tx.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(1, tx.sha256))
# Ensure we're not overflowing the inv queue
if len(invqueue) == MAX_INV_SZ:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
# Do final sync if we weren't syncing on every block or every tx.
if (not test_instance.sync_every_block and block is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_results(tip, block_outcome)):
raise AssertionError("Block test failed at test %d" % test_number)
if (not test_instance.sync_every_tx and tx is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_mempool(tx.sha256, tx_outcome)):
raise AssertionError("Mempool test failed at test %d" % test_number)
print "Test %d: PASS" % test_number, [ c.rpc.getblockcount() for c in self.connections ]
test_number += 1
[ c.disconnect_node() for c in self.connections ]
self.wait_for_disconnections()
self.block_store.close()
self.tx_store.close()
|
pasqualguerrero/django
|
refs/heads/master
|
tests/utils_tests/test_module/another_bad_module.py
|
514
|
from . import site
content = 'Another Bad Module'
site._registry.update({
'foo': 'bar',
})
raise Exception('Some random exception.')
|
LukasBoersma/pyowm
|
refs/heads/master
|
sphinx/conf.py
|
1
|
# -*- coding: utf-8 -*-
#
# pyowm documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 23 21:47:00 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyowm'
copyright = u'2014, Claudio Sparpaglione (@csparpa)'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyowmdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto/manual]).
latex_documents = [
('index', 'pyowm.tex', u'pyowm Documentation',
u'Claudio Sparpaglione (@csparpa)', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyowm', u'pyowm Documentation',
[u'Claudio Sparpaglione (@csparpa)'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyowm', u'pyowm Documentation',
u'Claudio Sparpaglione (@csparpa)', 'pyowm', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'pyowm'
epub_author = u'Claudio Sparpaglione (@csparpa)'
epub_publisher = u'Claudio Sparpaglione (@csparpa)'
epub_copyright = u'2014, Claudio Sparpaglione (@csparpa)'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
|
simbs/edx-platform
|
refs/heads/master
|
common/djangoapps/third_party_auth/tests/specs/test_linkedin.py
|
112
|
"""Integration tests for LinkedIn providers."""
from third_party_auth.tests.specs import base
class LinkedInOauth2IntegrationTest(base.Oauth2IntegrationTest):
"""Integration tests for provider.LinkedInOauth2."""
def setUp(self):
super(LinkedInOauth2IntegrationTest, self).setUp()
self.provider = self.configure_linkedin_provider(
enabled=True,
key='linkedin_oauth2_key',
secret='linkedin_oauth2_secret',
)
TOKEN_RESPONSE_DATA = {
'access_token': 'access_token_value',
'expires_in': 'expires_in_value',
}
USER_RESPONSE_DATA = {
'lastName': 'lastName_value',
'id': 'id_value',
'firstName': 'firstName_value',
}
def get_username(self):
response_data = self.get_response_data()
return response_data.get('firstName') + response_data.get('lastName')
|
VladiMihaylenko/omim
|
refs/heads/master
|
tools/python/google_translate.py
|
53
|
#!/usr/bin/python
import re
import sys
import urllib
import simplejson
import time
baseUrl = "http://www.googleapis.com/language/translate/v2"
def translate(text,src='en'):
targetLangs = ["ja", "fr", "ar", "de", "ru", "sv", "zh", "fi", "ko", "ka", "be", "nl", "ga", "el", "it", "es", "th", "ca", "cy", "hu", "sr", "fa", "eu", "pl", "uk", "sl", "ro", "sq", "cs", "sk", "af", "hr", "hy", "tr", "pt", "lt", "bg", "la", "et", "vi", "mk", "lv", "is", "hi"]
retText=''
for target in targetLangs:
params = ({'source': src,
'target': target,
'key': 'AIzaSyDD5rPHpqmeEIRVI34wYI1zMplMq9O_w2k'
})
translation = target + ':'
params['q'] = text
resp = simplejson.load(urllib.urlopen('%s' % (baseUrl), data = urllib.urlencode(params)))
print resp
try:
translation += resp['data']['translations'][0]['translatedText']
except:
return retText
retText += '|' + translation
return retText
def test():
for line in sys.stdin:
line = line.rstrip('\n\r')
retText = 'en:' + line + translate(line)
print retText
if __name__=='__main__':
reload(sys)
sys.setdefaultencoding('utf-8')
try:
test()
except KeyboardInterrupt:
print "\n"
sys.exit(0)
|
pb-cdunn/FALCON-examples
|
refs/heads/master
|
run/synth0/check.py
|
1
|
#!/usr/bin/env python3.7
import sys
def read_dna(ifs):
"""Assume one contig.
"""
header = ifs.readline()
dna = ""
for line in ifs:
dna += line.strip()
return dna
def reverse_complement(dna):
complement = {"T": "A", "A": "T",
"G": "C", "C": "G"}
return ''.join([complement[base] for base in reversed(dna)])
def compare_rot(ref, qry):
"""Compare circular DNA.
"""
if len(ref) != len(qry):
raise Exception('%d != %d' %(len(ref), len(qry)))
for i in range(len(ref)):
rot = ref[i:] + ref[:i]
if rot == qry:
return i
raise Exception('no match')
def compare_circ(ref, qry):
"""Compare circular DNA.
"""
try:
d = 'n'
shift = compare_rot(ref, qry)
except Exception:
d = 'rc'
shift = compare_rot(reverse_complement(ref), qry)
print('shifted by %d (%s)' %(shift, d))
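# Note (comment added for clarity, not in the original file): 'n' means the
# query matched a plain rotation of the reference; 'rc' means it only matched a
# rotation of the reverse complement.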
def main(prog, ref='data/synth5k/ref.fasta', qry='2-asm-falcon/p_ctg.fasta'):
compare_circ(read_dna(open(ref)), read_dna(open(qry)))
main(*sys.argv)
|
fierval/retina
|
refs/heads/master
|
DiabeticRetinopathy/Features/extract_1dlbp_images_features.py
|
1
|
from numba import cuda
from timeit import default_timer as timer
import numpy as np
from os import path
from tr_utils import append_to_arr, prep_out_path
from train_files import TrainFiles
from skimage import io
from cv2 import imread
root_path = "/kaggle/retina/train"
inp_path = path.join(root_path, "cropped")
out_path = path.join(root_path, "1dlbp")
ext = ".lbp"
neighborhood = 4
@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def lbp_kernel(input, neighborhood, powers, h):
i = cuda.grid(1)
r = 0
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
cuda.atomic.add(h, r, 1)
def extract_1dlbp_gpu(input, neighborhood, d_powers):
maxThread = 512
blockDim = maxThread
d_input = cuda.to_device(input)
hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
d_hist = cuda.to_device(hist)
lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
d_hist.to_host()
return hist
def extract_1dlbp_gpu_debug(input, neighborhood):
res = np.zeros((input.shape[0] - 2 * neighborhood), dtype='int32')
powers = 2 ** np.array(range(0, 2 * neighborhood), dtype='int32')
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
i = blockDim * block + thread
if i >= res.shape[0]:
return res
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
res [i - neighborhood] += powers[neighborhood - (i - j)]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
res [i - neighborhood] += powers[j - i + neighborhood - 1]
return res
def extract_1dlbp_cpu(input, neighborhood, p):
"""
Extract the 1d lbp pattern on CPU
"""
res = np.zeros((input.shape[0] - 2 * neighborhood))
for i in range(neighborhood, len(input) - neighborhood):
left = input[i - neighborhood : i]
right = input[i + 1 : i + neighborhood + 1]
both = np.r_[left, right]
res[i - neighborhood] = np.sum(p [both >= input[i]])
return res
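# Illustrative note (not in the original file): with neighborhood = 4 each
# centre pixel gets an 8-bit code, so the histograms have 2 ** (2 * 4) = 256
# bins and the power vector p is [1, 2, 4, ..., 128].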
def file_histogram(lbps, neighborhood):
"""
Create a histogram out of the extracted pattern
"""
bins = 2 ** (2 * neighborhood)
hist = np.zeros(bins, dtype='int')
for lbp in lbps:
hist[lbp] += 1
return hist
def get_1dlbp_features(neighborhood):
tf = TrainFiles(inp_path, floor = neighborhood * 2 + 1)
inputs = tf.get_training_inputs()
start = timer()
hist = np.array([])
outs = np.array([])
i = 0
writeBatch = 100
prep_out_path(out_path)
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)
for inp in inputs:
data_file = path.join(inp_path, inp)
out_file = path.join(out_path, path.splitext(inp)[0] + ext)
arr = np.ascontiguousarray(imread(data_file)[:, :, 2].reshape(-1))
##GPU##
file_hist = extract_1dlbp_gpu(arr, neighborhood, d_powers)
##CPU##
#file_hist = extract_1dlbp_cpu(arr, neighborhood, p)
#file_hist = file_histogram(file_hist, neighborhood)
i += 1
hist = append_to_arr(hist, file_hist)
outs = append_to_arr(outs, out_file)
if i == writeBatch:
i = 0
first = True
for j in range(0, outs.shape[0]):
hist[j].tofile(outs[j])
hist = np.array([])
outs = np.array([])
print "==============Done==================="
print "Elapsed: ", timer() - start
print "Writing......."
for i in range(0, outs.shape[0]):
hist[i].tofile(outs[i])
print "==============Done==================="
neighborhood = 4
get_1dlbp_features(neighborhood)
tf = TrainFiles(out_path, labels_file='/kaggle/retina/trainLabels.csv', test_size = 0.0)
X, Y, _, _ = tf.prepare_inputs()
tf.dump_to_csv(path.join(root_path, '1dlbp.csv'), X, Y)
|
anneria/ann17
|
refs/heads/master
|
node_modules/npm/node_modules/node-gyp/gyp/gyptest.py
|
1752
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__doc__ = """
gyptest.py -- test runner for GYP tests.
"""
import os
import optparse
import shlex
import subprocess
import sys
class CommandRunner(object):
"""
Executor class for commands, including "commands" implemented by
Python functions.
"""
verbose = True
active = True
def __init__(self, dictionary={}):
self.subst_dictionary(dictionary)
def subst_dictionary(self, dictionary):
self._subst_dictionary = dictionary
def subst(self, string, dictionary=None):
"""
Substitutes (via the format operator) the values in the specified
dictionary into the specified command.
The command can be an (action, string) tuple. In all cases, we
perform substitution on strings and don't worry if something isn't
a string. (It's probably a Python function to be executed.)
"""
if dictionary is None:
dictionary = self._subst_dictionary
if dictionary:
try:
string = string % dictionary
except TypeError:
pass
return string
def display(self, command, stdout=None, stderr=None):
if not self.verbose:
return
if type(command) == type(()):
func = command[0]
args = command[1:]
s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
if type(command) == type([]):
# TODO: quote arguments containing spaces
# TODO: handle meta characters?
s = ' '.join(command)
else:
s = self.subst(command)
if not s.endswith('\n'):
s += '\n'
sys.stdout.write(s)
sys.stdout.flush()
def execute(self, command, stdout=None, stderr=None):
"""
Executes a single command.
"""
if not self.active:
return 0
if type(command) == type(''):
command = self.subst(command)
cmdargs = shlex.split(command)
if cmdargs[0] == 'cd':
command = (os.chdir,) + tuple(cmdargs[1:])
if type(command) == type(()):
func = command[0]
args = command[1:]
return func(*args)
else:
if stdout is sys.stdout:
# Same as passing sys.stdout, except python2.4 doesn't fail on it.
subout = None
else:
# Open pipe for anything else so Popen works on python2.4.
subout = subprocess.PIPE
if stderr is sys.stderr:
# Same as passing sys.stderr, except python2.4 doesn't fail on it.
suberr = None
elif stderr is None:
# Merge with stdout if stderr isn't specified.
suberr = subprocess.STDOUT
else:
# Open pipe for anything else so Popen works on python2.4.
suberr = subprocess.PIPE
p = subprocess.Popen(command,
shell=(sys.platform == 'win32'),
stdout=subout,
stderr=suberr)
p.wait()
if stdout is None:
self.stdout = p.stdout.read()
elif stdout is not sys.stdout:
stdout.write(p.stdout.read())
if stderr not in (None, sys.stderr):
stderr.write(p.stderr.read())
return p.returncode
def run(self, command, display=None, stdout=None, stderr=None):
"""
Runs a single command, displaying it first.
"""
if display is None:
display = command
self.display(display)
return self.execute(command, stdout, stderr)
class Unbuffered(object):
def __init__(self, fp):
self.fp = fp
def write(self, arg):
self.fp.write(arg)
self.fp.flush()
def __getattr__(self, attr):
return getattr(self.fp, attr)
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
def is_test_name(f):
return f.startswith('gyptest') and f.endswith('.py')
def find_all_gyptest_files(directory):
result = []
for root, dirs, files in os.walk(directory):
if '.svn' in dirs:
dirs.remove('.svn')
result.extend([ os.path.join(root, f) for f in files if is_test_name(f) ])
result.sort()
return result
def main(argv=None):
if argv is None:
argv = sys.argv
usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-a", "--all", action="store_true",
help="run all tests")
parser.add_option("-C", "--chdir", action="store", default=None,
help="chdir to the specified directory")
parser.add_option("-f", "--format", action="store", default='',
help="run tests with the specified formats")
parser.add_option("-G", '--gyp_option', action="append", default=[],
help="Add -G options to the gyp command line")
parser.add_option("-l", "--list", action="store_true",
help="list available tests and exit")
parser.add_option("-n", "--no-exec", action="store_true",
help="no execute, just print the command line")
parser.add_option("--passed", action="store_true",
help="report passed tests")
parser.add_option("--path", action="append", default=[],
help="additional $PATH directory")
parser.add_option("-q", "--quiet", action="store_true",
help="quiet, don't print test command lines")
opts, args = parser.parse_args(argv[1:])
if opts.chdir:
os.chdir(opts.chdir)
if opts.path:
extra_path = [os.path.abspath(p) for p in opts.path]
extra_path = os.pathsep.join(extra_path)
os.environ['PATH'] = extra_path + os.pathsep + os.environ['PATH']
if not args:
if not opts.all:
sys.stderr.write('Specify -a to get all tests.\n')
return 1
args = ['test']
tests = []
for arg in args:
if os.path.isdir(arg):
tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
else:
if not is_test_name(os.path.basename(arg)):
print >>sys.stderr, arg, 'is not a valid gyp test name.'
sys.exit(1)
tests.append(arg)
if opts.list:
for test in tests:
print test
sys.exit(0)
CommandRunner.verbose = not opts.quiet
CommandRunner.active = not opts.no_exec
cr = CommandRunner()
os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
if not opts.quiet:
sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])
passed = []
failed = []
no_result = []
if opts.format:
format_list = opts.format.split(',')
else:
# TODO: not duplicate this mapping from pylib/gyp/__init__.py
format_list = {
'aix5': ['make'],
'freebsd7': ['make'],
'freebsd8': ['make'],
'openbsd5': ['make'],
'cygwin': ['msvs'],
'win32': ['msvs', 'ninja'],
'linux2': ['make', 'ninja'],
'linux3': ['make', 'ninja'],
'darwin': ['make', 'ninja', 'xcode', 'xcode-ninja'],
}[sys.platform]
for format in format_list:
os.environ['TESTGYP_FORMAT'] = format
if not opts.quiet:
sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)
gyp_options = []
for option in opts.gyp_option:
gyp_options += ['-G', option]
if gyp_options and not opts.quiet:
sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)
for test in tests:
status = cr.run([sys.executable, test] + gyp_options,
stdout=sys.stdout,
stderr=sys.stderr)
if status == 2:
no_result.append(test)
elif status:
failed.append(test)
else:
passed.append(test)
if not opts.quiet:
def report(description, tests):
if tests:
if len(tests) == 1:
sys.stdout.write("\n%s the following test:\n" % description)
else:
fmt = "\n%s the following %d tests:\n"
sys.stdout.write(fmt % (description, len(tests)))
sys.stdout.write("\t" + "\n\t".join(tests) + "\n")
if opts.passed:
report("Passed", passed)
report("Failed", failed)
report("No result from", no_result)
if failed:
return 1
else:
return 0
if __name__ == "__main__":
sys.exit(main())
|
orangeduck/PyAutoC
|
refs/heads/master
|
Python27/Lib/test/test_difflib.py
|
87
|
import difflib
from test.test_support import run_unittest, findfile
import unittest
import doctest
import sys
class TestWithAscii(unittest.TestCase):
def test_one_insert(self):
sm = difflib.SequenceMatcher(None, 'b' * 100, 'a' + 'b' * 100)
self.assertAlmostEqual(sm.ratio(), 0.995, places=3)
self.assertEqual(list(sm.get_opcodes()),
[ ('insert', 0, 0, 0, 1),
('equal', 0, 100, 1, 101)])
sm = difflib.SequenceMatcher(None, 'b' * 100, 'b' * 50 + 'a' + 'b' * 50)
self.assertAlmostEqual(sm.ratio(), 0.995, places=3)
self.assertEqual(list(sm.get_opcodes()),
[ ('equal', 0, 50, 0, 50),
('insert', 50, 50, 50, 51),
('equal', 50, 100, 51, 101)])
def test_one_delete(self):
sm = difflib.SequenceMatcher(None, 'a' * 40 + 'c' + 'b' * 40, 'a' * 40 + 'b' * 40)
self.assertAlmostEqual(sm.ratio(), 0.994, places=3)
self.assertEqual(list(sm.get_opcodes()),
[ ('equal', 0, 40, 0, 40),
('delete', 40, 41, 40, 40),
('equal', 41, 81, 40, 80)])
class TestAutojunk(unittest.TestCase):
"""Tests for the autojunk parameter added in 2.7"""
def test_one_insert_homogenous_sequence(self):
# By default autojunk=True and the heuristic kicks in for a sequence
# of length 200+
seq1 = 'b' * 200
seq2 = 'a' + 'b' * 200
sm = difflib.SequenceMatcher(None, seq1, seq2)
self.assertAlmostEqual(sm.ratio(), 0, places=3)
# Now turn the heuristic off
sm = difflib.SequenceMatcher(None, seq1, seq2, autojunk=False)
self.assertAlmostEqual(sm.ratio(), 0.9975, places=3)
class TestSFbugs(unittest.TestCase):
def test_ratio_for_null_seqn(self):
# Check clearing of SF bug 763023
s = difflib.SequenceMatcher(None, [], [])
self.assertEqual(s.ratio(), 1)
self.assertEqual(s.quick_ratio(), 1)
self.assertEqual(s.real_quick_ratio(), 1)
def test_comparing_empty_lists(self):
# Check fix for bug #979794
group_gen = difflib.SequenceMatcher(None, [], []).get_grouped_opcodes()
self.assertRaises(StopIteration, group_gen.next)
diff_gen = difflib.unified_diff([], [])
self.assertRaises(StopIteration, diff_gen.next)
def test_added_tab_hint(self):
# Check fix for bug #1488943
diff = list(difflib.Differ().compare(["\tI am a buggy"],["\t\tI am a bug"]))
self.assertEqual("- \tI am a buggy", diff[0])
self.assertEqual("? --\n", diff[1])
self.assertEqual("+ \t\tI am a bug", diff[2])
self.assertEqual("? +\n", diff[3])
patch914575_from1 = """
1. Beautiful is beTTer than ugly.
2. Explicit is better than implicit.
3. Simple is better than complex.
4. Complex is better than complicated.
"""
patch914575_to1 = """
1. Beautiful is better than ugly.
3. Simple is better than complex.
4. Complicated is better than complex.
5. Flat is better than nested.
"""
patch914575_from2 = """
\t\tLine 1: preceeded by from:[tt] to:[ssss]
\t\tLine 2: preceeded by from:[sstt] to:[sssst]
\t \tLine 3: preceeded by from:[sstst] to:[ssssss]
Line 4: \thas from:[sst] to:[sss] after :
Line 5: has from:[t] to:[ss] at end\t
"""
patch914575_to2 = """
Line 1: preceeded by from:[tt] to:[ssss]
\tLine 2: preceeded by from:[sstt] to:[sssst]
Line 3: preceeded by from:[sstst] to:[ssssss]
Line 4: has from:[sst] to:[sss] after :
Line 5: has from:[t] to:[ss] at end
"""
patch914575_from3 = """line 0
1234567890123456789012345689012345
line 1
line 2
line 3
line 4 changed
line 5 changed
line 6 changed
line 7
line 8 subtracted
line 9
1234567890123456789012345689012345
short line
just fits in!!
just fits in two lines yup!!
the end"""
patch914575_to3 = """line 0
1234567890123456789012345689012345
line 1
line 2 added
line 3
line 4 chanGEd
line 5a chanGed
line 6a changEd
line 7
line 8
line 9
1234567890
another long line that needs to be wrapped
just fitS in!!
just fits in two lineS yup!!
the end"""
class TestSFpatches(unittest.TestCase):
def test_html_diff(self):
# Check SF patch 914575 for generating HTML differences
f1a = ((patch914575_from1 + '123\n'*10)*3)
t1a = (patch914575_to1 + '123\n'*10)*3
f1b = '456\n'*10 + f1a
t1b = '456\n'*10 + t1a
f1a = f1a.splitlines()
t1a = t1a.splitlines()
f1b = f1b.splitlines()
t1b = t1b.splitlines()
f2 = patch914575_from2.splitlines()
t2 = patch914575_to2.splitlines()
f3 = patch914575_from3
t3 = patch914575_to3
i = difflib.HtmlDiff()
j = difflib.HtmlDiff(tabsize=2)
k = difflib.HtmlDiff(wrapcolumn=14)
full = i.make_file(f1a,t1a,'from','to',context=False,numlines=5)
tables = '\n'.join(
[
'<h2>Context (first diff within numlines=5(default))</h2>',
i.make_table(f1a,t1a,'from','to',context=True),
'<h2>Context (first diff after numlines=5(default))</h2>',
i.make_table(f1b,t1b,'from','to',context=True),
'<h2>Context (numlines=6)</h2>',
i.make_table(f1a,t1a,'from','to',context=True,numlines=6),
'<h2>Context (numlines=0)</h2>',
i.make_table(f1a,t1a,'from','to',context=True,numlines=0),
'<h2>Same Context</h2>',
i.make_table(f1a,f1a,'from','to',context=True),
'<h2>Same Full</h2>',
i.make_table(f1a,f1a,'from','to',context=False),
'<h2>Empty Context</h2>',
i.make_table([],[],'from','to',context=True),
'<h2>Empty Full</h2>',
i.make_table([],[],'from','to',context=False),
'<h2>tabsize=2</h2>',
j.make_table(f2,t2),
'<h2>tabsize=default</h2>',
i.make_table(f2,t2),
'<h2>Context (wrapcolumn=14,numlines=0)</h2>',
k.make_table(f3.splitlines(),t3.splitlines(),context=True,numlines=0),
'<h2>wrapcolumn=14,splitlines()</h2>',
k.make_table(f3.splitlines(),t3.splitlines()),
'<h2>wrapcolumn=14,splitlines(True)</h2>',
k.make_table(f3.splitlines(True),t3.splitlines(True)),
])
actual = full.replace('</body>','\n%s\n</body>' % tables)
# temporarily uncomment next two lines to baseline this test
#with open('test_difflib_expect.html','w') as fp:
# fp.write(actual)
with open(findfile('test_difflib_expect.html')) as fp:
self.assertEqual(actual, fp.read())
def test_recursion_limit(self):
# Check if the problem described in patch #1413711 exists.
limit = sys.getrecursionlimit()
old = [(i%2 and "K:%d" or "V:A:%d") % i for i in range(limit*2)]
new = [(i%2 and "K:%d" or "V:B:%d") % i for i in range(limit*2)]
difflib.SequenceMatcher(None, old, new).get_opcodes()
class TestOutputFormat(unittest.TestCase):
def test_tab_delimiter(self):
args = ['one', 'two', 'Original', 'Current',
'2005-01-26 23:30:50', '2010-04-02 10:20:52']
ud = difflib.unified_diff(*args, lineterm='')
self.assertEqual(list(ud)[0:2], [
"--- Original\t2005-01-26 23:30:50",
"+++ Current\t2010-04-02 10:20:52"])
cd = difflib.context_diff(*args, lineterm='')
self.assertEqual(list(cd)[0:2], [
"*** Original\t2005-01-26 23:30:50",
"--- Current\t2010-04-02 10:20:52"])
def test_no_trailing_tab_on_empty_filedate(self):
args = ['one', 'two', 'Original', 'Current']
ud = difflib.unified_diff(*args, lineterm='')
self.assertEqual(list(ud)[0:2], ["--- Original", "+++ Current"])
cd = difflib.context_diff(*args, lineterm='')
self.assertEqual(list(cd)[0:2], ["*** Original", "--- Current"])
def test_range_format_unified(self):
# Per the diff spec at http://www.unix.org/single_unix_specification/
spec = '''\
Each <range> field shall be of the form:
%1d", <beginning line number> if the range contains exactly one line,
and:
"%1d,%1d", <beginning line number>, <number of lines> otherwise.
If a range is empty, its beginning line number shall be the number of
the line just before the range, or 0 if the empty range starts the file.
'''
fmt = difflib._format_range_unified
self.assertEqual(fmt(3,3), '3,0')
self.assertEqual(fmt(3,4), '4')
self.assertEqual(fmt(3,5), '4,2')
self.assertEqual(fmt(3,6), '4,3')
self.assertEqual(fmt(0,0), '0,0')
def test_range_format_context(self):
# Per the diff spec at http://www.unix.org/single_unix_specification/
spec = '''\
The range of lines in file1 shall be written in the following format
if the range contains two or more lines:
"*** %d,%d ****\n", <beginning line number>, <ending line number>
and the following format otherwise:
"*** %d ****\n", <ending line number>
The ending line number of an empty range shall be the number of the preceding line,
or 0 if the range is at the start of the file.
Next, the range of lines in file2 shall be written in the following format
if the range contains two or more lines:
"--- %d,%d ----\n", <beginning line number>, <ending line number>
and the following format otherwise:
"--- %d ----\n", <ending line number>
'''
fmt = difflib._format_range_context
self.assertEqual(fmt(3,3), '3')
self.assertEqual(fmt(3,4), '4')
self.assertEqual(fmt(3,5), '4,5')
self.assertEqual(fmt(3,6), '4,6')
self.assertEqual(fmt(0,0), '0')
def test_main():
difflib.HtmlDiff._default_prefix = 0
Doctests = doctest.DocTestSuite(difflib)
run_unittest(
TestWithAscii, TestAutojunk, TestSFpatches, TestSFbugs,
TestOutputFormat, Doctests)
if __name__ == '__main__':
test_main()
|
nirmeshk/oh-mainline
|
refs/heads/master
|
vendor/packages/celery/celery/task/schedules.py
|
32
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import warnings
from ..schedules import schedule, crontab_parser, crontab # noqa
from ..exceptions import CDeprecationWarning
warnings.warn(CDeprecationWarning(
"celery.task.schedules is deprecated and renamed to celery.schedules"))
|
dongguangming/python-phonenumbers
|
refs/heads/dev
|
python/phonenumbers/data/alt_format_91.py
|
11
|
"""Auto-generated file, do not edit by hand. 91 metadata"""
from ..phonemetadata import NumberFormat
PHONE_ALT_FORMAT_91 = [NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{6})', format='\\1 \\2 \\3', leading_digits_pattern=['7(?:0[2-8]|2[0579]|3[057-9]|4[0-389]|6[0-35-9]|[57]|8[0-79])|8(?:0[015689]|1[0-57-9]|2[2356-9]|3[0-57-9]|[45]|6[02457-9]|7[1-69]|8[0124-9]|9[02-9])|9', '7(?:0(?:2[2-9]|[3-7]|8[0-7])|2(?:0[04-9]|5[09]|7[5-8]|9[389])|3(?:0[1-9]|[58]|7[3679]|9[689])|4(?:0[1-9]|1[15-9]|[29][89]|39|8[389])|5(?:[034678]|2[03-9]|5[017-9]|9[7-9])|6(?:0[0-27]|1[0-257-9]|2[0-4]|3[19]|5[4589]|[6-9])|7(?:0[2-9]|[1-79]|8[1-9])|8(?:[0-7]|9[013-9]))|8(?:0(?:[01589]|6[67])|1(?:[02-589]|1[0135-9]|7[0-79])|2(?:[236-9]|5[1-9])|3(?:[0357-9]|4[1-9])|[45]|6[02457-9]|7[1-69]|8(?:[0-26-9]|44|5[2-9])|9(?:[035-9]|2[2-9]|4[0-8]))|9']), NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4 \\5', leading_digits_pattern=['7(?:0[2-8]|2[0579]|3[057-9]|4[0-389]|6[0-35-9]|[57]|8[0-79])|8(?:0[015689]|1[0-57-9]|2[2356-9]|3[0-57-9]|[45]|6[02457-9]|7[1-69]|8[0124-9]|9[02-9])|9', '7(?:0(?:2[2-9]|[3-7]|8[0-7])|2(?:0[04-9]|5[09]|7[5-8]|9[389])|3(?:0[1-9]|[58]|7[3679]|9[689])|4(?:0[1-9]|1[15-9]|[29][89]|39|8[389])|5(?:[034678]|2[03-9]|5[017-9]|9[7-9])|6(?:0[0-27]|1[0-257-9]|2[0-4]|3[19]|5[4589]|[6-9])|7(?:0[2-9]|[1-79]|8[1-9])|8(?:[0-7]|9[013-9]))|8(?:0(?:[01589]|6[67])|1(?:[02-589]|1[0135-9]|7[0-79])|2(?:[236-9]|5[1-9])|3(?:[0357-9]|4[1-9])|[45]|6[02457-9]|7[1-69]|8(?:[0-26-9]|44|5[2-9])|9(?:[035-9]|2[2-9]|4[0-8]))|9'])]
|
ttouchstone/deap
|
refs/heads/master
|
doc/code/tutorials/part_2/2_1_fitness.py
|
14
|
## 2.1 Fitness
from deap import base
from deap import creator
## FitnessMin
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
## FitnessMulti
creator.create("FitnessMulti", base.Fitness, weights=(-1.0, 1.0))
|
copyleftdev/gulpdemo
|
refs/heads/master
|
node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/simple_copy.py
|
1869
|
# Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A clone of the default copy.deepcopy that doesn't handle cyclic
structures or complex types except for dicts and lists. This is
because gyp copies such large structures that the small copy overhead ends up
taking seconds in a project the size of Chromium."""
class Error(Exception):
pass
__all__ = ["Error", "deepcopy"]
def deepcopy(x):
"""Deep copy operation on gyp objects such as strings, ints, dicts
and lists. More than twice as fast as copy.deepcopy but much less
generic."""
try:
return _deepcopy_dispatch[type(x)](x)
except KeyError:
raise Error(('Unsupported type %s for deepcopy. Use copy.deepcopy '
'or expand simple_copy support.') % type(x))
_deepcopy_dispatch = d = {}
def _deepcopy_atomic(x):
return x
for x in (type(None), int, long, float,
bool, str, unicode, type):
d[x] = _deepcopy_atomic
def _deepcopy_list(x):
return [deepcopy(a) for a in x]
d[list] = _deepcopy_list
def _deepcopy_dict(x):
y = {}
for key, value in x.iteritems():
y[deepcopy(key)] = deepcopy(value)
return y
d[dict] = _deepcopy_dict
del d
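# A minimal usage sketch (illustrative, not part of the original file):
#
#   from gyp.simple_copy import deepcopy
#   copied = deepcopy({'target_name': 'foo', 'sources': ['a.cc', 'b.cc']})
#
# Types outside the dispatch table above (e.g. tuples) raise Error instead.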
|
ybenitezf/nstock
|
refs/heads/master
|
modules/unidecode/x09e.py
|
252
|
data = (
'Shu ', # 0x00
'Luo ', # 0x01
'Qi ', # 0x02
'Yi ', # 0x03
'Ji ', # 0x04
'Zhe ', # 0x05
'Yu ', # 0x06
'Zhan ', # 0x07
'Ye ', # 0x08
'Yang ', # 0x09
'Pi ', # 0x0a
'Ning ', # 0x0b
'Huo ', # 0x0c
'Mi ', # 0x0d
'Ying ', # 0x0e
'Meng ', # 0x0f
'Di ', # 0x10
'Yue ', # 0x11
'Yu ', # 0x12
'Lei ', # 0x13
'Bao ', # 0x14
'Lu ', # 0x15
'He ', # 0x16
'Long ', # 0x17
'Shuang ', # 0x18
'Yue ', # 0x19
'Ying ', # 0x1a
'Guan ', # 0x1b
'Qu ', # 0x1c
'Li ', # 0x1d
'Luan ', # 0x1e
'Niao ', # 0x1f
'Jiu ', # 0x20
'Ji ', # 0x21
'Yuan ', # 0x22
'Ming ', # 0x23
'Shi ', # 0x24
'Ou ', # 0x25
'Ya ', # 0x26
'Cang ', # 0x27
'Bao ', # 0x28
'Zhen ', # 0x29
'Gu ', # 0x2a
'Dong ', # 0x2b
'Lu ', # 0x2c
'Ya ', # 0x2d
'Xiao ', # 0x2e
'Yang ', # 0x2f
'Ling ', # 0x30
'Zhi ', # 0x31
'Qu ', # 0x32
'Yuan ', # 0x33
'Xue ', # 0x34
'Tuo ', # 0x35
'Si ', # 0x36
'Zhi ', # 0x37
'Er ', # 0x38
'Gua ', # 0x39
'Xiu ', # 0x3a
'Heng ', # 0x3b
'Zhou ', # 0x3c
'Ge ', # 0x3d
'Luan ', # 0x3e
'Hong ', # 0x3f
'Wu ', # 0x40
'Bo ', # 0x41
'Li ', # 0x42
'Juan ', # 0x43
'Hu ', # 0x44
'E ', # 0x45
'Yu ', # 0x46
'Xian ', # 0x47
'Ti ', # 0x48
'Wu ', # 0x49
'Que ', # 0x4a
'Miao ', # 0x4b
'An ', # 0x4c
'Kun ', # 0x4d
'Bei ', # 0x4e
'Peng ', # 0x4f
'Qian ', # 0x50
'Chun ', # 0x51
'Geng ', # 0x52
'Yuan ', # 0x53
'Su ', # 0x54
'Hu ', # 0x55
'He ', # 0x56
'E ', # 0x57
'Gu ', # 0x58
'Qiu ', # 0x59
'Zi ', # 0x5a
'Mei ', # 0x5b
'Mu ', # 0x5c
'Ni ', # 0x5d
'Yao ', # 0x5e
'Weng ', # 0x5f
'Liu ', # 0x60
'Ji ', # 0x61
'Ni ', # 0x62
'Jian ', # 0x63
'He ', # 0x64
'Yi ', # 0x65
'Ying ', # 0x66
'Zhe ', # 0x67
'Liao ', # 0x68
'Liao ', # 0x69
'Jiao ', # 0x6a
'Jiu ', # 0x6b
'Yu ', # 0x6c
'Lu ', # 0x6d
'Xuan ', # 0x6e
'Zhan ', # 0x6f
'Ying ', # 0x70
'Huo ', # 0x71
'Meng ', # 0x72
'Guan ', # 0x73
'Shuang ', # 0x74
'Lu ', # 0x75
'Jin ', # 0x76
'Ling ', # 0x77
'Jian ', # 0x78
'Xian ', # 0x79
'Cuo ', # 0x7a
'Jian ', # 0x7b
'Jian ', # 0x7c
'Yan ', # 0x7d
'Cuo ', # 0x7e
'Lu ', # 0x7f
'You ', # 0x80
'Cu ', # 0x81
'Ji ', # 0x82
'Biao ', # 0x83
'Cu ', # 0x84
'Biao ', # 0x85
'Zhu ', # 0x86
'Jun ', # 0x87
'Zhu ', # 0x88
'Jian ', # 0x89
'Mi ', # 0x8a
'Mi ', # 0x8b
'Wu ', # 0x8c
'Liu ', # 0x8d
'Chen ', # 0x8e
'Jun ', # 0x8f
'Lin ', # 0x90
'Ni ', # 0x91
'Qi ', # 0x92
'Lu ', # 0x93
'Jiu ', # 0x94
'Jun ', # 0x95
'Jing ', # 0x96
'Li ', # 0x97
'Xiang ', # 0x98
'Yan ', # 0x99
'Jia ', # 0x9a
'Mi ', # 0x9b
'Li ', # 0x9c
'She ', # 0x9d
'Zhang ', # 0x9e
'Lin ', # 0x9f
'Jing ', # 0xa0
'Ji ', # 0xa1
'Ling ', # 0xa2
'Yan ', # 0xa3
'Cu ', # 0xa4
'Mai ', # 0xa5
'Mai ', # 0xa6
'Ge ', # 0xa7
'Chao ', # 0xa8
'Fu ', # 0xa9
'Mian ', # 0xaa
'Mian ', # 0xab
'Fu ', # 0xac
'Pao ', # 0xad
'Qu ', # 0xae
'Qu ', # 0xaf
'Mou ', # 0xb0
'Fu ', # 0xb1
'Xian ', # 0xb2
'Lai ', # 0xb3
'Qu ', # 0xb4
'Mian ', # 0xb5
'[?] ', # 0xb6
'Feng ', # 0xb7
'Fu ', # 0xb8
'Qu ', # 0xb9
'Mian ', # 0xba
'Ma ', # 0xbb
'Mo ', # 0xbc
'Mo ', # 0xbd
'Hui ', # 0xbe
'Ma ', # 0xbf
'Zou ', # 0xc0
'Nen ', # 0xc1
'Fen ', # 0xc2
'Huang ', # 0xc3
'Huang ', # 0xc4
'Jin ', # 0xc5
'Guang ', # 0xc6
'Tian ', # 0xc7
'Tou ', # 0xc8
'Heng ', # 0xc9
'Xi ', # 0xca
'Kuang ', # 0xcb
'Heng ', # 0xcc
'Shu ', # 0xcd
'Li ', # 0xce
'Nian ', # 0xcf
'Chi ', # 0xd0
'Hei ', # 0xd1
'Hei ', # 0xd2
'Yi ', # 0xd3
'Qian ', # 0xd4
'Dan ', # 0xd5
'Xi ', # 0xd6
'Tuan ', # 0xd7
'Mo ', # 0xd8
'Mo ', # 0xd9
'Qian ', # 0xda
'Dai ', # 0xdb
'Chu ', # 0xdc
'You ', # 0xdd
'Dian ', # 0xde
'Yi ', # 0xdf
'Xia ', # 0xe0
'Yan ', # 0xe1
'Qu ', # 0xe2
'Mei ', # 0xe3
'Yan ', # 0xe4
'Jing ', # 0xe5
'Yu ', # 0xe6
'Li ', # 0xe7
'Dang ', # 0xe8
'Du ', # 0xe9
'Can ', # 0xea
'Yin ', # 0xeb
'An ', # 0xec
'Yan ', # 0xed
'Tan ', # 0xee
'An ', # 0xef
'Zhen ', # 0xf0
'Dai ', # 0xf1
'Can ', # 0xf2
'Yi ', # 0xf3
'Mei ', # 0xf4
'Dan ', # 0xf5
'Yan ', # 0xf6
'Du ', # 0xf7
'Lu ', # 0xf8
'Zhi ', # 0xf9
'Fen ', # 0xfa
'Fu ', # 0xfb
'Fu ', # 0xfc
'Min ', # 0xfd
'Min ', # 0xfe
'Yuan ', # 0xff
)
|
CornerstoneLabs/club-prototype
|
refs/heads/master
|
app-server/classes/models.py
|
1
|
"""Classes."""
from django.db import models
from django.contrib.auth.models import User
import datetime
DAY_NAME = [
'Monday',
'Tuesday',
'Wednesday',
'Thursday',
'Friday',
'Saturday',
'Sunday'
]
DAY_PICKER = [(i, DAY_NAME[i]) for i in range(7)]
HOURS_PICKER = [(i, str(i).rjust(2, '0')) for i in range(24)]
MINUTES_PICKER = [(i, str(i).rjust(2, '0')) for i in range(0, 60, 15)]
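# For illustration (comment added, not in the original file): DAY_PICKER is
# [(0, 'Monday'), ..., (6, 'Sunday')], HOURS_PICKER runs (0, '00') to (23, '23')
# and MINUTES_PICKER is [(0, '00'), (15, '15'), (30, '30'), (45, '45')].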
class Location(models.Model):
"""Location description."""
title = models.CharField(max_length=2000)
def __str__(self):
"""Return text."""
return self.title
class ClassSchedule(models.Model):
"""A single scheduled item."""
day = models.IntegerField(default=0, choices=DAY_PICKER)
start_hours = models.IntegerField(choices=HOURS_PICKER)
start_minutes = models.IntegerField(choices=MINUTES_PICKER)
end_hours = models.IntegerField(choices=HOURS_PICKER)
end_minutes = models.IntegerField(choices=MINUTES_PICKER)
location = models.ForeignKey(Location)
teacher = models.ManyToManyField(User)
class Class(models.Model):
"""A class definition."""
class_schedule = models.ManyToManyField(ClassSchedule)
title = models.CharField(max_length=1000)
image = models.ImageField(blank=True, null=True)
#
# Semester data for future
#
recurring = models.BooleanField(default=True)
sessions_start = models.DateField(blank=True, null=True)
sessions_end = models.DateField(blank=True, null=True)
content = models.TextField(blank=True, null=True)
max_participants = models.IntegerField(blank=True, null=True)
participants = models.ManyToManyField(User, blank=True, related_name='participant')
def image_url(self):
"""Return image URL."""
if self.image and self.image.url:
return self.image.url
else:
return ''
def __str__(self):
"""Return title."""
return self.title
class ClassSession(models.Model):
"""A single session of a class."""
parent_class = models.ForeignKey(Class)
scheduled_class = models.ForeignKey(ClassSchedule, blank=True, null=True)
session_start = models.DateTimeField()
session_end = models.DateTimeField()
checked_in = models.ManyToManyField(User, blank=True, related_name='checked_in')
def session_day(self):
"""Start date."""
return self.session_start.strftime("%Y-%m-%d")
def start_time(self):
"""Start time."""
return self.session_start.strftime("%H:%M")
def end_time(self):
"""Start time."""
return self.session_end.strftime("%H:%M")
def __str__(self):
"""Return title."""
return '%s %s %s' % (self.parent_class.title, self.session_start, self.session_end)
class ClassSessionNotification(models.Model):
"""Notification for a single session."""
text = models.CharField(max_length=2000)
author = models.ForeignKey(User)
date_published = models.DateField(default=datetime.date.today)
session = models.ForeignKey(ClassSession)
liked = models.ManyToManyField(User, blank=True, related_name='liked')
|
marcuskelly/recover
|
refs/heads/master
|
Lib/site-packages/Crypto/Math/Primality.py
|
4
|
# ===================================================================
#
# Copyright (c) 2014, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
"""Functions to create and test prime numbers.
:undocumented: __package__
"""
from Crypto.Math.Numbers import Integer
from Crypto import Random
COMPOSITE = 0
PROBABLY_PRIME = 1
def miller_rabin_test(candidate, iterations, randfunc=None):
"""Perform a Miller-Rabin primality test on an integer.
The test is specified in Section C.3.1 of `FIPS PUB 186-4`__.
:Parameters:
candidate : integer
The number to test for primality.
iterations : integer
The maximum number of iterations to perform before
declaring a candidate a probable prime.
randfunc : callable
An RNG function where bases are taken from.
:Returns:
``Primality.COMPOSITE`` or ``Primality.PROBABLY_PRIME``.
.. __: http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf
"""
if not isinstance(candidate, Integer):
candidate = Integer(candidate)
if candidate.is_even():
return COMPOSITE
one = Integer(1)
minus_one = Integer(candidate - 1)
if randfunc is None:
randfunc = Random.new().read
# Step 1 and 2
m = Integer(minus_one)
a = 0
while m.is_even():
m >>= 1
a += 1
# Skip step 3
# Step 4
for i in range(iterations):
# Step 4.1-2
base = 1
while base in (one, minus_one):
base = Integer.random_range(min_inclusive=2,
max_inclusive=candidate - 2)
assert(2 <= base <= candidate - 2)
# Step 4.3-4.4
z = pow(base, m, candidate)
if z in (one, minus_one):
continue
# Step 4.5
for j in range(1, a):
z = pow(z, 2, candidate)
if z == minus_one:
break
if z == one:
return COMPOSITE
else:
return COMPOSITE
# Step 5
return PROBABLY_PRIME
def lucas_test(candidate):
"""Perform a Lucas primality test on an integer.
The test is specified in Section C.3.3 of `FIPS PUB 186-4`__.
:Parameters:
candidate : integer
The number to test for primality.
:Returns:
``Primality.COMPOSITE`` or ``Primality.PROBABLY_PRIME``.
.. __: http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf
"""
if not isinstance(candidate, Integer):
candidate = Integer(candidate)
# Step 1
if candidate.is_even() or candidate.is_perfect_square():
return COMPOSITE
# Step 2
def alternate():
sgn = 1
value = 5
for x in range(10):
yield sgn * value
sgn, value = -sgn, value + 2
for D in alternate():
js = Integer.jacobi_symbol(D, candidate)
if js == 0:
return COMPOSITE
if js == -1:
break
else:
return COMPOSITE
# Found D. P=1 and Q=(1-D)/4 (note that Q is guaranteed to be an integer)
# Step 3
# This is \delta(n) = n - jacobi(D/n)
K = candidate + 1
# Step 4
r = K.size_in_bits() - 1
# Step 5
# U_1=1 and V_1=P
U_i = Integer(1)
V_i = Integer(1)
U_temp = Integer(0)
V_temp = Integer(0)
# Step 6
for i in range(r - 1, -1, -1):
# Square
# U_temp = U_i * V_i % candidate
U_temp.set(U_i)
U_temp *= V_i
U_temp %= candidate
# V_temp = (((V_i ** 2 + (U_i ** 2 * D)) * K) >> 1) % candidate
V_temp.set(U_i)
V_temp *= U_i
V_temp *= D
V_temp.multiply_accumulate(V_i, V_i)
if V_temp.is_odd():
V_temp += candidate
V_temp >>= 1
V_temp %= candidate
# Multiply
if K.get_bit(i):
# U_i = (((U_temp + V_temp) * K) >> 1) % candidate
U_i.set(U_temp)
U_i += V_temp
if U_i.is_odd():
U_i += candidate
U_i >>= 1
U_i %= candidate
# V_i = (((V_temp + U_temp * D) * K) >> 1) % candidate
V_i.set(V_temp)
V_i.multiply_accumulate(U_temp, D)
if V_i.is_odd():
V_i += candidate
V_i >>= 1
V_i %= candidate
else:
U_i.set(U_temp)
V_i.set(V_temp)
# Step 7
if U_i == 0:
return PROBABLY_PRIME
return COMPOSITE
from Crypto.Util.number import sieve_base as _sieve_base
## The optimal number of small primes to use for the sieve
## is probably dependent on the platform and the candidate size
_sieve_base = _sieve_base[:100]
def test_probable_prime(candidate, randfunc=None):
"""Test if a number is prime.
A number is qualified as prime if it passes a certain
number of Miller-Rabin tests (dependent on the size
of the number, but such that probability of a false
positive is less than 10^-30) and a single Lucas test.
For instance, a 1024-bit candidate will need to pass
4 Miller-Rabin tests.
:Parameters:
candidate : integer
The number to test for primality.
randfunc : callable
The routine to draw random bytes from to select Miller-Rabin bases.
:Returns:
    ``PROBABLY_PRIME`` if the number is prime with very high probability.
``COMPOSITE`` if the number is a composite.
For efficiency reasons, ``COMPOSITE`` is also returned for small primes.
"""
if randfunc is None:
randfunc = Random.new().read
if not isinstance(candidate, Integer):
candidate = Integer(candidate)
# First, check trial division by the smallest primes
try:
list(map(candidate.fail_if_divisible_by, _sieve_base))
except ValueError:
        return COMPOSITE
# These are the number of Miller-Rabin iterations s.t. p(k, t) < 1E-30,
# with p(k, t) being the probability that a randomly chosen k-bit number
# is composite but still survives t MR iterations.
mr_ranges = ((220, 30), (280, 20), (390, 15), (512, 10),
(620, 7), (740, 6), (890, 5), (1200, 4),
(1700, 3), (3700, 2))
bit_size = candidate.size_in_bits()
try:
mr_iterations = list([x for x in mr_ranges if bit_size < x[0]])[0][1]
except IndexError:
mr_iterations = 1
if miller_rabin_test(candidate, mr_iterations,
randfunc=randfunc) == COMPOSITE:
return COMPOSITE
if lucas_test(candidate) == COMPOSITE:
return COMPOSITE
return PROBABLY_PRIME
def generate_probable_prime(**kwargs):
"""Generate a random probable prime.
The prime will not have any specific properties
(e.g. it will not be a *strong* prime).
Random numbers are evaluated for primality until one
passes all tests, consisting of a certain number of
Miller-Rabin tests with random bases followed by
a single Lucas test.
The number of Miller-Rabin iterations is chosen such that
the probability that the output number is a non-prime is
less than 1E-30 (roughly 2^{-100}).
This approach is compliant to `FIPS PUB 186-4`__.
:Keywords:
exact_bits : integer
The desired size in bits of the probable prime.
It must be at least 160.
randfunc : callable
An RNG function where candidate primes are taken from.
prime_filter : callable
A function that takes an Integer as parameter and returns
True if the number can be passed to further primality tests,
False if it should be immediately discarded.
:Return:
A probable prime in the range 2^exact_bits > p > 2^(exact_bits-1).
.. __: http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf
"""
exact_bits = kwargs.pop("exact_bits", None)
randfunc = kwargs.pop("randfunc", None)
prime_filter = kwargs.pop("prime_filter", lambda x: True)
if kwargs:
print("Unknown parameters:", list(kwargs.keys()))
if exact_bits is None:
raise ValueError("Missing exact_bits parameter")
if exact_bits < 160:
raise ValueError("Prime number is not big enough.")
if randfunc is None:
randfunc = Random.new().read
result = COMPOSITE
while result == COMPOSITE:
candidate = Integer.random(exact_bits=exact_bits,
randfunc=randfunc) | 1
if not prime_filter(candidate):
continue
result = test_probable_prime(candidate, randfunc)
return candidate
def generate_probable_safe_prime(**kwargs):
"""Generate a random, probable safe prime.
Note this operation is much slower than generating a simple prime.
:Keywords:
exact_bits : integer
The desired size in bits of the probable safe prime.
randfunc : callable
An RNG function where candidate primes are taken from.
:Return:
A probable safe prime in the range
2^exact_bits > p > 2^(exact_bits-1).
"""
exact_bits = kwargs.pop("exact_bits", None)
randfunc = kwargs.pop("randfunc", None)
if kwargs:
print("Unknown parameters:", list(kwargs.keys()))
if randfunc is None:
randfunc = Random.new().read
result = COMPOSITE
while result == COMPOSITE:
q = generate_probable_prime(exact_bits=exact_bits - 1, randfunc=randfunc)
candidate = q * 2 + 1
if candidate.size_in_bits() != exact_bits:
continue
result = test_probable_prime(candidate, randfunc=randfunc)
return candidate
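# --- Usage sketch (added; not part of the original module) ---
# A minimal, hedged example of the public helpers above. The demo values are
# illustrative assumptions only: 2**127 - 1 is a known Mersenne prime, and
# 160 bits is the smallest size generate_probable_prime() accepts.
if __name__ == "__main__":
    # Large enough to get past the small-prime trial division step.
    assert test_probable_prime(2**127 - 1) == PROBABLY_PRIME
    # 2**127 + 1 is composite (divisible by 3).
    assert test_probable_prime(2**127 + 1) == COMPOSITE
    # Generate a random 160-bit probable prime.
    p = generate_probable_prime(exact_bits=160)
    assert p.size_in_bits() == 160
    print("Primality sketch OK:", p)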
|
tmtowtdi/MontyLacuna
|
refs/heads/master
|
lib/lacuna/buildings/boring/corn.py
|
1
|
from lacuna.building import MyBuilding
class corn(MyBuilding):
path = 'corn'
def __init__( self, client, body_id:int = 0, building_id:int = 0 ):
super().__init__( client, body_id, building_id )
|
stryder199/RyarkAssignments
|
refs/heads/master
|
Assignment2/web2py/gluon/contrib/login_methods/openid_auth.py
|
2
|
#!/usr/bin/env python
# coding: utf8
"""
OpenID authentication for web2py
Allows using OpenID login together with the web2py built-in login.
By default, to support OpenID login, put this in your db.py
>>> from gluon.contrib.login_methods.openid_auth import OpenIDAuth
>>> auth.settings.login_form = OpenIDAuth(auth)
To show OpenID list in user profile, you can add the following code
before the end of function user() of your_app/controllers/default.py
+ if (request.args and request.args(0) == "profile"):
+ form = DIV(form, openid_login_form.list_user_openids())
return dict(form=form, login_form=login_form, register_form=register_form, self_registration=self_registration)
More detail in the description of the class OpenIDAuth.
Requirements:
python-openid version 2.2.5 or later
Reference:
* w2p openID
http://w2popenid.appspot.com/init/default/wiki/w2popenid
* RPX and web2py auth module
http://www.web2pyslices.com/main/slices/take_slice/28
* built-in file: gluon/contrib/login_methods/rpx_account.py
* built-in file: gluon/tools.py (Auth class)
"""
import time
from datetime import datetime, timedelta
from gluon.html import *
from gluon.http import redirect
from gluon.storage import Storage, Messages
from gluon.sql import Field, SQLField
from gluon.validators import IS_NOT_EMPTY, IS_NOT_IN_DB
try:
import openid.consumer.consumer
from openid.association import Association
from openid.store.interface import OpenIDStore
from openid.extensions.sreg import SRegRequest, SRegResponse
from openid.store import nonce
from openid.consumer.discover import DiscoveryFailure
except ImportError, err:
raise ImportError("OpenIDAuth requires python-openid package")
DEFAULT = lambda: None
class OpenIDAuth(object):
"""
OpenIDAuth
It supports the logout_url, implementing the get_user and login_form
for cas usage of gluon.tools.Auth.
It also uses the ExtendedLoginForm to allow the OpenIDAuth login_methods
combined with the standard logon/register procedure.
    It uses the OpenID Consumer when rendering the form and begins the OpenID
authentication.
Example: (put these code after auth.define_tables() in your models.)
auth = Auth(globals(), db) # authentication/authorization
...
auth.define_tables() # creates all needed tables
...
#include in your model after auth has been defined
from gluon.contrib.login_methods.openid_auth import OpenIDAuth
openid_login_form = OpenIDAuth(request, auth, db)
from gluon.contrib.login_methods.extended_login_form import ExtendedLoginForm
extended_login_form = ExtendedLoginForm(request, auth, openid_login_form,
signals=['oid','janrain_nonce'])
auth.settings.login_form = extended_login_form
"""
def __init__(self, auth):
self.auth = auth
self.db = auth.db
self.environment = auth.environment
request = self.environment.request
self.nextvar = '_next'
self.realm = 'http://%s' % request.env.http_host
self.login_url = auth.environment.URL(r=request, f='user', args=['login'])
self.return_to_url = self.realm + self.login_url
self.table_alt_logins_name = "alt_logins"
if not auth.settings.table_user:
raise
self.table_user = self.auth.settings.table_user
self.openid_expiration = 15 #minutes
self.messages = self._define_messages()
if not self.table_alt_logins_name in self.db.tables:
self._define_alt_login_table()
def _define_messages(self):
messages = Messages(self.environment.T)
messages.label_alt_login_username = 'Sign-in with OpenID: '
messages.label_add_alt_login_username = 'Add a new OpenID: '
messages.submit_button = 'Sign in'
messages.submit_button_add = 'Add'
messages.a_delete = 'Delete'
messages.comment_openid_signin = 'What is OpenID?'
messages.comment_openid_help_title = 'Start using your OpenID'
messages.comment_openid_help_url = 'http://openid.net/get-an-openid/start-using-your-openid/'
messages.openid_fail_discover = 'Failed to discover OpenID service. Check your OpenID or "More about OpenID"?'
        messages.flash_openid_expired = 'OpenID expired. Please login or authenticate OpenID again. Sorry for the inconvenience.'
messages.flash_openid_associated = 'OpenID associated'
messages.flash_associate_openid = 'Please login or register an account for this OpenID.'
        messages.p_openid_not_registered = "This OpenID hasn't been registered. " \
+ "Please login to associate with it or register an account for it."
messages.flash_openid_authenticated = 'OpenID authenticated successfully.'
messages.flash_openid_fail_authentication = 'OpenID authentication failed. (Error message: %s)'
messages.flash_openid_canceled = 'OpenID authentication canceled by user.'
messages.flash_openid_need_setup = 'OpenID authentication needs to be setup by the user with the provider first.'
messages.h_openid_login = 'OpenID Login'
messages.h_openid_list = 'OpenID List'
return messages
def _define_alt_login_table(self):
"""
Define the OpenID login table.
        Note: type is what I used for our project. We're going to support 'facebook' and
        'plurk' alternate login methods. Otherwise it's always 'openid' and you
        may not need it. This should be easy to change.
(Just remove the field of "type" and remove the
"and db.alt_logins.type == type_" in _find_matched_openid function)
"""
db = self.db
table = db.define_table(
self.table_alt_logins_name,
Field('username', length=512, default=''),
Field('type', length=128, default='openid', readable=False),
Field('user', self.table_user, readable=False),
)
table.username.requires = IS_NOT_IN_DB(db, table.username)
self.table_alt_logins = table
def logout_url(self, next):
"""
Delete the w2popenid record in session as logout
"""
if self.environment.session.w2popenid:
del(self.environment.session.w2popenid)
return next
def login_form(self):
"""
Start to process the OpenID response if 'janrain_nonce' in request parameters
and not processed yet. Else return the OpenID form for login.
"""
request = self.environment.request
if request.vars.has_key('janrain_nonce') and not self._processed():
self._process_response()
return self.auth()
return self._form()
def get_user(self):
"""
It supports the logout_url, implementing the get_user and login_form
for cas usage of gluon.tools.Auth.
"""
environment = self.environment
request = environment.request
args = request.args
if args[0] == 'logout':
return True # Let logout_url got called
if environment.session.w2popenid:
w2popenid = environment.session.w2popenid
db = self.db
if (w2popenid.ok is True and w2popenid.oid): # OpenID authenticated
if self._w2popenid_expired(w2popenid):
del(self.environment.session.w2popenid)
flash = self.messages.flash_openid_expired
environment.session.warning = flash
redirect(self.auth.settings.login_url)
oid = self._remove_protocol(w2popenid.oid)
alt_login = self._find_matched_openid(db, oid)
nextvar = self.nextvar
# This OpenID not in the database. If user logged in then add it
# into database, else ask user to login or register.
if not alt_login:
if self.auth.is_logged_in():
# TODO: ask first maybe
self._associate_user_openid(self.auth.user, oid)
if self.environment.session.w2popenid:
del(self.environment.session.w2popenid)
environment.session.flash = self.messages.flash_openid_associated
if request.vars.has_key(nextvar):
redirect(request.vars[nextvar])
redirect(self.auth.settings.login_next)
if not request.vars.has_key(nextvar):
# no next var, add it and do login again
# so if user login or register can go back here to associate the OpenID
redirect(self.environment.URL(r=request,
args=['login'],
vars={nextvar:self.login_url}))
self.login_form = self._form_with_notification()
environment.session.flash = self.messages.flash_associate_openid
return None # need to login or register to associate this openid
# Get existed OpenID user
user = db(self.table_user.id==alt_login.user).select().first()
if user:
if self.environment.session.w2popenid:
del(self.environment.session.w2popenid)
if 'username' in self.table_user.fields():
username = 'username'
elif 'email' in self.table_user.fields():
username = 'email'
return {username: user[username]} if user else None # login success (almost)
return None # just start to login
def _find_matched_openid(self, db, oid, type_='openid'):
"""
        Get the matched OpenID record for the given oid.
"""
query = ((db.alt_logins.username == oid) & (db.alt_logins.type == type_))
alt_login = db(query).select().first() # Get the OpenID record
return alt_login
def _associate_user_openid(self, user, oid):
"""
Associate the user logged in with given OpenID
"""
# print "[DB] %s authenticated" % oid
self.db.alt_logins.insert(username=oid, user=user.id)
def _form_with_notification(self):
"""
Render the form for normal login with a notice of OpenID authenticated
"""
form = DIV()
# TODO: check when will happen
if self.auth.settings.login_form in (self.auth, self):
self.auth.settings.login_form = self.auth
form = DIV(self.auth())
register_note = DIV(P(self.messages.p_openid_not_registered))
form.components.append(register_note)
return lambda: form
def _remove_protocol(self, oid):
"""
Remove https:// or http:// from oid url
"""
protocol = 'https://'
if oid.startswith(protocol):
oid = oid[len(protocol):]
return oid
protocol = 'http://'
if oid.startswith(protocol):
oid = oid[len(protocol):]
return oid
return oid
def _init_consumerhelper(self):
"""
Initialize the ConsumerHelper
"""
if not hasattr(self, "consumerhelper"):
self.consumerhelper = ConsumerHelper(self.environment.session,
self.db)
return self.consumerhelper
def _form(self, style=None):
form = DIV(H3(self.messages.h_openid_login), self._login_form(style))
return form
def _login_form(self,
openid_field_label=None,
submit_button=None,
_next=None,
style=None):
"""
Render the form for OpenID login
"""
def warning_openid_fail(session):
session.warning = messages.openid_fail_discover
style = style or """
background-attachment: scroll;
background-repeat: no-repeat;
background-image: url("http://wiki.openid.net/f/openid-16x16.gif");
background-position: 0% 50%;
background-color: transparent;
padding-left: 18px;
width: 400px;
"""
style = style.replace("\n","")
request = self.environment.request
session = self.environment.session
messages = self.messages
hidden_next_input = ""
if _next == 'profile':
profile_url = self.environment.URL(r=request, f='user', args=['profile'])
hidden_next_input = INPUT(_type="hidden", _name="_next", _value=profile_url)
form = FORM(openid_field_label or self.messages.label_alt_login_username,
INPUT(_type="input", _name="oid",
requires=IS_NOT_EMPTY(error_message=messages.openid_fail_discover),
_style=style),
hidden_next_input,
INPUT(_type="submit", _value=submit_button or messages.submit_button),
" ",
A(messages.comment_openid_signin,
_href=messages.comment_openid_help_url,
_title=messages.comment_openid_help_title,
_class='openid-identifier',
_target="_blank"),
_action=self.login_url
)
if form.accepts(request.vars, session):
oid = request.vars.oid
consumerhelper = self._init_consumerhelper()
url = self.login_url
return_to_url = self.return_to_url
if not oid:
warning_openid_fail(session)
redirect(url)
try:
if request.vars.has_key('_next'):
return_to_url = self.return_to_url + '?_next=' + request.vars._next
url = consumerhelper.begin(oid, self.realm, return_to_url)
except DiscoveryFailure:
warning_openid_fail(session)
redirect(url)
return form
def _processed(self):
"""
Check if w2popenid authentication is processed.
Return True if processed else False.
"""
processed = (hasattr(self.environment.session, 'w2popenid') and
self.environment.session.w2popenid.ok is True)
return processed
def _set_w2popenid_expiration(self, w2popenid):
"""
Set expiration for OpenID authentication.
"""
w2popenid.expiration = datetime.now() + timedelta(minutes=self.openid_expiration)
def _w2popenid_expired(self, w2popenid):
"""
Check if w2popenid authentication is expired.
Return True if expired else False.
"""
return (not w2popenid.expiration) or (datetime.now() > w2popenid.expiration)
def _process_response(self):
"""
Process the OpenID by ConsumerHelper.
"""
environment = self.environment
request = environment.request
request_vars = request.vars
consumerhelper = self._init_consumerhelper()
process_status = consumerhelper.process_response(request_vars, self.return_to_url)
if process_status == "success":
w2popenid = environment.session.w2popenid
user_data = self.consumerhelper.sreg()
environment.session.w2popenid.ok = True
self._set_w2popenid_expiration(w2popenid)
w2popenid.user_data = user_data
environment.session.flash = self.messages.flash_openid_authenticated
elif process_status == "failure":
flash = self.messages.flash_openid_fail_authentication % consumerhelper.error_message
environment.session.warning = flash
elif process_status == "cancel":
environment.session.warning = self.messages.flash_openid_canceled
elif process_status == "setup_needed":
environment.session.warning = self.messages.flash_openid_need_setup
def list_user_openids(self):
messages = self.messages
environment = self.environment
request = environment.request
if request.vars.has_key('delete_openid'):
self.remove_openid(request.vars.delete_openid)
query = self.db.alt_logins.user == self.auth.user.id
alt_logins = self.db(query).select()
l = []
for alt_login in alt_logins:
username = alt_login.username
delete_href = environment.URL(r=request, f='user',
args=['profile'],
vars={'delete_openid': username})
delete_link = A(messages.a_delete, _href=delete_href)
l.append(LI(username, " ", delete_link))
profile_url = environment.URL(r=request, f='user', args=['profile'])
#return_to_url = self.return_to_url + '?' + self.nextvar + '=' + profile_url
openid_list = DIV(H3(messages.h_openid_list), UL(l),
self._login_form(
_next='profile',
submit_button=messages.submit_button_add,
openid_field_label=messages.label_add_alt_login_username)
)
return openid_list
def remove_openid(self, openid):
query = self.db.alt_logins.username == openid
self.db(query).delete()
class ConsumerHelper(object):
"""
    ConsumerHelper wraps the python-openid Consumer, keeping the OpenID state
    in the web2py session and store.
"""
def __init__(self, session, db):
self.session = session
store = self._init_store(db)
self.consumer = openid.consumer.consumer.Consumer(session, store)
def _init_store(self, db):
"""
Initialize Web2pyStore
"""
if not hasattr(self, "store"):
store = Web2pyStore(db)
session = self.session
if not session.has_key('w2popenid'):
session.w2popenid = Storage()
self.store = store
return self.store
def begin(self, oid, realm, return_to_url):
"""
Begin the OpenID authentication
"""
w2popenid = self.session.w2popenid
w2popenid.oid = oid
auth_req = self.consumer.begin(oid)
auth_req.addExtension(SRegRequest(required=['email','nickname']))
url = auth_req.redirectURL(return_to=return_to_url, realm=realm)
return url
def process_response(self, request_vars, return_to_url):
"""
        Complete the OpenID authentication process and return a status string.
"""
resp = self.consumer.complete(request_vars, return_to_url)
if resp:
if resp.status == openid.consumer.consumer.SUCCESS:
self.resp = resp
if hasattr(resp, "identity_url"):
self.session.w2popenid.oid = resp.identity_url
return "success"
if resp.status == openid.consumer.consumer.FAILURE:
self.error_message = resp.message
return "failure"
if resp.status == openid.consumer.consumer.CANCEL:
return "cancel"
if resp.status == openid.consumer.consumer.SETUP_NEEDED:
return "setup_needed"
return "no resp"
def sreg(self):
"""
Try to get OpenID Simple Registation
http://openid.net/specs/openid-simple-registration-extension-1_0.html
"""
if self.resp:
resp = self.resp
sreg_resp = SRegResponse.fromSuccessResponse(resp)
return sreg_resp.data if sreg_resp else None
else:
return None
class Web2pyStore(OpenIDStore):
"""
Web2pyStore
This class implements the OpenIDStore interface. OpenID stores take care
of persisting nonces and associations. The Janrain Python OpenID library
comes with implementations for file and memory storage. Web2pyStore uses
    the web2py db abstraction layer. See the source code docs of OpenIDStore
for a comprehensive description of this interface.
"""
def __init__(self, database):
self.database = database
self.table_oid_associations_name = 'oid_associations'
self.table_oid_nonces_name = 'oid_nonces'
self._initDB()
def _initDB(self):
if self.table_oid_associations_name not in self.database:
self.database.define_table(self.table_oid_associations_name,
SQLField('server_url', 'string', length=2047, required=True),
SQLField('handle', 'string', length=255, required=True),
SQLField('secret', 'blob', required=True),
SQLField('issued', 'integer', required=True),
SQLField('lifetime', 'integer', required=True),
SQLField('assoc_type', 'string', length=64, required=True)
)
if self.table_oid_nonces_name not in self.database:
self.database.define_table(self.table_oid_nonces_name,
SQLField('server_url', 'string', length=2047, required=True),
SQLField('timestamp', 'integer', required=True),
SQLField('salt', 'string', length=40, required=True)
)
def storeAssociation(self, server_url, association):
"""
Store associations. If there already is one with the same
server_url and handle in the table replace it.
"""
db = self.database
query = (db.oid_associations.server_url == server_url) & (db.oid_associations.handle == association.handle)
db(query).delete()
db.oid_associations.insert(server_url = server_url,
handle = association.handle,
secret = association.secret,
issued = association.issued,
lifetime = association.lifetime,
                                   assoc_type = association.assoc_type)
def getAssociation(self, server_url, handle=None):
"""
        Return the association for server_url and handle. If handle is
        None, return the latest association for that server_url.
        Return None if no association can be found.
"""
db = self.database
query = (db.oid_associations.server_url == server_url)
if handle:
query &= (db.oid_associations.handle == handle)
rows = db(query).select(orderby=db.oid_associations.issued)
keep_assoc, _ = self._removeExpiredAssocations(rows)
if len(keep_assoc) == 0:
return None
else:
assoc = keep_assoc.pop() # pop the last one as it should be the latest one
return Association(assoc['handle'],
assoc['secret'],
assoc['issued'],
assoc['lifetime'],
assoc['assoc_type'])
def removeAssociation(self, server_url, handle):
db = self.database
query = (db.oid_associations.server_url == server_url) & (db.oid_associations.handle == handle)
return db(query).delete() != None
def useNonce(self, server_url, timestamp, salt):
"""
        This method returns False if a nonce has been used before or its
timestamp is not current.
"""
db = self.database
if abs(timestamp - time.time()) > nonce.SKEW:
return False
query = (db.oid_nonces.server_url == server_url) & (db.oid_nonces.timestamp == timestamp) & (db.oid_nonces.salt == salt)
if db(query).count() > 0:
return False
else:
db.oid_nonces.insert(server_url = server_url,
timestamp = timestamp,
salt = salt)
return True
def _removeExpiredAssocations(self, rows):
"""
This helper function is not part of the interface. Given a list of
association rows it checks which associations have expired and
deletes them from the db. It returns a tuple of the form
([valid_assoc], no_of_expired_assoc_deleted).
"""
db = self.database
keep_assoc = []
remove_assoc = []
t1970 = time.time()
for r in rows:
if r['issued'] + r['lifetime'] < t1970:
remove_assoc.append(r)
else:
keep_assoc.append(r)
for r in remove_assoc:
del db.oid_associations[r['id']]
return (keep_assoc, len(remove_assoc)) # return tuple (list of valid associations, number of deleted associations)
def cleanupNonces(self):
"""
Remove expired nonce entries from DB and return the number
of entries deleted.
"""
db = self.database
query = (db.oid_nonces.timestamp < time.time() - nonce.SKEW)
return db(query).delete()
def cleanupAssociations(self):
"""
Remove expired associations from db and return the number
of entries deleted.
"""
db = self.database
query = (db.oid_associations.id > 0)
return self._removeExpiredAssocations(db(query).select())[1] #return number of assoc removed
def cleanup(self):
"""
This method should be run periodically to free the db from
expired nonce and association entries.
"""
return self.cleanupNonces(), self.cleanupAssociations()
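# --- Usage sketch (added; not part of the original module) ---
# Hedged illustration of how Web2pyStore plugs directly into python-openid,
# mirroring what ConsumerHelper does above. 'db' and 'session' are assumed to
# be a web2py DAL instance and the request session; the OpenID URL and realm
# are purely hypothetical.
#
#   store = Web2pyStore(db)
#   consumer = openid.consumer.consumer.Consumer(session, store)
#   auth_request = consumer.begin('https://openid.example.org/alice')
#   redirect(auth_request.redirectURL(realm='http://myapp.example',
#                                     return_to='http://myapp.example/user/login'))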
|
Mj258/weiboapi
|
refs/heads/master
|
srapyDemo/envs/Lib/site-packages/zope/interface/tests/odd.py
|
79
|
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Odd meta class that doesn't subclass type.
This is used for testing support for ExtensionClass in new interfaces.
>>> class A(object):
... __metaclass__ = MetaClass
... a = 1
...
>>> A.__name__
'A'
>>> A.__bases__ == (object,)
True
>>> class B(object):
... __metaclass__ = MetaClass
... b = 1
...
>>> class C(A, B): pass
...
>>> C.__name__
'C'
>>> int(C.__bases__ == (A, B))
1
>>> a = A()
>>> aa = A()
>>> a.a
1
>>> aa.a
1
>>> aa.a = 2
>>> a.a
1
>>> aa.a
2
>>> c = C()
>>> c.a
1
>>> c.b
1
>>> c.b = 2
>>> c.b
2
>>> C.c = 1
>>> c.c
1
>>> import sys
>>> if sys.version[0] == '2': # This test only makes sense under Python 2.x
... from types import ClassType
... assert not isinstance(C, (type, ClassType))
>>> int(C.__class__.__class__ is C.__class__)
1
"""
# class OddClass is an odd meta class
class MetaMetaClass(type):
def __getattribute__(self, name):
if name == '__class__':
return self
return type.__getattribute__(self, name)
class MetaClass(object):
"""Odd classes
"""
__metaclass__ = MetaMetaClass
def __init__(self, name, bases, dict):
self.__name__ = name
self.__bases__ = bases
self.__dict__.update(dict)
def __call__(self):
return OddInstance(self)
def __getattr__(self, name):
for b in self.__bases__:
v = getattr(b, name, self)
if v is not self:
return v
raise AttributeError(name)
def __repr__(self):
return "<odd class %s at %s>" % (self.__name__, hex(id(self)))
class OddInstance(object):
def __init__(self, cls):
self.__dict__['__class__'] = cls
def __getattribute__(self, name):
dict = object.__getattribute__(self, '__dict__')
if name == '__dict__':
return dict
v = dict.get(name, self)
if v is not self:
return v
return getattr(dict['__class__'], name)
def __setattr__(self, name, v):
self.__dict__[name] = v
def __delattr__(self, name):
del self.__dict__[name]
def __repr__(self):
return "<odd %s instance at %s>" % (
self.__class__.__name__, hex(id(self)))
# DocTest:
if __name__ == "__main__":
import doctest, __main__
doctest.testmod(__main__, isprivate=lambda *a: False)
|
Samuel789/MediPi
|
refs/heads/master
|
MedManagementWeb/env/lib/python3.5/site-packages/django/conf/locale/zh_Hans/formats.py
|
1008
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y年n月j日' # 2016年9月5日
TIME_FORMAT = 'H:i' # 20:45
DATETIME_FORMAT = 'Y年n月j日 H:i' # 2016年9月5日 20:45
YEAR_MONTH_FORMAT = 'Y年n月' # 2016年9月
MONTH_DAY_FORMAT = 'm月j日' # 9月5日
SHORT_DATE_FORMAT = 'Y年n月j日' # 2016年9月5日
SHORT_DATETIME_FORMAT = 'Y年n月j日 H:i' # 2016年9月5日 20:45
FIRST_DAY_OF_WEEK = 1 # 星期一 (Monday)
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%Y/%m/%d', # '2016/09/05'
'%Y-%m-%d', # '2016-09-05'
'%Y年%n月%j日', # '2016年9月5日'
]
TIME_INPUT_FORMATS = [
'%H:%M', # '20:45'
'%H:%M:%S', # '20:45:29'
'%H:%M:%S.%f', # '20:45:29.000200'
]
DATETIME_INPUT_FORMATS = [
'%Y/%m/%d %H:%M', # '2016/09/05 20:45'
'%Y-%m-%d %H:%M', # '2016-09-05 20:45'
'%Y年%n月%j日 %H:%M', # '2016年9月5日 14:45'
'%Y/%m/%d %H:%M:%S', # '2016/09/05 20:45:29'
'%Y-%m-%d %H:%M:%S', # '2016-09-05 20:45:29'
'%Y年%n月%j日 %H:%M:%S', # '2016年9月5日 20:45:29'
'%Y/%m/%d %H:%M:%S.%f', # '2016/09/05 20:45:29.000200'
'%Y-%m-%d %H:%M:%S.%f', # '2016-09-05 20:45:29.000200'
    '%Y年%n月%j日 %H:%M:%S.%f', # '2016年9月5日 20:45:29.000200'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ''
NUMBER_GROUPING = 4
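# --- Usage sketch (added; not part of the original file) ---
# Hedged illustration of how Django resolves these named formats once the
# zh-hans locale is active; the bare settings.configure() call is an
# assumption made only so the snippet can run standalone.
if __name__ == "__main__":
    import datetime
    import django
    from django.conf import settings
    settings.configure(USE_I18N=True, USE_L10N=True)
    django.setup()
    from django.utils import formats, translation
    translation.activate('zh-hans')
    # Expected output: 2016年9月5日
    print(formats.date_format(datetime.date(2016, 9, 5), 'DATE_FORMAT'))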
|
sambayless/monosat
|
refs/heads/master
|
examples/python/nqueens.py
|
1
|
#!/usr/bin/env python3
# Authors Rémi Delmas & Christophe Garion
#License CC BY-NC-SA 3.0
#This work is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike
#3.0 Unported license (CC BY-NC-SA 3.0)
#You are free to Share (copy, distribute and transmit) and to Remix (adapt) this work under the following conditions:
#
#Attribution – You must attribute the work in the manner specified by the author or licensor (but not in
#any way that suggests that they endorse you or your use of the work).
#Noncommercial – You may not use this work for commercial purposes.
#Share Alike – If you alter, transform, or build upon this work, you may distribute the resulting work only
#under the same or similar license to this one.
#See http://creativecommons.org/licenses/by-nc-sa/3.0/ for more details.
import sys
import time
from monosat import *
if len(sys.argv) != 2:
print("You should give the number of queens as parameter!")
exit(1)
print("N-queens in MonoSAT")
VERBOSE = False
nb_queens = int(sys.argv[1])
if VERBOSE:
print("Problem with " + str(nb_queens) + " queens")
def print_cell(row, col):
if QMATRIX[row][col].value():
print("1 ", end="")
else:
print("0 ", end="")
def print_matrix():
for row in range(nb_queens):
print()
for col in range(nb_queens):
print_cell(row, col)
print()
time_start_gen = time.process_time()
# a matrix storing variables
QMATRIX = [[Var() for x in range(nb_queens)] for x in range(nb_queens)]
# row clauses
for row in range(nb_queens):
# build clause for existence of a queen in a row
c = [QMATRIX[row][col] for col in range(nb_queens)]
AssertClause(c)
# if queen is in a column, no other queen in the row
for col in range(nb_queens):
for other in range(nb_queens):
if other != col:
if VERBOSE:
print("~({0},{1}), ~({2},{3})".format(row, col, row, other))
AssertClause([Not(QMATRIX[row][col]), Not(QMATRIX[row][other])])
# column clauses
for col in range(nb_queens):
# build clause for existence of a queen in a column
c = [QMATRIX[row][col] for row in range(nb_queens)]
AssertClause(c)
# if queen is in a row, no other queen in the column
for row in range(nb_queens):
for other in range(nb_queens):
if other != row:
if VERBOSE:
print("~({0},{1}), ~({2},{3})".format(row, col, other, col))
AssertClause([Not(QMATRIX[row][col]), Not(QMATRIX[other][col])])
# diag clauses: setting a queen, compute all exclusion for diags
for row in range(nb_queens):
for col in range(nb_queens):
for x in range(1, min(nb_queens - row, nb_queens - col)):
if VERBOSE:
print('~({0}, {1}), ~({2}, {3})'.format(row, col, row+x, col+x))
AssertClause([Not(QMATRIX[row][col]), Not(QMATRIX[row+x][col+x])])
for x in range(1, min(row, col) + 1):
if VERBOSE:
print('~({0}, {1}), ~({2}, {3})'.format(row, col, row-x, col-x))
AssertClause([Not(QMATRIX[row][col]), Not(QMATRIX[row-x][col-x])])
for x in range(1, min(row, nb_queens - 1 - col) + 1):
if VERBOSE:
print('~({0}, {1}), ~({2}, {3})'.format(row, col, row-x, col+x))
AssertClause([Not(QMATRIX[row][col]), Not(QMATRIX[row-x][col+x])])
for x in range(1, min(nb_queens - 1 - row, col) + 1):
if VERBOSE:
                print('~({0}, {1}), ~({2}, {3})'.format(row, col, row+x, col-x))
AssertClause([Not(QMATRIX[row][col]), Not(QMATRIX[row+x][col-x])])
time_end_gen = time.process_time()
time_start_solve = time.process_time()
result = Solve()
time_end_solve = time.process_time()
if result:
print_matrix()
else:
print("UNSAT!")
print("time needed to generate clauses: " + str(time_end_gen - time_start_gen) + "s")
print("time needed to solve problem: " + str(time_end_solve - time_start_solve) + "s")
|
ferranti/watchdog
|
refs/heads/master
|
import/parse/rvdb.py
|
3
|
# historical_voting.py - Parse and import historical voting by county
# for years 1964 - 2004
# Copyright (C) 2008 didier deshommes <dfdeshom@gmail.com>
STATE_CODES = '../data/crawl/manual/rvdb/state_codes'
DATA_PATH = '../data/crawl/manual/rvdb/allYears/'
import glob
def read_state_codes(fname=STATE_CODES):
"""Turn `fname` into a dict."""
state_codes = {}
for line in file(fname).readlines():
line = line.split(' ',1)
state_codes[line[0]] = line[1].strip().title()
return state_codes
def parse_historical_voting():
"""
Parse county-level data. The data is in the format:
STATE_CODE COUNTY_NAME DEMOCRAT_COUNT REPUBLICAN_COUNT OTHER_COUNT
"""
state_codes = read_state_codes()
files = glob.glob(DATA_PATH + '*')
for fname in files[:-1]: # skip junk file
for line in file(fname).readlines():
code, county_name, numbers = line.split('"')
dem_count, rep_count, other_count = numbers.split()
state = state_codes[code.strip()]
yield {
'n_democrats': dem_count,
'n_republicans': rep_count,
'n_other': other_count,
'state_name': state,
'state_fips': code.strip(),
'county_name': county_name,
'year': fname.split('/')[-1]
}
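# --- Illustrative note (added; not part of the original script) ---
# A hypothetical input line such as:
#   01 "Autauga" 3891 5622 145
# splits on the double quotes into state code, county name and vote counts,
# and (assuming '01' maps to 'Alabama' in the state codes file) would yield:
#   {'n_democrats': '3891', 'n_republicans': '5622', 'n_other': '145',
#    'state_name': 'Alabama', 'state_fips': '01',
#    'county_name': 'Autauga', 'year': '<file name, e.g. 1964>'}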
if __name__ == "__main__":
import tools
tools.export(parse_historical_voting())
|
XiaosongWei/blink-crosswalk
|
refs/heads/master
|
PRESUBMIT.py
|
16
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for Blink.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import sys
_EXCLUDED_PATHS = ()
def _CheckForVersionControlConflictsInFile(input_api, f):
pattern = input_api.re.compile('^(?:<<<<<<<|>>>>>>>) |^=======$')
errors = []
for line_num, line in f.ChangedContents():
if pattern.match(line):
errors.append(' %s:%d %s' % (f.LocalPath(), line_num, line))
return errors
def _CheckForVersionControlConflicts(input_api, output_api):
"""Usually this is not intentional and will cause a compile failure."""
errors = []
for f in input_api.AffectedFiles():
errors.extend(_CheckForVersionControlConflictsInFile(input_api, f))
results = []
if errors:
results.append(output_api.PresubmitError(
'Version control conflict markers found, please resolve.', errors))
return results
def _CheckWatchlist(input_api, output_api):
"""Check that the WATCHLIST file parses correctly."""
errors = []
for f in input_api.AffectedFiles():
if f.LocalPath() != 'WATCHLISTS':
continue
import StringIO
import logging
import watchlists
log_buffer = StringIO.StringIO()
log_handler = logging.StreamHandler(log_buffer)
log_handler.setFormatter(
logging.Formatter('%(levelname)s: %(message)s'))
logger = logging.getLogger()
logger.addHandler(log_handler)
wl = watchlists.Watchlists(input_api.change.RepositoryRoot())
logger.removeHandler(log_handler)
log_handler.flush()
log_buffer.flush()
if log_buffer.getvalue():
errors.append(output_api.PresubmitError(
'Cannot parse WATCHLISTS file, please resolve.',
log_buffer.getvalue().splitlines()))
return errors
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
# We should figure out what license checks we actually want to use.
license_header = r'.*'
results = []
results.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, excluded_paths=_EXCLUDED_PATHS,
maxlen=800, license_header=license_header))
results.extend(_CheckForVersionControlConflicts(input_api, output_api))
results.extend(_CheckPatchFiles(input_api, output_api))
results.extend(_CheckTestExpectations(input_api, output_api))
results.extend(_CheckUnwantedDependencies(input_api, output_api))
results.extend(_CheckChromiumPlatformMacros(input_api, output_api))
results.extend(_CheckWatchlist(input_api, output_api))
results.extend(_CheckFilePermissions(input_api, output_api))
return results
def _CheckSubversionConfig(input_api, output_api):
"""Verifies the subversion config file is correctly setup.
Checks that autoprops are enabled, returns an error otherwise.
"""
join = input_api.os_path.join
if input_api.platform == 'win32':
appdata = input_api.environ.get('APPDATA', '')
if not appdata:
return [output_api.PresubmitError('%APPDATA% is not configured.')]
path = join(appdata, 'Subversion', 'config')
else:
home = input_api.environ.get('HOME', '')
if not home:
return [output_api.PresubmitError('$HOME is not configured.')]
path = join(home, '.subversion', 'config')
error_msg = (
'Please look at http://dev.chromium.org/developers/coding-style to\n'
'configure your subversion configuration file. This enables automatic\n'
'properties to simplify the project maintenance.\n'
'Pro-tip: just download and install\n'
'http://src.chromium.org/viewvc/chrome/trunk/tools/build/slave/config\n')
try:
lines = open(path, 'r').read().splitlines()
# Make sure auto-props is enabled and check for 2 Chromium standard
# auto-prop.
if (not '*.cc = svn:eol-style=LF' in lines or
not '*.pdf = svn:mime-type=application/pdf' in lines or
not 'enable-auto-props = yes' in lines):
return [
output_api.PresubmitNotifyResult(
'It looks like you have not configured your subversion config '
'file or it is not up-to-date.\n' + error_msg)
]
except (OSError, IOError):
return [
output_api.PresubmitNotifyResult(
'Can\'t find your subversion config file.\n' + error_msg)
]
return []
def _CheckPatchFiles(input_api, output_api):
problems = [f.LocalPath() for f in input_api.AffectedFiles()
if f.LocalPath().endswith(('.orig', '.rej'))]
if problems:
return [output_api.PresubmitError(
"Don't commit .rej and .orig files.", problems)]
else:
return []
def _CheckTestExpectations(input_api, output_api):
local_paths = [f.LocalPath() for f in input_api.AffectedFiles()]
if any(path.startswith('LayoutTests') for path in local_paths):
lint_path = input_api.os_path.join(input_api.PresubmitLocalPath(),
'Tools', 'Scripts', 'lint-test-expectations')
_, errs = input_api.subprocess.Popen(
[input_api.python_executable, lint_path],
stdout=input_api.subprocess.PIPE,
stderr=input_api.subprocess.PIPE).communicate()
if not errs:
return [output_api.PresubmitError(
"lint-test-expectations failed "
"to produce output; check by hand. ")]
if errs.strip() != 'Lint succeeded.':
return [output_api.PresubmitError(errs)]
return []
def _CheckStyle(input_api, output_api):
style_checker_path = input_api.os_path.join(input_api.PresubmitLocalPath(),
'Tools', 'Scripts', 'check-webkit-style')
args = ([input_api.python_executable, style_checker_path, '--diff-files']
+ [f.LocalPath() for f in input_api.AffectedFiles()])
results = []
try:
child = input_api.subprocess.Popen(args,
stderr=input_api.subprocess.PIPE)
_, stderrdata = child.communicate()
if child.returncode != 0:
results.append(output_api.PresubmitError(
'check-webkit-style failed', [stderrdata]))
except Exception as e:
results.append(output_api.PresubmitNotifyResult(
'Could not run check-webkit-style', [str(e)]))
return results
def _CheckUnwantedDependencies(input_api, output_api):
"""Runs checkdeps on #include statements added in this
change. Breaking - rules is an error, breaking ! rules is a
warning.
"""
# We need to wait until we have an input_api object and use this
# roundabout construct to import checkdeps because this file is
# eval-ed and thus doesn't have __file__.
original_sys_path = sys.path
try:
sys.path = sys.path + [input_api.os_path.realpath(input_api.os_path.join(
input_api.PresubmitLocalPath(), '..', '..', 'buildtools', 'checkdeps'))]
import checkdeps
from cpp_checker import CppChecker
from rules import Rule
finally:
# Restore sys.path to what it was before.
sys.path = original_sys_path
added_includes = []
for f in input_api.AffectedFiles():
if not CppChecker.IsCppFile(f.LocalPath()):
continue
changed_lines = [line for line_num, line in f.ChangedContents()]
added_includes.append([f.LocalPath(), changed_lines])
deps_checker = checkdeps.DepsChecker(
input_api.os_path.join(input_api.PresubmitLocalPath()))
error_descriptions = []
warning_descriptions = []
for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes(
added_includes):
description_with_path = '%s\n %s' % (path, rule_description)
if rule_type == Rule.DISALLOW:
error_descriptions.append(description_with_path)
else:
warning_descriptions.append(description_with_path)
results = []
if error_descriptions:
results.append(output_api.PresubmitError(
'You added one or more #includes that violate checkdeps rules.',
error_descriptions))
if warning_descriptions:
results.append(output_api.PresubmitPromptOrNotify(
'You added one or more #includes of files that are temporarily\n'
'allowed but being removed. Can you avoid introducing the\n'
'#include? See relevant DEPS file(s) for details and contacts.',
warning_descriptions))
return results
def _CheckChromiumPlatformMacros(input_api, output_api, source_file_filter=None):
"""Ensures that Blink code uses WTF's platform macros instead of
Chromium's. Using the latter has resulted in at least one subtle
build breakage."""
os_macro_re = input_api.re.compile(r'^\s*#(el)?if.*\bOS_')
errors = input_api.canned_checks._FindNewViolationsOfRule(
lambda _, x: not os_macro_re.search(x),
input_api, source_file_filter)
errors = ['Found use of Chromium OS_* macro in %s. '
'Use WTF platform macros instead.' % violation for violation in errors]
if errors:
return [output_api.PresubmitPromptWarning('\n'.join(errors))]
return []
def _CheckForPrintfDebugging(input_api, output_api):
"""Generally speaking, we'd prefer not to land patches that printf
debug output."""
printf_re = input_api.re.compile(r'^\s*printf\(')
errors = input_api.canned_checks._FindNewViolationsOfRule(
lambda _, x: not printf_re.search(x),
input_api, None)
errors = [' * %s' % violation for violation in errors]
if errors:
return [output_api.PresubmitPromptOrNotify(
'printf debugging is best debugging! That said, it might '
            'be a good idea to drop the following occurrences from '
'your patch before uploading:\n%s' % '\n'.join(errors))]
return []
def _CheckForDangerousTestFunctions(input_api, output_api):
"""Tests should not be using serveAsynchronousMockedRequests, since it does
not guarantee that the threaded HTML parser will have completed."""
serve_async_requests_re = input_api.re.compile(
r'serveAsynchronousMockedRequests')
errors = input_api.canned_checks._FindNewViolationsOfRule(
lambda _, x: not serve_async_requests_re.search(x),
input_api, None)
errors = [' * %s' % violation for violation in errors]
if errors:
return [output_api.PresubmitError(
'You should be using FrameTestHelpers::'
'pumpPendingRequests() instead of '
'serveAsynchronousMockedRequests() in the following '
'locations:\n%s' % '\n'.join(errors))]
return []
def _CheckForFailInFile(input_api, f):
pattern = input_api.re.compile('^FAIL')
errors = []
for line_num, line in f.ChangedContents():
if pattern.match(line):
errors.append(' %s:%d %s' % (f.LocalPath(), line_num, line))
return errors
def _CheckFilePermissions(input_api, output_api):
"""Check that all files have their permissions properly set."""
if input_api.platform == 'win32':
return []
path = input_api.os_path.join(
'..', '..', 'tools', 'checkperms', 'checkperms.py')
args = [sys.executable, path, '--root', input_api.change.RepositoryRoot()]
for f in input_api.AffectedFiles():
args += ['--file', f.LocalPath()]
checkperms = input_api.subprocess.Popen(
args, stdout=input_api.subprocess.PIPE)
errors = checkperms.communicate()[0].strip()
if errors:
return [output_api.PresubmitError(
'checkperms.py failed.', errors.splitlines())]
return []
def _CheckForInvalidPreferenceError(input_api, output_api):
pattern = input_api.re.compile('Invalid name for preference: (.+)')
results = []
for f in input_api.AffectedFiles():
if not f.LocalPath().endswith('-expected.txt'):
continue
for line_num, line in f.ChangedContents():
error = pattern.search(line)
if error:
results.append(output_api.PresubmitError('Found an invalid preference %s in expected result %s:%s' % (error.group(1), f, line_num)))
return results
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(_CheckStyle(input_api, output_api))
results.extend(_CheckForPrintfDebugging(input_api, output_api))
results.extend(_CheckForDangerousTestFunctions(input_api, output_api))
results.extend(_CheckForInvalidPreferenceError(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
json_url='http://blink-status.appspot.com/current?format=json'))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
results.extend(_CheckSubversionConfig(input_api, output_api))
return results
def GetPreferredTryMasters(project, change):
return {
'tryserver.blink': {
'android_blink_compile_dbg': set(['defaulttests']),
'android_blink_compile_rel': set(['defaulttests']),
'android_chromium_gn_compile_rel': set(['defaulttests']),
'linux_blink_compile_dbg': set(['defaulttests']),
'linux_blink_rel': set(['defaulttests']),
'linux_chromium_gn_rel': set(['defaulttests']),
'mac_blink_compile_dbg': set(['defaulttests']),
'mac_blink_rel': set(['defaulttests']),
'win_blink_compile_dbg': set(['defaulttests']),
'win_blink_rel': set(['defaulttests']),
},
}
|
z0by/django
|
refs/heads/master
|
django/contrib/gis/geoip/__init__.py
|
700
|
"""
This module houses the GeoIP object, a ctypes wrapper for the MaxMind GeoIP(R)
C API (http://www.maxmind.com/app/c). This is an alternative to the GPL
licensed Python GeoIP interface provided by MaxMind.
GeoIP(R) is a registered trademark of MaxMind, LLC of Boston, Massachusetts.
For IP-based geolocation, this module requires the GeoLite Country and City
datasets, in binary format (CSV will not work!). The datasets may be
downloaded from MaxMind at http://www.maxmind.com/download/geoip/database/.
Grab GeoIP.dat.gz and GeoLiteCity.dat.gz, and unzip them in the directory
corresponding to settings.GEOIP_PATH.
"""
__all__ = ['HAS_GEOIP']
try:
from .base import GeoIP, GeoIPException
HAS_GEOIP = True
__all__ += ['GeoIP', 'GeoIPException']
except RuntimeError: # libgeoip.py raises a RuntimeError if no GeoIP library is found
HAS_GEOIP = False
|
arulalant/mmDiagnosis
|
refs/heads/master
|
diagnosis1/gui_quickly/py_2.6.6/diagnosis/diagnosis/dialog.py
|
3
|
import gtk
class DialogBox():
def __init__(self, builder):
self.dialog = builder.get_object('dialogBox')
self.dialogLabel = builder.get_object('dialog_label')
#self.diagnosis = builder.get_object('diagnosis_window')
def title(self, txt):
self.dialog.set_title(txt)
def run(self, txt):
self.dialogLabel.set_text(txt)
#self.diagnosis.set_sensitive(False)
return self.dialog.run()
def hide(self):
self.dialog.hide()
#self.diagnosis.set_sensitive(True)
def destroy(self):
self.dialog.destroy()
#self.diagnosis.set_sensitive(True)
|
Jionglun/2015cd_midterm2
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/unittest/__main__.py
|
737
|
"""Main entry point"""
import sys
if sys.argv[0].endswith("__main__.py"):
import os.path
# We change sys.argv[0] to make help message more useful
# use executable without path, unquoted
# (it's just a hint anyway)
# (if you have spaces in your executable you get what you deserve!)
executable = os.path.basename(sys.executable)
sys.argv[0] = executable + " -m unittest"
del os
__unittest = True
from .main import main, TestProgram, USAGE_AS_MAIN
TestProgram.USAGE = USAGE_AS_MAIN
main(module=None)
|
apixandru/intellij-community
|
refs/heads/master
|
python/testData/testRunner/__init__.py
|
981
|
__author__ = 'traff'
|
deffi/protoplot
|
refs/heads/master
|
plotlib/pl_container.py
|
1
|
from plotlib import OptionsContainer
class Container(list):
'''
* A list
* Has an 'options' property, which is an OptionsContainer representing
default options for all contained items
    * Has an add method which creates and adds an instance of a pre-defined
class (klass)
'''
def __init__(self, klass, *args, **kwargs):
super().__init__(*args, **kwargs)
self._klass = klass
self.options = OptionsContainer()
def add(self, *args, **kwargs):
content = self._klass(*args, **kwargs)
self.append(content)
return content
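# --- Usage sketch (added; not part of the original module) ---
# Hedged illustration of the add() behaviour; 'Series' stands in for a
# hypothetical item class defined elsewhere in the project.
#
#   series = Container(Series)
#   s1 = series.add(data=[1, 2, 3])   # instantiates Series(data=[1, 2, 3]) and appends it
#   assert s1 in series and len(series) == 1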
|
mchaparro/intercambios
|
refs/heads/master
|
intercambios/views/evento.py
|
1
|
from django.http import *
from django.shortcuts import render_to_response,redirect
from django.template import RequestContext
from django.conf import settings
from intercambios.models import Evento, ParticipantesEvento, Usuario
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib import messages
import pytz
import json
from django.conf import settings
import urllib
import datetime
from collections import namedtuple
def _json_object_hook(d): return namedtuple('object', d.keys())(*d.values())
def json2obj(data): return json.loads(data, object_hook=_json_object_hook)
@login_required
def crear_evento(request):
if request.method == 'POST':
local_TZ = pytz.timezone(settings.TIME_ZONE)
nombre_evento = request.POST['nombre_evento']
nombre_evento = ' '.join(nombre_evento.split())
fecha = request.POST['fecha']
participantes = request.POST['participantes']
if not int(participantes) > 1:
messages.warning(request, '<h2 class="Diamond">El evento debe tener más de 1 participante</h2>')
return HttpResponseRedirect('/')
precio = request.POST['precio']
precio = precio.replace("$", "")
precio = precio.replace(",", "")
fecha_evento = datetime.datetime.strptime(fecha, '%m/%d/%Y').date()
evento = Evento(admin=request.user, nombre=nombre_evento, precio=precio, numero_participantes=participantes, fecha_evento=fecha_evento)
evento.save()
ParticipantesEvento.objects.get_or_create(usuario = request.user, evento = evento)
messages.success(request, '<h1 class="Diamond">%s!! ahora eres el administrador del evento <b>%s</b></br>debes mandar el link de tu evento a los demás participantes</h1>' % (request.user.nombre,evento.nombre))
return HttpResponse('{"evento": "%s"}' % evento.id, content_type='application/json')
return render_to_response('crear_evento.html', context_instance=RequestContext(request))
@login_required
def perfil_usuario(request):
if request.method == 'POST':
nombre = request.POST['nombre']
apodo = request.POST['apodo']
next = request.POST['next']
if nombre:
user = request.user
user.nombre = ' '.join(nombre.split())
user.apodo = ' '.join(apodo.split())
user.save()
messages.success(request, '<h1 class="Diamond">%s!! Se edito tu perfil con exito</h1>' % (request.user.nombre))
return HttpResponseRedirect(next)
return render_to_response('perfil_usuario.html', context_instance=RequestContext(request))
def perfil_usuario_modal(request):
if not request.user.is_authenticated():
return HttpResponse('Es necesario que vuelvas a iniciar sesion')
if request.method == 'POST':
nombre = request.POST['nombre']
apodo = request.POST['apodo']
if nombre:
user = request.user
user.nombre = ' '.join(nombre.split())
user.apodo = ' '.join(apodo.split())
user.save()
messages.success(request, '<h1 class="Diamond">%s!! Se edito tu perfil con exito</h1>' % (request.user.nombre))
return redirect('mis_eventos')
return render_to_response('perfil_usuario_modal.html', context_instance=RequestContext(request))
def format_fecha_delta(td):
dias = abs(td.days)
horas, remainder = divmod(td.seconds, 3600)
minutos, segundos = divmod(remainder, 60)
fecha_delta = {
'dias' : "%02d" % dias,
'horas' : "%02d" % horas,
'minutos' : "%02d" % minutos,
'segundos' : "%02d" % segundos
}
return fecha_delta
@login_required
def detalles_evento(request, id):
try:
evento = Evento.objects.get(id=id, estado='activo')
except:
messages.warning(request, '<h2 class="Diamond">No existe el evento seleccionado</h2>')
return HttpResponseRedirect('/')
participantes = evento.participantes_evento.all()
try:
regala_a = ParticipantesEvento.objects.get(evento_id=id,usuario=request.user)
regala_a = regala_a.intercambio
except:
messages.warning(request, '<h2 class="Diamond">No puedes acceder al evento: <b>"%s"</b> </br>es necesario solicitar una invitacion a %s</h2>' % (evento.nombre, evento.admin.nombre))
return HttpResponseRedirect('/')
fecha_evento = datetime.datetime.combine(evento.fecha_evento, datetime.time.min)
fecha_delta = fecha_evento - datetime.datetime.today()
fecha_delta = format_fecha_delta(fecha_delta)
if datetime.datetime.today().date() == evento.fecha_evento:
fecha_delta['dias'] = '00'
fecha_delta['horas'] = '00'
fecha_delta['minutos'] = '00'
fecha_delta['segundos'] = '00'
data={
'fecha_delta' : fecha_delta,
'nuevo_evento':evento,
'participantes':participantes,
'participante_admin': participantes.get(usuario=request.user),
'participantes_faltantes':evento.numero_participantes-participantes.count(),
'regala_a': regala_a
}
return render_to_response('detalles_evento.html',data, context_instance=RequestContext(request))
@login_required
def mis_eventos(request):
mis_eventos = request.user.mis_eventos.all().filter(estado="activo")
# url = 'http://intercambios-node.herokuapp.com/eventos/usuario/%s/' % request.user.id
# raw = urllib.urlopen(url)
# mis_eventos = raw.readlines()
# mis_eventos = json.loads(mis_eventos[0])
data={
'eventos_participa':list(mis_eventos)
}
return render_to_response('index.html',data, context_instance=RequestContext(request))
def editar_evento(request, id):
if not request.user.is_authenticated():
return HttpResponse('Es necesario que vuelvas a iniciar sesion')
try:
evento = Evento.objects.get(id=id,estado='activo')
except:
messages.warning(request, '<h2 class="Diamond">No existe ese evento</h2>')
return HttpResponseRedirect('/')
if not evento.admin == request.user:
return HttpResponseRedirect('/')
if request.method == 'POST':
local_TZ = pytz.timezone(settings.TIME_ZONE)
nombre_evento = request.POST['nombre_evento']
fecha = request.POST['fecha']
participantes = request.POST['participantes']
if not int(participantes) > 1:
messages.warning(request, '<h2 class="Diamond">El evento debe tener mas de 1 participante</h2>')
return HttpResponseRedirect('/detalles/evento/%s/' % evento.id)
precio = request.POST['precio']
precio = precio.replace("$", "")
precio = precio.replace(",", "")
if int(evento.participantes.count()) > int(participantes):
messages.warning(request, '<h2 class="Diamond">La cantidad de participantes es menor a los participantes actuales</h2>')
return HttpResponseRedirect('/detalles/evento/%s/' % evento.id)
fecha_evento = datetime.datetime.strptime(fecha, '%m/%d/%Y').date()
evento.nombre=nombre_evento
evento.precio=precio
evento.numero_participantes=participantes
evento.fecha_evento=fecha_evento
evento.save()
messages.warning(request, '<h2 class="Diamond">Se edito correctamente el evento %s</h2>' % evento.nombre)
return HttpResponseRedirect('/detalles/evento/%s/' % evento.id)
data={
'evento':evento,
}
return render_to_response('editar_evento.html',data, context_instance=RequestContext(request))
def cancelar_evento(request):
eventoID = request.POST['evento']
evento = Evento.objects.get(id=eventoID)
if request.user == evento.admin:
evento.estado='cancelado'
evento.save()
return HttpResponse('{}', content_type='application/json')
def borrar_participante(request):
participanteID = request.POST['participante']
eventoID = request.POST['evento']
evento = Evento.objects.get(id=eventoID)
participante = ParticipantesEvento.objects.get(id=participanteID)
if request.user == evento.admin:
participante.delete()
return HttpResponse('{}', content_type='application/json')
|
osh/gr-eventsim
|
refs/heads/master
|
apps/test_data_gen.py
|
1
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Test Data Gen
# Generated: Sat Dec 26 20:32:06 2015
##################################################
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from optparse import OptionParser
class test_data_gen(gr.top_block):
def __init__(self):
gr.top_block.__init__(self, "Test Data Gen")
##################################################
# Variables
##################################################
self.samp_rate = samp_rate = 25e6
##################################################
# Blocks
##################################################
self.blocks_head_0 = blocks.head(gr.sizeof_gr_complex*1, int(samp_rate*10))
self.blocks_file_sink_0 = blocks.file_sink(gr.sizeof_gr_complex*1, "test.dat", False)
self.blocks_file_sink_0.set_unbuffered(False)
self.analog_fastnoise_source_x_0 = analog.fastnoise_source_c(analog.GR_GAUSSIAN, 1, 0, 8192)
##################################################
# Connections
##################################################
self.connect((self.analog_fastnoise_source_x_0, 0), (self.blocks_head_0, 0))
self.connect((self.blocks_head_0, 0), (self.blocks_file_sink_0, 0))
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.blocks_head_0.set_length(int(self.samp_rate*10))
def main(top_block_cls=test_data_gen, options=None):
tb = top_block_cls()
tb.start()
tb.wait()
if __name__ == '__main__':
main()
|
nhippenmeyer/django
|
refs/heads/master
|
django/db/backends/postgresql/introspection.py
|
326
|
from __future__ import unicode_literals
from collections import namedtuple
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.utils.encoding import force_text
FieldInfo = namedtuple('FieldInfo', FieldInfo._fields + ('default',))
class DatabaseIntrospection(BaseDatabaseIntrospection):
# Maps type codes to Django Field types.
data_types_reverse = {
16: 'BooleanField',
17: 'BinaryField',
20: 'BigIntegerField',
21: 'SmallIntegerField',
23: 'IntegerField',
25: 'TextField',
700: 'FloatField',
701: 'FloatField',
869: 'GenericIPAddressField',
1042: 'CharField', # blank-padded
1043: 'CharField',
1082: 'DateField',
1083: 'TimeField',
1114: 'DateTimeField',
1184: 'DateTimeField',
1266: 'TimeField',
1700: 'DecimalField',
}
ignored_tables = []
_get_indexes_query = """
SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
pg_catalog.pg_index idx, pg_catalog.pg_attribute attr
WHERE c.oid = idx.indrelid
AND idx.indexrelid = c2.oid
AND attr.attrelid = c.oid
AND attr.attnum = idx.indkey[0]
AND c.relname = %s"""
def get_field_type(self, data_type, description):
field_type = super(DatabaseIntrospection, self).get_field_type(data_type, description)
if field_type == 'IntegerField' and description.default and 'nextval' in description.default:
return 'AutoField'
return field_type
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
cursor.execute("""
SELECT c.relname, c.relkind
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v')
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)""")
return [TableInfo(row[0], {'r': 't', 'v': 'v'}.get(row[1]))
for row in cursor.fetchall()
if row[0] not in self.ignored_tables]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
        # As cursor.description does not reliably return the nullable property,
# we have to query the information_schema (#7783)
cursor.execute("""
SELECT column_name, is_nullable, column_default
FROM information_schema.columns
WHERE table_name = %s""", [table_name])
field_map = {line[0]: line[1:] for line in cursor.fetchall()}
cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
return [FieldInfo(*((force_text(line[0]),) + line[1:6]
+ (field_map[force_text(line[0])][0] == 'YES', field_map[force_text(line[0])][1])))
for line in cursor.description]
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
cursor.execute("""
SELECT c2.relname, a1.attname, a2.attname
FROM pg_constraint con
LEFT JOIN pg_class c1 ON con.conrelid = c1.oid
LEFT JOIN pg_class c2 ON con.confrelid = c2.oid
LEFT JOIN pg_attribute a1 ON c1.oid = a1.attrelid AND a1.attnum = con.conkey[1]
LEFT JOIN pg_attribute a2 ON c2.oid = a2.attrelid AND a2.attnum = con.confkey[1]
WHERE c1.relname = %s
AND con.contype = 'f'""", [table_name])
relations = {}
for row in cursor.fetchall():
relations[row[1]] = (row[2], row[0])
return relations
def get_key_columns(self, cursor, table_name):
key_columns = []
cursor.execute("""
SELECT kcu.column_name, ccu.table_name AS referenced_table, ccu.column_name AS referenced_column
FROM information_schema.constraint_column_usage ccu
LEFT JOIN information_schema.key_column_usage kcu
ON ccu.constraint_catalog = kcu.constraint_catalog
AND ccu.constraint_schema = kcu.constraint_schema
AND ccu.constraint_name = kcu.constraint_name
LEFT JOIN information_schema.table_constraints tc
ON ccu.constraint_catalog = tc.constraint_catalog
AND ccu.constraint_schema = tc.constraint_schema
AND ccu.constraint_name = tc.constraint_name
WHERE kcu.table_name = %s AND tc.constraint_type = 'FOREIGN KEY'""", [table_name])
key_columns.extend(cursor.fetchall())
return key_columns
def get_indexes(self, cursor, table_name):
# This query retrieves each index on the given table, including the
# first associated field name
cursor.execute(self._get_indexes_query, [table_name])
indexes = {}
for row in cursor.fetchall():
# row[1] (idx.indkey) is stored in the DB as an array. It comes out as
# a string of space-separated integers. This designates the field
# indexes (1-based) of the fields that have indexes on the table.
# Here, we skip any indexes across multiple fields.
if ' ' in row[1]:
continue
if row[0] not in indexes:
indexes[row[0]] = {'primary_key': False, 'unique': False}
# It's possible to have the unique and PK constraints in separate indexes.
if row[3]:
indexes[row[0]]['primary_key'] = True
if row[2]:
indexes[row[0]]['unique'] = True
return indexes
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
"""
constraints = {}
# Loop over the key table, collecting things as constraints
# This will get PKs, FKs, and uniques, but not CHECK
cursor.execute("""
SELECT
kc.constraint_name,
kc.column_name,
c.constraint_type,
array(SELECT table_name::text || '.' || column_name::text
FROM information_schema.constraint_column_usage
WHERE constraint_name = kc.constraint_name)
FROM information_schema.key_column_usage AS kc
JOIN information_schema.table_constraints AS c ON
kc.table_schema = c.table_schema AND
kc.table_name = c.table_name AND
kc.constraint_name = c.constraint_name
WHERE
kc.table_schema = %s AND
kc.table_name = %s
ORDER BY kc.ordinal_position ASC
""", ["public", table_name])
for constraint, column, kind, used_cols in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": kind.lower() == "primary key",
"unique": kind.lower() in ["primary key", "unique"],
"foreign_key": tuple(used_cols[0].split(".", 1)) if kind.lower() == "foreign key" else None,
"check": False,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Now get CHECK constraint columns
cursor.execute("""
SELECT kc.constraint_name, kc.column_name
FROM information_schema.constraint_column_usage AS kc
JOIN information_schema.table_constraints AS c ON
kc.table_schema = c.table_schema AND
kc.table_name = c.table_name AND
kc.constraint_name = c.constraint_name
WHERE
c.constraint_type = 'CHECK' AND
kc.table_schema = %s AND
kc.table_name = %s
""", ["public", table_name])
for constraint, column in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": False,
"unique": False,
"foreign_key": None,
"check": True,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Now get indexes
cursor.execute("""
SELECT
c2.relname,
ARRAY(
SELECT (SELECT attname FROM pg_catalog.pg_attribute WHERE attnum = i AND attrelid = c.oid)
FROM unnest(idx.indkey) i
),
idx.indisunique,
idx.indisprimary
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
pg_catalog.pg_index idx
WHERE c.oid = idx.indrelid
AND idx.indexrelid = c2.oid
AND c.relname = %s
""", [table_name])
for index, columns, unique, primary in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": list(columns),
"primary_key": primary,
"unique": unique,
"foreign_key": None,
"check": False,
"index": True,
}
return constraints
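# Illustrative usage sketch (not part of the original module; the table name is
# hypothetical and a configured PostgreSQL connection is assumed):
#   from django.db import connection
#   with connection.cursor() as cursor:
#       constraints = connection.introspection.get_constraints(cursor, 'myapp_book')
#       for name, info in constraints.items():
#           print(name, info['columns'], info['primary_key'], info['foreign_key'])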
|
misisnik/ExternalInterface
|
refs/heads/master
|
ENV/Lib/encodings/aliases.py
|
726
|
""" Encoding Aliases Support
This module is used by the encodings package search function to
map encodings names to module names.
Note that the search function normalizes the encoding names before
doing the lookup, so the mapping will have to map normalized
encoding names to module names.
Contents:
The following aliases dictionary contains mappings of all IANA
character set names for which the Python core library provides
codecs. In addition to these, a few Python specific codec
aliases have also been added.
"""
aliases = {
# Please keep this list sorted alphabetically by value !
# ascii codec
'646' : 'ascii',
'ansi_x3.4_1968' : 'ascii',
'ansi_x3_4_1968' : 'ascii', # some email headers use this non-standard name
'ansi_x3.4_1986' : 'ascii',
'cp367' : 'ascii',
'csascii' : 'ascii',
'ibm367' : 'ascii',
'iso646_us' : 'ascii',
'iso_646.irv_1991' : 'ascii',
'iso_ir_6' : 'ascii',
'us' : 'ascii',
'us_ascii' : 'ascii',
# base64_codec codec
'base64' : 'base64_codec',
'base_64' : 'base64_codec',
# big5 codec
'big5_tw' : 'big5',
'csbig5' : 'big5',
# big5hkscs codec
'big5_hkscs' : 'big5hkscs',
'hkscs' : 'big5hkscs',
# bz2_codec codec
'bz2' : 'bz2_codec',
# cp037 codec
'037' : 'cp037',
'csibm037' : 'cp037',
'ebcdic_cp_ca' : 'cp037',
'ebcdic_cp_nl' : 'cp037',
'ebcdic_cp_us' : 'cp037',
'ebcdic_cp_wt' : 'cp037',
'ibm037' : 'cp037',
'ibm039' : 'cp037',
# cp1026 codec
'1026' : 'cp1026',
'csibm1026' : 'cp1026',
'ibm1026' : 'cp1026',
# cp1125 codec
'1125' : 'cp1125',
'ibm1125' : 'cp1125',
'cp866u' : 'cp1125',
'ruscii' : 'cp1125',
# cp1140 codec
'1140' : 'cp1140',
'ibm1140' : 'cp1140',
# cp1250 codec
'1250' : 'cp1250',
'windows_1250' : 'cp1250',
# cp1251 codec
'1251' : 'cp1251',
'windows_1251' : 'cp1251',
# cp1252 codec
'1252' : 'cp1252',
'windows_1252' : 'cp1252',
# cp1253 codec
'1253' : 'cp1253',
'windows_1253' : 'cp1253',
# cp1254 codec
'1254' : 'cp1254',
'windows_1254' : 'cp1254',
# cp1255 codec
'1255' : 'cp1255',
'windows_1255' : 'cp1255',
# cp1256 codec
'1256' : 'cp1256',
'windows_1256' : 'cp1256',
# cp1257 codec
'1257' : 'cp1257',
'windows_1257' : 'cp1257',
# cp1258 codec
'1258' : 'cp1258',
'windows_1258' : 'cp1258',
# cp273 codec
'273' : 'cp273',
'ibm273' : 'cp273',
'csibm273' : 'cp273',
# cp424 codec
'424' : 'cp424',
'csibm424' : 'cp424',
'ebcdic_cp_he' : 'cp424',
'ibm424' : 'cp424',
# cp437 codec
'437' : 'cp437',
'cspc8codepage437' : 'cp437',
'ibm437' : 'cp437',
# cp500 codec
'500' : 'cp500',
'csibm500' : 'cp500',
'ebcdic_cp_be' : 'cp500',
'ebcdic_cp_ch' : 'cp500',
'ibm500' : 'cp500',
# cp775 codec
'775' : 'cp775',
'cspc775baltic' : 'cp775',
'ibm775' : 'cp775',
# cp850 codec
'850' : 'cp850',
'cspc850multilingual' : 'cp850',
'ibm850' : 'cp850',
# cp852 codec
'852' : 'cp852',
'cspcp852' : 'cp852',
'ibm852' : 'cp852',
# cp855 codec
'855' : 'cp855',
'csibm855' : 'cp855',
'ibm855' : 'cp855',
# cp857 codec
'857' : 'cp857',
'csibm857' : 'cp857',
'ibm857' : 'cp857',
# cp858 codec
'858' : 'cp858',
'csibm858' : 'cp858',
'ibm858' : 'cp858',
# cp860 codec
'860' : 'cp860',
'csibm860' : 'cp860',
'ibm860' : 'cp860',
# cp861 codec
'861' : 'cp861',
'cp_is' : 'cp861',
'csibm861' : 'cp861',
'ibm861' : 'cp861',
# cp862 codec
'862' : 'cp862',
'cspc862latinhebrew' : 'cp862',
'ibm862' : 'cp862',
# cp863 codec
'863' : 'cp863',
'csibm863' : 'cp863',
'ibm863' : 'cp863',
# cp864 codec
'864' : 'cp864',
'csibm864' : 'cp864',
'ibm864' : 'cp864',
# cp865 codec
'865' : 'cp865',
'csibm865' : 'cp865',
'ibm865' : 'cp865',
# cp866 codec
'866' : 'cp866',
'csibm866' : 'cp866',
'ibm866' : 'cp866',
# cp869 codec
'869' : 'cp869',
'cp_gr' : 'cp869',
'csibm869' : 'cp869',
'ibm869' : 'cp869',
# cp932 codec
'932' : 'cp932',
'ms932' : 'cp932',
'mskanji' : 'cp932',
'ms_kanji' : 'cp932',
# cp949 codec
'949' : 'cp949',
'ms949' : 'cp949',
'uhc' : 'cp949',
# cp950 codec
'950' : 'cp950',
'ms950' : 'cp950',
# euc_jis_2004 codec
'jisx0213' : 'euc_jis_2004',
'eucjis2004' : 'euc_jis_2004',
'euc_jis2004' : 'euc_jis_2004',
# euc_jisx0213 codec
'eucjisx0213' : 'euc_jisx0213',
# euc_jp codec
'eucjp' : 'euc_jp',
'ujis' : 'euc_jp',
'u_jis' : 'euc_jp',
# euc_kr codec
'euckr' : 'euc_kr',
'korean' : 'euc_kr',
'ksc5601' : 'euc_kr',
'ks_c_5601' : 'euc_kr',
'ks_c_5601_1987' : 'euc_kr',
'ksx1001' : 'euc_kr',
'ks_x_1001' : 'euc_kr',
# gb18030 codec
'gb18030_2000' : 'gb18030',
# gb2312 codec
'chinese' : 'gb2312',
'csiso58gb231280' : 'gb2312',
'euc_cn' : 'gb2312',
'euccn' : 'gb2312',
'eucgb2312_cn' : 'gb2312',
'gb2312_1980' : 'gb2312',
'gb2312_80' : 'gb2312',
'iso_ir_58' : 'gb2312',
# gbk codec
'936' : 'gbk',
'cp936' : 'gbk',
'ms936' : 'gbk',
# hex_codec codec
'hex' : 'hex_codec',
# hp_roman8 codec
'roman8' : 'hp_roman8',
'r8' : 'hp_roman8',
'csHPRoman8' : 'hp_roman8',
# hz codec
'hzgb' : 'hz',
'hz_gb' : 'hz',
'hz_gb_2312' : 'hz',
# iso2022_jp codec
'csiso2022jp' : 'iso2022_jp',
'iso2022jp' : 'iso2022_jp',
'iso_2022_jp' : 'iso2022_jp',
# iso2022_jp_1 codec
'iso2022jp_1' : 'iso2022_jp_1',
'iso_2022_jp_1' : 'iso2022_jp_1',
# iso2022_jp_2 codec
'iso2022jp_2' : 'iso2022_jp_2',
'iso_2022_jp_2' : 'iso2022_jp_2',
# iso2022_jp_2004 codec
'iso_2022_jp_2004' : 'iso2022_jp_2004',
'iso2022jp_2004' : 'iso2022_jp_2004',
# iso2022_jp_3 codec
'iso2022jp_3' : 'iso2022_jp_3',
'iso_2022_jp_3' : 'iso2022_jp_3',
# iso2022_jp_ext codec
'iso2022jp_ext' : 'iso2022_jp_ext',
'iso_2022_jp_ext' : 'iso2022_jp_ext',
# iso2022_kr codec
'csiso2022kr' : 'iso2022_kr',
'iso2022kr' : 'iso2022_kr',
'iso_2022_kr' : 'iso2022_kr',
# iso8859_10 codec
'csisolatin6' : 'iso8859_10',
'iso_8859_10' : 'iso8859_10',
'iso_8859_10_1992' : 'iso8859_10',
'iso_ir_157' : 'iso8859_10',
'l6' : 'iso8859_10',
'latin6' : 'iso8859_10',
# iso8859_11 codec
'thai' : 'iso8859_11',
'iso_8859_11' : 'iso8859_11',
'iso_8859_11_2001' : 'iso8859_11',
# iso8859_13 codec
'iso_8859_13' : 'iso8859_13',
'l7' : 'iso8859_13',
'latin7' : 'iso8859_13',
# iso8859_14 codec
'iso_8859_14' : 'iso8859_14',
'iso_8859_14_1998' : 'iso8859_14',
'iso_celtic' : 'iso8859_14',
'iso_ir_199' : 'iso8859_14',
'l8' : 'iso8859_14',
'latin8' : 'iso8859_14',
# iso8859_15 codec
'iso_8859_15' : 'iso8859_15',
'l9' : 'iso8859_15',
'latin9' : 'iso8859_15',
# iso8859_16 codec
'iso_8859_16' : 'iso8859_16',
'iso_8859_16_2001' : 'iso8859_16',
'iso_ir_226' : 'iso8859_16',
'l10' : 'iso8859_16',
'latin10' : 'iso8859_16',
# iso8859_2 codec
'csisolatin2' : 'iso8859_2',
'iso_8859_2' : 'iso8859_2',
'iso_8859_2_1987' : 'iso8859_2',
'iso_ir_101' : 'iso8859_2',
'l2' : 'iso8859_2',
'latin2' : 'iso8859_2',
# iso8859_3 codec
'csisolatin3' : 'iso8859_3',
'iso_8859_3' : 'iso8859_3',
'iso_8859_3_1988' : 'iso8859_3',
'iso_ir_109' : 'iso8859_3',
'l3' : 'iso8859_3',
'latin3' : 'iso8859_3',
# iso8859_4 codec
'csisolatin4' : 'iso8859_4',
'iso_8859_4' : 'iso8859_4',
'iso_8859_4_1988' : 'iso8859_4',
'iso_ir_110' : 'iso8859_4',
'l4' : 'iso8859_4',
'latin4' : 'iso8859_4',
# iso8859_5 codec
'csisolatincyrillic' : 'iso8859_5',
'cyrillic' : 'iso8859_5',
'iso_8859_5' : 'iso8859_5',
'iso_8859_5_1988' : 'iso8859_5',
'iso_ir_144' : 'iso8859_5',
# iso8859_6 codec
'arabic' : 'iso8859_6',
'asmo_708' : 'iso8859_6',
'csisolatinarabic' : 'iso8859_6',
'ecma_114' : 'iso8859_6',
'iso_8859_6' : 'iso8859_6',
'iso_8859_6_1987' : 'iso8859_6',
'iso_ir_127' : 'iso8859_6',
# iso8859_7 codec
'csisolatingreek' : 'iso8859_7',
'ecma_118' : 'iso8859_7',
'elot_928' : 'iso8859_7',
'greek' : 'iso8859_7',
'greek8' : 'iso8859_7',
'iso_8859_7' : 'iso8859_7',
'iso_8859_7_1987' : 'iso8859_7',
'iso_ir_126' : 'iso8859_7',
# iso8859_8 codec
'csisolatinhebrew' : 'iso8859_8',
'hebrew' : 'iso8859_8',
'iso_8859_8' : 'iso8859_8',
'iso_8859_8_1988' : 'iso8859_8',
'iso_ir_138' : 'iso8859_8',
# iso8859_9 codec
'csisolatin5' : 'iso8859_9',
'iso_8859_9' : 'iso8859_9',
'iso_8859_9_1989' : 'iso8859_9',
'iso_ir_148' : 'iso8859_9',
'l5' : 'iso8859_9',
'latin5' : 'iso8859_9',
# johab codec
'cp1361' : 'johab',
'ms1361' : 'johab',
# koi8_r codec
'cskoi8r' : 'koi8_r',
# latin_1 codec
#
# Note that the latin_1 codec is implemented internally in C and a
# lot faster than the charmap codec iso8859_1 which uses the same
# encoding. This is why we discourage the use of the iso8859_1
# codec and alias it to latin_1 instead.
#
'8859' : 'latin_1',
'cp819' : 'latin_1',
'csisolatin1' : 'latin_1',
'ibm819' : 'latin_1',
'iso8859' : 'latin_1',
'iso8859_1' : 'latin_1',
'iso_8859_1' : 'latin_1',
'iso_8859_1_1987' : 'latin_1',
'iso_ir_100' : 'latin_1',
'l1' : 'latin_1',
'latin' : 'latin_1',
'latin1' : 'latin_1',
# mac_cyrillic codec
'maccyrillic' : 'mac_cyrillic',
# mac_greek codec
'macgreek' : 'mac_greek',
# mac_iceland codec
'maciceland' : 'mac_iceland',
# mac_latin2 codec
'maccentraleurope' : 'mac_latin2',
'maclatin2' : 'mac_latin2',
# mac_roman codec
'macintosh' : 'mac_roman',
'macroman' : 'mac_roman',
# mac_turkish codec
'macturkish' : 'mac_turkish',
# mbcs codec
'dbcs' : 'mbcs',
# ptcp154 codec
'csptcp154' : 'ptcp154',
'pt154' : 'ptcp154',
'cp154' : 'ptcp154',
'cyrillic_asian' : 'ptcp154',
# quopri_codec codec
'quopri' : 'quopri_codec',
'quoted_printable' : 'quopri_codec',
'quotedprintable' : 'quopri_codec',
# rot_13 codec
'rot13' : 'rot_13',
# shift_jis codec
'csshiftjis' : 'shift_jis',
'shiftjis' : 'shift_jis',
'sjis' : 'shift_jis',
's_jis' : 'shift_jis',
# shift_jis_2004 codec
'shiftjis2004' : 'shift_jis_2004',
'sjis_2004' : 'shift_jis_2004',
's_jis_2004' : 'shift_jis_2004',
# shift_jisx0213 codec
'shiftjisx0213' : 'shift_jisx0213',
'sjisx0213' : 'shift_jisx0213',
's_jisx0213' : 'shift_jisx0213',
# tactis codec
'tis260' : 'tactis',
# tis_620 codec
'tis620' : 'tis_620',
'tis_620_0' : 'tis_620',
'tis_620_2529_0' : 'tis_620',
'tis_620_2529_1' : 'tis_620',
'iso_ir_166' : 'tis_620',
# utf_16 codec
'u16' : 'utf_16',
'utf16' : 'utf_16',
# utf_16_be codec
'unicodebigunmarked' : 'utf_16_be',
'utf_16be' : 'utf_16_be',
# utf_16_le codec
'unicodelittleunmarked' : 'utf_16_le',
'utf_16le' : 'utf_16_le',
# utf_32 codec
'u32' : 'utf_32',
'utf32' : 'utf_32',
# utf_32_be codec
'utf_32be' : 'utf_32_be',
# utf_32_le codec
'utf_32le' : 'utf_32_le',
# utf_7 codec
'u7' : 'utf_7',
'utf7' : 'utf_7',
'unicode_1_1_utf_7' : 'utf_7',
# utf_8 codec
'u8' : 'utf_8',
'utf' : 'utf_8',
'utf8' : 'utf_8',
'utf8_ucs2' : 'utf_8',
'utf8_ucs4' : 'utf_8',
# uu_codec codec
'uu' : 'uu_codec',
# zlib_codec codec
'zip' : 'zlib_codec',
'zlib' : 'zlib_codec',
# temporary mac CJK aliases, will be replaced by proper codecs in 3.1
'x_mac_japanese' : 'shift_jis',
'x_mac_korean' : 'euc_kr',
'x_mac_simp_chinese' : 'gb2312',
'x_mac_trad_chinese' : 'big5',
}
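# Illustrative example (not part of the original module): the encodings search
# function lowercases and normalizes the requested name before consulting this
# table, so spellings such as 'latin1', 'L1' and 'ISO-8859-1' all reach the same
# built-in codec:
#   import codecs
#   codecs.lookup('latin1').name == codecs.lookup('ISO-8859-1').name  # True
#   b'caf\xe9'.decode('l1')  # 'café'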
|
amaleewilson/apartment-rating
|
refs/heads/master
|
apt_rater_basic/apartment.py
|
1
|
# This class stores information about apartments
class Apartment:
def __init__(self, name, distances_from_work):
self.attributes = dict([("name", name), ("distances", distances_from_work)])
        self.relevant_attributes = dict([("monthly_cost_of_commuting", self.calculateMonthlyCommute(self.attributes["distances"]))])
    def add_basic_attrs(self):
#one-time move in fees
self.attributes["deposit"] = input("deposit: ")
self.attributes["app_fee"] = input("application fee: ")
self.attributes["pet_deposit"] = input("pet deposit: ")
#monthly fees
self.attributes["rent"] = input("rent: ")
self.attributes["pet_rent"] = input("pet rent: ")
self.attributes["other_fees"] = input("other monthly fees: ")
self.attributes["net"] = input("is internet included? (True or False): ")
self.attributes["elec"] = input("is electricity included? (True or False): ")
self.attributes["water"] = input("is water included? (True or False): ")
#space
self.attributes["square_feet"] = input("square feet: ")
self.attributes["bed_count"] = input("number of bedrooms: ")
self.attributes["bath_count"] = input("number of bathrooms: ")
self.attributes["other_count"] = input("number of other rooms: ")
    @staticmethod
    def calculateMonthlyCommute(distances):
total_miles_driven_per_month = 0
for d in distances:
            total_miles_driven_per_month += d * 5 * 2 * 5 # 5 days per week * 2 trips per day * ~5 weeks per month because it is better to overestimate
return total_miles_driven_per_month * .34 # Estimating 34 cents per mile
# this should be replaced with an actual test
if __name__ == "__main__":
apt = Apartment("Breckenridge", [10, 12])
print(apt.relevant_attributes["monthly_cost_of_commuting"])
|
sungkim11/mhargadh
|
refs/heads/master
|
django/views/decorators/cache.py
|
229
|
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django.utils.decorators import decorator_from_middleware_with_args, available_attrs
from django.utils.cache import patch_cache_control, add_never_cache_headers
from django.middleware.cache import CacheMiddleware
def cache_page(*args, **kwargs):
"""
Decorator for views that tries getting the page from the cache and
populates the cache if the page isn't in the cache yet.
The cache is keyed by the URL and some data from the headers.
Additionally there is the key prefix that is used to distinguish different
cache areas in a multi-site setup. You could use the
sites.get_current().domain, for example, as that is unique across a Django
project.
Additionally, all headers from the response's Vary header will be taken
into account on caching -- just like the middleware does.
"""
# We need backwards compatibility with code which spells it this way:
# def my_view(): pass
# my_view = cache_page(my_view, 123)
# and this way:
# my_view = cache_page(123)(my_view)
# and this:
# my_view = cache_page(my_view, 123, key_prefix="foo")
# and this:
# my_view = cache_page(123, key_prefix="foo")(my_view)
# and possibly this way (?):
# my_view = cache_page(123, my_view)
# and also this way:
# my_view = cache_page(my_view)
# and also this way:
# my_view = cache_page()(my_view)
# We also add some asserts to give better error messages in case people are
# using other ways to call cache_page that no longer work.
cache_alias = kwargs.pop('cache', None)
key_prefix = kwargs.pop('key_prefix', None)
assert not kwargs, "The only keyword arguments are cache and key_prefix"
if len(args) > 1:
assert len(args) == 2, "cache_page accepts at most 2 arguments"
if callable(args[0]):
return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=args[1], cache_alias=cache_alias, key_prefix=key_prefix)(args[0])
elif callable(args[1]):
return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=args[0], cache_alias=cache_alias, key_prefix=key_prefix)(args[1])
else:
assert False, "cache_page must be passed a view function if called with two arguments"
elif len(args) == 1:
if callable(args[0]):
return decorator_from_middleware_with_args(CacheMiddleware)(cache_alias=cache_alias, key_prefix=key_prefix)(args[0])
else:
return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=args[0], cache_alias=cache_alias, key_prefix=key_prefix)
else:
return decorator_from_middleware_with_args(CacheMiddleware)(cache_alias=cache_alias, key_prefix=key_prefix)
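# Illustrative usage sketch (hypothetical view and timeout; assumes a project
# with a cache backend configured, not part of the original module):
#   @cache_page(60 * 15, key_prefix="site1")
#   def my_view(request):
#       ...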
def cache_control(**kwargs):
def _cache_controller(viewfunc):
def _cache_controlled(request, *args, **kw):
response = viewfunc(request, *args, **kw)
patch_cache_control(response, **kwargs)
return response
return wraps(viewfunc, assigned=available_attrs(viewfunc))(_cache_controlled)
return _cache_controller
def never_cache(view_func):
"""
Decorator that adds headers to a response so that it will
never be cached.
"""
def _wrapped_view_func(request, *args, **kwargs):
response = view_func(request, *args, **kwargs)
add_never_cache_headers(response)
return response
return wraps(view_func, assigned=available_attrs(view_func))(_wrapped_view_func)
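# Illustrative usage sketch (hypothetical views, not part of the original module):
#   @cache_control(max_age=3600, public=True)
#   def article_detail(request):
#       ...
#   @never_cache
#   def dashboard(request):
#       ...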
|
holmes/intellij-community
|
refs/heads/master
|
plugins/hg4idea/testData/bin/mercurial/dirstate.py
|
90
|
# dirstate.py - working directory tracking for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import errno
from node import nullid
from i18n import _
import scmutil, util, ignore, osutil, parsers, encoding
import os, stat, errno, gc
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = 0x7fffffff
class repocache(filecache):
"""filecache for files in .hg/"""
def join(self, obj, fname):
return obj._opener.join(fname)
class rootcache(filecache):
"""filecache for files in the repository root"""
def join(self, obj, fname):
return obj._join(fname)
class dirstate(object):
def __init__(self, opener, ui, root, validate):
'''Create a new dirstate object.
opener is an open()-like callable that can be used to open the
dirstate file; root is the root of the directory tracked by
the dirstate.
'''
self._opener = opener
self._validate = validate
self._root = root
self._rootdir = os.path.join(root, '')
self._dirty = False
self._dirtypl = False
self._lastnormaltime = 0
self._ui = ui
self._filecache = {}
@propertycache
def _map(self):
'''Return the dirstate contents as a map from filename to
(state, mode, size, time).'''
self._read()
return self._map
@propertycache
def _copymap(self):
self._read()
return self._copymap
@propertycache
def _foldmap(self):
f = {}
for name, s in self._map.iteritems():
if s[0] != 'r':
f[util.normcase(name)] = name
for name in self._dirs:
f[util.normcase(name)] = name
f['.'] = '.' # prevents useless util.fspath() invocation
return f
@repocache('branch')
def _branch(self):
try:
return self._opener.read("branch").strip() or "default"
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
return "default"
@propertycache
def _pl(self):
try:
fp = self._opener("dirstate")
st = fp.read(40)
fp.close()
l = len(st)
if l == 40:
return st[:20], st[20:40]
elif l > 0 and l < 40:
raise util.Abort(_('working directory state appears damaged!'))
except IOError, err:
if err.errno != errno.ENOENT:
raise
return [nullid, nullid]
@propertycache
def _dirs(self):
return scmutil.dirs(self._map, 'r')
def dirs(self):
return self._dirs
@rootcache('.hgignore')
def _ignore(self):
files = [self._join('.hgignore')]
for name, path in self._ui.configitems("ui"):
if name == 'ignore' or name.startswith('ignore.'):
files.append(util.expandpath(path))
return ignore.ignore(self._root, files, self._ui.warn)
@propertycache
def _slash(self):
return self._ui.configbool('ui', 'slash') and os.sep != '/'
@propertycache
def _checklink(self):
return util.checklink(self._root)
@propertycache
def _checkexec(self):
return util.checkexec(self._root)
@propertycache
def _checkcase(self):
return not util.checkcase(self._join('.hg'))
def _join(self, f):
# much faster than os.path.join()
# it's safe because f is always a relative path
return self._rootdir + f
def flagfunc(self, buildfallback):
if self._checklink and self._checkexec:
def f(x):
try:
st = os.lstat(self._join(x))
if util.statislink(st):
return 'l'
if util.statisexec(st):
return 'x'
except OSError:
pass
return ''
return f
fallback = buildfallback()
if self._checklink:
def f(x):
if os.path.islink(self._join(x)):
return 'l'
if 'x' in fallback(x):
return 'x'
return ''
return f
if self._checkexec:
def f(x):
if 'l' in fallback(x):
return 'l'
if util.isexec(self._join(x)):
return 'x'
return ''
return f
else:
return fallback
def getcwd(self):
cwd = os.getcwd()
if cwd == self._root:
return ''
# self._root ends with a path separator if self._root is '/' or 'C:\'
rootsep = self._root
if not util.endswithsep(rootsep):
rootsep += os.sep
if cwd.startswith(rootsep):
return cwd[len(rootsep):]
else:
# we're outside the repo. return an absolute path.
return cwd
def pathto(self, f, cwd=None):
if cwd is None:
cwd = self.getcwd()
path = util.pathto(self._root, cwd, f)
if self._slash:
return util.pconvert(path)
return path
def __getitem__(self, key):
'''Return the current state of key (a filename) in the dirstate.
States are:
n normal
m needs merging
r marked for removal
a marked for addition
? not tracked
'''
return self._map.get(key, ("?",))[0]
def __contains__(self, key):
return key in self._map
def __iter__(self):
for x in sorted(self._map):
yield x
def iteritems(self):
return self._map.iteritems()
def parents(self):
return [self._validate(p) for p in self._pl]
def p1(self):
return self._validate(self._pl[0])
def p2(self):
return self._validate(self._pl[1])
def branch(self):
return encoding.tolocal(self._branch)
def setparents(self, p1, p2=nullid):
"""Set dirstate parents to p1 and p2.
        When moving from two parents to one, 'm' merged entries are
        adjusted to normal and previous copy records are discarded and
returned by the call.
See localrepo.setparents()
"""
self._dirty = self._dirtypl = True
oldp2 = self._pl[1]
self._pl = p1, p2
copies = {}
if oldp2 != nullid and p2 == nullid:
# Discard 'm' markers when moving away from a merge state
for f, s in self._map.iteritems():
if s[0] == 'm':
if f in self._copymap:
copies[f] = self._copymap[f]
self.normallookup(f)
return copies
def setbranch(self, branch):
self._branch = encoding.fromlocal(branch)
f = self._opener('branch', 'w', atomictemp=True)
try:
f.write(self._branch + '\n')
f.close()
# make sure filecache has the correct stat info for _branch after
# replacing the underlying file
ce = self._filecache['_branch']
if ce:
ce.refresh()
except: # re-raises
f.discard()
raise
def _read(self):
self._map = {}
self._copymap = {}
try:
st = self._opener.read("dirstate")
except IOError, err:
if err.errno != errno.ENOENT:
raise
return
if not st:
return
# Python's garbage collector triggers a GC each time a certain number
# of container objects (the number being defined by
# gc.get_threshold()) are allocated. parse_dirstate creates a tuple
# for each file in the dirstate. The C version then immediately marks
# them as not to be tracked by the collector. However, this has no
# effect on when GCs are triggered, only on what objects the GC looks
# into. This means that O(number of files) GCs are unavoidable.
# Depending on when in the process's lifetime the dirstate is parsed,
# this can get very expensive. As a workaround, disable GC while
# parsing the dirstate.
gcenabled = gc.isenabled()
gc.disable()
try:
p = parsers.parse_dirstate(self._map, self._copymap, st)
finally:
if gcenabled:
gc.enable()
if not self._dirtypl:
self._pl = p
def invalidate(self):
for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
"_ignore"):
if a in self.__dict__:
delattr(self, a)
self._lastnormaltime = 0
self._dirty = False
def copy(self, source, dest):
"""Mark dest as a copy of source. Unmark dest if source is None."""
if source == dest:
return
self._dirty = True
if source is not None:
self._copymap[dest] = source
elif dest in self._copymap:
del self._copymap[dest]
def copied(self, file):
return self._copymap.get(file, None)
def copies(self):
return self._copymap
def _droppath(self, f):
if self[f] not in "?r" and "_dirs" in self.__dict__:
self._dirs.delpath(f)
def _addpath(self, f, state, mode, size, mtime):
oldstate = self[f]
if state == 'a' or oldstate == 'r':
scmutil.checkfilename(f)
if f in self._dirs:
raise util.Abort(_('directory %r already in dirstate') % f)
# shadows
for d in scmutil.finddirs(f):
if d in self._dirs:
break
if d in self._map and self[d] != 'r':
raise util.Abort(
_('file %r in dirstate clashes with %r') % (d, f))
if oldstate in "?r" and "_dirs" in self.__dict__:
self._dirs.addpath(f)
self._dirty = True
self._map[f] = (state, mode, size, mtime)
def normal(self, f):
'''Mark a file normal and clean.'''
s = os.lstat(self._join(f))
mtime = int(s.st_mtime)
self._addpath(f, 'n', s.st_mode,
s.st_size & _rangemask, mtime & _rangemask)
if f in self._copymap:
del self._copymap[f]
if mtime > self._lastnormaltime:
# Remember the most recent modification timeslot for status(),
# to make sure we won't miss future size-preserving file content
# modifications that happen within the same timeslot.
self._lastnormaltime = mtime
def normallookup(self, f):
'''Mark a file normal, but possibly dirty.'''
if self._pl[1] != nullid and f in self._map:
# if there is a merge going on and the file was either
# in state 'm' (-1) or coming from other parent (-2) before
# being removed, restore that state.
entry = self._map[f]
if entry[0] == 'r' and entry[2] in (-1, -2):
source = self._copymap.get(f)
if entry[2] == -1:
self.merge(f)
elif entry[2] == -2:
self.otherparent(f)
if source:
self.copy(source, f)
return
if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
return
self._addpath(f, 'n', 0, -1, -1)
if f in self._copymap:
del self._copymap[f]
def otherparent(self, f):
'''Mark as coming from the other parent, always dirty.'''
if self._pl[1] == nullid:
raise util.Abort(_("setting %r to other parent "
"only allowed in merges") % f)
self._addpath(f, 'n', 0, -2, -1)
if f in self._copymap:
del self._copymap[f]
def add(self, f):
'''Mark a file added.'''
self._addpath(f, 'a', 0, -1, -1)
if f in self._copymap:
del self._copymap[f]
def remove(self, f):
'''Mark a file removed.'''
self._dirty = True
self._droppath(f)
size = 0
if self._pl[1] != nullid and f in self._map:
# backup the previous state
entry = self._map[f]
if entry[0] == 'm': # merge
size = -1
elif entry[0] == 'n' and entry[2] == -2: # other parent
size = -2
self._map[f] = ('r', 0, size, 0)
if size == 0 and f in self._copymap:
del self._copymap[f]
def merge(self, f):
'''Mark a file merged.'''
if self._pl[1] == nullid:
return self.normallookup(f)
s = os.lstat(self._join(f))
self._addpath(f, 'm', s.st_mode,
s.st_size & _rangemask, int(s.st_mtime) & _rangemask)
if f in self._copymap:
del self._copymap[f]
def drop(self, f):
'''Drop a file from the dirstate'''
if f in self._map:
self._dirty = True
self._droppath(f)
del self._map[f]
def _normalize(self, path, isknown, ignoremissing=False, exists=None):
normed = util.normcase(path)
folded = self._foldmap.get(normed, None)
if folded is None:
if isknown:
folded = path
else:
if exists is None:
exists = os.path.lexists(os.path.join(self._root, path))
if not exists:
# Maybe a path component exists
if not ignoremissing and '/' in path:
d, f = path.rsplit('/', 1)
d = self._normalize(d, isknown, ignoremissing, None)
folded = d + "/" + f
else:
# No path components, preserve original case
folded = path
else:
# recursively normalize leading directory components
# against dirstate
if '/' in normed:
d, f = normed.rsplit('/', 1)
d = self._normalize(d, isknown, ignoremissing, True)
r = self._root + "/" + d
folded = d + "/" + util.fspath(f, r)
else:
folded = util.fspath(normed, self._root)
self._foldmap[normed] = folded
return folded
def normalize(self, path, isknown=False, ignoremissing=False):
'''
normalize the case of a pathname when on a casefolding filesystem
isknown specifies whether the filename came from walking the
disk, to avoid extra filesystem access.
If ignoremissing is True, missing path are returned
unchanged. Otherwise, we try harder to normalize possibly
existing path components.
The normalized case is determined based on the following precedence:
- version of name already stored in the dirstate
- version of name stored on disk
- version provided via command arguments
'''
if self._checkcase:
return self._normalize(path, isknown, ignoremissing)
return path
def clear(self):
self._map = {}
if "_dirs" in self.__dict__:
delattr(self, "_dirs")
self._copymap = {}
self._pl = [nullid, nullid]
self._lastnormaltime = 0
self._dirty = True
def rebuild(self, parent, allfiles, changedfiles=None):
changedfiles = changedfiles or allfiles
oldmap = self._map
self.clear()
for f in allfiles:
if f not in changedfiles:
self._map[f] = oldmap[f]
else:
if 'x' in allfiles.flags(f):
self._map[f] = ('n', 0777, -1, 0)
else:
self._map[f] = ('n', 0666, -1, 0)
self._pl = (parent, nullid)
self._dirty = True
def write(self):
if not self._dirty:
return
st = self._opener("dirstate", "w", atomictemp=True)
def finish(s):
st.write(s)
st.close()
self._lastnormaltime = 0
self._dirty = self._dirtypl = False
# use the modification time of the newly created temporary file as the
# filesystem's notion of 'now'
now = util.fstat(st).st_mtime
finish(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
def _dirignore(self, f):
if f == '.':
return False
if self._ignore(f):
return True
for p in scmutil.finddirs(f):
if self._ignore(p):
return True
return False
def walk(self, match, subrepos, unknown, ignored):
'''
Walk recursively through the directory tree, finding all files
matched by match.
Return a dict mapping filename to stat-like object (either
mercurial.osutil.stat instance or return value of os.stat()).
'''
def fwarn(f, msg):
self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
return False
def badtype(mode):
kind = _('unknown')
if stat.S_ISCHR(mode):
kind = _('character device')
elif stat.S_ISBLK(mode):
kind = _('block device')
elif stat.S_ISFIFO(mode):
kind = _('fifo')
elif stat.S_ISSOCK(mode):
kind = _('socket')
elif stat.S_ISDIR(mode):
kind = _('directory')
return _('unsupported file type (type is %s)') % kind
ignore = self._ignore
dirignore = self._dirignore
if ignored:
ignore = util.never
dirignore = util.never
elif not unknown:
# if unknown and ignored are False, skip step 2
ignore = util.always
dirignore = util.always
matchfn = match.matchfn
matchalways = match.always()
badfn = match.bad
dmap = self._map
normpath = util.normpath
listdir = osutil.listdir
lstat = os.lstat
getkind = stat.S_IFMT
dirkind = stat.S_IFDIR
regkind = stat.S_IFREG
lnkkind = stat.S_IFLNK
join = self._join
work = []
wadd = work.append
exact = skipstep3 = False
if matchfn == match.exact: # match.exact
exact = True
dirignore = util.always # skip step 2
elif match.files() and not match.anypats(): # match.match, no patterns
skipstep3 = True
if not exact and self._checkcase:
normalize = self._normalize
skipstep3 = False
else:
normalize = None
files = sorted(match.files())
subrepos.sort()
i, j = 0, 0
while i < len(files) and j < len(subrepos):
subpath = subrepos[j] + "/"
if files[i] < subpath:
i += 1
continue
while i < len(files) and files[i].startswith(subpath):
del files[i]
j += 1
if not files or '.' in files:
files = ['']
results = dict.fromkeys(subrepos)
results['.hg'] = None
# step 1: find all explicit files
for ff in files:
if normalize:
nf = normalize(normpath(ff), False, True)
else:
nf = normpath(ff)
if nf in results:
continue
try:
st = lstat(join(nf))
kind = getkind(st.st_mode)
if kind == dirkind:
skipstep3 = False
if nf in dmap:
#file deleted on disk but still in dirstate
results[nf] = None
match.dir(nf)
if not dirignore(nf):
wadd(nf)
elif kind == regkind or kind == lnkkind:
results[nf] = st
else:
badfn(ff, badtype(kind))
if nf in dmap:
results[nf] = None
except OSError, inst:
if nf in dmap: # does it exactly match a file?
results[nf] = None
else: # does it match a directory?
prefix = nf + "/"
for fn in dmap:
if fn.startswith(prefix):
match.dir(nf)
skipstep3 = False
break
else:
badfn(ff, inst.strerror)
# step 2: visit subdirectories
while work:
nd = work.pop()
skip = None
if nd == '.':
nd = ''
else:
skip = '.hg'
try:
entries = listdir(join(nd), stat=True, skip=skip)
except OSError, inst:
if inst.errno in (errno.EACCES, errno.ENOENT):
fwarn(nd, inst.strerror)
continue
raise
for f, kind, st in entries:
if normalize:
nf = normalize(nd and (nd + "/" + f) or f, True, True)
else:
nf = nd and (nd + "/" + f) or f
if nf not in results:
if kind == dirkind:
if not ignore(nf):
match.dir(nf)
wadd(nf)
if nf in dmap and (matchalways or matchfn(nf)):
results[nf] = None
elif kind == regkind or kind == lnkkind:
if nf in dmap:
if matchalways or matchfn(nf):
results[nf] = st
elif (matchalways or matchfn(nf)) and not ignore(nf):
results[nf] = st
elif nf in dmap and (matchalways or matchfn(nf)):
results[nf] = None
for s in subrepos:
del results[s]
del results['.hg']
# step 3: report unseen items in the dmap hash
if not skipstep3 and not exact:
if not results and matchalways:
visit = dmap.keys()
else:
visit = [f for f in dmap if f not in results and matchfn(f)]
visit.sort()
if unknown:
# unknown == True means we walked the full directory tree above.
            # So if a file is not seen it was either a) not matching matchfn,
            # b) ignored, c) missing, or d) under a symlink directory.
audit_path = scmutil.pathauditor(self._root)
for nf in iter(visit):
# Report ignored items in the dmap as long as they are not
# under a symlink directory.
if audit_path.check(nf):
try:
results[nf] = lstat(join(nf))
except OSError:
# file doesn't exist
results[nf] = None
else:
# It's either missing or under a symlink directory
results[nf] = None
else:
# We may not have walked the full directory tree above,
# so stat everything we missed.
nf = iter(visit).next
for st in util.statfiles([join(i) for i in visit]):
results[nf()] = st
return results
def status(self, match, subrepos, ignored, clean, unknown):
'''Determine the status of the working copy relative to the
dirstate and return a tuple of lists (unsure, modified, added,
removed, deleted, unknown, ignored, clean), where:
unsure:
files that might have been modified since the dirstate was
written, but need to be read to be sure (size is the same
but mtime differs)
modified:
files that have definitely been modified since the dirstate
was written (different size or mode)
added:
files that have been explicitly added with hg add
removed:
files that have been explicitly removed with hg remove
deleted:
files that have been deleted through other means ("missing")
unknown:
files not in the dirstate that are not ignored
ignored:
files not in the dirstate that are ignored
(by _dirignore())
clean:
files that have definitely not been modified since the
dirstate was written
'''
listignored, listclean, listunknown = ignored, clean, unknown
lookup, modified, added, unknown, ignored = [], [], [], [], []
removed, deleted, clean = [], [], []
dmap = self._map
ladd = lookup.append # aka "unsure"
madd = modified.append
aadd = added.append
uadd = unknown.append
iadd = ignored.append
radd = removed.append
dadd = deleted.append
cadd = clean.append
mexact = match.exact
dirignore = self._dirignore
checkexec = self._checkexec
checklink = self._checklink
copymap = self._copymap
lastnormaltime = self._lastnormaltime
lnkkind = stat.S_IFLNK
for fn, st in self.walk(match, subrepos, listunknown,
listignored).iteritems():
if fn not in dmap:
if (listignored or mexact(fn)) and dirignore(fn):
if listignored:
iadd(fn)
elif listunknown:
uadd(fn)
continue
state, mode, size, time = dmap[fn]
if not st and state in "nma":
dadd(fn)
elif state == 'n':
# The "mode & lnkkind != lnkkind or self._checklink"
# lines are an expansion of "islink => checklink"
# where islink means "is this a link?" and checklink
# means "can we check links?".
mtime = int(st.st_mtime)
if (size >= 0 and
((size != st.st_size and size != st.st_size & _rangemask)
or ((mode ^ st.st_mode) & 0100 and checkexec))
and (mode & lnkkind != lnkkind or checklink)
or size == -2 # other parent
or fn in copymap):
madd(fn)
elif ((time != mtime and time != mtime & _rangemask)
and (mode & lnkkind != lnkkind or checklink)):
ladd(fn)
elif mtime == lastnormaltime:
# fn may have been changed in the same timeslot without
# changing its size. This can happen if we quickly do
# multiple commits in a single transaction.
# Force lookup, so we don't miss such a racy file change.
ladd(fn)
elif listclean:
cadd(fn)
elif state == 'm':
madd(fn)
elif state == 'a':
aadd(fn)
elif state == 'r':
radd(fn)
return (lookup, modified, added, removed, deleted, unknown, ignored,
clean)
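# Illustrative sketch (not part of the original module; 'repo' and the match
# object are assumed to come from an existing Mercurial repository):
#   unsure, modified, added, removed, deleted, unknown, ignored, clean = \
#       repo.dirstate.status(match, [], False, False, True)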
|
SuperMass/distOS-lab3
|
refs/heads/master
|
src/test/p1.py
|
1
|
def change_dict(test_dict):
if '1' in test_dict:
test_dict['1'] = 5
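# Minimal illustrative check (hypothetical values): dictionaries are passed by
# reference, so the mutation made inside change_dict is visible to the caller.
if __name__ == "__main__":
    d = {'1': 0, '2': 0}
    change_dict(d)
    print(d)  # expected: {'1': 5, '2': 0}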
|
candsvincent/edgesense
|
refs/heads/master
|
python/run_script.py
|
3
|
import sys
from edgesense.build_network import main as build_network_main
from edgesense.catalyst_server import main as catalyst_server_main
from edgesense.drupal_script import main as drupal_script_main
from edgesense.parse_catalyst import main as parse_catalyst_main
from edgesense.parse_mailinglist import main as parse_mailinglist_main
from edgesense.parse_tweets import main as parse_tweets_main
functions = {
'build_network': build_network_main,
'catalyst_server': catalyst_server_main,
'drupal_script': drupal_script_main,
'parse_catalyst': parse_catalyst_main,
'parse_mailinglist': parse_mailinglist_main,
'parse_tweets': parse_tweets_main
}
if __name__ == "__main__":
what = sys.argv[1]
sys.argv.remove(what)
    if what in functions:
        functions[what]()
    else:
        print('Wrong function called, valid: ' + ', '.join(functions.keys()))
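# Illustrative invocation sketch (arguments are hypothetical): the first
# command-line argument selects the edgesense entry point and is removed from
# sys.argv before that entry point parses its own options, e.g.
#   python run_script.py parse_tweets ...
#   python run_script.py build_network ...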
|
rbuffat/pyidf
|
refs/heads/master
|
pyidf/simulation_parameters.py
|
1
|
""" Data objects in group "Simulation Parameters"
"""
from collections import OrderedDict
import logging
from pyidf.helper import DataObject
logger = logging.getLogger("pyidf")
logger.addHandler(logging.NullHandler())
class Version(DataObject):
"""Corresponds to IDD object `Version` Specifies the EnergyPlus version of
the IDF file."""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'version identifier',
{'name': u'Version Identifier',
'pyname': u'version_identifier',
'default': u'8.4',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': 'alpha'})]),
'format': u'singleline',
'group': u'Simulation Parameters',
'min-fields': 0,
'name': u'Version',
'pyname': u'Version',
'required-object': False,
'unique-object': True}
@property
def version_identifier(self):
"""field `Version Identifier`
| Default value: 8.4
Args:
value (str): value for IDD Field `Version Identifier`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `version_identifier` or None if not set
"""
return self["Version Identifier"]
@version_identifier.setter
def version_identifier(self, value="8.4"):
"""Corresponds to IDD field `Version Identifier`"""
self["Version Identifier"] = value
class SimulationControl(DataObject):
""" Corresponds to IDD object `SimulationControl`
Note that the following 3 fields are related to the Sizing:Zone, Sizing:System,
and Sizing:Plant objects. Having these fields set to Yes but no corresponding
    Sizing object will not cause the sizing to be done. However, if any of these
    fields is set to No, the corresponding Sizing object is ignored.
Note also, if you want to do system sizing, you must also do zone sizing in the same
run or an error will result.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'do zone sizing calculation',
{'name': u'Do Zone Sizing Calculation',
'pyname': u'do_zone_sizing_calculation',
'default': u'No',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Yes',
u'No'],
'autocalculatable': False,
'type': 'alpha'}),
(u'do system sizing calculation',
{'name': u'Do System Sizing Calculation',
'pyname': u'do_system_sizing_calculation',
'default': u'No',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Yes',
u'No'],
'autocalculatable': False,
'type': 'alpha'}),
(u'do plant sizing calculation',
{'name': u'Do Plant Sizing Calculation',
'pyname': u'do_plant_sizing_calculation',
'default': u'No',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Yes',
u'No'],
'autocalculatable': False,
'type': 'alpha'}),
(u'run simulation for sizing periods',
{'name': u'Run Simulation for Sizing Periods',
'pyname': u'run_simulation_for_sizing_periods',
'default': u'Yes',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Yes',
u'No'],
'autocalculatable': False,
'type': 'alpha'}),
(u'run simulation for weather file run periods',
{'name': u'Run Simulation for Weather File Run Periods',
'pyname': u'run_simulation_for_weather_file_run_periods',
'default': u'Yes',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Yes',
u'No'],
'autocalculatable': False,
'type': 'alpha'}),
(u'do hvac sizing simulation for sizing periods',
{'name': u'Do HVAC Sizing Simulation for Sizing Periods',
'pyname': u'do_hvac_sizing_simulation_for_sizing_periods',
'default': u'No',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Yes',
u'No'],
'autocalculatable': False,
'type': 'alpha'}),
(u'maximum number of hvac sizing simulation passes',
{'name': u'Maximum Number of HVAC Sizing Simulation Passes',
'pyname': u'maximum_number_of_hvac_sizing_simulation_passes',
'default': 1,
'required-field': False,
'autosizable': False,
'minimum': 1,
'autocalculatable': False,
'type': u'integer'})]),
'format': None,
'group': u'Simulation Parameters',
'min-fields': 5,
'name': u'SimulationControl',
'pyname': u'SimulationControl',
'required-object': False,
'unique-object': True}
@property
def do_zone_sizing_calculation(self):
"""field `Do Zone Sizing Calculation`
| If Yes, Zone sizing is accomplished from corresponding Sizing:Zone objects
| and autosize fields.
| Default value: No
Args:
value (str): value for IDD Field `Do Zone Sizing Calculation`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `do_zone_sizing_calculation` or None if not set
"""
return self["Do Zone Sizing Calculation"]
@do_zone_sizing_calculation.setter
def do_zone_sizing_calculation(self, value="No"):
"""Corresponds to IDD field `Do Zone Sizing Calculation`"""
self["Do Zone Sizing Calculation"] = value
@property
def do_system_sizing_calculation(self):
"""field `Do System Sizing Calculation`
| If Yes, System sizing is accomplished from corresponding Sizing:System objects
| and autosize fields.
| If Yes, Zone sizing (previous field) must also be Yes.
| Default value: No
Args:
value (str): value for IDD Field `Do System Sizing Calculation`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `do_system_sizing_calculation` or None if not set
"""
return self["Do System Sizing Calculation"]
@do_system_sizing_calculation.setter
def do_system_sizing_calculation(self, value="No"):
"""Corresponds to IDD field `Do System Sizing Calculation`"""
self["Do System Sizing Calculation"] = value
@property
def do_plant_sizing_calculation(self):
"""field `Do Plant Sizing Calculation`
| If Yes, Plant sizing is accomplished from corresponding Sizing:Plant objects
| and autosize fields.
| Default value: No
Args:
value (str): value for IDD Field `Do Plant Sizing Calculation`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `do_plant_sizing_calculation` or None if not set
"""
return self["Do Plant Sizing Calculation"]
@do_plant_sizing_calculation.setter
def do_plant_sizing_calculation(self, value="No"):
"""Corresponds to IDD field `Do Plant Sizing Calculation`"""
self["Do Plant Sizing Calculation"] = value
@property
def run_simulation_for_sizing_periods(self):
"""field `Run Simulation for Sizing Periods`
        | If Yes, SizingPeriod:* objects are executed and results from those may be displayed.
| Default value: Yes
Args:
value (str): value for IDD Field `Run Simulation for Sizing Periods`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `run_simulation_for_sizing_periods` or None if not set
"""
return self["Run Simulation for Sizing Periods"]
@run_simulation_for_sizing_periods.setter
def run_simulation_for_sizing_periods(self, value="Yes"):
"""Corresponds to IDD field `Run Simulation for Sizing Periods`"""
self["Run Simulation for Sizing Periods"] = value
@property
def run_simulation_for_weather_file_run_periods(self):
"""field `Run Simulation for Weather File Run Periods`
        | If Yes, RunPeriod:* objects are executed and results from those may be displayed.
| Default value: Yes
Args:
value (str): value for IDD Field `Run Simulation for Weather File Run Periods`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `run_simulation_for_weather_file_run_periods` or None if not set
"""
return self["Run Simulation for Weather File Run Periods"]
@run_simulation_for_weather_file_run_periods.setter
def run_simulation_for_weather_file_run_periods(self, value="Yes"):
"""Corresponds to IDD field `Run Simulation for Weather File Run
Periods`"""
self["Run Simulation for Weather File Run Periods"] = value
@property
def do_hvac_sizing_simulation_for_sizing_periods(self):
"""field `Do HVAC Sizing Simulation for Sizing Periods`
| If Yes, SizingPeriod:* objects are executed additional times for advanced sizing.
| Currently limited to use with coincident plant sizing, see Sizing:Plant object
| Default value: No
Args:
value (str): value for IDD Field `Do HVAC Sizing Simulation for Sizing Periods`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `do_hvac_sizing_simulation_for_sizing_periods` or None if not set
"""
return self["Do HVAC Sizing Simulation for Sizing Periods"]
@do_hvac_sizing_simulation_for_sizing_periods.setter
def do_hvac_sizing_simulation_for_sizing_periods(self, value="No"):
"""Corresponds to IDD field `Do HVAC Sizing Simulation for Sizing
Periods`"""
self["Do HVAC Sizing Simulation for Sizing Periods"] = value
@property
def maximum_number_of_hvac_sizing_simulation_passes(self):
"""field `Maximum Number of HVAC Sizing Simulation Passes`
| the entire set of SizingPeriod:* objects may be repeated to fine-tune sizing results
| this input sets a limit on the number of passes that the sizing algorithms can repeat the set
| Default value: 1
| value >= 1
Args:
value (int): value for IDD Field `Maximum Number of HVAC Sizing Simulation Passes`
Raises:
ValueError: if `value` is not a valid value
Returns:
int: the value of `maximum_number_of_hvac_sizing_simulation_passes` or None if not set
"""
return self["Maximum Number of HVAC Sizing Simulation Passes"]
@maximum_number_of_hvac_sizing_simulation_passes.setter
def maximum_number_of_hvac_sizing_simulation_passes(self, value=1):
"""Corresponds to IDD field `Maximum Number of HVAC Sizing Simulation
Passes`"""
self["Maximum Number of HVAC Sizing Simulation Passes"] = value
class Building(DataObject):
"""Corresponds to IDD object `Building` Describes parameters that are used
during the simulation of the building.
There are necessary correlations between the entries for
this object and some entries in the Site:WeatherStation and
Site:HeightVariation objects, specifically the Terrain field.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'default': u'NONE',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': 'alpha'}),
(u'north axis',
{'name': u'North Axis',
'pyname': u'north_axis',
'default': 0.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'deg'}),
(u'terrain',
{'name': u'Terrain',
'pyname': u'terrain',
'default': u'Suburbs',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Country',
u'Suburbs',
u'City',
u'Ocean',
u'Urban'],
'autocalculatable': False,
'type': 'alpha'}),
(u'loads convergence tolerance value',
{'name': u'Loads Convergence Tolerance Value',
'pyname': u'loads_convergence_tolerance_value',
'default': 0.04,
'minimum>': 0.0,
'maximum': 0.5,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'temperature convergence tolerance value',
{'name': u'Temperature Convergence Tolerance Value',
'pyname': u'temperature_convergence_tolerance_value',
'default': 0.4,
'minimum>': 0.0,
'maximum': 0.5,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'deltaC'}),
(u'solar distribution',
{'name': u'Solar Distribution',
'pyname': u'solar_distribution',
'default': u'FullExterior',
'required-field': False,
'autosizable': False,
'accepted-values': [u'MinimalShadowing',
u'FullExterior',
u'FullInteriorAndExterior',
u'FullExteriorWithReflections',
u'FullInteriorAndExteriorWithReflections'],
'autocalculatable': False,
'type': 'alpha'}),
(u'maximum number of warmup days',
{'name': u'Maximum Number of Warmup Days',
'pyname': u'maximum_number_of_warmup_days',
'default': 25,
'minimum>': 0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'integer'}),
(u'minimum number of warmup days',
{'name': u'Minimum Number of Warmup Days',
'pyname': u'minimum_number_of_warmup_days',
'default': 6,
'minimum>': 0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'integer'})]),
'format': None,
'group': u'Simulation Parameters',
'min-fields': 8,
'name': u'Building',
'pyname': u'Building',
'required-object': True,
'unique-object': True}
@property
def name(self):
"""field `Name`
| Default value: NONE
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value="NONE"):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def north_axis(self):
"""field `North Axis`
| degrees from true North
| Units: deg
Args:
value (float): value for IDD Field `North Axis`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `north_axis` or None if not set
"""
return self["North Axis"]
@north_axis.setter
def north_axis(self, value=None):
"""Corresponds to IDD field `North Axis`"""
self["North Axis"] = value
@property
def terrain(self):
"""field `Terrain`
| Country=FlatOpenCountry | Suburbs=CountryTownsSuburbs | City=CityCenter | Ocean=body of water (5km) | Urban=Urban-Industrial-Forest
| Default value: Suburbs
Args:
value (str): value for IDD Field `Terrain`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `terrain` or None if not set
"""
return self["Terrain"]
@terrain.setter
def terrain(self, value="Suburbs"):
"""Corresponds to IDD field `Terrain`"""
self["Terrain"] = value
@property
def loads_convergence_tolerance_value(self):
"""field `Loads Convergence Tolerance Value`
| Loads Convergence Tolerance Value is a fraction of load
| Default value: 0.04
| value <= 0.5
Args:
value (float): value for IDD Field `Loads Convergence Tolerance Value`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `loads_convergence_tolerance_value` or None if not set
"""
return self["Loads Convergence Tolerance Value"]
@loads_convergence_tolerance_value.setter
def loads_convergence_tolerance_value(self, value=0.04):
"""Corresponds to IDD field `Loads Convergence Tolerance Value`"""
self["Loads Convergence Tolerance Value"] = value
@property
def temperature_convergence_tolerance_value(self):
"""field `Temperature Convergence Tolerance Value`
| Units: deltaC
| Default value: 0.4
| value <= 0.5
Args:
value (float): value for IDD Field `Temperature Convergence Tolerance Value`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `temperature_convergence_tolerance_value` or None if not set
"""
return self["Temperature Convergence Tolerance Value"]
@temperature_convergence_tolerance_value.setter
def temperature_convergence_tolerance_value(self, value=0.4):
"""Corresponds to IDD field `Temperature Convergence Tolerance
Value`"""
self["Temperature Convergence Tolerance Value"] = value
@property
def solar_distribution(self):
"""field `Solar Distribution`
| MinimalShadowing | FullExterior | FullInteriorAndExterior | FullExteriorWithReflections | FullInteriorAndExteriorWithReflections
| Default value: FullExterior
Args:
value (str): value for IDD Field `Solar Distribution`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `solar_distribution` or None if not set
"""
return self["Solar Distribution"]
@solar_distribution.setter
def solar_distribution(self, value="FullExterior"):
"""Corresponds to IDD field `Solar Distribution`"""
self["Solar Distribution"] = value
@property
def maximum_number_of_warmup_days(self):
"""field `Maximum Number of Warmup Days`
| EnergyPlus will only use as many warmup days as needed to reach convergence tolerance.
| This field's value should NOT be set less than 25.
| Default value: 25
Args:
value (int): value for IDD Field `Maximum Number of Warmup Days`
Raises:
ValueError: if `value` is not a valid value
Returns:
int: the value of `maximum_number_of_warmup_days` or None if not set
"""
return self["Maximum Number of Warmup Days"]
@maximum_number_of_warmup_days.setter
def maximum_number_of_warmup_days(self, value=25):
"""Corresponds to IDD field `Maximum Number of Warmup Days`"""
self["Maximum Number of Warmup Days"] = value
@property
def minimum_number_of_warmup_days(self):
"""field `Minimum Number of Warmup Days`
| The minimum number of warmup days that produce enough temperature and flux history
| to start EnergyPlus simulation for all reference buildings was suggested to be 6.
| When this field is greater than the maximum warmup days defined in the previous field,
| the maximum number of warmup days will be reset to the minimum value entered here.
| Warmup days will be set to be the value you entered when it is less than the default 6.
| Default value: 6
Args:
value (int): value for IDD Field `Minimum Number of Warmup Days`
Raises:
ValueError: if `value` is not a valid value
Returns:
int: the value of `minimum_number_of_warmup_days` or None if not set
"""
return self["Minimum Number of Warmup Days"]
@minimum_number_of_warmup_days.setter
def minimum_number_of_warmup_days(self, value=6):
"""Corresponds to IDD field `Minimum Number of Warmup Days`"""
self["Minimum Number of Warmup Days"] = value
class ShadowCalculation(DataObject):
"""Corresponds to IDD object `ShadowCalculation` This object is used to
control details of the solar, shading, and daylighting models."""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'calculation method',
{'name': u'Calculation Method',
'pyname': u'calculation_method',
'default': u'AverageOverDaysInFrequency',
'required-field': False,
'autosizable': False,
'accepted-values': [u'AverageOverDaysInFrequency',
u'TimestepFrequency'],
'autocalculatable': False,
'type': 'alpha'}),
(u'calculation frequency',
{'name': u'Calculation Frequency',
'pyname': u'calculation_frequency',
'default': 20,
'required-field': True,
'autosizable': False,
'minimum': 1,
'autocalculatable': False,
'type': u'integer'}),
(u'maximum figures in shadow overlap calculations',
{'name': u'Maximum Figures in Shadow Overlap Calculations',
'pyname': u'maximum_figures_in_shadow_overlap_calculations',
'default': 15000,
'required-field': False,
'autosizable': False,
'minimum': 200,
'autocalculatable': False,
'type': u'integer'}),
(u'polygon clipping algorithm',
{'name': u'Polygon Clipping Algorithm',
'pyname': u'polygon_clipping_algorithm',
'required-field': False,
'autosizable': False,
'accepted-values': [u'ConvexWeilerAtherton',
u'SutherlandHodgman'],
'autocalculatable': False,
'type': 'alpha'}),
(u'sky diffuse modeling algorithm',
{'name': u'Sky Diffuse Modeling Algorithm',
'pyname': u'sky_diffuse_modeling_algorithm',
'required-field': False,
'autosizable': False,
'accepted-values': [u'SimpleSkyDiffuseModeling',
u'DetailedSkyDiffuseModeling'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Simulation Parameters',
'min-fields': 0,
'name': u'ShadowCalculation',
'pyname': u'ShadowCalculation',
'required-object': False,
'unique-object': True}
@property
def calculation_method(self):
"""field `Calculation Method`
| choose calculation method. note that TimestepFrequency is only needed for certain cases
| and can increase execution time significantly.
| Default value: AverageOverDaysInFrequency
Args:
value (str): value for IDD Field `Calculation Method`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `calculation_method` or None if not set
"""
return self["Calculation Method"]
@calculation_method.setter
def calculation_method(self, value="AverageOverDaysInFrequency"):
"""Corresponds to IDD field `Calculation Method`"""
self["Calculation Method"] = value
@property
def calculation_frequency(self):
"""field `Calculation Frequency`
| enter number of days
| this field is only used if the previous field is set to AverageOverDaysInFrequency
| 0=Use Default Periodic Calculation|<else> calculate every <value> day
| only really applicable to RunPeriods
| warning issued if >31
| Default value: 20
| value >= 1
Args:
value (int): value for IDD Field `Calculation Frequency`
Raises:
ValueError: if `value` is not a valid value
Returns:
int: the value of `calculation_frequency` or None if not set
"""
return self["Calculation Frequency"]
@calculation_frequency.setter
def calculation_frequency(self, value=20):
"""Corresponds to IDD field `Calculation Frequency`"""
self["Calculation Frequency"] = value
@property
def maximum_figures_in_shadow_overlap_calculations(self):
"""field `Maximum Figures in Shadow Overlap Calculations`
| Number of allowable figures in shadow overlap calculations
| Default value: 15000
| value >= 200
Args:
value (int): value for IDD Field `Maximum Figures in Shadow Overlap Calculations`
Raises:
ValueError: if `value` is not a valid value
Returns:
int: the value of `maximum_figures_in_shadow_overlap_calculations` or None if not set
"""
return self["Maximum Figures in Shadow Overlap Calculations"]
@maximum_figures_in_shadow_overlap_calculations.setter
def maximum_figures_in_shadow_overlap_calculations(self, value=15000):
"""Corresponds to IDD field `Maximum Figures in Shadow Overlap
Calculations`"""
self["Maximum Figures in Shadow Overlap Calculations"] = value
@property
def polygon_clipping_algorithm(self):
"""field `Polygon Clipping Algorithm`
| Advanced Feature. Internal default is SutherlandHodgman
| Refer to InputOutput Reference and Engineering Reference for more information
Args:
value (str): value for IDD Field `Polygon Clipping Algorithm`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `polygon_clipping_algorithm` or None if not set
"""
return self["Polygon Clipping Algorithm"]
@polygon_clipping_algorithm.setter
def polygon_clipping_algorithm(self, value=None):
"""Corresponds to IDD field `Polygon Clipping Algorithm`"""
self["Polygon Clipping Algorithm"] = value
@property
def sky_diffuse_modeling_algorithm(self):
"""field `Sky Diffuse Modeling Algorithm`
| Advanced Feature. Internal default is SimpleSkyDiffuseModeling
| If you have shading elements that change transmittance over the
| year, you may wish to choose the detailed method.
| Refer to InputOutput Reference and Engineering Reference for more information
Args:
value (str): value for IDD Field `Sky Diffuse Modeling Algorithm`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `sky_diffuse_modeling_algorithm` or None if not set
"""
return self["Sky Diffuse Modeling Algorithm"]
@sky_diffuse_modeling_algorithm.setter
def sky_diffuse_modeling_algorithm(self, value=None):
"""Corresponds to IDD field `Sky Diffuse Modeling Algorithm`"""
self["Sky Diffuse Modeling Algorithm"] = value
class SurfaceConvectionAlgorithmInside(DataObject):
""" Corresponds to IDD object `SurfaceConvectionAlgorithm:Inside`
Default indoor surface heat transfer convection algorithm to be used for all zones
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'algorithm',
{'name': u'Algorithm',
'pyname': u'algorithm',
'default': u'TARP',
'required-field': True,
'autosizable': False,
'accepted-values': [u'Simple',
u'TARP',
u'CeilingDiffuser',
u'AdaptiveConvectionAlgorithm'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': u'singleline',
'group': u'Simulation Parameters',
'min-fields': 0,
'name': u'SurfaceConvectionAlgorithm:Inside',
'pyname': u'SurfaceConvectionAlgorithmInside',
'required-object': False,
'unique-object': True}
@property
def algorithm(self):
"""field `Algorithm`
| Simple = constant value natural convection (ASHRAE)
| TARP = variable natural convection based on temperature difference (ASHRAE, Walton)
| CeilingDiffuser = ACH-based forced and mixed convection correlations
| for ceiling diffuser configuration with simple natural convection limit
| AdaptiveConvectionAlgorithm = dynamic selection of convection models based on conditions
| Default value: TARP
Args:
value (str): value for IDD Field `Algorithm`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `algorithm` or None if not set
"""
return self["Algorithm"]
@algorithm.setter
def algorithm(self, value="TARP"):
"""Corresponds to IDD field `Algorithm`"""
self["Algorithm"] = value
class SurfaceConvectionAlgorithmOutside(DataObject):
""" Corresponds to IDD object `SurfaceConvectionAlgorithm:Outside`
Default outside surface heat transfer convection algorithm to be used for all zones
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'algorithm',
{'name': u'Algorithm',
'pyname': u'algorithm',
'default': u'DOE-2',
'required-field': True,
'autosizable': False,
'accepted-values': [u'SimpleCombined',
u'TARP',
u'MoWiTT',
u'DOE-2',
u'AdaptiveConvectionAlgorithm'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': u'singleline',
'group': u'Simulation Parameters',
'min-fields': 0,
'name': u'SurfaceConvectionAlgorithm:Outside',
'pyname': u'SurfaceConvectionAlgorithmOutside',
'required-object': False,
'unique-object': True}
@property
def algorithm(self):
"""field `Algorithm`
| SimpleCombined = Combined radiation and convection coefficient using simple ASHRAE model
| TARP = correlation from models developed by ASHRAE, Walton, and Sparrow et al.
| MoWiTT = correlation from measurements by Klems and Yazdanian for smooth surfaces
| DOE-2 = correlation from measurements by Klems and Yazdanian for rough surfaces
| AdaptiveConvectionAlgorithm = dynamic selection of correlations based on conditions
| Default value: DOE-2
Args:
value (str): value for IDD Field `Algorithm`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `algorithm` or None if not set
"""
return self["Algorithm"]
@algorithm.setter
def algorithm(self, value="DOE-2"):
"""Corresponds to IDD field `Algorithm`"""
self["Algorithm"] = value
class HeatBalanceAlgorithm(DataObject):
"""Corresponds to IDD object `HeatBalanceAlgorithm` Determines which Heat
Balance Algorithm will be used, i.e.
CTF (Conduction Transfer Functions),
EMPD (Effective Moisture Penetration Depth with Conduction Transfer Functions).
Advanced/Research Usage: CondFD (Conduction Finite Difference)
Advanced/Research Usage: ConductionFiniteDifferenceSimplified
Advanced/Research Usage: HAMT (Combined Heat And Moisture Finite Element)
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'algorithm',
{'name': u'Algorithm',
'pyname': u'algorithm',
'default': u'ConductionTransferFunction',
'required-field': True,
'autosizable': False,
'accepted-values': [u'ConductionTransferFunction',
u'MoisturePenetrationDepthConductionTransferFunction',
u'ConductionFiniteDifference',
u'CombinedHeatAndMoistureFiniteElement'],
'autocalculatable': False,
'type': 'alpha'}),
(u'surface temperature upper limit',
{'name': u'Surface Temperature Upper Limit',
'pyname': u'surface_temperature_upper_limit',
'default': 200.0,
'required-field': False,
'autosizable': False,
'minimum': 200.0,
'autocalculatable': False,
'type': u'real',
'unit': u'C'}),
(u'minimum surface convection heat transfer coefficient value',
{'name': u'Minimum Surface Convection Heat Transfer Coefficient Value',
'pyname': u'minimum_surface_convection_heat_transfer_coefficient_value',
'default': 0.1,
'minimum>': 0.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': 'real',
'unit': u'W/m2-K'}),
(u'maximum surface convection heat transfer coefficient value',
{'name': u'Maximum Surface Convection Heat Transfer Coefficient Value',
'pyname': u'maximum_surface_convection_heat_transfer_coefficient_value',
'default': 1000.0,
'required-field': False,
'autosizable': False,
'minimum': 1.0,
'autocalculatable': False,
'type': 'real',
'unit': u'W/m2-K'})]),
'format': u'singleline',
'group': u'Simulation Parameters',
'min-fields': 0,
'name': u'HeatBalanceAlgorithm',
'pyname': u'HeatBalanceAlgorithm',
'required-object': False,
'unique-object': True}
@property
def algorithm(self):
"""field `Algorithm`
| Default value: ConductionTransferFunction
Args:
value (str): value for IDD Field `Algorithm`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `algorithm` or None if not set
"""
return self["Algorithm"]
@algorithm.setter
def algorithm(self, value="ConductionTransferFunction"):
"""Corresponds to IDD field `Algorithm`"""
self["Algorithm"] = value
@property
def surface_temperature_upper_limit(self):
"""field `Surface Temperature Upper Limit`
| Units: C
| Default value: 200.0
| value >= 200.0
Args:
value (float): value for IDD Field `Surface Temperature Upper Limit`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `surface_temperature_upper_limit` or None if not set
"""
return self["Surface Temperature Upper Limit"]
@surface_temperature_upper_limit.setter
def surface_temperature_upper_limit(self, value=200.0):
"""Corresponds to IDD field `Surface Temperature Upper Limit`"""
self["Surface Temperature Upper Limit"] = value
@property
def minimum_surface_convection_heat_transfer_coefficient_value(self):
"""field `Minimum Surface Convection Heat Transfer Coefficient Value`
| Units: W/m2-K
| Default value: 0.1
Args:
value (float): value for IDD Field `Minimum Surface Convection Heat Transfer Coefficient Value`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_surface_convection_heat_transfer_coefficient_value` or None if not set
"""
return self[
"Minimum Surface Convection Heat Transfer Coefficient Value"]
@minimum_surface_convection_heat_transfer_coefficient_value.setter
def minimum_surface_convection_heat_transfer_coefficient_value(
self,
value=0.1):
"""Corresponds to IDD field `Minimum Surface Convection Heat Transfer
Coefficient Value`"""
self[
"Minimum Surface Convection Heat Transfer Coefficient Value"] = value
@property
def maximum_surface_convection_heat_transfer_coefficient_value(self):
"""field `Maximum Surface Convection Heat Transfer Coefficient Value`
| Units: W/m2-K
| Default value: 1000.0
| value >= 1.0
Args:
value (float): value for IDD Field `Maximum Surface Convection Heat Transfer Coefficient Value`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_surface_convection_heat_transfer_coefficient_value` or None if not set
"""
return self[
"Maximum Surface Convection Heat Transfer Coefficient Value"]
@maximum_surface_convection_heat_transfer_coefficient_value.setter
def maximum_surface_convection_heat_transfer_coefficient_value(
self,
value=1000.0):
"""Corresponds to IDD field `Maximum Surface Convection Heat Transfer
Coefficient Value`"""
self[
"Maximum Surface Convection Heat Transfer Coefficient Value"] = value
class HeatBalanceSettingsConductionFiniteDifference(DataObject):
""" Corresponds to IDD object `HeatBalanceSettings:ConductionFiniteDifference`
Determines settings for the Conduction Finite Difference
algorithm for surface heat transfer modeling.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'difference scheme',
{'name': u'Difference Scheme',
'pyname': u'difference_scheme',
'default': u'FullyImplicitFirstOrder',
'required-field': False,
'autosizable': False,
'accepted-values': [u'CrankNicholsonSecondOrder',
u'FullyImplicitFirstOrder'],
'autocalculatable': False,
'type': 'alpha'}),
(u'space discretization constant',
{'name': u'Space Discretization Constant',
'pyname': u'space_discretization_constant',
'default': 3.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'relaxation factor',
{'name': u'Relaxation Factor',
'pyname': u'relaxation_factor',
'default': 1.0,
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.01,
'autocalculatable': False,
'type': u'real'}),
(u'inside face surface temperature convergence criteria',
{'name': u'Inside Face Surface Temperature Convergence Criteria',
'pyname': u'inside_face_surface_temperature_convergence_criteria',
'default': 0.002,
'maximum': 0.01,
'required-field': False,
'autosizable': False,
'minimum': 1e-07,
'autocalculatable': False,
'type': u'real'})]),
'format': None,
'group': u'Simulation Parameters',
'min-fields': 0,
'name': u'HeatBalanceSettings:ConductionFiniteDifference',
'pyname': u'HeatBalanceSettingsConductionFiniteDifference',
'required-object': False,
'unique-object': True}
@property
def difference_scheme(self):
"""field `Difference Scheme`
| Default value: FullyImplicitFirstOrder
Args:
value (str): value for IDD Field `Difference Scheme`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `difference_scheme` or None if not set
"""
return self["Difference Scheme"]
@difference_scheme.setter
def difference_scheme(self, value="FullyImplicitFirstOrder"):
"""Corresponds to IDD field `Difference Scheme`"""
self["Difference Scheme"] = value
@property
def space_discretization_constant(self):
"""field `Space Discretization Constant`
| increase or decrease number of nodes
| Default value: 3.0
Args:
value (float): value for IDD Field `Space Discretization Constant`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `space_discretization_constant` or None if not set
"""
return self["Space Discretization Constant"]
@space_discretization_constant.setter
def space_discretization_constant(self, value=3.0):
"""Corresponds to IDD field `Space Discretization Constant`"""
self["Space Discretization Constant"] = value
@property
def relaxation_factor(self):
"""field `Relaxation Factor`
| Default value: 1.0
| value >= 0.01
| value <= 1.0
Args:
value (float): value for IDD Field `Relaxation Factor`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relaxation_factor` or None if not set
"""
return self["Relaxation Factor"]
@relaxation_factor.setter
def relaxation_factor(self, value=1.0):
"""Corresponds to IDD field `Relaxation Factor`"""
self["Relaxation Factor"] = value
@property
def inside_face_surface_temperature_convergence_criteria(self):
"""field `Inside Face Surface Temperature Convergence Criteria`
| Default value: 0.002
| value >= 1e-07
| value <= 0.01
Args:
value (float): value for IDD Field `Inside Face Surface Temperature Convergence Criteria`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `inside_face_surface_temperature_convergence_criteria` or None if not set
"""
return self["Inside Face Surface Temperature Convergence Criteria"]
@inside_face_surface_temperature_convergence_criteria.setter
def inside_face_surface_temperature_convergence_criteria(
self,
value=0.002):
"""Corresponds to IDD field `Inside Face Surface Temperature
Convergence Criteria`"""
self["Inside Face Surface Temperature Convergence Criteria"] = value
class ZoneAirHeatBalanceAlgorithm(DataObject):
"""Corresponds to IDD object `ZoneAirHeatBalanceAlgorithm` Determines which
algorithm will be used to solve the zone air heat balance."""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'algorithm',
{'name': u'Algorithm',
'pyname': u'algorithm',
'default': u'ThirdOrderBackwardDifference',
'required-field': False,
'autosizable': False,
'accepted-values': [u'ThirdOrderBackwardDifference',
u'AnalyticalSolution',
u'EulerMethod'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': u'singleline',
'group': u'Simulation Parameters',
'min-fields': 0,
'name': u'ZoneAirHeatBalanceAlgorithm',
'pyname': u'ZoneAirHeatBalanceAlgorithm',
'required-object': False,
'unique-object': True}
@property
def algorithm(self):
"""field `Algorithm`
| Default value: ThirdOrderBackwardDifference
Args:
value (str): value for IDD Field `Algorithm`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `algorithm` or None if not set
"""
return self["Algorithm"]
@algorithm.setter
def algorithm(self, value="ThirdOrderBackwardDifference"):
"""Corresponds to IDD field `Algorithm`"""
self["Algorithm"] = value
class ZoneAirContaminantBalance(DataObject):
"""Corresponds to IDD object `ZoneAirContaminantBalance` Determines which
contaminant concentration will be simulated."""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'carbon dioxide concentration',
{'name': u'Carbon Dioxide Concentration',
'pyname': u'carbon_dioxide_concentration',
'default': u'No',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Yes',
u'No'],
'autocalculatable': False,
'type': 'alpha'}),
(u'outdoor carbon dioxide schedule name',
{'name': u'Outdoor Carbon Dioxide Schedule Name',
'pyname': u'outdoor_carbon_dioxide_schedule_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'generic contaminant concentration',
{'name': u'Generic Contaminant Concentration',
'pyname': u'generic_contaminant_concentration',
'default': u'No',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Yes',
u'No'],
'autocalculatable': False,
'type': 'alpha'}),
(u'outdoor generic contaminant schedule name',
{'name': u'Outdoor Generic Contaminant Schedule Name',
'pyname': u'outdoor_generic_contaminant_schedule_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'})]),
'format': u'singleline',
'group': u'Simulation Parameters',
'min-fields': 0,
'name': u'ZoneAirContaminantBalance',
'pyname': u'ZoneAirContaminantBalance',
'required-object': False,
'unique-object': True}
@property
def carbon_dioxide_concentration(self):
"""field `Carbon Dioxide Concentration`
| If Yes, CO2 simulation will be performed.
| Default value: No
Args:
value (str): value for IDD Field `Carbon Dioxide Concentration`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `carbon_dioxide_concentration` or None if not set
"""
return self["Carbon Dioxide Concentration"]
@carbon_dioxide_concentration.setter
def carbon_dioxide_concentration(self, value="No"):
"""Corresponds to IDD field `Carbon Dioxide Concentration`"""
self["Carbon Dioxide Concentration"] = value
@property
def outdoor_carbon_dioxide_schedule_name(self):
"""field `Outdoor Carbon Dioxide Schedule Name`
| Schedule values should be in parts per million (ppm)
Args:
value (str): value for IDD Field `Outdoor Carbon Dioxide Schedule Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `outdoor_carbon_dioxide_schedule_name` or None if not set
"""
return self["Outdoor Carbon Dioxide Schedule Name"]
@outdoor_carbon_dioxide_schedule_name.setter
def outdoor_carbon_dioxide_schedule_name(self, value=None):
"""Corresponds to IDD field `Outdoor Carbon Dioxide Schedule Name`"""
self["Outdoor Carbon Dioxide Schedule Name"] = value
@property
def generic_contaminant_concentration(self):
"""field `Generic Contaminant Concentration`
| If Yes, generic contaminant simulation will be performed.
| Default value: No
Args:
value (str): value for IDD Field `Generic Contaminant Concentration`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `generic_contaminant_concentration` or None if not set
"""
return self["Generic Contaminant Concentration"]
@generic_contaminant_concentration.setter
def generic_contaminant_concentration(self, value="No"):
"""Corresponds to IDD field `Generic Contaminant Concentration`"""
self["Generic Contaminant Concentration"] = value
@property
def outdoor_generic_contaminant_schedule_name(self):
"""field `Outdoor Generic Contaminant Schedule Name`
| Schedule values should be generic contaminant concentration in parts per
| million (ppm)
Args:
value (str): value for IDD Field `Outdoor Generic Contaminant Schedule Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `outdoor_generic_contaminant_schedule_name` or None if not set
"""
return self["Outdoor Generic Contaminant Schedule Name"]
@outdoor_generic_contaminant_schedule_name.setter
def outdoor_generic_contaminant_schedule_name(self, value=None):
"""Corresponds to IDD field `Outdoor Generic Contaminant Schedule
Name`"""
self["Outdoor Generic Contaminant Schedule Name"] = value
class ZoneAirMassFlowConservation(DataObject):
"""Corresponds to IDD object `ZoneAirMassFlowConservation` Enforces the
zone air mass flow balance by adjusting zone mixing object and/or
infiltration object mass flow rates.
If either mixing or infiltration is active, then the zone air mass
flow balance calculation will attempt to enforce conservation of
mass for each zone.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'adjust zone mixing for zone air mass flow balance',
{'name': u'Adjust Zone Mixing For Zone Air Mass Flow Balance',
'pyname': u'adjust_zone_mixing_for_zone_air_mass_flow_balance',
'default': u'No',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Yes',
u'No'],
'autocalculatable': False,
'type': 'alpha'}),
(u'infiltration balancing method',
{'name': u'Infiltration Balancing Method',
'pyname': u'infiltration_balancing_method',
'default': u'AddInfiltrationFlow',
'required-field': False,
'autosizable': False,
'accepted-values': [u'AddInfiltrationFlow',
u'AdjustInfiltrationFlow',
u'None'],
'autocalculatable': False,
'type': 'alpha'}),
(u'infiltration balancing zones',
{'name': u'Infiltration Balancing Zones',
'pyname': u'infiltration_balancing_zones',
'default': u'MixingSourceZonesOnly',
'required-field': False,
'autosizable': False,
'accepted-values': [u'MixingSourceZonesOnly',
u'AllZones'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Simulation Parameters',
'min-fields': 3,
'name': u'ZoneAirMassFlowConservation',
'pyname': u'ZoneAirMassFlowConservation',
'required-object': False,
'unique-object': True}
@property
def adjust_zone_mixing_for_zone_air_mass_flow_balance(self):
"""field `Adjust Zone Mixing For Zone Air Mass Flow Balance`
| If Yes, Zone mixing object flow rates are adjusted to balance the zone air mass flow
| and additional infiltration air flow may be added if required in order to balance the
| zone air mass flow.
| Default value: No
Args:
value (str): value for IDD Field `Adjust Zone Mixing For Zone Air Mass Flow Balance`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `adjust_zone_mixing_for_zone_air_mass_flow_balance` or None if not set
"""
return self["Adjust Zone Mixing For Zone Air Mass Flow Balance"]
@adjust_zone_mixing_for_zone_air_mass_flow_balance.setter
def adjust_zone_mixing_for_zone_air_mass_flow_balance(self, value="No"):
"""Corresponds to IDD field `Adjust Zone Mixing For Zone Air Mass Flow
Balance`"""
self["Adjust Zone Mixing For Zone Air Mass Flow Balance"] = value
@property
def infiltration_balancing_method(self):
"""field `Infiltration Balancing Method`
| This input field allows user to choose how zone infiltration flow is treated during
| the zone air mass flow balance calculation.
| AddInfiltrationFlow may add infiltration to the base flow specified in the
| infiltration object to balance the zone air mass flow. The additional infiltration
| air mass flow is not self-balanced. The base flow is assumed to be self-balanced.
| AdjustInfiltrationFlow may adjust the infiltration flow calculated from the base flow
| specified in the infiltration object to balance the zone air mass flow.
| If no adjustment is required, then the base infiltration is assumed to be self-balanced.
| None will make no changes to the base infiltration flow.
| Default value: AddInfiltrationFlow
Args:
value (str): value for IDD Field `Infiltration Balancing Method`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `infiltration_balancing_method` or None if not set
"""
return self["Infiltration Balancing Method"]
@infiltration_balancing_method.setter
def infiltration_balancing_method(self, value="AddInfiltrationFlow"):
"""Corresponds to IDD field `Infiltration Balancing Method`"""
self["Infiltration Balancing Method"] = value
@property
def infiltration_balancing_zones(self):
"""field `Infiltration Balancing Zones`
| This input field allows user to choose which zones are included in infiltration balancing.
| MixingSourceZonesOnly allows infiltration balancing only in zones which are source zones for mixing
| which also have an infiltration object defined.
| AllZones allows infiltration balancing in any zone which has an infiltration object defined.
| Default value: MixingSourceZonesOnly
Args:
value (str): value for IDD Field `Infiltration Balancing Zones`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `infiltration_balancing_zones` or None if not set
"""
return self["Infiltration Balancing Zones"]
@infiltration_balancing_zones.setter
def infiltration_balancing_zones(self, value="MixingSourceZonesOnly"):
"""Corresponds to IDD field `Infiltration Balancing Zones`"""
self["Infiltration Balancing Zones"] = value
class ZoneCapacitanceMultiplierResearchSpecial(DataObject):
""" Corresponds to IDD object `ZoneCapacitanceMultiplier:ResearchSpecial`
Multiplier altering the relative capacitance of the air compared to an empty zone
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'temperature capacity multiplier',
{'name': u'Temperature Capacity Multiplier',
'pyname': u'temperature_capacity_multiplier',
'default': 1.0,
'minimum>': 0.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'humidity capacity multiplier',
{'name': u'Humidity Capacity Multiplier',
'pyname': u'humidity_capacity_multiplier',
'default': 1.0,
'minimum>': 0.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'carbon dioxide capacity multiplier',
{'name': u'Carbon Dioxide Capacity Multiplier',
'pyname': u'carbon_dioxide_capacity_multiplier',
'default': 1.0,
'minimum>': 0.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'generic contaminant capacity multiplier',
{'name': u'Generic Contaminant Capacity Multiplier',
'pyname': u'generic_contaminant_capacity_multiplier',
'default': 1.0,
'minimum>': 0.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'})]),
'format': u'singleline',
'group': u'Simulation Parameters',
'min-fields': 4,
'name': u'ZoneCapacitanceMultiplier:ResearchSpecial',
'pyname': u'ZoneCapacitanceMultiplierResearchSpecial',
'required-object': False,
'unique-object': True}
@property
def temperature_capacity_multiplier(self):
"""field `Temperature Capacity Multiplier`
| Used to alter the capacitance of zone air with respect to heat or temperature
| Default value: 1.0
Args:
value (float): value for IDD Field `Temperature Capacity Multiplier`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `temperature_capacity_multiplier` or None if not set
"""
return self["Temperature Capacity Multiplier"]
@temperature_capacity_multiplier.setter
def temperature_capacity_multiplier(self, value=1.0):
"""Corresponds to IDD field `Temperature Capacity Multiplier`"""
self["Temperature Capacity Multiplier"] = value
@property
def humidity_capacity_multiplier(self):
"""field `Humidity Capacity Multiplier`
| Used to alter the capacitance of zone air with respect to moisture or humidity ratio
| Default value: 1.0
Args:
value (float): value for IDD Field `Humidity Capacity Multiplier`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `humidity_capacity_multiplier` or None if not set
"""
return self["Humidity Capacity Multiplier"]
@humidity_capacity_multiplier.setter
def humidity_capacity_multiplier(self, value=1.0):
"""Corresponds to IDD field `Humidity Capacity Multiplier`"""
self["Humidity Capacity Multiplier"] = value
@property
def carbon_dioxide_capacity_multiplier(self):
"""field `Carbon Dioxide Capacity Multiplier`
| Used to alter the capacitance of zone air with respect to zone air carbon dioxide concentration
| Default value: 1.0
Args:
value (float): value for IDD Field `Carbon Dioxide Capacity Multiplier`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `carbon_dioxide_capacity_multiplier` or None if not set
"""
return self["Carbon Dioxide Capacity Multiplier"]
@carbon_dioxide_capacity_multiplier.setter
def carbon_dioxide_capacity_multiplier(self, value=1.0):
"""Corresponds to IDD field `Carbon Dioxide Capacity Multiplier`"""
self["Carbon Dioxide Capacity Multiplier"] = value
@property
def generic_contaminant_capacity_multiplier(self):
"""field `Generic Contaminant Capacity Multiplier`
| Used to alter the capacitance of zone air with respect to zone air generic contaminant concentration
| Default value: 1.0
Args:
value (float): value for IDD Field `Generic Contaminant Capacity Multiplier`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `generic_contaminant_capacity_multiplier` or None if not set
"""
return self["Generic Contaminant Capacity Multiplier"]
@generic_contaminant_capacity_multiplier.setter
def generic_contaminant_capacity_multiplier(self, value=1.0):
"""Corresponds to IDD field `Generic Contaminant Capacity
Multiplier`"""
self["Generic Contaminant Capacity Multiplier"] = value
class Timestep(DataObject):
"""Corresponds to IDD object `Timestep` Specifies the "basic" timestep for
the simulation.
The value entered here is also known as the Zone Timestep. This is
used in the Zone Heat Balance Model calculation as the driving
timestep for heat transfer and load calculations.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'number of timesteps per hour',
{'name': u'Number of Timesteps per Hour',
'pyname': u'number_of_timesteps_per_hour',
'default': 6,
'maximum': 60,
'required-field': True,
'autosizable': False,
'minimum': 1,
'autocalculatable': False,
'type': u'integer'})]),
'format': u'singleline',
'group': u'Simulation Parameters',
'min-fields': 0,
'name': u'Timestep',
'pyname': u'Timestep',
'required-object': False,
'unique-object': True}
@property
def number_of_timesteps_per_hour(self):
"""field `Number of Timesteps per Hour`
| Number of timesteps per hour: normal validity 4 to 60: 6 suggested
| Must be evenly divisible into 60
| Allowable values include 1, 2, 3, 4, 5, 6, 10, 12, 15, 20, 30, and 60
| Normal 6 is minimum as lower values may cause inaccuracies
| A minimum value of 20 is suggested for both ConductionFiniteDifference
| and CombinedHeatAndMoistureFiniteElement surface heat balance algorithms
| A minimum of 12 is suggested for simulations involving a Vegetated Roof (Material:RoofVegetation).
| Default value: 6
| value >= 1
| value <= 60
Args:
value (int): value for IDD Field `Number of Timesteps per Hour`
Raises:
ValueError: if `value` is not a valid value
Returns:
int: the value of `number_of_timesteps_per_hour` or None if not set
"""
return self["Number of Timesteps per Hour"]
@number_of_timesteps_per_hour.setter
def number_of_timesteps_per_hour(self, value=6):
"""Corresponds to IDD field `Number of Timesteps per Hour`"""
self["Number of Timesteps per Hour"] = value
class ConvergenceLimits(DataObject):
"""Corresponds to IDD object `ConvergenceLimits` Specifies limits on HVAC
system simulation timesteps and iterations.
This item is an advanced feature that should be used only with
caution.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'minimum system timestep',
{'name': u'Minimum System Timestep',
'pyname': u'minimum_system_timestep',
'maximum': 60,
'required-field': False,
'autosizable': False,
'minimum': 0,
'autocalculatable': False,
'type': u'integer',
'unit': u'minutes'}),
(u'maximum hvac iterations',
{'name': u'Maximum HVAC Iterations',
'pyname': u'maximum_hvac_iterations',
'default': 20,
'required-field': False,
'autosizable': False,
'minimum': 1,
'autocalculatable': False,
'type': u'integer'}),
(u'minimum plant iterations',
{'name': u'Minimum Plant Iterations',
'pyname': u'minimum_plant_iterations',
'default': 2,
'required-field': False,
'autosizable': False,
'minimum': 1,
'autocalculatable': False,
'type': u'integer'}),
(u'maximum plant iterations',
{'name': u'Maximum Plant Iterations',
'pyname': u'maximum_plant_iterations',
'default': 8,
'required-field': False,
'autosizable': False,
'minimum': 2,
'autocalculatable': False,
'type': u'integer'})]),
'format': None,
'group': u'Simulation Parameters',
'min-fields': 0,
'name': u'ConvergenceLimits',
'pyname': u'ConvergenceLimits',
'required-object': False,
'unique-object': True}
@property
def minimum_system_timestep(self):
"""field `Minimum System Timestep`
| 0 sets the minimum to the zone timestep (ref: Timestep)
| 1 is normal (ratchet down to 1 minute)
| setting greater than zone timestep (in minutes) will effectively set to zone timestep
| Units: minutes
| value <= 60
Args:
value (int): value for IDD Field `Minimum System Timestep`
Raises:
ValueError: if `value` is not a valid value
Returns:
int: the value of `minimum_system_timestep` or None if not set
"""
return self["Minimum System Timestep"]
@minimum_system_timestep.setter
def minimum_system_timestep(self, value=None):
"""Corresponds to IDD field `Minimum System Timestep`"""
self["Minimum System Timestep"] = value
@property
def maximum_hvac_iterations(self):
"""field `Maximum HVAC Iterations`
| Default value: 20
| value >= 1
Args:
value (int): value for IDD Field `Maximum HVAC Iterations`
Raises:
ValueError: if `value` is not a valid value
Returns:
int: the value of `maximum_hvac_iterations` or None if not set
"""
return self["Maximum HVAC Iterations"]
@maximum_hvac_iterations.setter
def maximum_hvac_iterations(self, value=20):
"""Corresponds to IDD field `Maximum HVAC Iterations`"""
self["Maximum HVAC Iterations"] = value
@property
def minimum_plant_iterations(self):
"""field `Minimum Plant Iterations`
| Controls the minimum number of plant system solver iterations within a single HVAC iteration
| Larger values will increase runtime but might improve solution accuracy for complicated plant systems
| Complex plants include: several interconnected loops, heat recovery, thermal load following generators, etc.
| Default value: 2
| value >= 1
Args:
value (int): value for IDD Field `Minimum Plant Iterations`
Raises:
ValueError: if `value` is not a valid value
Returns:
int: the value of `minimum_plant_iterations` or None if not set
"""
return self["Minimum Plant Iterations"]
@minimum_plant_iterations.setter
def minimum_plant_iterations(self, value=2):
"""Corresponds to IDD field `Minimum Plant Iterations`"""
self["Minimum Plant Iterations"] = value
@property
def maximum_plant_iterations(self):
"""field `Maximum Plant Iterations`
| Controls the maximum number of plant system solver iterations within a single HVAC iteration
| Smaller values might decrease runtime but could decrease solution accuracy for complicated plant systems
| Default value: 8
| value >= 2
Args:
value (int): value for IDD Field `Maximum Plant Iterations`
Raises:
ValueError: if `value` is not a valid value
Returns:
int: the value of `maximum_plant_iterations` or None if not set
"""
return self["Maximum Plant Iterations"]
@maximum_plant_iterations.setter
def maximum_plant_iterations(self, value=8):
"""Corresponds to IDD field `Maximum Plant Iterations`"""
self["Maximum Plant Iterations"] = value
class ProgramControl(DataObject):
"""Corresponds to IDD object `ProgramControl` used to support various
efforts in time reduction for simulation, including threading. This object is
currently disabled."""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'number of threads allowed',
{'name': u'Number of Threads Allowed',
'pyname': u'number_of_threads_allowed',
'required-field': False,
'autosizable': False,
'minimum': 0,
'autocalculatable': False,
'type': u'integer'})]),
'format': None,
'group': u'Simulation Parameters',
'min-fields': 0,
'name': u'ProgramControl',
'pyname': u'ProgramControl',
'required-object': False,
'unique-object': False}
@property
def number_of_threads_allowed(self):
"""field `Number of Threads Allowed`
| This is currently used only in the Interior Radiant Exchange module -- view factors on # surfaces
| if value is 0, then maximum number allowed will be used.
Args:
value (int): value for IDD Field `Number of Threads Allowed`
Raises:
ValueError: if `value` is not a valid value
Returns:
int: the value of `number_of_threads_allowed` or None if not set
"""
return self["Number of Threads Allowed"]
@number_of_threads_allowed.setter
def number_of_threads_allowed(self, value=None):
"""Corresponds to IDD field `Number of Threads Allowed`"""
self["Number of Threads Allowed"] = value
|
MakeHer/edx-platform
|
refs/heads/dashboard.2
|
openedx/core/djangoapps/credit/routers.py
|
138
|
""" DRF routers. """
from rest_framework import routers
class SimpleRouter(routers.SimpleRouter):
""" Simple DRF router. """
# Note (CCB): This is a retrofit of a DRF 2.4 feature onto DRF 2.3. This is, sadly, simpler than
# updating edx-ora2 to work with DRF 2.4. See https://github.com/tomchristie/django-rest-framework/pull/1333
# for details on this specific DRF 2.4 feature.
def get_lookup_regex(self, viewset, lookup_prefix=''):
"""
Given a viewset, return the portion of URL regex that is used
to match against a single instance.
Note that lookup_prefix is not used directly inside REST framework
itself, but is required in order to nicely support nested router
implementations, such as drf-nested-routers.
https://github.com/alanjds/drf-nested-routers
"""
base_regex = '(?P<{lookup_prefix}{lookup_field}>{lookup_value})'
lookup_field = getattr(viewset, 'lookup_field', 'pk')
try:
lookup_value = viewset.lookup_value_regex
except AttributeError:
# Don't consume `.json` style suffixes
lookup_value = '[^/.]+'
return base_regex.format(
lookup_prefix=lookup_prefix,
lookup_field=lookup_field,
lookup_value=lookup_value
)
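# Hedged usage sketch (the viewset and URL names below are illustrative, not part of this module):
#     router = SimpleRouter()
#     router.register(r'credit_courses', CreditCourseViewSet, base_name='credit_course')
#     urlpatterns = router.urls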
|
kcavagnolo/astroML
|
refs/heads/master
|
astroML/datasets/sdss_filters.py
|
3
|
from __future__ import print_function, division
import os
import numpy as np
from astroML.datasets import get_data_home
from ..py3k_compat import urlopen
# Info on vega spectrum: http://www.stsci.edu/hst/observatory/cdbs/calspec.html
VEGA_URL = 'http://www.astro.washington.edu/users/ivezic/DMbook/data/1732526_nic_002.ascii'
FILTER_URL = 'http://classic.sdss.org/dr7/instruments/imager/filters/%s.dat'
def fetch_sdss_filter(fname, data_home=None, download_if_missing=True):
"""Loader for SDSS Filter profiles
Parameters
----------
fname : str
filter name: must be one of 'ugriz'
data_home : optional, default=None
Specify another download and cache folder for the datasets. By default
all astroML data is stored in '~/astroML_data' subfolders.
download_if_missing : optional, default=True
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
data : ndarray
data is an array of shape (5, Nlam)
first row: wavelength in angstroms
second row: sensitivity to point source, airmass 1.3
third row: sensitivity to extended source, airmass 1.3
fourth row: sensitivity to extended source, airmass 0.0
fifth row: assumed atmospheric extinction, airmass 1.0
"""
if fname not in 'ugriz':
raise ValueError("Unrecognized filter name '%s'" % fname)
url = FILTER_URL % fname
data_home = get_data_home(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
archive_file = os.path.join(data_home, '%s.dat' % fname)
if not os.path.exists(archive_file):
if not download_if_missing:
raise IOError('data not present on disk. '
'set download_if_missing=True to download')
print("downloading from %s" % url)
F = urlopen(url)
open(archive_file, 'wb').write(F.read())
F = open(archive_file)
return np.loadtxt(F, unpack=True)
def fetch_vega_spectrum(data_home=None, download_if_missing=True):
"""Loader for Vega reference spectrum
Parameters
----------
data_home : optional, default=None
Specify another download and cache folder for the datasets. By default
all astroML data is stored in '~/astroML_data' subfolders.
download_if_missing : optional, default=True
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
data : ndarray
data[0] is the array of wavelength in angstroms
data[1] is the array of fluxes in Jy (F_nu, not F_lambda)
"""
data_home = get_data_home(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
archive_name = os.path.join(data_home, VEGA_URL.split('/')[-1])
if not os.path.exists(archive_name):
if not download_if_missing:
raise IOError('data not present on disk. '
'set download_if_missing=True to download')
print("downnloading from %s" % VEGA_URL)
F = urlopen(VEGA_URL)
open(archive_name, 'wb').write(F.read())
F = open(archive_name, 'r')
return np.loadtxt(F, unpack=True)
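# Hedged usage sketch (requires network access or a previously populated data_home cache):
#     wavelength, pt_src, ext_13, ext_00, extinction = fetch_sdss_filter('r')
#     vega_wavelength, vega_flux = fetch_vega_spectrum()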
|
thurt/arangodb
|
refs/heads/devel
|
3rdParty/V8-4.3.61/testing/gtest/test/gtest_help_test.py
|
2968
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import gtest_test_utils
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
re.sub('^--', '/', LIST_TESTS_FLAG),
re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
[PROGRAM_PATH, LIST_TESTS_FLAG]).output
# The help message must match this regex.
HELP_REGEX = re.compile(
FLAG_PREFIX + r'list_tests.*' +
FLAG_PREFIX + r'filter=.*' +
FLAG_PREFIX + r'also_run_disabled_tests.*' +
FLAG_PREFIX + r'repeat=.*' +
FLAG_PREFIX + r'shuffle.*' +
FLAG_PREFIX + r'random_seed=.*' +
FLAG_PREFIX + r'color=.*' +
FLAG_PREFIX + r'print_time.*' +
FLAG_PREFIX + r'output=.*' +
FLAG_PREFIX + r'break_on_failure.*' +
FLAG_PREFIX + r'throw_on_failure.*' +
FLAG_PREFIX + r'catch_exceptions=0.*',
re.DOTALL)
def RunWithFlag(flag):
"""Runs gtest_help_test_ with the given flag.
Returns:
the exit code and the text output as a tuple.
Args:
flag: the command-line flag to pass to gtest_help_test_, or None.
"""
if flag is None:
command = [PROGRAM_PATH]
else:
command = [PROGRAM_PATH, flag]
child = gtest_test_utils.Subprocess(command)
return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
"""Tests the --help flag and its equivalent forms."""
def TestHelpFlag(self, flag):
"""Verifies correct behavior when help flag is specified.
The right message must be printed and the tests must
be skipped when the given flag is specified.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assertEquals(0, exit_code)
self.assert_(HELP_REGEX.search(output), output)
if IS_LINUX:
self.assert_(STREAM_RESULT_TO_FLAG in output, output)
else:
self.assert_(STREAM_RESULT_TO_FLAG not in output, output)
if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
else:
self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)
def TestNonHelpFlag(self, flag):
"""Verifies correct behavior when no help flag is specified.
Verifies that when no help flag is specified, the tests are run
and the help message is not printed.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assert_(exit_code != 0)
self.assert_(not HELP_REGEX.search(output), output)
def testPrintsHelpWithFullFlag(self):
self.TestHelpFlag('--help')
def testPrintsHelpWithShortFlag(self):
self.TestHelpFlag('-h')
def testPrintsHelpWithQuestionFlag(self):
self.TestHelpFlag('-?')
def testPrintsHelpWithWindowsStyleQuestionFlag(self):
self.TestHelpFlag('/?')
def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
self.TestHelpFlag(UNKNOWN_FLAG)
def testPrintsHelpWithIncorrectFlagStyle(self):
for incorrect_flag in INCORRECT_FLAG_VARIANTS:
self.TestHelpFlag(incorrect_flag)
def testRunsTestsWithoutHelpFlag(self):
"""Verifies that when no help flag is specified, the tests are run
and the help message is not printed."""
self.TestNonHelpFlag(None)
def testRunsTestsWithGtestInternalFlag(self):
"""Verifies that the tests are run and no help message is printed when
a flag starting with Google Test prefix and 'internal_' is supplied."""
self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
if __name__ == '__main__':
gtest_test_utils.Main()
|
kuri65536/python-for-android
|
refs/heads/master
|
python3-alpha/python-libs/gdata/tlslite/VerifierDB.py
|
48
|
"""Class for storing SRP password verifiers."""
from .utils.cryptomath import *
from .utils.compat import *
from . import mathtls
from .BaseDB import BaseDB
class VerifierDB(BaseDB):
"""This class represent an in-memory or on-disk database of SRP
password verifiers.
A VerifierDB can be passed to a server handshake to authenticate
a client based on one of the verifiers.
This class is thread-safe.
"""
def __init__(self, filename=None):
"""Create a new VerifierDB instance.
@type filename: str
@param filename: Filename for an on-disk database, or None for
an in-memory database. If the filename already exists, follow
this with a call to open(). To create a new on-disk database,
follow this with a call to create().
"""
BaseDB.__init__(self, filename, "verifier")
def _getItem(self, username, valueStr):
(N, g, salt, verifier) = valueStr.split(" ")
N = base64ToNumber(N)
g = base64ToNumber(g)
salt = base64ToString(salt)
verifier = base64ToNumber(verifier)
return (N, g, salt, verifier)
def __setitem__(self, username, verifierEntry):
"""Add a verifier entry to the database.
@type username: str
@param username: The username to associate the verifier with.
Must be less than 256 characters in length. Must not already
be in the database.
@type verifierEntry: tuple
@param verifierEntry: The verifier entry to add. Use
L{tlslite.VerifierDB.VerifierDB.makeVerifier} to create a
verifier entry.
"""
BaseDB.__setitem__(self, username, verifierEntry)
def _setItem(self, username, value):
if len(username)>=256:
raise ValueError("username too long")
N, g, salt, verifier = value
N = numberToBase64(N)
g = numberToBase64(g)
salt = stringToBase64(salt)
verifier = numberToBase64(verifier)
valueStr = " ".join( (N, g, salt, verifier) )
return valueStr
def _checkItem(self, value, username, param):
(N, g, salt, verifier) = value
x = mathtls.makeX(salt, username, param)
v = powMod(g, x, N)
return (verifier == v)
def makeVerifier(username, password, bits):
"""Create a verifier entry which can be stored in a VerifierDB.
@type username: str
@param username: The username for this verifier. Must be less
than 256 characters in length.
@type password: str
@param password: The password for this verifier.
@type bits: int
@param bits: This value specifies which SRP group parameters
to use. It must be one of (1024, 1536, 2048, 3072, 4096, 6144,
8192). Larger values are more secure but slower. 2048 is a
good compromise between safety and speed.
@rtype: tuple
@return: A tuple which may be stored in a VerifierDB.
"""
return mathtls.makeVerifier(username, password, bits)
makeVerifier = staticmethod(makeVerifier)
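# --- Hedged usage sketch (not part of the original tlslite module) ---
# A minimal illustration of how a server-side application might create and
# populate a VerifierDB, using only the API documented above (create() comes
# from BaseDB, as noted in __init__'s docstring). The filename, username and
# password below are made-up examples.
def _example_populate_verifier_db(filename="verifiers.db"):
    db = VerifierDB(filename)
    db.create()  # new on-disk database, per the __init__ docstring
    entry = VerifierDB.makeVerifier("alice", "correct horse battery staple", 2048)
    db["alice"] = entry  # routed through __setitem__ / _setItem above
    return db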
|
actuaryzhang/spark
|
refs/heads/master
|
sql/hive/src/test/resources/data/scripts/newline.py
|
131
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
for line in sys.stdin:
print("1\\n2")
print("1\\r2")
print("1\\t2")
|
sdklite/gyp
|
refs/heads/master
|
test/mac/gyptest-sourceless-module.py
|
200
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that bundles that have no 'sources' (pure resource containers) work.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
test.run_gyp('test.gyp', chdir='sourceless-module')
# Just needs to build without errors.
test.build('test.gyp', 'empty_bundle', chdir='sourceless-module')
test.built_file_must_not_exist(
'empty_bundle.bundle', chdir='sourceless-module')
# Needs to build, and contain a resource.
test.build('test.gyp', 'resource_bundle', chdir='sourceless-module')
test.built_file_must_exist(
'resource_bundle.bundle/Contents/Resources/foo.manifest',
chdir='sourceless-module')
test.built_file_must_not_exist(
'resource_bundle.bundle/Contents/MacOS/resource_bundle',
chdir='sourceless-module')
# Build an app containing an actionless bundle.
test.build(
'test.gyp',
'bundle_dependent_on_resource_bundle_no_actions',
chdir='sourceless-module')
test.built_file_must_exist(
'bundle_dependent_on_resource_bundle_no_actions.app/Contents/Resources/'
'mac_resource_bundle_no_actions.bundle/Contents/Resources/empty.txt',
chdir='sourceless-module')
# Needs to build and cause the bundle to be built.
test.build(
'test.gyp', 'dependent_on_resource_bundle', chdir='sourceless-module')
test.built_file_must_exist(
'resource_bundle.bundle/Contents/Resources/foo.manifest',
chdir='sourceless-module')
test.built_file_must_not_exist(
'resource_bundle.bundle/Contents/MacOS/resource_bundle',
chdir='sourceless-module')
# TODO(thakis): shared_libraries that have no sources but depend on static
# libraries currently only work with the ninja generator. This is used by
# chrome/mac's components build.
if test.format == 'ninja':
# Check that an executable depending on a resource framework links fine too.
test.build(
'test.gyp', 'dependent_on_resource_framework', chdir='sourceless-module')
test.built_file_must_exist(
'resource_framework.framework/Resources/foo.manifest',
chdir='sourceless-module')
test.built_file_must_exist(
'resource_framework.framework/resource_framework',
chdir='sourceless-module')
test.pass_test()
|
nhicher/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/efs_facts.py
|
47
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: efs_facts
short_description: Get information about Amazon EFS file systems
description:
- This module can be used to search Amazon EFS file systems.
version_added: "2.2"
requirements: [ boto3 ]
author:
- "Ryan Sydnor (@ryansydnor)"
options:
name:
description:
- Creation Token of Amazon EFS file system.
aliases: [ creation_token ]
id:
description:
- ID of Amazon EFS.
tags:
description:
- List of tags of Amazon EFS. Should be defined as dictionary.
targets:
description:
- List of targets on which to filter the returned results.
- Result must match all of the specified targets, each of which can be a security group ID, a subnet ID or an IP address.
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Find all existing efs
efs_facts:
register: result
- name: Find efs using id
efs_facts:
id: fs-1234abcd
- name: Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a'
efs_facts:
tags:
name: myTestNameTag
targets:
- subnet-1a2b3c4d
- sg-4d3c2b1a
'''
RETURN = '''
creation_time:
description: timestamp of creation date
returned: always
type: str
sample: "2015-11-16 07:30:57-05:00"
creation_token:
description: EFS creation token
returned: always
type: str
sample: console-88609e04-9a0e-4a2e-912c-feaa99509961
file_system_id:
description: ID of the file system
returned: always
type: str
sample: fs-xxxxxxxx
life_cycle_state:
description: state of the EFS file system
returned: always
type: str
sample: creating, available, deleting, deleted
mount_point:
description: URL of the file system with a leading dot, kept from the time when AWS EFS required a network suffix to be appended to the EFS address
returned: always
type: str
sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
filesystem_address:
description: url of file system
returned: always
type: str
sample: fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
mount_targets:
description: list of mount targets
returned: always
type: list
sample:
[
{
"file_system_id": "fs-a7ad440e",
"ip_address": "172.31.17.173",
"life_cycle_state": "available",
"mount_target_id": "fsmt-d8907871",
"network_interface_id": "eni-6e387e26",
"owner_id": "740748460359",
"security_groups": [
"sg-a30b22c6"
],
"subnet_id": "subnet-e265c895"
},
...
]
name:
description: name of the file system
returned: always
type: str
sample: my-efs
number_of_mount_targets:
description: the number of targets mounted
returned: always
type: int
sample: 3
owner_id:
description: AWS account ID of EFS owner
returned: always
type: str
sample: XXXXXXXXXXXX
size_in_bytes:
description: size of the file system in bytes as of a timestamp
returned: always
type: dict
sample:
{
"timestamp": "2015-12-21 13:59:59-05:00",
"value": 12288
}
performance_mode:
description: performance mode of the file system
returned: always
type: str
sample: "generalPurpose"
throughput_mode:
description: mode of throughput for the file system
returned: when botocore >= 1.10.57
type: str
sample: "bursting"
provisioned_throughput_in_mibps:
description: throughput provisioned in Mibps
returned: when botocore >= 1.10.57 and throughput_mode is set to "provisioned"
type: float
sample: 15.0
tags:
description: tags on the efs instance
returned: always
type: dict
sample:
{
"name": "my-efs",
"key": "Value"
}
'''
from collections import defaultdict
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec, AWSRetry
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict
from ansible.module_utils._text import to_native
class EFSConnection(object):
STATE_CREATING = 'creating'
STATE_AVAILABLE = 'available'
STATE_DELETING = 'deleting'
STATE_DELETED = 'deleted'
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = boto3_conn(module, conn_type='client',
resource='efs', region=region,
**aws_connect_params)
self.module = module
except Exception as e:
module.fail_json(msg="Failed to connect to AWS: %s" % to_native(e))
self.region = region
@AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
def list_file_systems(self, **kwargs):
"""
Returns generator of file systems including all attributes of FS
"""
paginator = self.connection.get_paginator('describe_file_systems')
return paginator.paginate(**kwargs).build_full_result()['FileSystems']
@AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
def get_tags(self, file_system_id):
"""
Returns tag list for selected instance of EFS
"""
paginator = self.connection.get_paginator('describe_tags')
return boto3_tag_list_to_ansible_dict(paginator.paginate(FileSystemId=file_system_id).build_full_result()['Tags'])
@AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
def get_mount_targets(self, file_system_id):
"""
Returns mount targets for selected instance of EFS
"""
paginator = self.connection.get_paginator('describe_mount_targets')
return paginator.paginate(FileSystemId=file_system_id).build_full_result()['MountTargets']
@AWSRetry.jittered_backoff(catch_extra_error_codes=['ThrottlingException'])
def get_security_groups(self, mount_target_id):
"""
Returns security groups for selected instance of EFS
"""
return self.connection.describe_mount_target_security_groups(MountTargetId=mount_target_id)['SecurityGroups']
def get_mount_targets_data(self, file_systems):
for item in file_systems:
if item['life_cycle_state'] == self.STATE_AVAILABLE:
try:
mount_targets = self.get_mount_targets(item['file_system_id'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't get EFS targets")
for mt in mount_targets:
item['mount_targets'].append(camel_dict_to_snake_dict(mt))
return file_systems
def get_security_groups_data(self, file_systems):
for item in file_systems:
if item['life_cycle_state'] == self.STATE_AVAILABLE:
for target in item['mount_targets']:
if target['life_cycle_state'] == self.STATE_AVAILABLE:
try:
target['security_groups'] = self.get_security_groups(target['mount_target_id'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't get EFS security groups")
else:
target['security_groups'] = []
else:
item['tags'] = {}
item['mount_targets'] = []
return file_systems
def get_file_systems(self, file_system_id=None, creation_token=None):
kwargs = dict()
if file_system_id:
kwargs['FileSystemId'] = file_system_id
if creation_token:
kwargs['CreationToken'] = creation_token
try:
file_systems = self.list_file_systems(**kwargs)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't get EFS file systems")
results = list()
for item in file_systems:
item['CreationTime'] = str(item['CreationTime'])
"""
When MountPoint was introduced, a network path suffix had to be added before it could be used.
AWS has since updated EFS so that no suffix is needed. MountPoint is kept for backwards compatibility,
and the new FilesystemAddress variable is introduced for direct use with other modules (e.g. mount).
AWS documentation is available here:
U(https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html)
"""
item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
item['FilesystemAddress'] = '%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
if 'Timestamp' in item['SizeInBytes']:
item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
result = camel_dict_to_snake_dict(item)
result['tags'] = {}
result['mount_targets'] = []
# Set tags *after* doing camel to snake
if result['life_cycle_state'] == self.STATE_AVAILABLE:
try:
result['tags'] = self.get_tags(result['file_system_id'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't get EFS tags")
results.append(result)
return results
def prefix_to_attr(attr_id):
"""
Helper method to convert ID prefix to mount target attribute
"""
attr_by_prefix = {
'fsmt-': 'mount_target_id',
'subnet-': 'subnet_id',
'eni-': 'network_interface_id',
'sg-': 'security_groups'
}
return first_or_default([attr_name for (prefix, attr_name) in attr_by_prefix.items()
if str(attr_id).startswith(prefix)], 'ip_address')
def first_or_default(items, default=None):
"""
Helper method to fetch first element of list (if exists)
"""
for item in items:
return item
return default
def has_tags(available, required):
"""
Helper method to determine if tag requested already exists
"""
for key, value in required.items():
if key not in available or value != available[key]:
return False
return True
def has_targets(available, required):
"""
Helper method to determine if mount target requested already exists
"""
grouped = group_list_of_dict(available)
for (value, field) in required:
if field not in grouped or value not in grouped[field]:
return False
return True
def group_list_of_dict(array):
"""
Helper method to group list of dict to dict with all possible values
"""
result = defaultdict(list)
for item in array:
for key, value in item.items():
result[key] += value if isinstance(value, list) else [value]
return result
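# Hedged illustration (not part of the original module): group_list_of_dict()
# merges the per-mount-target dicts into a single dict of value lists, which
# is exactly the shape has_targets() searches. The sample IDs are made up.
def _example_group_list_of_dict():
    sample = [
        {'subnet_id': 'subnet-1a2b3c4d', 'security_groups': ['sg-4d3c2b1a']},
        {'subnet_id': 'subnet-9z8y7x6w', 'security_groups': ['sg-4d3c2b1a']},
    ]
    # -> {'subnet_id': ['subnet-1a2b3c4d', 'subnet-9z8y7x6w'],
    #     'security_groups': ['sg-4d3c2b1a', 'sg-4d3c2b1a']}
    return group_list_of_dict(sample)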
def main():
"""
Module action handler
"""
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
id=dict(),
name=dict(aliases=['creation_token']),
tags=dict(type="dict", default={}),
targets=dict(type="list", default=[])
))
module = AnsibleAWSModule(argument_spec=argument_spec,
supports_check_mode=True)
region, _, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = EFSConnection(module, region, **aws_connect_params)
name = module.params.get('name')
fs_id = module.params.get('id')
tags = module.params.get('tags')
targets = module.params.get('targets')
file_systems_info = connection.get_file_systems(fs_id, name)
if tags:
file_systems_info = [item for item in file_systems_info if has_tags(item['tags'], tags)]
file_systems_info = connection.get_mount_targets_data(file_systems_info)
file_systems_info = connection.get_security_groups_data(file_systems_info)
if targets:
targets = [(item, prefix_to_attr(item)) for item in targets]
file_systems_info = [item for item in file_systems_info if has_targets(item['mount_targets'], targets)]
module.exit_json(changed=False, ansible_facts={'efs': file_systems_info})
if __name__ == '__main__':
main()
|
baile/infojoiner
|
refs/heads/master
|
infojoiner/importers/bookmarks_tests.py
|
1
|
import unittest
from ConfigParser import SafeConfigParser
from bookmarks import BookmarksImporter
class BookmarksImporterTest(unittest.TestCase):
def setUp(self):
self.bi = BookmarksImporter()
self.config = self.bi.open_config()
self.path = self.bi.get_config(self.config,'user','home')
self.dirlist = self.bi.get_path(self.path)
""" test de integracion """
def test_bookmarkbackups_folder_exits_in_user_home(self):
self.assertFalse(None==self.dirlist)
self.assertEquals(type([]), type(self.dirlist))
def test_bookmarks_files_exits_in_user_home(self):
self.assertFalse(None==self.dirlist)
self.assertEquals(type([]), type(self.dirlist))
filelist=self.bi.get_all_files(self.dirlist)
self.assertFalse(None==filelist)
self.assertEquals(type([]), type(filelist))
def test_config_home_correct(self):
self.assertEquals('/home/jon', self.path)
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(BookmarksImporterTest)
unittest.TextTestRunner(verbosity=7).run(suite)
|
Pragmatismo/Pigrow
|
refs/heads/master
|
resources/examples/RF_off.py
|
1
|
#!/usr/bin/python3
import datetime, sys, os
homedir = os.getenv("HOME")
sys.path.append(homedir + '/Pigrow/scripts/')
import pigrow_defs
for argu in sys.argv[1:]:
if argu == '-h' or argu == '--help':
print("Pigrow RX off switch")
print("")
sys.exit()
if argu == "-flags":
print("")
def RF_off(set_dic, switch_log):
script = 'RF_off.py'
msg =("\n")
msg +=(" #############################################\n")
msg +=(" ## Turning the RF - OFF ##\n")
if 'gpio_RF' in set_dic and not str(set_dic['gpio_RF']).strip() == '':
gpio_pin = int(set_dic['gpio_RF'])
from rpi_rf import RFDevice
rfdevice = RFDevice(gpio_pin)
rfdevice.enable_tx()
rfdevice.tx_repeat = 5
rfdevice.tx_code(15465750, 1, 432)
msg +=(" ## by switching GPIO "+str(gpio_pin)+" to "+gpio_pin_dir+" ##\n")
msg +=(" #############################################\n")
pigrow_defs.set_condition(condition_name="RF", trig_direction="off", cooldown="none")
pigrow_defs.write_log(script, 'RF turned off', switch_log)
return msg
else:
msg +=(" !! NO RF PIN SET !!\n")
msg +=(" !! run config program or edit config.txt !!\n")
msg +=(" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n")
pigrow_defs.write_log(script, 'Failed - due to none set in config', switch_log)
return msg
if __name__ == '__main__':
### default settings
loc_dic = pigrow_defs.load_locs(homedir + "/Pigrow/config/dirlocs.txt")
set_dic = pigrow_defs.load_settings(loc_dic['loc_settings'], err_log=loc_dic['err_log'],)
msg = RF_off(set_dic, loc_dic['loc_switchlog'])
print (msg)
|
jpulec/django-rest-framework
|
refs/heads/master
|
rest_framework/templatetags/rest_framework.py
|
15
|
from __future__ import absolute_import, unicode_literals
import re
from django import template
from django.core.urlresolvers import NoReverseMatch, reverse
from django.utils import six
from django.utils.encoding import force_text, iri_to_uri
from django.utils.html import escape, smart_urlquote
from django.utils.safestring import SafeData, mark_safe
from rest_framework.renderers import HTMLFormRenderer
from rest_framework.utils.urls import replace_query_param
register = template.Library()
# Regex for adding classes to html snippets
class_re = re.compile(r'(?<=class=["\'])(.*)(?=["\'])')
@register.simple_tag
def get_pagination_html(pager):
return pager.to_html()
@register.simple_tag
def render_field(field, style=None):
style = style or {}
renderer = style.get('renderer', HTMLFormRenderer())
return renderer.render_field(field, style)
@register.simple_tag
def optional_login(request):
"""
Include a login snippet if REST framework's login view is in the URLconf.
"""
try:
login_url = reverse('rest_framework:login')
except NoReverseMatch:
return ''
snippet = "<li><a href='{href}?next={next}'>Log in</a></li>".format(href=login_url, next=escape(request.path))
return snippet
@register.simple_tag
def optional_logout(request, user):
"""
Include a logout snippet if REST framework's logout view is in the URLconf.
"""
try:
logout_url = reverse('rest_framework:logout')
except NoReverseMatch:
return '<li class="navbar-text">{user}</li>'.format(user=user)
snippet = """<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown">
{user}
<b class="caret"></b>
</a>
<ul class="dropdown-menu">
<li><a href='{href}?next={next}'>Log out</a></li>
</ul>
</li>"""
return snippet.format(user=user, href=logout_url, next=escape(request.path))
@register.simple_tag
def add_query_param(request, key, val):
"""
Add a query parameter to the current request url, and return the new url.
"""
iri = request.get_full_path()
uri = iri_to_uri(iri)
return escape(replace_query_param(uri, key, val))
@register.filter
def add_class(value, css_class):
"""
http://stackoverflow.com/questions/4124220/django-adding-css-classes-when-rendering-form-fields-in-a-template
Inserts classes into template variables that contain HTML tags,
useful for modifying forms without needing to change the Form objects.
Usage:
{{ field.label_tag|add_class:"control-label" }}
In the case of REST Framework, the filter is used to add Bootstrap-specific
classes to the forms.
"""
html = six.text_type(value)
match = class_re.search(html)
if match:
m = re.search(r'^%s$|^%s\s|\s%s\s|\s%s$' % (css_class, css_class,
css_class, css_class),
match.group(1))
if not m:
return mark_safe(class_re.sub(match.group(1) + " " + css_class,
html))
else:
return mark_safe(html.replace('>', ' class="%s">' % css_class, 1))
return value
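# Hedged sketch (not part of the original module): what the add_class filter
# returns for a widget with and without an existing class attribute. The HTML
# strings are illustrative only.
def _example_add_class():
    # No class attribute present: one is inserted on the first tag.
    # -> '<input type="text" class="form-control">'
    without = add_class('<input type="text">', 'form-control')
    # Existing class attribute: the new class is appended to it.
    # -> '<input class="widget form-control">'
    with_existing = add_class('<input class="widget">', 'form-control')
    return without, with_existing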
# Bunch of stuff cloned from urlize
TRAILING_PUNCTUATION = ['.', ',', ':', ';', '.)', '"', "']", "'}", "'"]
WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('[', ']'), ('&lt;', '&gt;'),
('"', '"'), ("'", "'")]
word_split_re = re.compile(r'(\s+)')
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)$', re.IGNORECASE)
simple_email_re = re.compile(r'^\S+@\S+\.\S+$')
def smart_urlquote_wrapper(matched_url):
"""
Simple wrapper for smart_urlquote. ValueError("Invalid IPv6 URL") can
be raised here, see issue #1386
"""
try:
return smart_urlquote(matched_url)
except ValueError:
return None
@register.filter
def urlize_quoted_links(text, trim_url_limit=None, nofollow=True, autoescape=True):
"""
Converts any URLs in text into clickable links.
Works on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
If trim_url_limit is not None, the URLs in link text longer than this limit
will be truncated to trim_url_limit-3 characters and appended with an ellipsis.
If nofollow is True, the URLs in link text will get a rel="nofollow"
attribute.
If autoescape is True, the link text and URLs will get autoescaped.
"""
def trim_url(x, limit=trim_url_limit):
return limit is not None and (len(x) > limit and ('%s...' % x[:max(0, limit - 3)])) or x
safe_input = isinstance(text, SafeData)
words = word_split_re.split(force_text(text))
for i, word in enumerate(words):
if '.' in word or '@' in word or ':' in word:
# Deal with punctuation.
lead, middle, trail = '', word, ''
for punctuation in TRAILING_PUNCTUATION:
if middle.endswith(punctuation):
middle = middle[:-len(punctuation)]
trail = punctuation + trail
for opening, closing in WRAPPING_PUNCTUATION:
if middle.startswith(opening):
middle = middle[len(opening):]
lead = lead + opening
# Keep parentheses at the end only if they're balanced.
if (
middle.endswith(closing) and
middle.count(closing) == middle.count(opening) + 1
):
middle = middle[:-len(closing)]
trail = closing + trail
# Make URL we want to point to.
url = None
nofollow_attr = ' rel="nofollow"' if nofollow else ''
if simple_url_re.match(middle):
url = smart_urlquote_wrapper(middle)
elif simple_url_2_re.match(middle):
url = smart_urlquote_wrapper('http://%s' % middle)
elif ':' not in middle and simple_email_re.match(middle):
local, domain = middle.rsplit('@', 1)
try:
domain = domain.encode('idna').decode('ascii')
except UnicodeError:
continue
url = 'mailto:%s@%s' % (local, domain)
nofollow_attr = ''
# Make link.
if url:
trimmed = trim_url(middle)
if autoescape and not safe_input:
lead, trail = escape(lead), escape(trail)
url, trimmed = escape(url), escape(trimmed)
middle = '<a href="%s"%s>%s</a>' % (url, nofollow_attr, trimmed)
words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
else:
if safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
elif safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
return ''.join(words)
@register.filter
def break_long_headers(header):
"""
Breaks headers longer than 160 characters (~page length)
when possible (i.e. when they are comma separated)
"""
if len(header) > 160 and ',' in header:
header = mark_safe('<br> ' + ', <br>'.join(header.split(',')))
return header
|
dincamihai/django-allauth
|
refs/heads/master
|
allauth/account/app_settings.py
|
34
|
class AppSettings(object):
class AuthenticationMethod:
USERNAME = 'username'
EMAIL = 'email'
USERNAME_EMAIL = 'username_email'
class EmailVerificationMethod:
# After signing up, keep the user account inactive until the email
# address is verified
MANDATORY = 'mandatory'
# Allow login with unverified e-mail (e-mail verification is
# still sent)
OPTIONAL = 'optional'
# Don't send e-mail verification mails during signup
NONE = 'none'
def __init__(self, prefix):
self.prefix = prefix
# If login is by email, email must be required
assert (not self.AUTHENTICATION_METHOD
== self.AuthenticationMethod.EMAIL) or self.EMAIL_REQUIRED
# If login includes email, login must be unique
assert (self.AUTHENTICATION_METHOD
== self.AuthenticationMethod.USERNAME) or self.UNIQUE_EMAIL
assert (self.EMAIL_VERIFICATION
!= self.EmailVerificationMethod.MANDATORY) \
or self.EMAIL_REQUIRED
if not self.USER_MODEL_USERNAME_FIELD:
assert not self.USERNAME_REQUIRED
assert self.AUTHENTICATION_METHOD \
not in (self.AuthenticationMethod.USERNAME,
self.AuthenticationMethod.USERNAME_EMAIL)
def _setting(self, name, dflt):
from django.conf import settings
getter = getattr(settings,
'ALLAUTH_SETTING_GETTER',
lambda name, dflt: getattr(settings, name, dflt))
return getter(self.prefix + name, dflt)
@property
def DEFAULT_HTTP_PROTOCOL(self):
return self._setting("DEFAULT_HTTP_PROTOCOL", "http")
@property
def EMAIL_CONFIRMATION_EXPIRE_DAYS(self):
"""
Determines the expiration date of e-mail confirmation mails (#
of days)
"""
from django.conf import settings
return self._setting("EMAIL_CONFIRMATION_EXPIRE_DAYS",
getattr(settings, "EMAIL_CONFIRMATION_DAYS", 3))
@property
def EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL(self):
"""
The URL to redirect to after a successful e-mail confirmation, in
case of an authenticated user
"""
return self._setting("EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL",
None)
@property
def EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL(self):
"""
The URL to redirect to after a successful e-mail confirmation, in
case no user is logged in
"""
from django.conf import settings
return self._setting("EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL",
settings.LOGIN_URL)
@property
def EMAIL_REQUIRED(self):
"""
The user is required to hand over an e-mail address when signing up
"""
return self._setting("EMAIL_REQUIRED", False)
@property
def EMAIL_VERIFICATION(self):
"""
See e-mail verification method
"""
ret = self._setting("EMAIL_VERIFICATION",
self.EmailVerificationMethod.OPTIONAL)
# Deal with legacy (boolean based) setting
if ret is True:
ret = self.EmailVerificationMethod.MANDATORY
elif ret is False:
ret = self.EmailVerificationMethod.OPTIONAL
return ret
@property
def AUTHENTICATION_METHOD(self):
from django.conf import settings
if hasattr(settings, "ACCOUNT_EMAIL_AUTHENTICATION"):
import warnings
warnings.warn("ACCOUNT_EMAIL_AUTHENTICATION is deprecated,"
" use ACCOUNT_AUTHENTICATION_METHOD",
DeprecationWarning)
if getattr(settings, "ACCOUNT_EMAIL_AUTHENTICATION"):
ret = self.AuthenticationMethod.EMAIL
else:
ret = self.AuthenticationMethod.USERNAME
else:
ret = self._setting("AUTHENTICATION_METHOD",
self.AuthenticationMethod.USERNAME)
return ret
@property
def UNIQUE_EMAIL(self):
"""
Enforce uniqueness of e-mail addresses
"""
return self._setting("UNIQUE_EMAIL", True)
@property
def SIGNUP_PASSWORD_VERIFICATION(self):
"""
Signup password verification
"""
return self._setting("SIGNUP_PASSWORD_VERIFICATION", True)
@property
def PASSWORD_MIN_LENGTH(self):
"""
Minimum password length
"""
return self._setting("PASSWORD_MIN_LENGTH", 6)
@property
def EMAIL_SUBJECT_PREFIX(self):
"""
Subject-line prefix to use for email messages sent
"""
return self._setting("EMAIL_SUBJECT_PREFIX", None)
@property
def SIGNUP_FORM_CLASS(self):
"""
Signup form
"""
return self._setting("SIGNUP_FORM_CLASS", None)
@property
def USERNAME_REQUIRED(self):
"""
The user is required to enter a username when signing up
"""
return self._setting("USERNAME_REQUIRED", True)
@property
def USERNAME_MIN_LENGTH(self):
"""
Minimum username length
"""
return self._setting("USERNAME_MIN_LENGTH", 1)
@property
def USERNAME_BLACKLIST(self):
"""
List of usernames that are not allowed
"""
return self._setting("USERNAME_BLACKLIST", [])
@property
def PASSWORD_INPUT_RENDER_VALUE(self):
"""
render_value parameter as passed to PasswordInput fields
"""
return self._setting("PASSWORD_INPUT_RENDER_VALUE", False)
@property
def ADAPTER(self):
return self._setting('ADAPTER',
'allauth.account.adapter.DefaultAccountAdapter')
@property
def CONFIRM_EMAIL_ON_GET(self):
return self._setting('CONFIRM_EMAIL_ON_GET', False)
@property
def LOGIN_ON_EMAIL_CONFIRMATION(self):
"""
Automatically log the user in once they confirmed their email address
"""
return self._setting('LOGIN_ON_EMAIL_CONFIRMATION', False)
@property
def LOGIN_ON_PASSWORD_RESET(self):
"""
Automatically log the user in immediately after resetting their password.
"""
return self._setting('LOGIN_ON_PASSWORD_RESET', False)
@property
def LOGOUT_REDIRECT_URL(self):
return self._setting('LOGOUT_REDIRECT_URL', '/')
@property
def LOGOUT_ON_GET(self):
return self._setting('LOGOUT_ON_GET', False)
@property
def LOGOUT_ON_PASSWORD_CHANGE(self):
return self._setting('LOGOUT_ON_PASSWORD_CHANGE', False)
@property
def USER_MODEL_USERNAME_FIELD(self):
return self._setting('USER_MODEL_USERNAME_FIELD', 'username')
@property
def USER_MODEL_EMAIL_FIELD(self):
return self._setting('USER_MODEL_EMAIL_FIELD', 'email')
@property
def SESSION_COOKIE_AGE(self):
"""
Remembered sessions expire after this many seconds.
Defaults to 1814400 seconds which is 3 weeks.
"""
return self._setting('SESSION_COOKIE_AGE', 60 * 60 * 24 * 7 * 3)
@property
def SESSION_REMEMBER(self):
"""
Controls the life time of the session. Set to `None` to ask the user
("Remember me?"), `False` to not remember, and `True` to always
remember.
"""
return self._setting('SESSION_REMEMBER', None)
@property
def FORMS(self):
return self._setting('FORMS', {})
# Ugly? Guido recommends this himself ...
# http://mail.python.org/pipermail/python-ideas/2012-May/014969.html
import sys
app_settings = AppSettings('ACCOUNT_')
app_settings.__name__ = __name__
sys.modules[__name__] = app_settings
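# Hedged usage sketch (not part of the original file): because of the
# sys.modules replacement above, importing this module yields the AppSettings
# instance, so the computed properties read like plain module attributes:
#
#     from allauth.account import app_settings
#     expire_days = app_settings.EMAIL_CONFIRMATION_EXPIRE_DAYS
#
# Each lookup goes through _setting(), i.e. it resolves the Django setting
# ACCOUNT_<NAME> (the 'ACCOUNT_' prefix plus the property name) and falls back
# to the default shown in the property body.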
|
mark-ignacio/phantomjs
|
refs/heads/master
|
src/breakpad/src/tools/gyp/test/rules/gyptest-default.py
|
137
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple rules when using an explicit build target of 'all'.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('actions.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('actions.gyp', chdir='relocate/src')
expect = """\
Hello from program.c
Hello from function1.in
Hello from function2.in
"""
if test.format == 'xcode':
chdir = 'relocate/src/subdir1'
else:
chdir = 'relocate/src'
test.run_built_executable('program', chdir=chdir, stdout=expect)
expect = """\
Hello from program.c
Hello from function3.in
"""
if test.format == 'xcode':
chdir = 'relocate/src/subdir3'
else:
chdir = 'relocate/src'
test.run_built_executable('program2', chdir=chdir, stdout=expect)
test.must_match('relocate/src/subdir2/file1.out', "Hello from file1.in\n")
test.must_match('relocate/src/subdir2/file2.out', "Hello from file2.in\n")
test.pass_test()
|
zanderle/django
|
refs/heads/master
|
tests/custom_methods/tests.py
|
225
|
from __future__ import unicode_literals
from datetime import date
from django.test import TestCase
from .models import Article
class MethodsTests(TestCase):
def test_custom_methods(self):
a = Article.objects.create(
headline="Parrot programs in Python", pub_date=date(2005, 7, 27)
)
b = Article.objects.create(
headline="Beatles reunite", pub_date=date(2005, 7, 27)
)
self.assertFalse(a.was_published_today())
self.assertQuerysetEqual(
a.articles_from_same_day_1(), [
"Beatles reunite",
],
lambda a: a.headline,
)
self.assertQuerysetEqual(
a.articles_from_same_day_2(), [
"Beatles reunite",
],
lambda a: a.headline
)
self.assertQuerysetEqual(
b.articles_from_same_day_1(), [
"Parrot programs in Python",
],
lambda a: a.headline,
)
self.assertQuerysetEqual(
b.articles_from_same_day_2(), [
"Parrot programs in Python",
],
lambda a: a.headline
)
|
nakato/smashcache
|
refs/heads/master
|
smashcache/pages/errors.py
|
1
|
# Copyright (c) 2015 Sachi King
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class HTTPError(Exception):
"""Base Error class for all HTTP errors"""
pass
class error500(HTTPError):
"""Return a simple 500 to the client trapping the error"""
status = "500 Internal Server Error"
response_headers = []
response_body = [b'']
class error502(HTTPError):
"""Return a simple 502 to the client trapping the error"""
status = "502 Bad Gateway"
response_headers = []
response_body = [b'']
class error404(HTTPError):
"""Return a 404 to the user"""
status = "404 Not Found"
response_headers = []
response_body = [b'']
class error400(HTTPError):
"""Return a 400 to the user"""
status = "400 Invalid Request"
response_headers = []
response_body = [b'']
|
mjsull/Contiguity
|
refs/heads/master
|
setup.py
|
1
|
# cx_Freeze setup file
import sys
from cx_Freeze import setup, Executable
# Dependencies are automatically detected, but it might need fine tuning.
build_exe_options = {"packages": ["khmer"]}
# GUI applications require a different base on Windows (the default is for a
# console application).
base = None
if sys.platform == "win32":
base = "Win32GUI"
setup( name = "Contiguity",
version = "1.0.4",
description = "Assembly graph construction and visualisation.",
options = {"build_exe": build_exe_options},
executables = [Executable("Contiguity.py", base=base)])
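# Hedged note (not part of the original file): with cx_Freeze installed, the
# frozen distribution is typically produced by running
#
#     python setup.py build
#
# which honors the build_exe_options above (here bundling the "khmer" package)
# and, on Windows, uses the Win32GUI base so no console window is opened.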
|
dzolnierz/mysql-utilities
|
refs/heads/master
|
mysql/fabric/replication.py
|
1
|
#
# Copyright (c) 2013,2015, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""This module contains abstractions of MySQL replication features.
"""
import time
import uuid as _uuid
import mysql.fabric.errors as _errors
import mysql.fabric.server as _server
from mysql.fabric.server_utils import (
split_host_port
)
_RPL_USER_QUERY = (
"SELECT user, host, password != '' as has_password "
"FROM mysql.user "
"WHERE repl_slave_priv = 'Y'"
)
_MASTER_POS_WAIT = "SELECT MASTER_POS_WAIT(%s, %s, %s)"
_GTID_WAIT = "SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS(%s, %s)"
IO_THREAD = "IO_THREAD"
SQL_THREAD = "SQL_THREAD"
@_server.server_logging
def get_master_status(server):
"""Return the master status. In order to ease the navigation through
the result set, a named tuple is always returned. Look up the `SHOW
MASTER STATUS` command in the MySQL Manual for further details.
:param server: MySQL Server.
"""
return server.exec_stmt("SHOW MASTER STATUS", {"columns" : True})
@_server.server_logging
def reset_master(server):
"""Reset the master. Look up the `RESET MASTER` command in the
MySQL Manual for further details.
:param server: MySQL Server.
"""
server.exec_stmt("RESET MASTER")
@_server.server_logging
def has_appropriate_privileges(server):
"""Check whether the current user has the `REPLICATION SLAVE PRIVILEGE`.
:param server: MySQL Server.
"""
return server.has_privileges(["REPLICATION SLAVE"])
@_server.server_logging
def check_master_issues(server):
"""Check if there is any issue to make the server a master.
This method checks if there is any issue to make the server a master
and returns a dictionary that contains information on any issue found,
if there is any. Basically, it checks if the master is alive and
kicking, if the binary log is enabled, if the GTID is enabled, if the
server is able to log the updates through the SQL Thread and finally
if there is a user that has the `REPLICATION SLAVE PRIVILEGE`.
The dictionary returned may have the following keys::
status['is_not_running'] = False
status['is_binlog_not_enabled'] = False
status['is_gtid_not_enabled'] = False
status['is_slave_updates_not_enabled'] = False
status['no_rpl_user'] = False
:param server: MySQL Server.
:return: Whether there is an issue or not and a dictionary with issues,
if there is any.
.. note::
It does not consider if there are filters or some binary logs have been
purged and by consequence the associated GTIDs. These are also important
characteristics before considering a server eligible for becoming a
master.
"""
status = {
'is_not_running' : False,
'is_binlog_not_enabled' : False,
'is_gtid_not_enabled' : False,
'is_slave_updates_not_enabled' : False,
'no_rpl_user' : False
}
if not server.is_connected():
status["is_not_running"] = True
return True, status
# Check for binlog.
if not server.binlog_enabled:
status["is_binlog_not_enabled"] = True
# Check for gtid.
if not server.gtid_enabled:
status["is_gtid_not_enabled"] = True
# Check for slave updates.
if not server.get_variable("LOG_SLAVE_UPDATES"):
status["is_slave_updates_not_enabled"] = True
# See if the current user has the appropriate replication privilege(s)
if not has_appropriate_privileges(server):
status["no_rpl_user"] = True
error = not all([v is False for v in status.itervalues()])
return error, status
@_server.server_logging
def get_slave_status(server):
"""Return the slave status. In order to ease the navigation through
the result set, a named tuple is always returned. Look up the `SHOW
SLAVE STATUS` command in the MySQL Manual for further details.
:param server: MySQL Server.
"""
return server.exec_stmt("SHOW SLAVE STATUS", {"columns" : True})
@_server.server_logging
def is_slave_thread_running(server, threads=None):
"""Check to see if slave's threads are running smoothly.
:param server: MySQL Server.
"""
return _check_condition(server, threads, True)
@_server.server_logging
def slave_has_master(server):
"""Return the master's uuid to which the slave is connected to.
:param server: MySQL Server.
:return: Master's uuid or None.
:rtype: String.
"""
ret = get_slave_status(server)
if ret:
try:
str_uuid = ret[0].Master_UUID
_uuid.UUID(str_uuid)
return str_uuid
except ValueError:
pass
return None
@_server.server_logging
def get_num_gtid(gtids, server_uuid=None):
"""Return the number of transactions represented in GTIDs.
By default this function considers any server in GTIDs. So if one wants
to count transactions from a specific server, the parameter server_uuid
must be defined.
:param gtids: Set of transactions.
:param server_uuid: Which server one should consider where None means
all.
"""
sid = None
difference = 0
for gtid in gtids.split(","):
# Extract the server_uuid and the trx_ids.
trx_ids = None
if gtid.find(":") != -1:
sid, trx_ids = gtid.split(":")
else:
if not sid:
raise _errors.ProgrammingError(
"Malformed GTID (%s)." % (gtid, )
)
trx_ids = gtid
# Ignore differences if server_uuid is set and does
# not match.
if server_uuid and str(server_uuid).upper() != sid.upper():
continue
# Check the difference.
difference += 1
if trx_ids.find("-") != -1:
lgno, rgno = trx_ids.split("-")
difference += int(rgno) - int(lgno)
return difference
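def _example_get_num_gtid():
    """Hedged illustration (not part of the original module): the GTID set
    below is made up; a range such as 1-5 counts as five transactions, so
    this returns 5."""
    return get_num_gtid("3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5")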
def get_slave_num_gtid_behind(server, master_gtids, master_uuid=None):
"""Get the number of transactions behind the master.
:param server: MySQL Server.
:param master_gtids: GTID information retrieved from the master.
See :meth:`~mysql.fabric.server.MySQLServer.get_gtid_status`.
:param master_uuid: Master which is used as the basis for comparison.
:return: Number of transactions behind master.
"""
gtids = None
master_gtids = master_gtids[0].GTID_EXECUTED
slave_gtids = server.get_gtid_status()[0].GTID_EXECUTED
# The subtract function does not accept empty strings.
if master_gtids == "" and slave_gtids != "":
raise _errors.InvalidGtidError(
"It is not possible to check the lag when the "
"master's GTID is empty."
)
elif master_gtids == "" and slave_gtids == "":
return 0
elif slave_gtids == "":
gtids = master_gtids
else:
assert (master_gtids != "" and slave_gtids != "")
gtids = server.exec_stmt("SELECT GTID_SUBTRACT(%s,%s)",
{"params": (master_gtids, slave_gtids)})[0][0]
if gtids == "":
return 0
return get_num_gtid(gtids, master_uuid)
@_server.server_logging
def start_slave(server, threads=None, wait=False, timeout=None):
"""Start the slave. Look up the `START SLAVE` command in the MySQL
Manual for further details.
:param server: MySQL Server.
:param threads: Determine which threads shall be started.
:param wait: Determine whether one shall wait until the thread(s)
start(s) or not.
:type wait: Bool
:param timeout: Time in seconds after which one gives up waiting for
thread(s) to start.
The parameter `threads` determine which threads shall be started. If
None is passed as parameter, both the `SQL_THREAD` and the `IO_THREAD`
are started.
"""
threads = threads or ()
server.exec_stmt("START SLAVE " + ", ".join(threads))
if wait:
wait_for_slave_thread(server, timeout=timeout, wait_for_running=True,
threads=threads)
@_server.server_logging
def stop_slave(server, threads=None, wait=False, timeout=None):
"""Stop the slave. Look up the `STOP SLAVE` command in the MySQL
Manual for further details.
:param server: MySQL Server.
:param threads: Determine which threads shall be stopped.
:param wait: Determine whether one shall wait until the thread(s)
stop(s) or not.
:type wait: Bool
:param timeout: Time in seconds after which one gives up waiting for
thread(s) to stop.
The parameter `threads` determine which threads shall be stopped. If
None is passed as parameter, both the `SQL_THREAD` and the `IO_THREAD`
are stopped.
"""
threads = threads or ()
server.exec_stmt("STOP SLAVE " + ", ".join(threads))
if wait:
wait_for_slave_thread(server, timeout=timeout, wait_for_running=False,
threads=threads)
@_server.server_logging
def reset_slave(server, clean=False):
"""Reset the slave. Look up the `RESET SLAVE` command in the MySQL
Manual for further details.
:param server: MySQL Server.
:param clean: Do not save master information such as host, user, etc.
"""
param = "ALL" if clean else ""
server.exec_stmt("RESET SLAVE %s" % (param, ))
@_server.server_logging
def wait_for_slave_thread(server, timeout=None, wait_for_running=True,
threads=None):
"""Wait until slave's threads stop or start.
If timeout is None, one waits indefinitely until the condition is
achieved. If the timeout period expires prior to achieving the
condition the exception TimeoutError is raised.
:param server: MySQL Server.
:param timeout: Number of seconds one waits until the condition is
achieved. If it is None, one waits indefinitely.
:param wait_for_running: If one should check whether threads are
running or stopped.
:type wait_for_running: Bool
:param threads: Which threads should be checked.
:type threads: `SQL_THREAD` or `IO_THREAD`.
"""
while (timeout is None or timeout > 0) and \
not _check_condition(server, threads, wait_for_running):
time.sleep(1)
timeout = timeout - 1 if timeout is not None else None
if not _check_condition(server, threads, wait_for_running):
raise _errors.TimeoutError(
"Error waiting for slave's thread(s) to either start or stop."
)
@_server.server_logging
def wait_for_slave(server, binlog_file, binlog_pos, timeout=0):
"""Wait for the slave to read the master's binlog up to a specified
position.
This method calls the MySQL function `SELECT MASTER_POS_WAIT`. If
the timeout period expires prior to achieving the condition the
:class:`~mysql.fabric.errors.TimeoutError` exception is raised. If any
thread is stopped, the :class:`~mysql.fabric.errors.DatabaseError`
exception is raised.
:param server: MySQL Server.
:param binlog_file: Master's binlog file.
:param binlog_pos: Master's binlog file position.
:param timeout: Maximum number of seconds to wait for the condition to
be achieved.
"""
# Wait for slave to read the master log file
res = server.exec_stmt(_MASTER_POS_WAIT,
{"params": (binlog_file, binlog_pos, timeout)}
)
if res is None or res[0] is None or res[0][0] is None:
raise _errors.DatabaseError(
"Error waiting for slave to catch up. Binary log (%s, %s)." %
(binlog_file, binlog_pos)
)
elif res[0][0] == -1:
raise _errors.TimeoutError(
"Error waiting for slave to catch up. Binary log (%s, %s)." %
(binlog_file, binlog_pos)
)
assert(res[0][0] > -1)
@_server.server_logging
def wait_for_slave_status_thread(server, thread, status, timeout=None):
"""Wait until a slave's thread exhibits a status.
The status is a sub-string of the current status: Slave_IO_state or
Slave_SQL_Running_State.
If timeout is None, one waits indefinitely until the condition is
achieved. If the timeout period expires prior to achieving the
condition the exception TimeoutError is raised.
:param server: MySQL Server.
:param thread: Which thread should be checked.
:type thread: `SQL_THREAD` or `IO_THREAD`.
:status: Which status should be checked.
:type status: string.
:param timeout: Number of seconds one waits until the condition is
achieved. If it is None, one waits indefinitely.
"""
while (timeout is None or timeout > 0) and \
not _check_status_condition(server, thread, status):
time.sleep(1)
timeout = timeout - 1 if timeout is not None else None
if not _check_status_condition(server, thread, status):
raise _errors.TimeoutError(
"Error waiting for slave's thread (%s) to exhibit status (%s)." %
(thread, status)
)
@_server.server_logging
def sync_slave_with_master(slave, master, timeout=0):
"""Synchronizes a slave with a master.
See :func:`wait_for_slave_gtid`.
This function can block if the master fails and all
transactions are not fetched.
:param slave: Reference to a slave (MySQL Server).
:param master: Reference to the master (MySQL Server).
:param timeout: Timeout for waiting for slave to catch up.
"""
# Check servers for GTID support
if not slave.gtid_enabled or not master.gtid_enabled:
raise _errors.ProgrammingError(
"Global Transaction IDs are not supported."
)
master_gtids = master.get_gtid_status()
master_gtids = master_gtids[0].GTID_EXECUTED.strip(",")
wait_for_slave_gtid(slave, master_gtids, timeout)
@_server.server_logging
def wait_for_slave_gtid(server, gtids, timeout=0):
"""Wait until a slave executes GITDs.
The function `SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS` is called until the
slave catches up. If the timeout period expires prior to achieving
the condition the :class:`~mysql.fabric.errors.TimeoutError` exception is
raised. If any thread is stopped, the
:class:`~mysql.fabric.errors.DatabaseError` exception is raised.
:param server: MySQL Server.
:param gtids: Gtid information.
:param timeout: Timeout for waiting for slave to catch up.
"""
# Check servers for GTID support
if not server.gtid_enabled:
raise _errors.ProgrammingError(
"Global Transaction IDs are not supported."
)
res = server.exec_stmt(_GTID_WAIT, {"params": (gtids, timeout)})
if res is None or res[0] is None or res[0][0] is None:
raise _errors.DatabaseError(
"Error waiting for slave to catch up. "
"GTID (%s)." % (gtids, )
)
elif res[0][0] == -1:
raise _errors.TimeoutError(
"Error waiting for slave to catch up. "
"GTID (%s)." % (gtids, )
)
assert(res[0][0] > -1)
@_server.server_logging
def switch_master(slave, master, master_user, master_passwd=None,
from_beginning=True, master_log_file=None,
master_log_pos=None):
"""Switch slave to a new master by executing the `CHANGE MASTER` command.
Look up the command in the MySQL Manual for further details.
This method forms the `CHANGE MASTER` command based on the current
settings of the slave along with the parameters provided and executes
it. No prerequisites are checked.
:param slave: Reference to a slave (MySQL Server).
:param master: Reference to the master (MySQL Server).
:param master_user: Replication user.
:param master_passwd: Replication user password.
:param from_beginning: If True, start from beginning of logged events.
:param master_log_file: Master's log file (not needed for GTID).
:param master_log_pos: master's log file position (not needed for GTID).
"""
commands = []
params = []
master_host, master_port = split_host_port(master.address)
commands.append("MASTER_HOST = %s")
params.append(master_host)
commands.append("MASTER_PORT = %s")
params.append(int(master_port))
commands.append("MASTER_USER = %s")
params.append(master_user)
if master_passwd:
commands.append("MASTER_PASSWORD = %s")
params.append(master_passwd)
else:
commands.append("MASTER_PASSWORD = ''")
if slave.gtid_enabled:
commands.append("MASTER_AUTO_POSITION = 1")
elif not from_beginning:
commands.append("MASTER_LOG_FILE = %s")
params.append(master_log_file)
if master_log_pos >= 0:
commands.append("MASTER_LOG_POS = %s" % master_log_pos)
params.append(master_log_pos)
slave.exec_stmt("CHANGE MASTER TO " + ", ".join(commands),
{"params": tuple(params)})
@_server.server_logging
def check_slave_issues(server):
"""Check slave's health.
This method checks if the slave is setup correctly to operate in a
replication environment and returns a dictionary that contains
information on any issue found, if there is any. Specifically, it
checks if the slave is alive and kicking and whether the `SQL_THREAD`
and `IO_THREAD` are running or not.
The dictionary returned may have the following keys::
status["is_not_running"] = False
status["is_not_configured"] = False
status["io_not_running"] = False
status["sql_not_running"] = False
status["io_error"] = False
status["sql_error"] = False
:param server: MySQL Server.
:return: Whether there is an issue or not and a dictionary with the
issues, if there is any.
"""
status = {
'is_not_running': False,
'is_not_configured': False,
'io_not_running': False,
'sql_not_running': False,
'io_error': False,
'sql_error': False
}
if not server.is_connected():
status["is_not_running"] = True
return True, status
ret = get_slave_status(server)
if not ret:
status["is_not_configured"] = True
return True, status
if ret[0].Slave_IO_Running.upper() != "YES":
status["io_not_running"] = True
if ret[0].Slave_SQL_Running.upper() != "YES":
status["sql_not_running"] = True
if ret[0].Last_IO_Errno and ret[0].Last_IO_Errno > 0:
status["io_error"] = ret[0].Last_IO_Error
if ret[0].Last_SQL_Errno and ret[0].Last_SQL_Errno > 0:
status["sql_error"] = ret[0].Last_SQL_Error
error = not all([v is False for v in status.itervalues()])
return error, status
@_server.server_logging
def check_slave_delay(slave, master):
"""Check slave's delay.
It checks if both the master and slave are alive and kicking, whether
the `SQL_THREAD` and `IO_THREAD` are running or not. It reports the
`SQL_Delay`, `Seconds_Behind_Master` and finally if GTIDs are enabled
the number of transactions behind master.
The dictionary returned may have the following keys::
status["is_not_running"] = False
status["is_not_configured"] = False
status["sql_delay"] = Value
status["seconds_behind"] = Value
status["gtids_behind"] = Value
:param slave: Reference to a slave (MySQL Server).
:param master: Reference to the master (MySQL Server).
:return: A dictionary with delays, if there is any.
"""
status = {
'is_not_running': False,
'is_not_configured': False,
'sql_delay': 0,
'seconds_behind': 0,
'gtids_behind': 0
}
if not slave.is_connected() or not master.is_connected():
status["is_not_running"] = True
return status
slave_status = get_slave_status(slave)
if not slave_status:
status["is_not_configured"] = True
return status
# Check if the slave must lag behind the master.
sql_delay = slave_status[0].SQL_Delay
if sql_delay:
status["sql_delay"] = sql_delay
# Check if the slave is lagging behind the master.
seconds_behind = slave_status[0].Seconds_Behind_Master
if seconds_behind:
status["seconds_behind"] = seconds_behind
# Check gtid trans behind.
if slave.gtid_enabled:
master_gtid_status = master.get_gtid_status()
num_gtids_behind = get_slave_num_gtid_behind(slave,
master_gtid_status,
master.uuid)
if num_gtids_behind:
status["gtids_behind"] = num_gtids_behind
return status
def _check_condition(server, threads, check_if_running):
"""Check if slave's threads are either running or stopped. If the
`SQL_THREAD` or the `IO_THREAD` are stopped and there is an error,
the :class:`~mysql.fabric.errors.DatabaseError` exception is raised.
:param server: MySQL Server.
:param threads: Which threads should be checked.
:type threads: `SQL_THREAD` or `IO_THREAD`.
:param check_if_running: If one should check whether threads are
running or stopped.
:type check_if_running: Bool
"""
if not threads:
threads = (SQL_THREAD, IO_THREAD)
assert(isinstance(threads, tuple))
io_status = not check_if_running
sql_status = not check_if_running
check_stmt = "YES" if check_if_running else "NO"
io_errno = sql_errno = 0
io_error = sql_error = ""
ret = get_slave_status(server)
if ret:
io_status = ret[0].Slave_IO_Running.upper() == check_stmt
io_error = ret[0].Last_IO_Error
io_errno = ret[0].Last_IO_Errno
io_errno = io_errno if io_errno else 0
sql_status = ret[0].Slave_SQL_Running.upper() == check_stmt
sql_error = ret[0].Last_SQL_Error
sql_errno = ret[0].Last_SQL_Errno
sql_errno = sql_errno if sql_errno else 0
achieved = True
if SQL_THREAD in threads:
achieved = achieved and sql_status
if check_if_running and sql_errno != 0:
raise _errors.DatabaseError(sql_error)
if IO_THREAD in threads:
achieved = achieved and io_status
if check_if_running and io_errno != 0:
raise _errors.DatabaseError(io_error)
return achieved
def _check_status_condition(server, thread, status):
"""Check if a slave's thread has the requested status. If the `SQL_THREAD`
or the `IO_THREAD` is stopped and there is an error, the following
:class:`~mysql.fabric.errors.DatabaseError` exception is raised.
:param server: MySQL Server.
:param thread: Which thread should be checked.
:type thread: `SQL_THREAD` or `IO_THREAD`.
:param status: The status to be checked.
"""
io_errno = sql_errno = 0
io_error = sql_error = ""
achieved = False
ret = get_slave_status(server)
if not ret:
return achieved
if SQL_THREAD == thread:
sql_status = True if status in ret[0].Slave_SQL_Running_State else False
sql_error = ret[0].Last_SQL_Error
sql_errno = ret[0].Last_SQL_Errno
if sql_errno and sql_errno != 0:
raise _errors.DatabaseError(sql_error)
achieved = sql_status
elif IO_THREAD == thread:
io_status = True if status in ret[0].Slave_IO_State else False
io_error = ret[0].Last_IO_Error
io_errno = ret[0].Last_IO_Errno
if io_errno and io_errno != 0:
raise _errors.DatabaseError(io_error)
achieved = io_status
return achieved
def synchronize_with_read_only(slave, master, trnx_lag=0, timeout=5):
"""Synchronize the master with the slave. The function accepts a transaction
    lag and a timeout parameter.
    The transaction lag is used to determine the number of transactions the
slave can lag behind the master before the master is locked to enable a
complete sync.
    The timeout indicates the amount of time to wait before taking a read
    lock on the master to enable a complete sync with the slave. The transaction
    lag alone is not enough to ensure that the slave catches up, so at some
    point we have to assume that the slave will not catch up and lock the source
    shard.
:param slave: Reference to a slave (MySQL Server).
:param master: Reference to the master (MySQL Server).
:param trnx_lag: The number of transactions by which the slave can lag the
master before we can take a lock.
:param timeout: The timeout for which we should wait before taking a
read lock on the master.
"""
#Flag indicates if we are synced enough to take a read lock.
synced = False
#Syncing basically means that we either ensure that the slave
#is "trnx_lag" transactions behind the master within the given
#timeout. If the slave has managed to reach within "trnx_lag"
#transactions we take a read lock and sync. We also take a read
#lock and sync if the timeout has exceeded.
while not synced:
start_time = time.time()
try:
sync_slave_with_master(slave, master, timeout)
master_gtids = master.get_gtid_status()
if get_slave_num_gtid_behind(slave, master_gtids) <= trnx_lag:
synced = True
else:
#Recalculate the amount of time left in the timeout, because
#part of the time has already been used up when the code
#reaches here.
timeout = timeout - (time.time() - start_time)
if timeout <= 0:
synced = True
except _errors.TimeoutError:
#If the code flow reaches here the timeout has been exceeded.
#We lock the master and let the master and slave sync at this
#point.
break
#At this point we lock the master and let the slave sync with the master.
    #This step is common across the entire algorithm. The preceding steps
#just help minimize the amount of time for which we take a read lock.
master.read_only = True
sync_slave_with_master(slave, master, timeout=0)
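# Added usage sketch (not part of the original module): `slave` and `master`
# are assumed to be connected _server.MySQLServer instances, e.g. during a
# shard move that needs a consistent cut-over point.
#
#   try:
#       synchronize_with_read_only(slave, master, trnx_lag=10, timeout=5)
#   finally:
#       # synchronize_with_read_only leaves the master read-only; the caller
#       # re-enables writes once the cut-over has completed
#       master.read_only = False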
|
gilamsalem/pynfs
|
refs/heads/master
|
nfs4.0/nfs4acl.py
|
3
|
#
# nfs4acl.py - some useful acl code
#
# Written by Fred Isaman <iisaman@citi.umich.edu>
# Copyright (C) 2004 University of Michigan, Center for
# Information Technology Integration
#
# Taken from mapping description at
# http://www.citi.umich.edu/projects/nfsv4/rfc/draft-ietf-nfsv4-acl-mapping-02.txt
from nfs4_const import *
from nfs4_type import *
# Taken from mapping
MODE_R = ACE4_READ_DATA | ACE4_READ_NAMED_ATTRS
MODE_W = ACE4_WRITE_DATA | ACE4_WRITE_NAMED_ATTRS | ACE4_APPEND_DATA
MODE_X = ACE4_EXECUTE
DMODE_R = ACE4_LIST_DIRECTORY | ACE4_READ_NAMED_ATTRS
DMODE_W = ACE4_ADD_FILE | ACE4_WRITE_NAMED_ATTRS | \
ACE4_ADD_SUBDIRECTORY | ACE4_DELETE_CHILD
DMODE_X = ACE4_EXECUTE
FLAG_ALL = ACE4_READ_ACL | ACE4_READ_ATTRIBUTES | ACE4_SYNCHRONIZE
FLAG_OWN = ACE4_WRITE_ACL | ACE4_READ_ACL | ACE4_WRITE_ATTRIBUTES
FLAG_NONE = ACE4_DELETE
DDEFAULT = ACE4_INHERIT_ONLY_ACE | ACE4_DIRECTORY_INHERIT_ACE | \
ACE4_FILE_INHERIT_ACE
# Where is this required?
USED_BITS = 0x1f01ff
# Useful abbreviations
ALLOWED = ACE4_ACCESS_ALLOWED_ACE_TYPE
DENIED = ACE4_ACCESS_DENIED_ACE_TYPE
GROUP = ACE4_IDENTIFIER_GROUP
GROUP_OBJ = ACE4_IDENTIFIER_GROUP # Or is it 0? RFC and map are unclear
MODES = [ 0, MODE_X, MODE_W, MODE_X | MODE_W,
MODE_R, MODE_R | MODE_X, MODE_R | MODE_W,
MODE_R | MODE_X | MODE_W ]
DMODES = [ 0, DMODE_X, DMODE_W, DMODE_X | DMODE_W,
DMODE_R, DMODE_R | DMODE_X, DMODE_R | DMODE_W,
DMODE_R | DMODE_X | DMODE_W ]
class ACLError(Exception):
def __init__(self, msg=None):
if msg is None:
self.msg = "ACL error"
else:
self.msg = str(msg)
def __str__(self):
return self.msg
def negate(flags):
"""Return the opposite flags"""
if flags & ~USED_BITS:
raise ACLError("Flag %x contains unused bits" % flags)
return ~flags & USED_BITS & ~FLAG_NONE
def mode2acl(mode, dir=False):
"""Translate a 3-digit octal mode into a posix compatible acl"""
if dir: modes = DMODES
else: modes = MODES
owner = modes[(mode & 0700)//0100] | FLAG_ALL | FLAG_OWN
group = modes[(mode & 0070)//010] | FLAG_ALL
other = modes[(mode & 0007)] | FLAG_ALL
return [ nfsace4(ALLOWED, 0, owner, "OWNER@"),
nfsace4(DENIED, 0, negate(owner), "OWNER@"),
nfsace4(ALLOWED, GROUP_OBJ, group, "GROUP@"),
nfsace4(DENIED, GROUP_OBJ, negate(group), "GROUP@"),
nfsace4(ALLOWED, 0, other, "EVERYONE@"),
nfsace4(DENIED, 0, negate(other), "EVERYONE@")
]
def acl2mode(acl):
"""Translate an acl into a 3-digit octal mode"""
names = ["OWNER@", "GROUP@", "EVERYONE@"]
short = [ace for ace in acl if ace.who in names]
perms = dict.fromkeys(names, None)
modes = [[MODE_R, 4], [MODE_W, 2], [MODE_X, 1]]
for ace in short:
if perms[ace.who] is not None: continue
if ace.type == ALLOWED:
bits = 0
for mode, bit in modes:
if mode & ace.access_mask == mode:
bits |= bit
perms[ace.who] = bits
elif ace.type == DENIED:
bits = 7
for mode, bit in modes:
if mode & ace.access_mask:
bits &= ~bit
perms[ace.who] = bits
    # If it wasn't mentioned, assume the worst
for key in perms:
if perms[key] is None:
            perms[key] = 0
return perms["OWNER@"]*0100 + perms["GROUP@"]*010 + perms["EVERYONE@"]
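# Added illustration (not part of the original pynfs module; assumes the
# nfs4_const/nfs4_type imports above resolve inside the pynfs tree):
#
#   >>> acl = mode2acl(0644)      # six ACEs: allow/deny for owner/group/other
#   >>> acl2mode(acl) == 0644     # translating back recovers the mode bits
#   True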
def maps_to_posix(acl):
"""Raises ACLError if acl does not map to posix """
""" FRED - there are all sorts of things this does not yet check for.
1 - the mapping allows only certain sets of access_mask
2 - Only 4 different flags values are allowed
3 - How to handle mixed default/active on a directory?
"""
len_acl = len(acl)
if len_acl < 6:
raise ACLError("Acl length %i is too short" % len_acl)
if len_acl > 7 and len_acl%3 != 1:
raise ACLError("Acl length %i does not equal 1 mod 3" % len_acl)
flags = acl[0].flag
if flags != 0: # FIXME and flags != DDEFAULT:
raise ACLError("Illegal flag value %x" % flags)
list = acl[:]
not_mask = chk_owners(list, flags)
chk_groups(list, flags, not_mask)
chk_everyone(list, flags)
def chk_pair(allow, deny, who, flags):
    """Checks consistency of allow/deny pair, forcing it to have given args"""
if allow.type != ALLOWED or deny.type != DENIED:
raise ACLError("Wrong type in allow/deny pair")
if not (flags == allow.flag == deny.flag):
raise ACLError("Pair does not have required flags %x" % flags)
if negate(allow.access_mask) != deny.access_mask:
raise ACLError("Pair access masks %x and %x are not complementary.\n"
"Expected inverse of %x is %x." %
(allow.access_mask, deny.access_mask,
allow.access_mask, negate(allow.access_mask)))
if not (who == allow.who == deny.who):
raise ACLError("Pair does not have required who %s" % who)
def chk_triple(mask, allow, deny, flags, not_mask):
chk_pair(allow, deny, mask.who, flags)
if mask.type != DENIED:
raise ACLError("Triple mask does not have type DENIED")
if flags != mask.flag:
raise ACLError("Triple mask does not have required flags %x" % flags)
if not_mask != mask.access_mask:
raise ACLError("Triple mask is not same as a previous mask")
def chk_everyone(acl, flags):
if len(acl) != 2:
raise ACLError("Had %i ACEs left when called chk_everyone" % len(acl))
chk_pair(acl[0], acl[1], "EVERYONE@", flags)
def chk_owners(acl, flags):
chk_pair(acl[0], acl[1], "OWNER@", flags)
del acl[:2]
used = []
not_mask = None
while True:
if len(acl) < 3:
raise ACLError("Ran out of ACEs in chk_owners")
mask = acl[0]
if mask.who.endswith("@") or mask.flag & GROUP:
return not_mask
if not_mask is None:
if mask.access_mask & ~USED_BITS:
raise ACLError("Mask %x contains unused bits" %
mask.access_mask)
not_mask = mask.access_mask
allow = acl[1]
deny = acl[2]
if mask.who in used:
raise ACLError("Owner name %s duplicated" % mask.who)
chk_triple(mask, allow, deny, flags, not_mask)
used.append(mask.who)
del acl[:3]
def chk_groups(acl, flags, not_mask):
mask = acl[0]
if mask.who != "GROUP@":
raise ACLError("Expected GROUP@, got %s" % mask.who)
if mask.type == ALLOWED and not_mask is None:
# Special case of no mask
chk_pair(acl[0], acl[1], "GROUP@", flags | GROUP_OBJ)
del acl[:2]
return
if not_mask is None:
if mask.access_mask & ~USED_BITS:
raise ACLError("Mask %x contains unused bits" % mask.access_mask)
not_mask = mask.access_mask
used = ["EVERYONE@"]
pairs = []
while mask.who not in used:
if len(acl) < 3:
raise ACLError("Ran out of ACEs in chk_groups")
used.append(mask.who)
pairs.append([mask, acl[1]])
del acl[:2]
mask = acl[0]
if len(acl) < len(used):
raise ACLError("Ran out of ACEs in chk_groups")
for mask, allow in pairs:
if mask.who == "GROUP@":
chk_triple(mask, allow, acl[0], flags | GROUP_OBJ, not_mask)
else:
chk_triple(mask, allow, acl[0], flags | GROUP, not_mask)
del acl[:1]
def printableacl(acl):
type_str = ["ACCESS", "DENY"]
out = ""
for ace in acl:
out += "<type=%6s, flag=%2x, access=%8x, who=%s>\n" % \
(type_str[ace.type], ace.flag, ace.access_mask, ace.who)
#print "leaving printableacl with out = %s" % out
return out
|
Maistho/CouchPotatoServer
|
refs/heads/master
|
couchpotato/core/media/_base/media/__init__.py
|
81
|
from .main import MediaPlugin
def autoload():
return MediaPlugin()
|
JonathanStein/odoo
|
refs/heads/8.0
|
addons/report/__openerp__.py
|
385
|
{
'name': 'Report',
'category': 'Base',
'summary': 'Report',
'version': '1.0',
'description': """
Report
""",
'author': 'OpenERP SA',
'depends': ['base', 'web'],
'data': [
'views/layouts.xml',
'views/views.xml',
'data/report_paperformat.xml',
'security/ir.model.access.csv',
'views/report.xml',
],
'installable': True,
'auto_install': True,
}
|
croach/p5.py
|
refs/heads/master
|
lib/p5/perlin.py
|
2
|
"""Perlin noise
"""
import math
from random import Random
PERLIN_YWRAPB = 4
PERLIN_YWRAP = 1<<PERLIN_YWRAPB
PERLIN_ZWRAPB = 8
PERLIN_ZWRAP = 1<<PERLIN_ZWRAPB
PERLIN_SIZE = 4095
perlin_octaves = 4 # default to medium smooth
perlin_amp_falloff = 0.5 # 50% reduction/octave
# TODO: Instead of these being global to the module, create a class instead
# and in the mathfuncs module create a global instance of the class
# and functions to wrap it. That way it can be used in a thread safe
# manner.
perlinRandom = None
perlin = None
DEG_TO_RAD = math.pi/180.0;
SINCOS_PRECISION = 0.5;
SINCOS_LENGTH = int(360 / SINCOS_PRECISION)
perlin_cos_table = [math.cos(i * DEG_TO_RAD * SINCOS_PRECISION) for i in xrange(SINCOS_LENGTH)]
perlin_TWOPI = perlin_PI = SINCOS_LENGTH
perlin_PI >>= 1;
def noise(*args):
"""Computes the Perlin noise (1D, 2D, or 3D) value at the specified coords.
"""
global perlin, perlinRandom
x = args[0]
y = args[1] if len(args) > 1 else 0
z = args[2] if len(args) > 2 else 0
if perlinRandom is None:
perlinRandom = Random()
if perlin is None:
perlin = [perlinRandom.random() for i in xrange(PERLIN_SIZE + 1)]
x = abs(x)
    y = abs(y)
z = abs(z)
xi, yi, zi = int(x), int(y), int(z)
xf, yf, zf = x - xi, y - yi, z - zi
r = 0
ampl = 0.5
for i in range(perlin_octaves):
of = xi + (yi<<PERLIN_YWRAPB) + (zi<<PERLIN_ZWRAPB)
rxf = noise_fsc(xf)
ryf = noise_fsc(yf)
n1 = perlin[of&PERLIN_SIZE];
n1 += rxf*(perlin[(of+1)&PERLIN_SIZE]-n1);
n2 = perlin[(of+PERLIN_YWRAP)&PERLIN_SIZE];
n2 += rxf*(perlin[(of+PERLIN_YWRAP+1)&PERLIN_SIZE]-n2);
n1 += ryf*(n2-n1);
of += PERLIN_ZWRAP;
n2 = perlin[of&PERLIN_SIZE];
n2 += rxf*(perlin[(of+1)&PERLIN_SIZE]-n2);
n3 = perlin[(of+PERLIN_YWRAP)&PERLIN_SIZE];
n3 += rxf*(perlin[(of+PERLIN_YWRAP+1)&PERLIN_SIZE]-n3);
n2 += ryf*(n3-n2);
n1 += noise_fsc(zf)*(n2-n1);
r += n1*ampl;
ampl *= perlin_amp_falloff;
xi<<=1; xf*=2;
yi<<=1; yf*=2;
zi<<=1; zf*=2;
if xf >= 1.0: xi += 1; xf -= 1;
if yf >= 1.0: yi += 1; yf -= 1;
if zf >= 1.0: zi += 1; zf -= 1;
return r;
# [toxi 031112]
# now adjusts to the size of the cosLUT used via
# the new variables, defined above
def noise_fsc(i):
# using bagel's cosine table instead
return 0.5 * (1.0 - perlin_cos_table[int(i*perlin_PI) % perlin_TWOPI])
# # [toxi 040903]
# # make perlin noise quality user controlled to allow
# # for different levels of detail. lower values will produce
# # smoother results as higher octaves are surpressed
# public void noiseDetail(int lod) {
# if (lod>0) perlin_octaves=lod;
# }
# public void noiseDetail(int lod, float falloff) {
# if (lod>0) perlin_octaves=lod;
# if (falloff>0) perlin_amp_falloff=falloff;
# }
def noiseSeed(what):
global perlinRandom, perlin
if perlinRandom is None:
perlinRandom = Random()
perlinRandom.seed(what)
perlin = None
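# Added demo (not part of the original module): a minimal, reproducible sample
# of the 1D noise function, assuming Python 2 since the module uses xrange.
if __name__ == "__main__":
    noiseSeed(42)                       # fixed seed -> repeatable values
    for step in range(5):
        # successive samples along the x axis vary smoothly
        print(noise(step * 0.1))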
|
kincl/oncall
|
refs/heads/master
|
oncall_old/settings.py
|
2
|
# These are defaults, set yours somewhere else
class Defaults(object):
DEBUG = False
TESTING = False
SQLALCHEMY_DATABASE_URI = 'sqlite://'
ROLES = ['Primary','Secondary']
ONCALL_START = 1
LOG_FILE = False
SYSLOG = False
|
dls-controls/pymalcolm
|
refs/heads/master
|
malcolm/modules/pmac/parts/__init__.py
|
1
|
# Expose a nice namespace
from malcolm.core import submodule_all
from .beamselectorpart import BeamSelectorPart
from .compoundmotorsinkportspart import (
AGroup,
APartName,
ARbv,
CompoundMotorSinkPortsPart,
)
from .cspart import AMri, CSPart
from .cssourceportspart import AGroup, APartName, ARbv, CSSourcePortsPart
from .motorpremovepart import AMri, APartName, MotorPreMovePart
from .pmacchildpart import AMri, APartName, PmacChildPart
from .pmacstatuspart import PmacStatusPart
from .pmactrajectorypart import AMri, APartName, PmacTrajectoryPart
from .rawmotorsinkportspart import AGroup, RawMotorSinkPortsPart
__all__ = submodule_all(globals())
|
n3wb13/OpenNfrGui-5.0-1
|
refs/heads/master
|
lib/python/Components/Converter/Converter.py
|
166
|
from Components.Element import Element
class Converter(Element):
def __init__(self, arguments):
Element.__init__(self)
self.converter_arguments = arguments
def __repr__(self):
return str(type(self)) + "(" + self.converter_arguments + ")"
def handleCommand(self, cmd):
self.source.handleCommand(cmd)
|
kartikprabhu/manifold_note
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# use requirements.txt for dependencies
with open('requirements.txt') as f:
required = map(lambda s: s.strip(), f.readlines())
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='manifold_note',
version='0.1.1',
    description='A CRUD system to add and store notes in a file-storage',
long_description=readme,
install_requires=required,
author='Kartik Prabhu',
author_email='me@kartikprabhu.com',
url='https://github.com/kartikprabhu/manifold_note',
license=license,
packages=find_packages(exclude=('tests', 'docs')),
package_data={'': ['templates/*']},
)
|
Cinntax/home-assistant
|
refs/heads/dev
|
tests/components/command_line/__init__.py
|
36
|
"""Tests for command_line component."""
|
ddico/sale-workflow
|
refs/heads/8.0
|
sale_order_revision/model/sale_order.py
|
3
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013 Agile Business Group sagl (<http://www.agilebg.com>)
# @author Lorenzo Battistini <lorenzo.battistini@agilebg.com>
# @author Raphaël Valyi <raphael.valyi@akretion.com> (ported to sale from
# original purchase_order_revision by Lorenzo Battistini)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import fields, models, api
from openerp.tools.translate import _
class sale_order(models.Model):
_inherit = "sale.order"
current_revision_id = fields.Many2one('sale.order',
'Current revision',
readonly=True,
copy=True)
old_revision_ids = fields.One2many('sale.order',
'current_revision_id',
'Old revisions',
readonly=True,
context={'active_test': False})
revision_number = fields.Integer('Revision',
copy=False)
unrevisioned_name = fields.Char('Order Reference',
copy=True,
readonly=True)
active = fields.Boolean('Active',
default=True,
copy=True)
_sql_constraints = [
('revision_unique',
'unique(unrevisioned_name, revision_number, company_id)',
'Order Reference and revision must be unique per Company.'),
]
@api.multi
def copy_quotation(self):
self.ensure_one()
revision_self = self.with_context(new_sale_revision=True)
action = super(sale_order, revision_self).copy_quotation()
old_revision = self.browse(action['res_id'])
action['res_id'] = self.id
self.delete_workflow()
self.create_workflow()
self.write({'state': 'draft'})
self.order_line.write({'state': 'draft'})
# remove old procurements
self.mapped('order_line.procurement_ids').write(
{'sale_line_id': False})
msg = _('New revision created: %s') % self.name
self.message_post(body=msg)
old_revision.message_post(body=msg)
return action
@api.returns('self', lambda value: value.id)
@api.multi
def copy(self, default=None):
if default is None:
default = {}
if self.env.context.get('new_sale_revision'):
prev_name = self.name
revno = self.revision_number
self.write({'revision_number': revno + 1,
'name': '%s-%02d' % (self.unrevisioned_name,
revno + 1)
})
default.update({
'name': prev_name,
'revision_number': revno,
'active': False,
'state': 'cancel',
'current_revision_id': self.id,
'unrevisioned_name': self.unrevisioned_name,
})
return super(sale_order, self).copy(default=default)
@api.model
def create(self, values):
if not self.env.context.get('new_sale_revision'):
if values.get('name', '/') == '/':
seq = self.env['ir.sequence']
values['name'] = seq.next_by_code('sale.order') or '/'
values['unrevisioned_name'] = values['name']
return super(sale_order, self).create(values)
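# Added usage sketch (not part of the original addon): inside an Odoo 8.0
# environment, creating a new revision of a quotation might look like this
# (`order_id` is illustrative):
#
#   order = env['sale.order'].browse(order_id)
#   action = order.copy_quotation()
#   # the previous content is archived as an inactive old revision and the
#   # current order is reset to draft with an incremented revision_number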
|
bhcopeland/ansible-modules-extras
|
refs/heads/devel
|
windows/win_file_version.py
|
65
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Get DLL or EXE build version
# Copyright © 2015 Sam Liu <sam.liu@activenetwork.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: win_file_version
version_added: "2.1"
short_description: Get DLL or EXE file build version
description:
- Get DLL or EXE file build version
    - changed state will always be false
options:
path:
description:
      - File to get version (provide absolute path)
required: true
aliases: []
author: Sam Liu
'''
EXAMPLES = '''
# get C:\Windows\System32\cmd.exe version in playbook
---
- name: Get acm instance version
win_file_version:
path: 'C:\Windows\System32\cmd.exe'
register: exe_file_version
- debug: msg="{{exe_file_version}}"
'''
RETURN = """
win_file_version.path:
description: file path
returned: always
type: string
win_file_version.file_version:
description: file version number.
returned: no error
type: string
win_file_version.product_version:
description: the version of the product this file is distributed with.
returned: no error
type: string
win_file_version.file_major_part:
description: the major part of the version number.
returned: no error
type: string
win_file_version.file_minor_part:
description: the minor part of the version number of the file.
returned: no error
type: string
win_file_version.file_build_part:
description: build number of the file.
returned: no error
type: string
win_file_version.file_private_part:
description: file private part number.
returned: no error
type: string
"""
|
doganov/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/bookmarks/tests/test_tasks.py
|
11
|
"""
Tests for tasks.
"""
import ddt
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.factories import check_mongo_calls
from ..models import XBlockCache
from ..tasks import _calculate_course_xblocks_data, _update_xblocks_cache
from .test_models import BookmarksTestsBase
@ddt.ddt
class XBlockCacheTaskTests(BookmarksTestsBase):
"""
Test the XBlockCache model.
"""
def setUp(self):
super(XBlockCacheTaskTests, self).setUp()
self.course_expected_cache_data = {
self.course.location: [
[],
], self.chapter_1.location: [
[
self.course.location,
],
], self.chapter_2.location: [
[
self.course.location,
],
], self.sequential_1.location: [
[
self.course.location,
self.chapter_1.location,
],
], self.sequential_2.location: [
[
self.course.location,
self.chapter_1.location,
],
], self.vertical_1.location: [
[
self.course.location,
self.chapter_1.location,
self.sequential_1.location,
],
], self.vertical_2.location: [
[
self.course.location,
self.chapter_1.location,
self.sequential_2.location,
],
], self.vertical_3.location: [
[
self.course.location,
self.chapter_1.location,
self.sequential_2.location,
],
],
}
self.other_course_expected_cache_data = { # pylint: disable=invalid-name
self.other_course.location: [
[],
], self.other_chapter_1.location: [
[
self.other_course.location,
],
], self.other_sequential_1.location: [
[
self.other_course.location,
self.other_chapter_1.location,
],
], self.other_sequential_2.location: [
[
self.other_course.location,
self.other_chapter_1.location,
],
], self.other_vertical_1.location: [
[
self.other_course.location,
self.other_chapter_1.location,
self.other_sequential_1.location,
],
[
self.other_course.location,
self.other_chapter_1.location,
self.other_sequential_2.location,
]
], self.other_vertical_2.location: [
[
self.other_course.location,
self.other_chapter_1.location,
self.other_sequential_1.location,
],
],
}
@ddt.data(
(ModuleStoreEnum.Type.mongo, 2, 2, 3),
(ModuleStoreEnum.Type.mongo, 4, 2, 3),
(ModuleStoreEnum.Type.mongo, 2, 3, 4),
(ModuleStoreEnum.Type.mongo, 4, 3, 4),
(ModuleStoreEnum.Type.mongo, 2, 4, 5),
# (ModuleStoreEnum.Type.mongo, 4, 4, 6), Too slow.
(ModuleStoreEnum.Type.split, 2, 2, 3),
(ModuleStoreEnum.Type.split, 4, 2, 3),
(ModuleStoreEnum.Type.split, 2, 3, 3),
(ModuleStoreEnum.Type.split, 2, 4, 3),
)
@ddt.unpack
def test_calculate_course_xblocks_data_queries(self, store_type, children_per_block, depth, expected_mongo_calls):
course = self.create_course_with_blocks(children_per_block, depth, store_type)
with check_mongo_calls(expected_mongo_calls):
blocks_data = _calculate_course_xblocks_data(course.id)
self.assertGreater(len(blocks_data), children_per_block ** depth)
@ddt.data(
('course',),
('other_course',)
)
@ddt.unpack
def test_calculate_course_xblocks_data(self, course_attr):
"""
Test that the xblocks data is calculated correctly.
"""
course = getattr(self, course_attr)
blocks_data = _calculate_course_xblocks_data(course.id)
expected_cache_data = getattr(self, course_attr + '_expected_cache_data')
for usage_key, __ in expected_cache_data.items():
for path_index, path in enumerate(blocks_data[unicode(usage_key)]['paths']):
for path_item_index, path_item in enumerate(path):
self.assertEqual(
path_item['usage_key'], expected_cache_data[usage_key][path_index][path_item_index]
)
@ddt.data(
('course', 47),
('other_course', 34)
)
@ddt.unpack
def test_update_xblocks_cache(self, course_attr, expected_sql_queries):
"""
Test that the xblocks data is persisted correctly.
"""
course = getattr(self, course_attr)
with self.assertNumQueries(expected_sql_queries):
_update_xblocks_cache(course.id)
expected_cache_data = getattr(self, course_attr + '_expected_cache_data')
for usage_key, __ in expected_cache_data.items():
xblock_cache = XBlockCache.objects.get(usage_key=usage_key)
for path_index, path in enumerate(xblock_cache.paths):
for path_item_index, path_item in enumerate(path):
self.assertEqual(
path_item.usage_key, expected_cache_data[usage_key][path_index][path_item_index + 1]
)
with self.assertNumQueries(3):
_update_xblocks_cache(course.id)
|
Shrhawk/edx-platform
|
refs/heads/master
|
common/djangoapps/third_party_auth/tests/specs/test_linkedin.py
|
112
|
"""Integration tests for LinkedIn providers."""
from third_party_auth.tests.specs import base
class LinkedInOauth2IntegrationTest(base.Oauth2IntegrationTest):
"""Integration tests for provider.LinkedInOauth2."""
def setUp(self):
super(LinkedInOauth2IntegrationTest, self).setUp()
self.provider = self.configure_linkedin_provider(
enabled=True,
key='linkedin_oauth2_key',
secret='linkedin_oauth2_secret',
)
TOKEN_RESPONSE_DATA = {
'access_token': 'access_token_value',
'expires_in': 'expires_in_value',
}
USER_RESPONSE_DATA = {
'lastName': 'lastName_value',
'id': 'id_value',
'firstName': 'firstName_value',
}
def get_username(self):
response_data = self.get_response_data()
return response_data.get('firstName') + response_data.get('lastName')
|
sekikn/ambari
|
refs/heads/trunk
|
ambari-agent/src/test/python/resource_management/TestXmlConfigResource.py
|
2
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import os
import time
from unittest import TestCase
from mock.mock import patch, MagicMock, ANY
from only_for_platform import get_platform, not_for_platform, os_distro_value, PLATFORM_WINDOWS
from ambari_commons.os_check import OSCheck
from resource_management.core import Environment
from resource_management.core.system import System
from resource_management.libraries import XmlConfig
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
class TestXmlConfigResource(TestCase):
"""
XmlConfig="resource_management.libraries.providers.xml_config.XmlConfigProvider",
Testing XmlConfig(XmlConfigProvider) with different 'resource configurations'
"""
@patch("resource_management.core.providers.system._ensure_metadata")
@patch("resource_management.core.sudo.create_file")
@patch("resource_management.core.sudo.path_exists")
@patch("resource_management.core.sudo.path_isdir")
@patch.object(time, "asctime")
def test_action_create_empty_xml_config(self,
time_asctime_mock,
os_path_isdir_mock,
os_path_exists_mock,
create_file_mock,
ensure_mock):
"""
        Tests if the 'create' action creates a new, non-existent xml file and writes proper data
where configurations={}
"""
os_path_isdir_mock.side_effect = [False, True]
os_path_exists_mock.return_value = False
time_asctime_mock.return_value = 'Wed 2014-02'
with Environment('/') as env:
XmlConfig('file.xml',
conf_dir='/dir/conf',
configurations={},
configuration_attributes={}
)
create_file_mock.assert_called_with('/dir/conf/file.xml', u' <configuration xmlns:xi="http://www.w3.org/2001/XInclude">\n \n </configuration>',
encoding='UTF-8', on_file_created=ANY)
@patch("resource_management.core.providers.system._ensure_metadata")
@patch("resource_management.core.sudo.create_file")
@patch("resource_management.core.sudo.path_exists")
@patch("resource_management.core.sudo.path_isdir")
@patch.object(time, "asctime")
def test_action_create_simple_xml_config(self,
time_asctime_mock,
os_path_isdir_mock,
os_path_exists_mock,
create_file_mock,
ensure_mock):
"""
        Tests if the 'create' action creates a new, non-existent xml file and writes proper data
where configurations={"Some conf":"Some value"}
"""
os_path_isdir_mock.side_effect = [False, True]
os_path_exists_mock.return_value = False
time_asctime_mock.return_value = 'Wed 2014-02'
with Environment('/') as env:
XmlConfig('file.xml',
conf_dir='/dir/conf',
configurations={'property1': 'value1'},
configuration_attributes={'attr': {'property1': 'attr_value'}}
)
create_file_mock.assert_called_with('/dir/conf/file.xml', u' <configuration xmlns:xi="http://www.w3.org/2001/XInclude">\n \n <property>\n <name>property1</name>\n <value>value1</value>\n <attr>attr_value</attr>\n </property>\n \n </configuration>',
encoding='UTF-8', on_file_created=ANY)
@patch("resource_management.core.providers.system._ensure_metadata")
@patch("resource_management.core.sudo.create_file")
@patch("resource_management.core.sudo.path_exists")
@patch("resource_management.core.sudo.path_isdir")
@patch.object(time, "asctime")
def test_action_create_simple_xml_config_with_inclusion(self,
time_asctime_mock,
os_path_isdir_mock,
os_path_exists_mock,
create_file_mock,
ensure_mock):
"""
        Tests if the 'create' action creates a new, non-existent xml file and writes proper data
where configurations={"Some conf":"Some value"}
"""
os_path_isdir_mock.side_effect = [False, True]
os_path_exists_mock.return_value = False
time_asctime_mock.return_value = 'Wed 2014-02'
with Environment('/') as env:
XmlConfig('file.xml',
conf_dir='/dir/conf',
configurations={'property1': 'value1'},
configuration_attributes={'attr': {'property1': 'attr_value'}},
xml_include_file="/dif/conf/include_file.xml"
)
create_file_mock.assert_called_with('/dir/conf/file.xml', u' <configuration xmlns:xi="http://www.w3.org/2001/XInclude">\n \n <property>\n <name>property1</name>\n <value>value1</value>\n <attr>attr_value</attr>\n </property>\n \n <xi:include href="/dif/conf/include_file.xml"/>\n \n </configuration>',
encoding='UTF-8', on_file_created=ANY)
@patch("resource_management.core.providers.system._ensure_metadata")
@patch("resource_management.core.sudo.create_file")
@patch("resource_management.core.sudo.path_exists")
@patch("resource_management.core.sudo.path_isdir")
@patch.object(time, "asctime")
def test_action_create_xml_config_with_metacharacters(self,
time_asctime_mock,
os_path_isdir_mock,
os_path_exists_mock,
create_file_mock,
ensure_mock):
"""
        Tests if the 'create' action creates a new, non-existent xml file and writes proper data
where configurations={"Some conf":"Some metacharacters"}
"""
os_path_isdir_mock.side_effect = [False, True]
os_path_exists_mock.return_value = False
time_asctime_mock.return_value = 'Wed 2014-02'
with Environment('/') as env:
XmlConfig('file.xml',
conf_dir='/dir/conf',
configurations={"": "",
"prop.1": "'.'yyyy-MM-dd-HH",
"prop.3": "%d{ISO8601} %5p %c{1}:%L - %m%n",
"prop.2": "INFO, openjpa",
"prop.4": "${oozie.log.dir}/oozie.log",
"prop.empty": "",
},
configuration_attributes={
"": {
"prop.1": "should_not_be_printed",
"prop.2": "should_not_be_printed",
},
"attr1": {
"prop.1": "x",
"prop.8": "not_existed",
},
"attr2": {
"prop.4": "value4",
"prop.3": "value3"
},
"attr_empty": {
},
"attr_value_empty": {
"prop.4": "",
"prop.empty": ""
}
})
create_file_mock.assert_called_with('/dir/conf/file.xml', u' <configuration xmlns:xi="http://www.w3.org/2001/XInclude">\n \n <property>\n <name></name>\n <value></value>\n </property>\n \n <property>\n <name>prop.1</name>\n <value>'.'yyyy-MM-dd-HH</value>\n <attr1>x</attr1>\n </property>\n \n <property>\n <name>prop.2</name>\n <value>INFO, openjpa</value>\n </property>\n \n <property>\n <name>prop.3</name>\n <value>%d{ISO8601} %5p %c{1}:%L - %m%n</value>\n <attr2>value3</attr2>\n </property>\n \n <property>\n <name>prop.4</name>\n <value>${oozie.log.dir}/oozie.log</value>\n <attr_value_empty></attr_value_empty>\n <attr2>value4</attr2>\n </property>\n \n <property>\n <name>prop.empty</name>\n <value></value>\n <attr_value_empty></attr_value_empty>\n </property>\n \n </configuration>',
encoding='UTF-8', on_file_created=ANY)
@patch("resource_management.core.providers.system._ensure_metadata")
@patch("resource_management.core.sudo.create_file")
@patch("resource_management.core.sudo.path_exists")
@patch("resource_management.core.sudo.path_isdir")
@patch.object(time, "asctime")
def test_action_create_xml_config_sorted_by_key(self,
time_asctime_mock,
os_path_isdir_mock,
os_path_exists_mock,
create_file_mock,
ensure_mock):
"""
        Tests if the 'create' action creates a new, non-existent xml file and writes proper data
        where configurations={"Key":"Value"} entries are stored sorted by key
"""
os_path_isdir_mock.side_effect = [False, True]
os_path_exists_mock.return_value = False
time_asctime_mock.return_value = 'Wed 2014-02'
with Environment('/') as env:
XmlConfig('file.xml',
conf_dir='/dir/conf',
configurations={"": "",
"third": "should be third",
"first": "should be first",
"z_last": "should be last",
"second": "should be second",
},
configuration_attributes={}
)
create_file_mock.assert_called_with('/dir/conf/file.xml', u' <configuration xmlns:xi="http://www.w3.org/2001/XInclude">\n \n <property>\n <name></name>\n <value></value>\n </property>\n \n <property>\n <name>first</name>\n <value>should be first</value>\n </property>\n \n <property>\n <name>second</name>\n <value>should be second</value>\n </property>\n \n <property>\n <name>third</name>\n <value>should be third</value>\n </property>\n \n <property>\n <name>z_last</name>\n <value>should be last</value>\n </property>\n \n </configuration>',
encoding='UTF-8', on_file_created=ANY)
@patch("resource_management.libraries.providers.xml_config.File")
@patch("resource_management.core.sudo.path_exists")
@patch("resource_management.core.sudo.path_isdir")
def test_action_create_arguments(self, os_path_isdir_mock ,os_path_exists_mock, file_mock):
os_path_isdir_mock.side_effect = [False, True]
os_path_exists_mock.return_value = False
with Environment() as env:
XmlConfig('xmlFile.xml',
conf_dir='/dir/conf',
configurations={'property1': 'value1'},
configuration_attributes={'attr': {'property1': 'attr_value'}},
mode = 0755,
owner = "hdfs",
group = "hadoop",
encoding = "Code"
)
self.assertEqual(file_mock.call_args[0][0],'/dir/conf/xmlFile.xml')
call_args = file_mock.call_args[1].copy()
del call_args['content']
self.assertEqual(call_args,{'owner': 'hdfs', 'group': 'hadoop', 'mode': 0755, 'encoding' : 'Code'})
|
Gerhut/django-http-file-storage
|
refs/heads/master
|
project/app/migrations/0002_auto_20150929_1428.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import project.storages
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='media',
name='height',
field=models.PositiveIntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='media',
name='width',
field=models.PositiveIntegerField(default=0),
preserve_default=False,
),
migrations.AlterField(
model_name='media',
name='file',
field=models.ImageField(height_field=b'height', storage=project.storages.HTTPStorage(b'http://localhost:5000/'), width_field=b'width', upload_to=b''),
preserve_default=True,
),
]
|
mnaberez/py65
|
refs/heads/master
|
py65/tests/__init__.py
|
35
|
# this is a package
|
brandond/ansible
|
refs/heads/devel
|
contrib/inventory/openshift.py
|
42
|
#!/usr/bin/env python
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
inventory: openshift
short_description: Openshift gears external inventory script
description:
- Generates inventory of Openshift gears using the REST interface
    - this permits reusing a playbook to set up an Openshift gear
version_added: None
author: Michael Scherer
'''
import json
import os
import os.path
import sys
import ConfigParser
import StringIO
from ansible.module_utils.urls import open_url
configparser = None
def get_from_rhc_config(variable):
global configparser
CONF_FILE = os.path.expanduser('~/.openshift/express.conf')
if os.path.exists(CONF_FILE):
if not configparser:
ini_str = '[root]\n' + open(CONF_FILE, 'r').read()
configparser = ConfigParser.SafeConfigParser()
configparser.readfp(StringIO.StringIO(ini_str))
try:
return configparser.get('root', variable)
except ConfigParser.NoOptionError:
return None
def get_config(env_var, config_var):
result = os.getenv(env_var)
if not result:
result = get_from_rhc_config(config_var)
if not result:
sys.exit("failed=True msg='missing %s'" % env_var)
return result
def get_json_from_api(url, username, password):
headers = {'Accept': 'application/json; version=1.5'}
response = open_url(url, headers=headers, url_username=username, url_password=password)
return json.loads(response.read())['data']
username = get_config('ANSIBLE_OPENSHIFT_USERNAME', 'default_rhlogin')
password = get_config('ANSIBLE_OPENSHIFT_PASSWORD', 'password')
broker_url = 'https://%s/broker/rest/' % get_config('ANSIBLE_OPENSHIFT_BROKER', 'libra_server')
response = get_json_from_api(broker_url + '/domains', username, password)
response = get_json_from_api("%s/domains/%s/applications" %
(broker_url, response[0]['id']), username, password)
result = {}
for app in response:
# ssh://520311404832ce3e570000ff@blog-johndoe.example.org
(user, host) = app['ssh_url'][6:].split('@')
app_name = host.split('-')[0]
result[app_name] = {}
result[app_name]['hosts'] = []
result[app_name]['hosts'].append(host)
result[app_name]['vars'] = {}
result[app_name]['vars']['ansible_ssh_user'] = user
if len(sys.argv) == 2 and sys.argv[1] == '--list':
print(json.dumps(result))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
print(json.dumps({}))
else:
print("Need an argument, either --list or --host <host>")
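# Added usage note (not part of the original script): a typical invocation,
# assuming credentials are exported or present in ~/.openshift/express.conf:
#
#   ANSIBLE_OPENSHIFT_USERNAME=user@example.org \
#   ANSIBLE_OPENSHIFT_PASSWORD=secret \
#   ANSIBLE_OPENSHIFT_BROKER=openshift.redhat.com \
#   ./openshift.py --list
#
# prints the JSON inventory built in `result` above: one group per
# application, with its gear host and an ansible_ssh_user variable.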
|
usingnamespace/usingnamespace
|
refs/heads/master
|
usingnamespace/management/views/management.py
|
1
|
import logging
log = logging.getLogger(__name__)
from pyramid.view import (
view_config,
view_defaults,
notfound_view_config,
)
from pyramid.config import not_
from pyramid.httpexceptions import HTTPForbidden
from pyramid.security import unauthenticated_userid, authenticated_userid
@view_defaults(
context='..traversal.Root',
route_name='management',
)
class Management(object):
"""Provide all of the views for the main page of management"""
def __init__(self, context, request):
"""Initialises the view class
:context: The traversal context
:request: The current request
"""
self.context = context
self.request = request
@view_config(
renderer='templates/home.mako',
effective_principals='system.Authenticated',
)
def home(self):
userinfo = self.request.user
return {
'sites': userinfo.user.sites,
}
@view_defaults(
containment='..traversal.Root',
route_name='management',
)
class ManagementNotAuthorized(object):
"""Anything related to management that is not authorized"""
def __init__(self, context, request):
"""Initialises the view class
:context: The traversal context
:request: The current request
"""
self.context = context
self.request = request
@view_config(
effective_principals=not_('system.Authenticated')
)
def management_not_authed(self):
raise HTTPForbidden()
@notfound_view_config(
containment='..traversal.Root',
renderer='string',
)
def management_not_found(self):
self.request.response.status_int = 404
return "404 - Not Found"
|