repo_name | ref | path | copies | content |
|---|---|---|---|---|
stanley-cheung/grpc | refs/heads/master | src/python/grpcio_tests/tests_aio/unit/__init__.py | 38 | # Copyright 2019 The gRPC Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
oihane/odoomrp-wip | refs/heads/8.0 | web_widget_float_time_second/__openerp__.py | 8 | # -*- coding: utf-8 -*-
# (c) 2016 Ainara Galdona - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
"name": "Web Widget Float Time Second",
"version": "8.0.0.1.0",
"depends": ["web",
"base_setup"],
"author": "OdooMRP team, "
"AvanzOSC, "
"Serv. Tecnol. Avanzados - Pedro M. Baeza, "
"Odoo Community Association (OCA)",
"website": "http://www.odoomrp.com",
"category": "Hidden",
'data': ["views/web_widget_float_time_second_view.xml",
"views/res_config_view.xml"],
'demo': [],
'installable': True,
'auto_install': False,
}
|
jsirois/pants | refs/heads/master | src/python/pants/backend/python/lint/docformatter/rules_integration_test.py | 1 | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import dataclasses
from typing import List, Optional, Sequence, Tuple
import pytest
from pants.backend.python.lint.docformatter.rules import DocformatterFieldSet, DocformatterRequest
from pants.backend.python.lint.docformatter.rules import rules as docformatter_rules
from pants.backend.python.target_types import PythonLibrary
from pants.core.goals.fmt import FmtResult
from pants.core.goals.lint import LintResult, LintResults
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.addresses import Address
from pants.engine.fs import CreateDigest, Digest, FileContent
from pants.engine.target import Target
from pants.testutil.rule_runner import QueryRule, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
*docformatter_rules(),
QueryRule(LintResults, (DocformatterRequest,)),
QueryRule(FmtResult, (DocformatterRequest,)),
QueryRule(SourceFiles, (SourceFilesRequest,)),
]
)
GOOD_SOURCE = FileContent("good.py", b'"""Good docstring."""\n')
BAD_SOURCE = FileContent("bad.py", b'"""Oops, missing a period"""\n')
FIXED_BAD_SOURCE = FileContent("bad.py", b'"""Oops, missing a period."""\n')
def make_target(rule_runner: RuleRunner, source_files: List[FileContent]) -> Target:
for source_file in source_files:
rule_runner.create_file(f"{source_file.path}", source_file.content.decode())
return PythonLibrary({}, address=Address("", target_name="target"))
def run_docformatter(
rule_runner: RuleRunner,
targets: List[Target],
*,
passthrough_args: Optional[str] = None,
skip: bool = False,
) -> Tuple[Sequence[LintResult], FmtResult]:
args = ["--backend-packages=pants.backend.python.lint.docformatter"]
if passthrough_args:
args.append(f"--docformatter-args='{passthrough_args}'")
if skip:
args.append("--docformatter-skip")
rule_runner.set_options(args, env_inherit={"PATH", "PYENV_ROOT", "HOME"})
field_sets = [DocformatterFieldSet.create(tgt) for tgt in targets]
lint_results = rule_runner.request(LintResults, [DocformatterRequest(field_sets)])
input_sources = rule_runner.request(
SourceFiles,
[
SourceFilesRequest(field_set.sources for field_set in field_sets),
],
)
fmt_result = rule_runner.request(
FmtResult,
[
DocformatterRequest(field_sets, prior_formatter_result=input_sources.snapshot),
],
)
return lint_results.results, fmt_result
def get_digest(rule_runner: RuleRunner, source_files: List[FileContent]) -> Digest:
return rule_runner.request(Digest, [CreateDigest(source_files)])
def test_passing_source(rule_runner: RuleRunner) -> None:
target = make_target(rule_runner, [GOOD_SOURCE])
lint_results, fmt_result = run_docformatter(rule_runner, [target])
assert len(lint_results) == 1
assert lint_results[0].exit_code == 0
assert lint_results[0].stderr == ""
assert fmt_result.output == get_digest(rule_runner, [GOOD_SOURCE])
assert fmt_result.did_change is False
def test_failing_source(rule_runner: RuleRunner) -> None:
target = make_target(rule_runner, [BAD_SOURCE])
lint_results, fmt_result = run_docformatter(rule_runner, [target])
assert len(lint_results) == 1
assert lint_results[0].exit_code == 3
assert lint_results[0].stderr.strip() == BAD_SOURCE.path
assert fmt_result.output == get_digest(rule_runner, [FIXED_BAD_SOURCE])
assert fmt_result.did_change is True
def test_mixed_sources(rule_runner: RuleRunner) -> None:
target = make_target(rule_runner, [GOOD_SOURCE, BAD_SOURCE])
lint_results, fmt_result = run_docformatter(rule_runner, [target])
assert len(lint_results) == 1
assert lint_results[0].exit_code == 3
assert lint_results[0].stderr.strip() == BAD_SOURCE.path
assert fmt_result.output == get_digest(rule_runner, [GOOD_SOURCE, FIXED_BAD_SOURCE])
assert fmt_result.did_change is True
def test_multiple_targets(rule_runner: RuleRunner) -> None:
targets = [
make_target(rule_runner, [GOOD_SOURCE]),
make_target(rule_runner, [BAD_SOURCE]),
]
lint_results, fmt_result = run_docformatter(rule_runner, targets)
assert len(lint_results) == 1
assert lint_results[0].exit_code == 3
assert lint_results[0].stderr.strip() == BAD_SOURCE.path
assert fmt_result.output == get_digest(rule_runner, [GOOD_SOURCE, FIXED_BAD_SOURCE])
assert fmt_result.did_change is True
def test_respects_passthrough_args(rule_runner: RuleRunner) -> None:
needs_config = FileContent(
path="needs_config.py",
content=b'"""\nOne line docstring acting like it\'s multiline.\n"""\n',
)
target = make_target(rule_runner, [needs_config])
lint_results, fmt_result = run_docformatter(
rule_runner, [target], passthrough_args="--make-summary-multi-line"
)
assert len(lint_results) == 1
assert lint_results[0].exit_code == 0
assert lint_results[0].stderr == ""
assert fmt_result.output == get_digest(rule_runner, [needs_config])
assert fmt_result.did_change is False
def test_skip(rule_runner: RuleRunner) -> None:
target = make_target(rule_runner, [BAD_SOURCE])
lint_results, fmt_result = run_docformatter(rule_runner, [target], skip=True)
assert not lint_results
assert fmt_result.skipped is True
assert fmt_result.did_change is False
def test_stub_files(rule_runner: RuleRunner) -> None:
good_stub = dataclasses.replace(GOOD_SOURCE, path="good.pyi")
bad_stub = dataclasses.replace(BAD_SOURCE, path="bad.pyi")
fixed_bad_stub = dataclasses.replace(FIXED_BAD_SOURCE, path="bad.pyi")
good_files = [GOOD_SOURCE, good_stub]
target = make_target(rule_runner, good_files)
lint_results, fmt_result = run_docformatter(rule_runner, [target])
assert len(lint_results) == 1 and lint_results[0].exit_code == 0
assert lint_results[0].stderr == "" and fmt_result.stdout == ""
assert fmt_result.output == get_digest(rule_runner, good_files)
assert not fmt_result.did_change
target = make_target(rule_runner, [BAD_SOURCE, bad_stub])
lint_results, fmt_result = run_docformatter(rule_runner, [target])
assert len(lint_results) == 1 and lint_results[0].exit_code == 3
assert bad_stub.path in lint_results[0].stderr
assert BAD_SOURCE.path in lint_results[0].stderr
fixed_bad_files = [FIXED_BAD_SOURCE, fixed_bad_stub]
assert fmt_result.output == get_digest(rule_runner, [*fixed_bad_files, *good_files])
assert fmt_result.did_change
|
apporc/nova | refs/heads/master | nova/conf/scheduler.py | 2 | # Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from oslo_config import cfg
host_subset_size_opt = cfg.IntOpt("scheduler_host_subset_size",
default=1,
help="New instances will be scheduled on a host chosen randomly from "
"a subset of the N best hosts. This property defines the subset "
"size that a host is chosen from. A value of 1 chooses the first "
"host returned by the weighing functions. This value must be at "
"least 1. Any value less than 1 will be ignored, and 1 will be "
"used instead")
bm_default_filter_opt = cfg.ListOpt("baremetal_scheduler_default_filters",
default=[
"RetryFilter",
"AvailabilityZoneFilter",
"ComputeFilter",
"ComputeCapabilitiesFilter",
"ImagePropertiesFilter",
"ExactRamFilter",
"ExactDiskFilter",
"ExactCoreFilter",
],
help="Which filter class names to use for filtering baremetal hosts "
"when not specified in the request.")
use_bm_filters_opt = cfg.BoolOpt("scheduler_use_baremetal_filters",
default=False,
help="Flag to decide whether to use "
"baremetal_scheduler_default_filters or not.")
host_mgr_avail_filt_opt = cfg.MultiStrOpt("scheduler_available_filters",
default=["nova.scheduler.filters.all_filters"],
help="Filter classes available to the scheduler which may be "
"specified more than once. An entry of "
"'nova.scheduler.filters.all_filters' maps to all filters "
"included with nova.")
host_mgr_default_filt_opt = cfg.ListOpt("scheduler_default_filters",
default=[
"RetryFilter",
"AvailabilityZoneFilter",
"RamFilter",
"DiskFilter",
"ComputeFilter",
"ComputeCapabilitiesFilter",
"ImagePropertiesFilter",
"ServerGroupAntiAffinityFilter",
"ServerGroupAffinityFilter",
],
help="Which filter class names to use for filtering hosts when not "
"specified in the request.")
host_mgr_sched_wgt_cls_opt = cfg.ListOpt("scheduler_weight_classes",
default=["nova.scheduler.weights.all_weighers"],
help="Which weight class names to use for weighing hosts")
host_mgr_tracks_inst_chg_opt = cfg.BoolOpt("scheduler_tracks_instance_changes",
default=True,
help="Determines if the Scheduler tracks changes to instances to help "
"with its filtering decisions.")
rpc_sched_topic_opt = cfg.StrOpt("scheduler_topic",
default="scheduler",
help="The topic scheduler nodes listen on")
# This option specifies an option group, so register separately
rpcapi_cap_opt = cfg.StrOpt("scheduler",
help="Set a version cap for messages sent to scheduler services")
scheduler_json_config_location_opt = cfg.StrOpt(
"scheduler_json_config_location",
default="",
help="Absolute path to scheduler configuration JSON file.")
sched_driver_host_mgr_opt = cfg.StrOpt("scheduler_host_manager",
default="nova.scheduler.host_manager.HostManager",
help="The scheduler host manager class to use")
driver_opt = cfg.StrOpt("scheduler_driver",
default="nova.scheduler.filter_scheduler.FilterScheduler",
help="Default driver to use for the scheduler")
driver_period_opt = cfg.IntOpt("scheduler_driver_task_period",
default=60,
help="How often (in seconds) to run periodic tasks in the scheduler "
"driver of your choice. Please note this is likely to interact "
"with the value of service_down_time, but exactly how they "
"interact will depend on your choice of scheduler driver.")
disk_allocation_ratio_opt = cfg.FloatOpt("disk_allocation_ratio",
default=1.0,
help="Virtual disk to physical disk allocation ratio")
isolated_img_opt = cfg.ListOpt("isolated_images",
default=[],
help="Images to run on isolated host")
isolated_host_opt = cfg.ListOpt("isolated_hosts",
default=[],
help="Host reserved for specific images")
restrict_iso_host_img_opt = cfg.BoolOpt(
"restrict_isolated_hosts_to_isolated_images",
default=True,
help="Whether to force isolated hosts to run only isolated images")
# These opts are registered as a separate OptGroup
trusted_opts = [
cfg.StrOpt("attestation_server",
help="Attestation server HTTP"),
cfg.StrOpt("attestation_server_ca_file",
help="Attestation server Cert file for Identity verification"),
cfg.StrOpt("attestation_port",
default="8443",
help="Attestation server port"),
cfg.StrOpt("attestation_api_url",
default="/OpenAttestationWebServices/V1.0",
help="Attestation web API URL"),
cfg.StrOpt("attestation_auth_blob",
help="Attestation authorization blob - must change"),
cfg.IntOpt("attestation_auth_timeout",
default=60,
help="Attestation status cache valid period length"),
cfg.BoolOpt("attestation_insecure_ssl",
default=False,
help="Disable SSL cert verification for Attestation service")
]
max_io_ops_per_host_opt = cfg.IntOpt("max_io_ops_per_host",
default=8,
help="Tells filters to ignore hosts that have this many or more "
"instances currently in build, resize, snapshot, migrate, rescue "
"or unshelve task states")
agg_img_prop_iso_namespace_opt = cfg.StrOpt(
"aggregate_image_properties_isolation_namespace",
help="Force the filter to consider only keys matching the given "
"namespace.")
agg_img_prop_iso_separator_opt = cfg.StrOpt(
"aggregate_image_properties_isolation_separator",
default=".",
help="The separator used between the namespace and keys")
max_instances_per_host_opt = cfg.IntOpt("max_instances_per_host",
default=50,
help="Ignore hosts that have too many instances")
ram_weight_mult_opt = cfg.FloatOpt("ram_weight_multiplier",
default=1.0,
help="Multiplier used for weighing ram. Negative numbers mean to "
"stack vs spread.")
io_ops_weight_mult_opt = cfg.FloatOpt("io_ops_weight_multiplier",
default=-1.0,
help="Multiplier used for weighing host io ops. Negative numbers mean "
"a preference to choose light workload compute hosts.")
# These opts are registered as a separate OptGroup
metrics_weight_opts = [
cfg.FloatOpt("weight_multiplier",
default=1.0,
help="Multiplier used for weighing metrics."),
cfg.ListOpt("weight_setting",
default=[],
help="How the metrics are going to be weighed. This should be in "
"the form of '<name1>=<ratio1>, <name2>=<ratio2>, ...', "
"where <nameX> is one of the metrics to be weighed, and "
"<ratioX> is the corresponding ratio. So for "
"'name1=1.0, name2=-1.0' The final weight would be "
"name1.value * 1.0 + name2.value * -1.0."),
cfg.BoolOpt("required",
default=True,
help="How to treat the unavailable metrics. When a metric is NOT "
"available for a host, if it is set to be True, it would "
"raise an exception, so it is recommended to use the "
"scheduler filter MetricFilter to filter out those hosts. If "
"it is set to be False, the unavailable metric would be "
"treated as a negative factor in weighing process, the "
"returned value would be set by the option "
"weight_of_unavailable."),
cfg.FloatOpt("weight_of_unavailable",
default=float(-10000.0),
help="The final weight value to be returned if required is set to "
"False and any one of the metrics set by weight_setting is "
"unavailable."),
]
scheduler_max_att_opt = cfg.IntOpt("scheduler_max_attempts",
default=3,
help="Maximum number of attempts to schedule an instance")
SIMPLE_OPTS = [host_subset_size_opt,
bm_default_filter_opt,
use_bm_filters_opt,
host_mgr_avail_filt_opt,
host_mgr_default_filt_opt,
host_mgr_sched_wgt_cls_opt,
host_mgr_tracks_inst_chg_opt,
rpc_sched_topic_opt,
sched_driver_host_mgr_opt,
driver_opt,
driver_period_opt,
scheduler_json_config_location_opt,
disk_allocation_ratio_opt,
isolated_img_opt,
isolated_host_opt,
restrict_iso_host_img_opt,
max_io_ops_per_host_opt,
agg_img_prop_iso_namespace_opt,
agg_img_prop_iso_separator_opt,
max_instances_per_host_opt,
ram_weight_mult_opt,
io_ops_weight_mult_opt,
scheduler_max_att_opt,
]
ALL_OPTS = itertools.chain(
SIMPLE_OPTS,
[rpcapi_cap_opt],
trusted_opts,
metrics_weight_opts,
)
def register_opts(conf):
conf.register_opts(SIMPLE_OPTS)
conf.register_opt(rpcapi_cap_opt, "upgrade_levels")
trust_group = cfg.OptGroup(name="trusted_computing",
title="Trust parameters")
conf.register_group(trust_group)
conf.register_opts(trusted_opts, group=trust_group)
conf.register_opts(metrics_weight_opts, group="metrics")
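# Usage sketch (not part of the original module): register these options on a
# fresh oslo.config object and read the defaults back. The attribute names
# follow directly from the option names declared above.
if __name__ == "__main__":
    conf = cfg.ConfigOpts()
    register_opts(conf)
    conf(args=[])                            # parse an empty command line
    print(conf.scheduler_host_subset_size)   # -> 1 (default above)
    print(conf.metrics.weight_multiplier)    # -> 1.0 (from the metrics group)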
|
R-daneel-olivaw/mutation-tolerance-voting | refs/heads/master | pyvotecore/plurality_at_large.py | 1 | # Copyright (C) 2009, Brad Beattie
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pyvotecore.abstract_classes import MultipleWinnerVotingSystem
from pyvotecore.common_functions import matching_keys
import types
import copy
class PluralityAtLarge(MultipleWinnerVotingSystem):
def __init__(self, ballots, tie_breaker=None, required_winners=1):
super(PluralityAtLarge, self).__init__(ballots, tie_breaker=tie_breaker, required_winners=required_winners)
def calculate_results(self):
# Standardize the ballot format and extract the candidates
self.candidates = set()
for ballot in self.ballots:
# Convert single candidate ballots into ballot lists
if not isinstance(ballot["ballot"], list):
ballot["ballot"] = [ballot["ballot"]]
# Ensure no ballot has an excess of votes
if len(ballot["ballot"]) > self.required_winners:
raise Exception("A ballot contained too many candidates")
# Add all candidates on the ballot to the set
self.candidates.update(set(ballot["ballot"]))
# Sum up all votes for each candidate
self.tallies = dict.fromkeys(self.candidates, 0)
for ballot in self.ballots:
for candidate in ballot["ballot"]:
self.tallies[candidate] += ballot["count"]
tallies = copy.deepcopy(self.tallies)
# Determine which candidates win
winning_candidates = set()
while len(winning_candidates) < self.required_winners:
# Find the remaining candidates with the most votes
largest_tally = max(tallies.values())
top_candidates = matching_keys(tallies, largest_tally)
# Reduce the found candidates if there are too many
if len(top_candidates | winning_candidates) > self.required_winners:
self.tied_winners = top_candidates.copy()
while len(top_candidates | winning_candidates) > self.required_winners:
top_candidates.remove(self.break_ties(top_candidates, True))
# Move the top candidates into the winning pile
winning_candidates |= top_candidates
for candidate in top_candidates:
del tallies[candidate]
self.winners = winning_candidates
def as_dict(self):
data = super(PluralityAtLarge, self).as_dict()
data["tallies"] = self.tallies
return data
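# A minimal usage sketch (not part of the original file). Ballots follow the
# {"count": N, "ballot": [...]} shape that calculate_results() above reads;
# the "tallies" and "winners" keys mirror as_dict(). Set ordering in the
# printed output is unspecified.
if __name__ == "__main__":
    ballots = [
        {"count": 4, "ballot": ["Alice", "Bob"]},
        {"count": 3, "ballot": ["Carol"]},
        {"count": 2, "ballot": ["Bob"]},
    ]
    result = PluralityAtLarge(ballots, required_winners=2).as_dict()
    print(result["tallies"])   # {'Alice': 4, 'Bob': 6, 'Carol': 3}
    print(result["winners"])   # e.g. set(['Bob', 'Alice'])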
|
adviti/melange | refs/heads/master | app/gdata/Crypto/PublicKey/DSA.py | 228 |
#
# DSA.py : Digital Signature Algorithm
#
# Part of the Python Cryptography Toolkit
#
# Distribute and use freely; there are no restrictions on further
# dissemination and usage except those imposed by the laws of your
# country of residence. This software is provided "as is" without
# warranty of fitness for use or suitability for any purpose, express
# or implied. Use at your own risk or not at all.
#
__revision__ = "$Id: DSA.py,v 1.16 2004/05/06 12:52:54 akuchling Exp $"
from Crypto.PublicKey.pubkey import *
from Crypto.Util import number
from Crypto.Util.number import bytes_to_long, long_to_bytes
from Crypto.Hash import SHA
try:
from Crypto.PublicKey import _fastmath
except ImportError:
_fastmath = None
class error (Exception):
pass
def generateQ(randfunc):
S=randfunc(20)
hash1=SHA.new(S).digest()
hash2=SHA.new(long_to_bytes(bytes_to_long(S)+1)).digest()
q = bignum(0)
for i in range(0,20):
c=ord(hash1[i])^ord(hash2[i])
if i==0:
c=c | 128
if i==19:
c= c | 1
q=q*256+c
while (not isPrime(q)):
q=q+2
if pow(2,159L) < q < pow(2,160L):
return S, q
raise error, 'Bad q value generated'
def generate(bits, randfunc, progress_func=None):
"""generate(bits:int, randfunc:callable, progress_func:callable)
Generate a DSA key of length 'bits', using 'randfunc' to get
random data and 'progress_func', if present, to display
the progress of the key generation.
"""
if bits<160:
raise error, 'Key length <160 bits'
obj=DSAobj()
# Generate string S and prime q
if progress_func:
progress_func('p,q\n')
while (1):
S, obj.q = generateQ(randfunc)
n=(bits-1)/160
C, N, V = 0, 2, {}
b=(obj.q >> 5) & 15
powb=pow(bignum(2), b)
powL1=pow(bignum(2), bits-1)
while C<4096:
for k in range(0, n+1):
V[k]=bytes_to_long(SHA.new(S+str(N)+str(k)).digest())
W=V[n] % powb
for k in range(n-1, -1, -1):
W=(W<<160L)+V[k]
X=W+powL1
p=X-(X%(2*obj.q)-1)
if powL1<=p and isPrime(p):
break
C, N = C+1, N+n+1
if C<4096:
break
if progress_func:
progress_func('4096 multiples failed\n')
obj.p = p
power=(p-1)/obj.q
if progress_func:
progress_func('h,g\n')
while (1):
h=bytes_to_long(randfunc(bits)) % (p-1)
g=pow(h, power, p)
if 1<h<p-1 and g>1:
break
obj.g=g
if progress_func:
progress_func('x,y\n')
while (1):
x=bytes_to_long(randfunc(20))
if 0 < x < obj.q:
break
obj.x, obj.y = x, pow(g, x, p)
return obj
def construct(tuple):
"""construct(tuple:(long,long,long,long)|(long,long,long,long,long)):DSAobj
Construct a DSA object from a 4- or 5-tuple of numbers.
"""
obj=DSAobj()
if len(tuple) not in [4,5]:
raise error, 'argument for construct() wrong length'
for i in range(len(tuple)):
field = obj.keydata[i]
setattr(obj, field, tuple[i])
return obj
class DSAobj(pubkey):
keydata=['y', 'g', 'p', 'q', 'x']
def _encrypt(self, s, Kstr):
raise error, 'DSA algorithm cannot encrypt data'
def _decrypt(self, s):
raise error, 'DSA algorithm cannot decrypt data'
def _sign(self, M, K):
if (K<2 or self.q<=K):
raise error, 'K is not between 2 and q'
r=pow(self.g, K, self.p) % self.q
s=(inverse(K, self.q)*(M+self.x*r)) % self.q
return (r,s)
def _verify(self, M, sig):
r, s = sig
if r<=0 or r>=self.q or s<=0 or s>=self.q:
return 0
w=inverse(s, self.q)
u1, u2 = (M*w) % self.q, (r*w) % self.q
v1 = pow(self.g, u1, self.p)
v2 = pow(self.y, u2, self.p)
v = ((v1*v2) % self.p)
v = v % self.q
if v==r:
return 1
return 0
def size(self):
"Return the maximum number of bits that can be handled by this key."
return number.size(self.p) - 1
def has_private(self):
"""Return a Boolean denoting whether the object contains
private components."""
if hasattr(self, 'x'):
return 1
else:
return 0
def can_sign(self):
"""Return a Boolean value recording whether this algorithm can generate signatures."""
return 1
def can_encrypt(self):
"""Return a Boolean value recording whether this algorithm can encrypt data."""
return 0
def publickey(self):
"""Return a new key object containing only the public information."""
return construct((self.y, self.g, self.p, self.q))
object=DSAobj
generate_py = generate
construct_py = construct
class DSAobj_c(pubkey):
keydata = ['y', 'g', 'p', 'q', 'x']
def __init__(self, key):
self.key = key
def __getattr__(self, attr):
if attr in self.keydata:
return getattr(self.key, attr)
else:
if self.__dict__.has_key(attr):
self.__dict__[attr]
else:
raise AttributeError, '%s instance has no attribute %s' % (self.__class__, attr)
def __getstate__(self):
d = {}
for k in self.keydata:
if hasattr(self.key, k):
d[k]=getattr(self.key, k)
return d
def __setstate__(self, state):
y,g,p,q = state['y'], state['g'], state['p'], state['q']
if not state.has_key('x'):
self.key = _fastmath.dsa_construct(y,g,p,q)
else:
x = state['x']
self.key = _fastmath.dsa_construct(y,g,p,q,x)
def _sign(self, M, K):
return self.key._sign(M, K)
def _verify(self, M, (r, s)):
return self.key._verify(M, r, s)
def size(self):
return self.key.size()
def has_private(self):
return self.key.has_private()
def publickey(self):
return construct_c((self.key.y, self.key.g, self.key.p, self.key.q))
def can_sign(self):
return 1
def can_encrypt(self):
return 0
def generate_c(bits, randfunc, progress_func=None):
obj = generate_py(bits, randfunc, progress_func)
y,g,p,q,x = obj.y, obj.g, obj.p, obj.q, obj.x
return construct_c((y,g,p,q,x))
def construct_c(tuple):
key = apply(_fastmath.dsa_construct, tuple)
return DSAobj_c(key)
if _fastmath:
#print "using C version of DSA"
generate = generate_c
construct = construct_c
error = _fastmath.error
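# Usage sketch (illustrative only; Python 2 / PyCrypto-era API, matching the
# idioms of this module). _sign expects the message digest reduced to a long
# and a caller-supplied nonce K with 2 <= K < q:
#   key = generate(512, randfunc)    # randfunc(N) must return N random bytes
#   M = bytes_to_long(SHA.new('message').digest())
#   sig = key._sign(M, K)            # K: random long chosen by the caller
#   assert key._verify(M, sig)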
|
tima/ansible-modules-core | refs/heads/devel | network/netvisor/pn_show.py | 30 | #!/usr/bin/python
""" PN CLI show commands """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import shlex
DOCUMENTATION = """
---
module: pn_show
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
version: 1.0
short_description: Run show commands on nvOS device.
description:
  - Execute show commands on the nodes and return the results
    read from the device.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the cli on.
required: False
pn_command:
description:
- The C(pn_command) takes a CLI show command as value.
required: true
pn_parameters:
description:
      - Display output using specific parameters. Use 'all' to display all
        possible output. Accepts a list of comma separated parameters.
pn_options:
description:
- Specify formatting options.
"""
EXAMPLES = """
- name: run the vlan-show command
pn_show:
pn_command: 'vlan-show'
pn_parameters: id,scope,ports
pn_options: 'layout vertical'
- name: run the vlag-show command
pn_show:
pn_command: 'vlag-show'
pn_parameters: 'id,name,cluster,mode'
pn_options: 'no-show-headers'
- name: run the cluster-show command
pn_show:
pn_command: 'cluster-show'
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
stdout:
description: The set of responses from the show command.
returned: always
type: list
stderr:
description: The set of error responses from the show command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused any change on the target.
returned: always(False)
type: bool
"""
def pn_cli(module):
"""
    This method generates the cli prefix used to launch the Netvisor cli.
It parses the username, password, switch parameters from module.
:param module: The Ansible module to fetch username, password and switch
:return: returns the cli string for further processing
"""
username = module.params['pn_cliusername']
password = module.params['pn_clipassword']
cliswitch = module.params['pn_cliswitch']
if username and password:
cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
else:
cli = '/usr/bin/cli --quiet '
if cliswitch:
if cliswitch == 'local':
cli += ' switch-local '
else:
cli += ' switch ' + cliswitch
return cli
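# For illustration: with username 'admin', password 'pw' and cliswitch
# 'sw-01', pn_cli() returns
#   "/usr/bin/cli --quiet --user admin:pw  switch sw-01"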
def run_cli(module, cli):
"""
This method executes the cli command on the target node(s) and returns the
output. The module then exits based on the output.
:param cli: the complete cli string to be executed on the target node(s).
:param module: The Ansible module to fetch command
"""
cliswitch = module.params['pn_cliswitch']
command = module.params['pn_command']
cmd = shlex.split(cli)
# 'out' contains the output
# 'err' contains the error messages
result, out, err = module.run_command(cmd)
print_cli = cli.split(cliswitch)[1]
# Response in JSON format
if result != 0:
module.exit_json(
command=print_cli,
msg='%s: ' % command,
stderr=err.strip(),
changed=False
)
if out:
module.exit_json(
command=print_cli,
msg='%s: ' % command,
stdout=out.strip(),
changed=False
)
else:
module.exit_json(
command=cli,
msg='%s: Nothing to display!!!' % command,
changed=False
)
def main():
""" This section is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=True, type='str'),
pn_clipassword=dict(required=True, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str'),
pn_command=dict(required=True, type='str'),
pn_parameters=dict(default='all', type='str'),
pn_options=dict(type='str')
)
)
# Accessing the arguments
command = module.params['pn_command']
parameters = module.params['pn_parameters']
options = module.params['pn_options']
# Building the CLI command string
cli = pn_cli(module)
cli += ' %s format %s ' % (command, parameters)
if options:
cli += options
run_cli(module, cli)
# AnsibleModule boilerplate
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|
double-y/django | refs/heads/master | django/contrib/postgres/aggregates/__init__.py | 625 | from .general import * # NOQA
from .statistics import * # NOQA
|
csridhar/58A78C12-3B74-48F6-B265-887C33ED5F98-odat-5DD613ED-1FE0-4D6A-8A20-4C26C3F2C95B | refs/heads/master | src/server.py | 1 | from flask import abort, Flask, render_template, request, Response
from github_issue_interface import GitHubIssueInterface
import json
class Serve(Flask):
"""
Serve is our AppServer that proxies between GitHub and our client.
It's dumb and only creates a new GitHub Issue.
    It tracks created issues only while it's up and running.
"""
def __init__(self, name, authenticated_user):
super(Serve, self).__init__(name)
self._register_routes()
self.user = authenticated_user.user
self.token = authenticated_user.auth_token
self.gh_int = GitHubIssueInterface(self.user, self.token)
def _register_routes(self):
self.add_url_rule('/index', view_func=self.handle_index, methods=['GET'])
self.add_url_rule('/', view_func=self.handle_index, methods=['GET'])
self.add_url_rule('/create', view_func=self.handle_create, methods=['POST'])
def handle_index(self):
return render_template('index.html')
def handle_create(self):
        # The raw JSON payload arrives as the sole form key; grab it portably
        # (dict.keys()[0] breaks on Python 3, where keys() returns a view).
        issueData = json.loads(next(iter(request.form)))
title = issueData.get('title')
body = issueData.get('body')
if not title or not body:
return Response("Missing 'title' and/or 'body'", 400)
json_resp = self.gh_int.create_issue(title, body)
return Response("", 201)
|
OnShift/page_object | refs/heads/master | src/page_object/elements/paragraph.py | 1 | from .element import Element
class Paragraph(Element):
"""
Element class to represent HTML Paragraph
"""
|
hsuantien/scikit-learn | refs/heads/master | sklearn/utils/tests/test_stats.py | 304 | import numpy as np
from numpy.testing import TestCase
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.stats import rankdata
_cases = (
# values, method, expected
([100], 'max', [1.0]),
([100, 100, 100], 'max', [3.0, 3.0, 3.0]),
([100, 300, 200], 'max', [1.0, 3.0, 2.0]),
([100, 200, 300, 200], 'max', [1.0, 3.0, 4.0, 3.0]),
([100, 200, 300, 200, 100], 'max', [2.0, 4.0, 5.0, 4.0, 2.0]),
)
def test_cases():
def check_case(values, method, expected):
r = rankdata(values, method=method)
assert_array_equal(r, expected)
for values, method, expected in _cases:
yield check_case, values, method, expected
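# A standalone illustration (derived from the cases above): with method='max',
# tied values all receive the highest rank within their tie group.
if __name__ == "__main__":
    print(rankdata([100, 200, 300, 200], method="max"))  # ranks [1. 3. 4. 3.]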
|
joxeankoret/diaphora | refs/heads/master | pygments/formatters/other.py | 50 | # -*- coding: utf-8 -*-
"""
pygments.formatters.other
~~~~~~~~~~~~~~~~~~~~~~~~~
Other formatters: NullFormatter, RawTokenFormatter.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import OptionError, get_choice_opt
from pygments.token import Token
from pygments.console import colorize
__all__ = ['NullFormatter', 'RawTokenFormatter', 'TestcaseFormatter']
class NullFormatter(Formatter):
"""
Output the text unchanged without any formatting.
"""
name = 'Text only'
aliases = ['text', 'null']
filenames = ['*.txt']
def format(self, tokensource, outfile):
enc = self.encoding
for ttype, value in tokensource:
if enc:
outfile.write(value.encode(enc))
else:
outfile.write(value)
class RawTokenFormatter(Formatter):
r"""
Format tokens as a raw representation for storing token streams.
The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
be converted to a token stream with the `RawTokenLexer`, described in the
:doc:`lexer list <lexers>`.
Only two options are accepted:
`compress`
If set to ``'gz'`` or ``'bz2'``, compress the output with the given
compression algorithm after encoding (default: ``''``).
`error_color`
If set to a color name, highlight error tokens using that color. If
set but with no value, defaults to ``'red'``.
.. versionadded:: 0.11
"""
name = 'Raw tokens'
aliases = ['raw', 'tokens']
filenames = ['*.raw']
unicodeoutput = False
def __init__(self, **options):
Formatter.__init__(self, **options)
# We ignore self.encoding if it is set, since it gets set for lexer
# and formatter if given with -Oencoding on the command line.
# The RawTokenFormatter outputs only ASCII. Override here.
self.encoding = 'ascii' # let pygments.format() do the right thing
self.compress = get_choice_opt(options, 'compress',
['', 'none', 'gz', 'bz2'], '')
self.error_color = options.get('error_color', None)
if self.error_color is True:
self.error_color = 'red'
if self.error_color is not None:
try:
colorize(self.error_color, '')
except KeyError:
raise ValueError("Invalid color %r specified" %
self.error_color)
def format(self, tokensource, outfile):
try:
outfile.write(b'')
except TypeError:
raise TypeError('The raw tokens formatter needs a binary '
'output file')
if self.compress == 'gz':
import gzip
outfile = gzip.GzipFile('', 'wb', 9, outfile)
def write(text):
outfile.write(text.encode())
flush = outfile.flush
elif self.compress == 'bz2':
import bz2
compressor = bz2.BZ2Compressor(9)
def write(text):
outfile.write(compressor.compress(text.encode()))
def flush():
outfile.write(compressor.flush())
outfile.flush()
else:
def write(text):
outfile.write(text.encode())
flush = outfile.flush
if self.error_color:
for ttype, value in tokensource:
line = "%s\t%r\n" % (ttype, value)
if ttype is Token.Error:
write(colorize(self.error_color, line))
else:
write(line)
else:
for ttype, value in tokensource:
write("%s\t%r\n" % (ttype, value))
flush()
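# Usage sketch (not part of the original file; the PythonLexer import and the
# sample source are assumptions for illustration):
#   import io
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   buf = io.BytesIO()
#   highlight('x = 1\n', PythonLexer(), RawTokenFormatter(), buf)
#   # buf now holds lines of the form b"Token.Name\t'x'\n" (tokentype<TAB>repr)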
TESTCASE_BEFORE = u'''\
def testNeedsName(self):
fragment = %r
tokens = [
'''
TESTCASE_AFTER = u'''\
]
self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
'''
class TestcaseFormatter(Formatter):
"""
Format tokens as appropriate for a new testcase.
.. versionadded:: 2.0
"""
name = 'Testcase'
aliases = ['testcase']
def __init__(self, **options):
Formatter.__init__(self, **options)
if self.encoding is not None and self.encoding != 'utf-8':
raise ValueError("Only None and utf-8 are allowed encodings.")
def format(self, tokensource, outfile):
indentation = ' ' * 12
rawbuf = []
outbuf = []
for ttype, value in tokensource:
rawbuf.append(value)
outbuf.append('%s(%s, %r),\n' % (indentation, ttype, value))
before = TESTCASE_BEFORE % (u''.join(rawbuf),)
during = u''.join(outbuf)
after = TESTCASE_AFTER
if self.encoding is None:
outfile.write(before + during + after)
else:
outfile.write(before.encode('utf-8'))
outfile.write(during.encode('utf-8'))
outfile.write(after.encode('utf-8'))
outfile.flush()
|
rackerlabs/horizon | refs/heads/master | openstack_dashboard/test/tests/quotas.py | 3 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from django import http
from mox import IsA
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
class QuotaTests(test.APITestCase):
def get_usages(self, with_volume=True):
quotas = {'injected_file_content_bytes': {'quota': 1},
'metadata_items': {'quota': 1},
'injected_files': {'quota': 1},
'security_groups': {'quota': 10},
'security_group_rules': {'quota': 20},
'fixed_ips': {'quota': 10},
'ram': {'available': 8976, 'used': 1024, 'quota': 10000},
'floating_ips': {'available': 0, 'used': 2, 'quota': 1},
'instances': {'available': 8, 'used': 2, 'quota': 10},
'cores': {'available': 8, 'used': 2, 'quota': 10}}
if with_volume:
quotas.update({'volumes': {'available': 0, 'used': 3, 'quota': 1},
'snapshots': {'available': 0, 'used': 3,
'quota': 1},
'gigabytes': {'available': 920, 'used': 80,
'quota': 1000}})
return quotas
@test.create_stubs({api.nova: ('server_list',
'flavor_list',
'tenant_quota_get',),
api.network: ('tenant_floating_ip_list',),
quotas: ('is_service_enabled',),
cinder: ('volume_list', 'volume_snapshot_list',
'tenant_quota_get',)})
def test_tenant_quota_usages(self):
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
quotas.is_service_enabled(IsA(http.HttpRequest),
'volume').AndReturn(True)
api.nova.flavor_list(IsA(http.HttpRequest)) \
.AndReturn(self.flavors.list())
api.nova.tenant_quota_get(IsA(http.HttpRequest), '1') \
.AndReturn(self.quotas.first())
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.nova.server_list(IsA(http.HttpRequest)) \
.AndReturn([servers, False])
cinder.volume_list(IsA(http.HttpRequest)) \
.AndReturn(self.volumes.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest)) \
.AndReturn(self.snapshots.list())
cinder.tenant_quota_get(IsA(http.HttpRequest), '1') \
.AndReturn(self.cinder_quotas.first())
self.mox.ReplayAll()
quota_usages = quotas.tenant_quota_usages(self.request)
expected_output = self.get_usages()
# Compare internal structure of usages to expected.
self.assertEquals(quota_usages.usages, expected_output)
@test.create_stubs({api.nova: ('server_list',
'flavor_list',
'tenant_quota_get',),
api.network: ('tenant_floating_ip_list',),
quotas: ('is_service_enabled',)})
def test_tenant_quota_usages_without_volume(self):
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
quotas.is_service_enabled(IsA(http.HttpRequest),
'volume').AndReturn(False)
api.nova.flavor_list(IsA(http.HttpRequest)) \
.AndReturn(self.flavors.list())
api.nova.tenant_quota_get(IsA(http.HttpRequest), '1') \
.AndReturn(self.quotas.first())
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.nova.server_list(IsA(http.HttpRequest)) \
.AndReturn([servers, False])
self.mox.ReplayAll()
quota_usages = quotas.tenant_quota_usages(self.request)
expected_output = self.get_usages(with_volume=False)
# Compare internal structure of usages to expected.
self.assertEquals(quota_usages.usages, expected_output)
@test.create_stubs({api.nova: ('server_list',
'flavor_list',
'tenant_quota_get',),
api.network: ('tenant_floating_ip_list',),
quotas: ('is_service_enabled',)})
def test_tenant_quota_usages_no_instances_running(self):
quotas.is_service_enabled(IsA(http.HttpRequest),
'volume').AndReturn(False)
api.nova.flavor_list(IsA(http.HttpRequest)) \
.AndReturn(self.flavors.list())
api.nova.tenant_quota_get(IsA(http.HttpRequest), '1') \
.AndReturn(self.quotas.first())
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn([])
api.nova.server_list(IsA(http.HttpRequest)).AndReturn([[], False])
self.mox.ReplayAll()
quota_usages = quotas.tenant_quota_usages(self.request)
expected_output = self.get_usages(with_volume=False)
expected_output.update({
'ram': {'available': 10000, 'used': 0, 'quota': 10000},
'floating_ips': {'available': 1, 'used': 0, 'quota': 1},
'instances': {'available': 10, 'used': 0, 'quota': 10},
'cores': {'available': 10, 'used': 0, 'quota': 10}})
# Compare internal structure of usages to expected.
self.assertEquals(quota_usages.usages, expected_output)
@test.create_stubs({api.nova: ('server_list',
'flavor_list',
'tenant_quota_get',),
api.network: ('tenant_floating_ip_list',),
quotas: ('is_service_enabled',),
cinder: ('volume_list', 'volume_snapshot_list',
'tenant_quota_get',)})
def test_tenant_quota_usages_unlimited_quota(self):
inf_quota = self.quotas.first()
inf_quota['ram'] = -1
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
quotas.is_service_enabled(IsA(http.HttpRequest),
'volume').AndReturn(True)
api.nova.flavor_list(IsA(http.HttpRequest)) \
.AndReturn(self.flavors.list())
api.nova.tenant_quota_get(IsA(http.HttpRequest), '1') \
.AndReturn(inf_quota)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.nova.server_list(IsA(http.HttpRequest)) \
.AndReturn([servers, False])
cinder.volume_list(IsA(http.HttpRequest)) \
.AndReturn(self.volumes.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest)) \
.AndReturn(self.snapshots.list())
cinder.tenant_quota_get(IsA(http.HttpRequest), '1') \
.AndReturn(self.cinder_quotas.first())
self.mox.ReplayAll()
quota_usages = quotas.tenant_quota_usages(self.request)
expected_output = self.get_usages()
expected_output.update({'ram': {'available': float("inf"),
'used': 1024,
'quota': float("inf")}})
# Compare internal structure of usages to expected.
self.assertEquals(quota_usages.usages, expected_output)
|
aaxelb/SHARE | refs/heads/develop | tests/share/tasks/test_job_consumers.py | 2 | from unittest import mock
import uuid
import pytest
from share.tasks import harvest
from share.tasks import ingest
from share.tasks.jobs import HarvestJobConsumer
from share.tasks.jobs import IngestJobConsumer
from tests import factories
@pytest.mark.parametrize('task, Consumer', [
(harvest, HarvestJobConsumer),
(ingest, IngestJobConsumer),
])
@pytest.mark.parametrize('kwargs', [
{},
{'foo': 1},
{'foo': 1, 'bar': 'baz'},
])
def test_task_calls_consumer(task, Consumer, kwargs, monkeypatch):
monkeypatch.setattr(Consumer, 'consume', mock.Mock())
task.apply(kwargs=kwargs)
assert Consumer.consume.call_count == 1
assert Consumer.consume.call_args == ((), kwargs)
@pytest.mark.django_db
@pytest.mark.parametrize('Consumer, JobFactory', [
(HarvestJobConsumer, factories.HarvestJobFactory),
(IngestJobConsumer, factories.IngestJobFactory),
])
class TestJobConsumer:
@pytest.fixture
def consumer(self, Consumer, JobFactory, monkeypatch):
monkeypatch.setattr(Consumer, '_consume_job', mock.Mock())
return Consumer(task=mock.Mock(**{'request.id': uuid.uuid4()}))
def test_no_job(self, consumer):
consumer.consume()
assert not consumer._consume_job.called
def test_job_not_found(self, consumer):
with pytest.raises(consumer.Job.DoesNotExist):
consumer.consume(job_id=17)
assert not consumer._consume_job.called
@pytest.mark.skip(reason='consume() with no job_id is temporarily a noop')
def test_job_locked(self, consumer, JobFactory):
job = JobFactory()
with consumer.Job.objects.all().lock_first(consumer.lock_field):
consumer.consume()
assert not consumer._consume_job.called
job.refresh_from_db()
assert job.status == job.STATUS.created
def test_skip_duplicated(self, consumer, JobFactory):
job = JobFactory(completions=1, status=consumer.Job.STATUS.succeeded)
consumer.consume(job_id=job.id)
job.refresh_from_db()
assert job.status == job.STATUS.skipped
assert job.task_id == consumer.task.request.id
assert not consumer._consume_job.called
@pytest.mark.skip(reason='consume() with no job_id is temporarily a noop')
def test_obsolete(self, consumer, JobFactory, monkeypatch):
monkeypatch.setattr(consumer, '_update_versions', mock.Mock(return_value=False))
job = JobFactory()
consumer.consume()
job.refresh_from_db()
assert job.status == job.STATUS.skipped
assert job.error_context == job.SkipReasons.obsolete.value
assert job.task_id == consumer.task.request.id
assert not consumer._consume_job.called
@pytest.mark.skip(reason='consume() with no job_id is temporarily a noop')
@pytest.mark.parametrize('exhaust', [True, False])
def test_consume(self, consumer, JobFactory, exhaust):
job = JobFactory()
consumer.consume(exhaust=exhaust)
if exhaust:
assert consumer.task.apply_async.call_count == 1
assert consumer.task.apply_async.call_args == ((consumer.task.request.args, consumer.task.request.kwargs), {})
else:
assert not consumer.task.apply_async.called
assert consumer._consume_job.call_count == 1
assert consumer._consume_job.call_args == ((job,), {'force': False, 'superfluous': False})
|
wskplho/sl4a | refs/heads/master | python/xmpppy/xmpp/auth.py | 196 | ## auth.py
##
## Copyright (C) 2003-2005 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: auth.py,v 1.41 2008/09/13 21:45:21 normanr Exp $
"""
Provides the library with all Non-SASL and SASL authentication mechanisms.
Can be used both for client and transport authentication.
"""
from protocol import *
from client import PlugIn
import sha,base64,random,dispatcher,re
import md5
def HH(some): return md5.new(some).hexdigest()
def H(some): return md5.new(some).digest()
def C(some): return ':'.join(some)
class NonSASL(PlugIn):
""" Implements old Non-SASL (JEP-0078) authentication used in jabberd1.4 and transport authentication."""
def __init__(self,user,password,resource):
""" Caches username, password and resource for auth. """
PlugIn.__init__(self)
self.DBG_LINE='gen_auth'
self.user=user
self.password=password
self.resource=resource
def plugin(self,owner):
""" Determine the best auth method (digest/0k/plain) and use it for auth.
Returns used method name on success. Used internally. """
if not self.resource: return self.authComponent(owner)
self.DEBUG('Querying server about possible auth methods','start')
resp=owner.Dispatcher.SendAndWaitForResponse(Iq('get',NS_AUTH,payload=[Node('username',payload=[self.user])]))
if not isResultNode(resp):
self.DEBUG('No result node arrived! Aborting...','error')
return
iq=Iq(typ='set',node=resp)
query=iq.getTag('query')
query.setTagData('username',self.user)
query.setTagData('resource',self.resource)
if query.getTag('digest'):
self.DEBUG("Performing digest authentication",'ok')
query.setTagData('digest',sha.new(owner.Dispatcher.Stream._document_attrs['id']+self.password).hexdigest())
if query.getTag('password'): query.delChild('password')
method='digest'
elif query.getTag('token'):
token=query.getTagData('token')
seq=query.getTagData('sequence')
self.DEBUG("Performing zero-k authentication",'ok')
hash = sha.new(sha.new(self.password).hexdigest()+token).hexdigest()
for foo in xrange(int(seq)): hash = sha.new(hash).hexdigest()
query.setTagData('hash',hash)
method='0k'
else:
self.DEBUG("Sequre methods unsupported, performing plain text authentication",'warn')
query.setTagData('password',self.password)
method='plain'
resp=owner.Dispatcher.SendAndWaitForResponse(iq)
if isResultNode(resp):
            self.DEBUG('Successfully authenticated with remote host.','ok')
owner.User=self.user
owner.Resource=self.resource
owner._registered_name=owner.User+'@'+owner.Server+'/'+owner.Resource
return method
self.DEBUG('Authentication failed!','error')
def authComponent(self,owner):
""" Authenticate component. Send handshake stanza and wait for result. Returns "ok" on success. """
self.handshake=0
owner.send(Node(NS_COMPONENT_ACCEPT+' handshake',payload=[sha.new(owner.Dispatcher.Stream._document_attrs['id']+self.password).hexdigest()]))
owner.RegisterHandler('handshake',self.handshakeHandler,xmlns=NS_COMPONENT_ACCEPT)
while not self.handshake:
self.DEBUG("waiting on handshake",'notify')
owner.Process(1)
owner._registered_name=self.user
if self.handshake+1: return 'ok'
def handshakeHandler(self,disp,stanza):
""" Handler for registering in dispatcher for accepting transport authentication. """
if stanza.getName()=='handshake': self.handshake=1
else: self.handshake=-1
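# Usage sketch (assumption: callers normally reach NonSASL through the xmpppy
# Client's auth() helper rather than plugging it in directly):
#   cl = Client('example.org'); cl.connect()
#   cl.auth('user', 'secret', 'resource')  # falls back to NonSASL on pre-XMPP-1.0 servers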
class SASL(PlugIn):
""" Implements SASL authentication. """
def __init__(self,username,password):
PlugIn.__init__(self)
self.username=username
self.password=password
def plugin(self,owner):
if not self._owner.Dispatcher.Stream._document_attrs.has_key('version'): self.startsasl='not-supported'
elif self._owner.Dispatcher.Stream.features:
try: self.FeaturesHandler(self._owner.Dispatcher,self._owner.Dispatcher.Stream.features)
except NodeProcessed: pass
else: self.startsasl=None
def auth(self):
""" Start authentication. Result can be obtained via "SASL.startsasl" attribute and will be
either "success" or "failure". Note that successfull auth will take at least
two Dispatcher.Process() calls. """
if self.startsasl: pass
elif self._owner.Dispatcher.Stream.features:
try: self.FeaturesHandler(self._owner.Dispatcher,self._owner.Dispatcher.Stream.features)
except NodeProcessed: pass
else: self._owner.RegisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
def plugout(self):
""" Remove SASL handlers from owner's dispatcher. Used internally. """
if self._owner.__dict__.has_key('features'): self._owner.UnregisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
if self._owner.__dict__.has_key('challenge'): self._owner.UnregisterHandler('challenge',self.SASLHandler,xmlns=NS_SASL)
if self._owner.__dict__.has_key('failure'): self._owner.UnregisterHandler('failure',self.SASLHandler,xmlns=NS_SASL)
if self._owner.__dict__.has_key('success'): self._owner.UnregisterHandler('success',self.SASLHandler,xmlns=NS_SASL)
def FeaturesHandler(self,conn,feats):
""" Used to determine if server supports SASL auth. Used internally. """
if not feats.getTag('mechanisms',namespace=NS_SASL):
self.startsasl='not-supported'
self.DEBUG('SASL not supported by server','error')
return
mecs=[]
for mec in feats.getTag('mechanisms',namespace=NS_SASL).getTags('mechanism'):
mecs.append(mec.getData())
self._owner.RegisterHandler('challenge',self.SASLHandler,xmlns=NS_SASL)
self._owner.RegisterHandler('failure',self.SASLHandler,xmlns=NS_SASL)
self._owner.RegisterHandler('success',self.SASLHandler,xmlns=NS_SASL)
if "ANONYMOUS" in mecs and self.username == None:
node=Node('auth',attrs={'xmlns':NS_SASL,'mechanism':'ANONYMOUS'})
elif "DIGEST-MD5" in mecs:
node=Node('auth',attrs={'xmlns':NS_SASL,'mechanism':'DIGEST-MD5'})
elif "PLAIN" in mecs:
sasl_data='%s\x00%s\x00%s'%(self.username+'@'+self._owner.Server,self.username,self.password)
node=Node('auth',attrs={'xmlns':NS_SASL,'mechanism':'PLAIN'},payload=[base64.encodestring(sasl_data).replace('\r','').replace('\n','')])
else:
self.startsasl='failure'
            self.DEBUG('I can only use DIGEST-MD5 and PLAIN mechanisms.','error')
return
self.startsasl='in-process'
self._owner.send(node.__str__())
raise NodeProcessed
def SASLHandler(self,conn,challenge):
""" Perform next SASL auth step. Used internally. """
if challenge.getNamespace()<>NS_SASL: return
if challenge.getName()=='failure':
self.startsasl='failure'
try: reason=challenge.getChildren()[0]
except: reason=challenge
            self.DEBUG('Failed SASL authentication: %s'%reason,'error')
raise NodeProcessed
elif challenge.getName()=='success':
self.startsasl='success'
self.DEBUG('Successfully authenticated with remote server.','ok')
handlers=self._owner.Dispatcher.dumpHandlers()
self._owner.Dispatcher.PlugOut()
dispatcher.Dispatcher().PlugIn(self._owner)
self._owner.Dispatcher.restoreHandlers(handlers)
self._owner.User=self.username
raise NodeProcessed
        ########################################
incoming_data=challenge.getData()
chal={}
data=base64.decodestring(incoming_data)
self.DEBUG('Got challenge:'+data,'ok')
for pair in re.findall('(\w+\s*=\s*(?:(?:"[^"]+")|(?:[^,]+)))',data):
key,value=[x.strip() for x in pair.split('=', 1)]
if value[:1]=='"' and value[-1:]=='"': value=value[1:-1]
chal[key]=value
if chal.has_key('qop') and 'auth' in [x.strip() for x in chal['qop'].split(',')]:
resp={}
resp['username']=self.username
resp['realm']=self._owner.Server
resp['nonce']=chal['nonce']
cnonce=''
for i in range(7):
cnonce+=hex(int(random.random()*65536*4096))[2:]
resp['cnonce']=cnonce
resp['nc']=('00000001')
resp['qop']='auth'
resp['digest-uri']='xmpp/'+self._owner.Server
A1=C([H(C([resp['username'],resp['realm'],self.password])),resp['nonce'],resp['cnonce']])
A2=C(['AUTHENTICATE',resp['digest-uri']])
response= HH(C([HH(A1),resp['nonce'],resp['nc'],resp['cnonce'],resp['qop'],HH(A2)]))
resp['response']=response
resp['charset']='utf-8'
sasl_data=''
for key in ['charset','username','realm','nonce','nc','cnonce','digest-uri','response','qop']:
if key in ['nc','qop','response','charset']: sasl_data+="%s=%s,"%(key,resp[key])
else: sasl_data+='%s="%s",'%(key,resp[key])
            ########################################
node=Node('response',attrs={'xmlns':NS_SASL},payload=[base64.encodestring(sasl_data[:-1]).replace('\r','').replace('\n','')])
self._owner.send(node.__str__())
elif chal.has_key('rspauth'): self._owner.send(Node('response',attrs={'xmlns':NS_SASL}).__str__())
else:
self.startsasl='failure'
            self.DEBUG('Failed SASL authentication: unknown challenge','error')
raise NodeProcessed
class Bind(PlugIn):
""" Bind some JID to the current connection to allow router know of our location."""
def __init__(self):
PlugIn.__init__(self)
self.DBG_LINE='bind'
self.bound=None
def plugin(self,owner):
""" Start resource binding, if allowed at this time. Used internally. """
if self._owner.Dispatcher.Stream.features:
try: self.FeaturesHandler(self._owner.Dispatcher,self._owner.Dispatcher.Stream.features)
except NodeProcessed: pass
else: self._owner.RegisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
def plugout(self):
""" Remove Bind handler from owner's dispatcher. Used internally. """
self._owner.UnregisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
def FeaturesHandler(self,conn,feats):
""" Determine if server supports resource binding and set some internal attributes accordingly. """
if not feats.getTag('bind',namespace=NS_BIND):
self.bound='failure'
            self.DEBUG('Server did not request binding.','error')
return
if feats.getTag('session',namespace=NS_SESSION): self.session=1
else: self.session=-1
self.bound=[]
def Bind(self,resource=None):
""" Perform binding. Use provided resource name or random (if not provided). """
while self.bound is None and self._owner.Process(1): pass
if resource: resource=[Node('resource',payload=[resource])]
else: resource=[]
resp=self._owner.SendAndWaitForResponse(Protocol('iq',typ='set',payload=[Node('bind',attrs={'xmlns':NS_BIND},payload=resource)]))
if isResultNode(resp):
self.bound.append(resp.getTag('bind').getTagData('jid'))
self.DEBUG('Successfully bound %s.'%self.bound[-1],'ok')
jid=JID(resp.getTag('bind').getTagData('jid'))
self._owner.User=jid.getNode()
self._owner.Resource=jid.getResource()
resp=self._owner.SendAndWaitForResponse(Protocol('iq',typ='set',payload=[Node('session',attrs={'xmlns':NS_SESSION})]))
if isResultNode(resp):
self.DEBUG('Successfully opened session.','ok')
self.session=1
return 'ok'
else:
self.DEBUG('Session open failed.','error')
self.session=0
elif resp: self.DEBUG('Binding failed: %s.'%resp.getTag('error'),'error')
else:
self.DEBUG('Binding failed: timeout expired.','error')
return ''
class ComponentBind(PlugIn):
""" ComponentBind some JID to the current connection to allow router know of our location."""
def __init__(self, sasl):
PlugIn.__init__(self)
self.DBG_LINE='bind'
self.bound=None
self.needsUnregister=None
self.sasl = sasl
def plugin(self,owner):
""" Start resource binding, if allowed at this time. Used internally. """
if not self.sasl:
self.bound=[]
return
if self._owner.Dispatcher.Stream.features:
try: self.FeaturesHandler(self._owner.Dispatcher,self._owner.Dispatcher.Stream.features)
except NodeProcessed: pass
else:
self._owner.RegisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
self.needsUnregister=1
def plugout(self):
""" Remove ComponentBind handler from owner's dispatcher. Used internally. """
if self.needsUnregister:
self._owner.UnregisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
def FeaturesHandler(self,conn,feats):
""" Determine if server supports resource binding and set some internal attributes accordingly. """
if not feats.getTag('bind',namespace=NS_BIND):
self.bound='failure'
self.DEBUG('Server did not request binding.','error')
return
if feats.getTag('session',namespace=NS_SESSION): self.session=1
else: self.session=-1
self.bound=[]
def Bind(self,domain=None):
""" Perform binding. Use provided domain name (if not provided). """
while self.bound is None and self._owner.Process(1): pass
if self.sasl:
xmlns = NS_COMPONENT_1
else:
xmlns = None
self.bindresponse = None
ttl = dispatcher.DefaultTimeout
self._owner.RegisterHandler('bind',self.BindHandler,xmlns=xmlns)
self._owner.send(Protocol('bind',attrs={'name':domain},xmlns=NS_COMPONENT_1))
while self.bindresponse is None and self._owner.Process(1) and ttl > 0: ttl-=1
self._owner.UnregisterHandler('bind',self.BindHandler,xmlns=xmlns)
resp=self.bindresponse
if resp and resp.getAttr('error'):
self.DEBUG('Binding failed: %s.'%resp.getAttr('error'),'error')
elif resp:
self.DEBUG('Successfully bound.','ok')
return 'ok'
else:
self.DEBUG('Binding failed: timeout expired.','error')
return ''
def BindHandler(self,conn,bind):
self.bindresponse = bind
|
Suwmlee/XX-Net | refs/heads/python3 | Python3/lib/email/header.py | 12 | # Copyright (C) 2002-2007 Python Software Foundation
# Author: Ben Gertzfield, Barry Warsaw
# Contact: email-sig@python.org
"""Header encoding and decoding functionality."""
__all__ = [
'Header',
'decode_header',
'make_header',
]
import re
import binascii
import email.quoprimime
import email.base64mime
from email.errors import HeaderParseError
from email import charset as _charset
Charset = _charset.Charset
NL = '\n'
SPACE = ' '
BSPACE = b' '
SPACE8 = ' ' * 8
EMPTYSTRING = ''
MAXLINELEN = 78
FWS = ' \t'
USASCII = Charset('us-ascii')
UTF8 = Charset('utf-8')
# Match encoded-word strings in the form =?charset?q?Hello_World?=
ecre = re.compile(r'''
=\? # literal =?
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
\? # literal ?
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
\? # literal ?
(?P<encoded>.*?) # non-greedy up to the next ?= is the encoded string
\?= # literal ?=
''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)
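# Illustrative match (result assumed): ecre.findall('=?utf-8?q?caf=C3=A9?=')
# should yield [('utf-8', 'q', 'caf=C3=A9')].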
# Field name regexp, including trailing colon, but not separating whitespace,
# according to RFC 2822. Character range is from exclamation mark to tilde.
# For use with .match()
fcre = re.compile(r'[\041-\176]+:$')
# Find a header embedded in a putative header value. Used to check for
# header injection attack.
_embedded_header = re.compile(r'\n[^ \t]+:')
# Helpers
_max_append = email.quoprimime._max_append
def decode_header(header):
"""Decode a message header value without converting charset.
Returns a list of (string, charset) pairs containing each of the decoded
parts of the header. Charset is None for non-encoded parts of the header,
otherwise a lower-case string containing the name of the character set
specified in the encoded string.
header may be a string that may or may not contain RFC2047 encoded words,
or it may be a Header object.
An email.errors.HeaderParseError may be raised when certain decoding error
occurs (e.g. a base64 decoding exception).
"""
# If it is a Header object, we can just return the encoded chunks.
if hasattr(header, '_chunks'):
return [(_charset._encode(string, str(charset)), str(charset))
for string, charset in header._chunks]
# If no encoding, just return the header with no charset.
if not ecre.search(header):
return [(header, None)]
# First step is to parse all the encoded parts into triplets of the form
# (encoded_string, encoding, charset). For unencoded strings, the last
# two parts will be None.
words = []
for line in header.splitlines():
parts = ecre.split(line)
first = True
while parts:
unencoded = parts.pop(0)
if first:
unencoded = unencoded.lstrip()
first = False
if unencoded:
words.append((unencoded, None, None))
if parts:
charset = parts.pop(0).lower()
encoding = parts.pop(0).lower()
encoded = parts.pop(0)
words.append((encoded, encoding, charset))
# Now loop over words and remove words that consist of whitespace
# between two encoded strings.
droplist = []
for n, w in enumerate(words):
if n>1 and w[1] and words[n-2][1] and words[n-1][0].isspace():
droplist.append(n-1)
for d in reversed(droplist):
del words[d]
# The next step is to decode each encoded word by applying the reverse
# base64 or quopri transformation. decoded_words is now a list of the
# form (decoded_word, charset).
decoded_words = []
for encoded_string, encoding, charset in words:
if encoding is None:
# This is an unencoded word.
decoded_words.append((encoded_string, charset))
elif encoding == 'q':
word = email.quoprimime.header_decode(encoded_string)
decoded_words.append((word, charset))
elif encoding == 'b':
paderr = len(encoded_string) % 4 # Postel's law: add missing padding
if paderr:
encoded_string += '==='[:4 - paderr]
try:
word = email.base64mime.decode(encoded_string)
except binascii.Error:
raise HeaderParseError('Base64 decoding error')
else:
decoded_words.append((word, charset))
else:
raise AssertionError('Unexpected encoding: ' + encoding)
# Now convert all words to bytes and collapse consecutive runs of
# similarly encoded words.
collapsed = []
last_word = last_charset = None
for word, charset in decoded_words:
if isinstance(word, str):
word = bytes(word, 'raw-unicode-escape')
if last_word is None:
last_word = word
last_charset = charset
elif charset != last_charset:
collapsed.append((last_word, last_charset))
last_word = word
last_charset = charset
elif last_charset is None:
last_word += BSPACE + word
else:
last_word += word
collapsed.append((last_word, last_charset))
return collapsed
def make_header(decoded_seq, maxlinelen=None, header_name=None,
continuation_ws=' '):
"""Create a Header from a sequence of pairs as returned by decode_header()
decode_header() takes a header value string and returns a sequence of
pairs of the format (decoded_string, charset) where charset is the string
name of the character set.
This function takes one of those sequence of pairs and returns a Header
instance. Optional maxlinelen, header_name, and continuation_ws are as in
the Header constructor.
"""
h = Header(maxlinelen=maxlinelen, header_name=header_name,
continuation_ws=continuation_ws)
for s, charset in decoded_seq:
# None means us-ascii but we can simply pass it on to h.append()
if charset is not None and not isinstance(charset, Charset):
charset = Charset(charset)
h.append(s, charset)
return h
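# Round-trip sketch (header value assumed):
#   str(make_header(decode_header('=?utf-8?b?w6k=?= note'))) should give 'é note'.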
class Header:
def __init__(self, s=None, charset=None,
maxlinelen=None, header_name=None,
continuation_ws=' ', errors='strict'):
"""Create a MIME-compliant header that can contain many character sets.
Optional s is the initial header value. If None, the initial header
value is not set. You can later append to the header with .append()
method calls. s may be a byte string or a Unicode string, but see the
.append() documentation for semantics.
Optional charset serves two purposes: it has the same meaning as the
charset argument to the .append() method. It also sets the default
character set for all subsequent .append() calls that omit the charset
argument. If charset is not provided in the constructor, the us-ascii
charset is used both as s's initial charset and as the default for
subsequent .append() calls.
The maximum line length can be specified explicitly via maxlinelen. For
splitting the first line to a shorter value (to account for the field
header which isn't included in s, e.g. `Subject') pass in the name of
the field in header_name. The default maxlinelen is 78 as recommended
by RFC 2822.
continuation_ws must be RFC 2822 compliant folding whitespace (usually
either a space or a hard tab) which will be prepended to continuation
lines.
errors is passed through to the .append() call.
"""
if charset is None:
charset = USASCII
elif not isinstance(charset, Charset):
charset = Charset(charset)
self._charset = charset
self._continuation_ws = continuation_ws
self._chunks = []
if s is not None:
self.append(s, charset, errors)
if maxlinelen is None:
maxlinelen = MAXLINELEN
self._maxlinelen = maxlinelen
if header_name is None:
self._headerlen = 0
else:
# Take the separating colon and space into account.
self._headerlen = len(header_name) + 2
def __str__(self):
"""Return the string value of the header."""
self._normalize()
uchunks = []
lastcs = None
lastspace = None
for string, charset in self._chunks:
# We must preserve spaces between encoded and non-encoded word
# boundaries, which means for us we need to add a space when we go
# from a charset to None/us-ascii, or from None/us-ascii to a
# charset. Only do this for the second and subsequent chunks.
# Don't add a space if the None/us-ascii string already has
# a space (trailing or leading depending on transition)
nextcs = charset
if nextcs == _charset.UNKNOWN8BIT:
original_bytes = string.encode('ascii', 'surrogateescape')
string = original_bytes.decode('ascii', 'replace')
if uchunks:
hasspace = string and self._nonctext(string[0])
if lastcs not in (None, 'us-ascii'):
if nextcs in (None, 'us-ascii') and not hasspace:
uchunks.append(SPACE)
nextcs = None
elif nextcs not in (None, 'us-ascii') and not lastspace:
uchunks.append(SPACE)
lastspace = string and self._nonctext(string[-1])
lastcs = nextcs
uchunks.append(string)
return EMPTYSTRING.join(uchunks)
# Rich comparison operators for equality only. BAW: does it make sense to
# have or explicitly disable <, <=, >, >= operators?
def __eq__(self, other):
# other may be a Header or a string. Both are fine so coerce
# ourselves to a unicode (of the unencoded header value), swap the
# args and do another comparison.
return other == str(self)
def append(self, s, charset=None, errors='strict'):
"""Append a string to the MIME header.
Optional charset, if given, should be a Charset instance or the name
of a character set (which will be converted to a Charset instance). A
value of None (the default) means that the charset given in the
constructor is used.
s may be a byte string or a Unicode string. If it is a byte string
(i.e. isinstance(s, str) is false), then charset is the encoding of
that byte string, and a UnicodeError will be raised if the string
cannot be decoded with that charset. If s is a Unicode string, then
charset is a hint specifying the character set of the characters in
the string. In either case, when producing an RFC 2822 compliant
header using RFC 2047 rules, the string will be encoded using the
output codec of the charset. If the string cannot be encoded to the
output codec, a UnicodeError will be raised.
Optional `errors' is passed as the errors argument to the decode
call if s is a byte string.
"""
if charset is None:
charset = self._charset
elif not isinstance(charset, Charset):
charset = Charset(charset)
if not isinstance(s, str):
input_charset = charset.input_codec or 'us-ascii'
if input_charset == _charset.UNKNOWN8BIT:
s = s.decode('us-ascii', 'surrogateescape')
else:
s = s.decode(input_charset, errors)
# Ensure that the bytes we're storing can be decoded to the output
# character set, otherwise an early error is raised.
output_charset = charset.output_codec or 'us-ascii'
if output_charset != _charset.UNKNOWN8BIT:
try:
s.encode(output_charset, errors)
except UnicodeEncodeError:
if output_charset!='us-ascii':
raise
charset = UTF8
self._chunks.append((s, charset))
def _nonctext(self, s):
"""True if string s is not a ctext character of RFC822.
"""
return s.isspace() or s in ('(', ')', '\\')
def encode(self, splitchars=';, \t', maxlinelen=None, linesep='\n'):
r"""Encode a message header into an RFC-compliant format.
There are many issues involved in converting a given string for use in
an email header. Only certain character sets are readable in most
email clients, and as header strings can only contain a subset of
7-bit ASCII, care must be taken to properly convert and encode (with
Base64 or quoted-printable) header strings. In addition, there is a
75-character length limit on any given encoded header field, so
line-wrapping must be performed, even with double-byte character sets.
Optional maxlinelen specifies the maximum length of each generated
line, exclusive of the linesep string. Individual lines may be longer
than maxlinelen if a folding point cannot be found. The first line
will be shorter by the length of the header name plus ": " if a header
name was specified at Header construction time. The default value for
maxlinelen is determined at header construction time.
Optional splitchars is a string containing characters which should be
given extra weight by the splitting algorithm during normal header
wrapping. This is in very rough support of RFC 2822's `higher level
syntactic breaks': split points preceded by a splitchar are preferred
during line splitting, with the characters preferred in the order in
which they appear in the string. Space and tab may be included in the
string to indicate whether preference should be given to one over the
other as a split point when other split chars do not appear in the line
being split. Splitchars does not affect RFC 2047 encoded lines.
Optional linesep is a string to be used to separate the lines of
the value. The default value is the most useful for typical
Python applications, but it can be set to \r\n to produce RFC-compliant
line separators when needed.
"""
self._normalize()
if maxlinelen is None:
maxlinelen = self._maxlinelen
# A maxlinelen of 0 means don't wrap. For all practical purposes,
# choosing a huge number here accomplishes that and makes the
# _ValueFormatter algorithm much simpler.
if maxlinelen == 0:
maxlinelen = 1000000
formatter = _ValueFormatter(self._headerlen, maxlinelen,
self._continuation_ws, splitchars)
lastcs = None
hasspace = lastspace = None
for string, charset in self._chunks:
if hasspace is not None:
hasspace = string and self._nonctext(string[0])
if lastcs not in (None, 'us-ascii'):
if not hasspace or charset not in (None, 'us-ascii'):
formatter.add_transition()
elif charset not in (None, 'us-ascii') and not lastspace:
formatter.add_transition()
lastspace = string and self._nonctext(string[-1])
lastcs = charset
hasspace = False
lines = string.splitlines()
if lines:
formatter.feed('', lines[0], charset)
else:
formatter.feed('', '', charset)
for line in lines[1:]:
formatter.newline()
if charset.header_encoding is not None:
formatter.feed(self._continuation_ws, ' ' + line.lstrip(),
charset)
else:
sline = line.lstrip()
fws = line[:len(line)-len(sline)]
formatter.feed(fws, sline, charset)
if len(lines) > 1:
formatter.newline()
if self._chunks:
formatter.add_transition()
value = formatter._str(linesep)
if _embedded_header.search(value):
raise HeaderParseError("header value appears to contain "
"an embedded header: {!r}".format(value))
return value
def _normalize(self):
# Step 1: Normalize the chunks so that all runs of identical charsets
# get collapsed into a single unicode string.
chunks = []
last_charset = None
last_chunk = []
for string, charset in self._chunks:
if charset == last_charset:
last_chunk.append(string)
else:
if last_charset is not None:
chunks.append((SPACE.join(last_chunk), last_charset))
last_chunk = [string]
last_charset = charset
if last_chunk:
chunks.append((SPACE.join(last_chunk), last_charset))
self._chunks = chunks
class _ValueFormatter:
def __init__(self, headerlen, maxlen, continuation_ws, splitchars):
self._maxlen = maxlen
self._continuation_ws = continuation_ws
self._continuation_ws_len = len(continuation_ws)
self._splitchars = splitchars
self._lines = []
self._current_line = _Accumulator(headerlen)
def _str(self, linesep):
self.newline()
return linesep.join(self._lines)
def __str__(self):
return self._str(NL)
def newline(self):
end_of_line = self._current_line.pop()
if end_of_line != (' ', ''):
self._current_line.push(*end_of_line)
if len(self._current_line) > 0:
if self._current_line.is_onlyws():
self._lines[-1] += str(self._current_line)
else:
self._lines.append(str(self._current_line))
self._current_line.reset()
def add_transition(self):
self._current_line.push(' ', '')
def feed(self, fws, string, charset):
# If the charset has no header encoding (i.e. it is an ASCII encoding)
# then we must split the header at the "highest level syntactic break"
# possible. Note that we don't have a lot of smarts about field
# syntax; we just try to break on semi-colons, then commas, then
# whitespace. Eventually, this should be pluggable.
if charset.header_encoding is None:
self._ascii_split(fws, string, self._splitchars)
return
# Otherwise, we're doing either a Base64 or a quoted-printable
# encoding which means we don't need to split the line on syntactic
# breaks. We can basically just find enough characters to fit on the
# current line, minus the RFC 2047 chrome. What makes this trickier
# though is that we have to split at octet boundaries, not character
# boundaries but it's only safe to split at character boundaries so at
# best we can only get close.
encoded_lines = charset.header_encode_lines(string, self._maxlengths())
# The first element extends the current line, but if it's None then
# nothing more fit on the current line so start a new line.
try:
first_line = encoded_lines.pop(0)
except IndexError:
# There are no encoded lines, so we're done.
return
if first_line is not None:
self._append_chunk(fws, first_line)
try:
last_line = encoded_lines.pop()
except IndexError:
# There was only one line.
return
self.newline()
self._current_line.push(self._continuation_ws, last_line)
# Everything else are full lines in themselves.
for line in encoded_lines:
self._lines.append(self._continuation_ws + line)
def _maxlengths(self):
# The first line's length.
yield self._maxlen - len(self._current_line)
while True:
yield self._maxlen - self._continuation_ws_len
def _ascii_split(self, fws, string, splitchars):
# The RFC 2822 header folding algorithm is simple in principle but
# complex in practice. Lines may be folded any place where "folding
# white space" appears by inserting a linesep character in front of the
# FWS. The complication is that not all spaces or tabs qualify as FWS,
# and we are also supposed to prefer to break at "higher level
# syntactic breaks". We can't do either of these without intimate
# knowledge of the structure of structured headers, which we don't have
# here. So the best we can do here is prefer to break at the specified
# splitchars, and hope that we don't choose any spaces or tabs that
# aren't legal FWS. (This is at least better than the old algorithm,
# where we would sometimes *introduce* FWS after a splitchar, or the
# algorithm before that, where we would turn all white space runs into
# single spaces or tabs.)
parts = re.split("(["+FWS+"]+)", fws+string)
if parts[0]:
parts[:0] = ['']
else:
parts.pop(0)
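# zip(*[iter(parts)]*2) walks the flat list two items at a time, pairing
# each run of folding whitespace with the chunk that follows it.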
for fws, part in zip(*[iter(parts)]*2):
self._append_chunk(fws, part)
def _append_chunk(self, fws, string):
self._current_line.push(fws, string)
if len(self._current_line) > self._maxlen:
# Find the best split point, working backward from the end.
# There might be none, on a long first line.
for ch in self._splitchars:
for i in range(self._current_line.part_count()-1, 0, -1):
if ch.isspace():
fws = self._current_line[i][0]
if fws and fws[0]==ch:
break
prevpart = self._current_line[i-1][1]
if prevpart and prevpart[-1]==ch:
break
else:
continue
break
else:
fws, part = self._current_line.pop()
if self._current_line._initial_size > 0:
# There will be a header, so leave it on a line by itself.
self.newline()
if not fws:
# We don't use continuation_ws here because the whitespace
# after a header should always be a space.
fws = ' '
self._current_line.push(fws, part)
return
remainder = self._current_line.pop_from(i)
self._lines.append(str(self._current_line))
self._current_line.reset(remainder)
class _Accumulator(list):
def __init__(self, initial_size=0):
self._initial_size = initial_size
super().__init__()
def push(self, fws, string):
self.append((fws, string))
def pop_from(self, i=0):
popped = self[i:]
self[i:] = []
return popped
def pop(self):
if self.part_count()==0:
return ('', '')
return super().pop()
def __len__(self):
return sum((len(fws)+len(part) for fws, part in self),
self._initial_size)
def __str__(self):
return EMPTYSTRING.join((EMPTYSTRING.join((fws, part))
for fws, part in self))
def reset(self, startval=None):
if startval is None:
startval = []
self[:] = startval
self._initial_size = 0
def is_onlyws(self):
return self._initial_size==0 and (not self or str(self).isspace())
def part_count(self):
return super().__len__()
|
Architizer/django-haystack | refs/heads/master | tests/solr_tests/tests/inputs.py | 10 | from django.test import TestCase
from haystack import connections
from haystack import inputs
class SolrInputTestCase(TestCase):
def setUp(self):
super(SolrInputTestCase, self).setUp()
self.query_obj = connections['default'].get_query()
def test_raw_init(self):
raw = inputs.Raw('hello OR there, :you')
self.assertEqual(raw.query_string, 'hello OR there, :you')
self.assertEqual(raw.kwargs, {})
self.assertEqual(raw.post_process, False)
raw = inputs.Raw('hello OR there, :you', test='really')
self.assertEqual(raw.query_string, 'hello OR there, :you')
self.assertEqual(raw.kwargs, {'test': 'really'})
self.assertEqual(raw.post_process, False)
def test_raw_prepare(self):
raw = inputs.Raw('hello OR there, :you')
self.assertEqual(raw.prepare(self.query_obj), 'hello OR there, :you')
def test_clean_init(self):
clean = inputs.Clean('hello OR there, :you')
self.assertEqual(clean.query_string, 'hello OR there, :you')
self.assertEqual(clean.post_process, True)
def test_clean_prepare(self):
clean = inputs.Clean('hello OR there, :you')
self.assertEqual(clean.prepare(self.query_obj), 'hello or there, \\:you')
def test_exact_init(self):
exact = inputs.Exact('hello OR there, :you')
self.assertEqual(exact.query_string, 'hello OR there, :you')
self.assertEqual(exact.post_process, True)
def test_exact_prepare(self):
exact = inputs.Exact('hello OR there, :you')
self.assertEqual(exact.prepare(self.query_obj), u'"hello OR there, :you"')
exact = inputs.Exact('hello OR there, :you', clean=True)
self.assertEqual(exact.prepare(self.query_obj), u'"hello or there, \\:you"')
def test_not_init(self):
not_it = inputs.Not('hello OR there, :you')
self.assertEqual(not_it.query_string, 'hello OR there, :you')
self.assertEqual(not_it.post_process, True)
def test_not_prepare(self):
not_it = inputs.Not('hello OR there, :you')
self.assertEqual(not_it.prepare(self.query_obj), u'NOT (hello or there, \\:you)')
def test_autoquery_init(self):
autoquery = inputs.AutoQuery('panic -don\'t "froody dude"')
self.assertEqual(autoquery.query_string, 'panic -don\'t "froody dude"')
self.assertEqual(autoquery.post_process, False)
def test_autoquery_prepare(self):
autoquery = inputs.AutoQuery('panic -don\'t "froody dude"')
self.assertEqual(autoquery.prepare(self.query_obj), u'panic NOT don\'t "froody dude"')
def test_altparser_init(self):
altparser = inputs.AltParser('dismax')
self.assertEqual(altparser.parser_name, 'dismax')
self.assertEqual(altparser.query_string, '')
self.assertEqual(altparser.kwargs, {})
self.assertEqual(altparser.post_process, False)
altparser = inputs.AltParser('dismax', 'douglas adams', qf='author', mm=1)
self.assertEqual(altparser.parser_name, 'dismax')
self.assertEqual(altparser.query_string, 'douglas adams')
self.assertEqual(altparser.kwargs, {'mm': 1, 'qf': 'author'})
self.assertEqual(altparser.post_process, False)
def test_altparser_prepare(self):
altparser = inputs.AltParser('dismax', 'douglas adams', qf='author', mm=1)
self.assertEqual(altparser.prepare(self.query_obj),
u"""_query_:"{!dismax mm=1 qf=author}douglas adams\"""")
altparser = inputs.AltParser('dismax', 'Don\'t panic', qf='text author', mm=1)
self.assertEqual(altparser.prepare(self.query_obj),
u"""_query_:"{!dismax mm=1 qf='text author'}Don't panic\"""")
|
vipul-sharma20/oh-mainline | refs/heads/master | vendor/packages/python-social-auth/social/backends/fitbit.py | 32 | """
Fitbit OAuth1 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/fitbit.html
"""
from social.backends.oauth import BaseOAuth1
class FitbitOAuth(BaseOAuth1):
"""Fitbit OAuth authentication backend"""
name = 'fitbit'
AUTHORIZATION_URL = 'https://api.fitbit.com/oauth/authorize'
REQUEST_TOKEN_URL = 'https://api.fitbit.com/oauth/request_token'
ACCESS_TOKEN_URL = 'https://api.fitbit.com/oauth/access_token'
ID_KEY = 'encodedId'
EXTRA_DATA = [('encodedId', 'id'),
('displayName', 'username')]
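# Typical Django-strategy settings for this backend (values are placeholders):
#   SOCIAL_AUTH_FITBIT_KEY = '<consumer key>'
#   SOCIAL_AUTH_FITBIT_SECRET = '<consumer secret>'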
def get_user_details(self, response):
"""Return user details from Fitbit account"""
return {'username': response.get('displayName'),
'email': ''}
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
return self.get_json(
'https://api.fitbit.com/1/user/-/profile.json',
auth=self.oauth_auth(access_token)
)['user']
|
f4rnham/server | refs/heads/10.1 | storage/tokudb/mysql-test/tokudb/locks-blocking-row-locks-testgen.py | 47 | # 9/23/2011 Generate blocking row lock tests
import datetime
# generate sql write queries
def mysqlgen_select_for_update(k, kv, c, cv):
print "select * from t where %s=%s for update;" % (k, kv)
def mysqlgen_select_for_update_range(k, c, where):
print "select * from t where %s%s for update;" % (k, where)
def mysqlgen_update(k, kv, c, cv):
print "update t set %s=%s where %s=%s;" % (c, c, k, kv);
def mysqlgen_update_range(k, c, where):
print "update t set %s=%s where %s%s;" % (c, c, k, where);
def mysqlgen_insert_ignore(k, kv, c, cv):
print "insert ignore t values(%s, %s);" % (kv, cv)
def mysqlgen_insert_on_dup_update(k, kv, c, cv):
print "insert t values(%s, %s) on duplicate key update %s=%s;" % (kv, cv, c, c)
def mysqlgen_replace(k, kv, c, cv):
print "replace t values(%s, %s);" % (kv, cv)
# genrate sql read queries
def mysqlgen_select_star():
print "select * from t;"
def mysqlgen_select_where(k, where):
print "select * from t where %s%s;" % (k, where)
# mysql test code generation
def mysqlgen_prepare():
print "# prepare with some common parameters"
print "connect(conn1, localhost, root);"
print "set session transaction isolation level serializable;"
print "connect(conn2, localhost, root);"
print "set session transaction isolation level serializable;"
print "connection conn1;"
print ""
def mysqlgen_reload_table():
print "# drop old table, generate new one. 4 rows"
print "--disable_warnings"
print "drop table if exists t;"
print "--enable_warnings"
print "create table t (a int primary key, b int) engine=tokudb;"
for i in range(1, 7):
mysqlgen_insert_ignore("a", i, "b", i*i)
print ""
def mysqlgen_cleanup():
print "# clean it all up"
print "drop table t;"
print ""
write_point_queries = [
("select for update", mysqlgen_select_for_update),
("update", mysqlgen_update),
("insert", mysqlgen_insert_ignore),
("replace", mysqlgen_replace) ]
write_range_queries = [
("select for update", mysqlgen_select_for_update_range),
("update", mysqlgen_update_range) ]
timeouts = [0, 500]
# Here's where all the magic happens
print "# Tokutek"
print "# Blocking row lock tests;"
print "# Generated by %s on %s;" % (__file__, datetime.date.today())
print ""
mysqlgen_prepare()
mysqlgen_reload_table()
for timeout in timeouts:
print "# testing with timeout %s" % timeout
print "connection conn1;"
print "set session tokudb_lock_timeout=%s;" % timeout
print "connection conn2;"
print "set session tokudb_lock_timeout=%s;" % timeout
print ""
print "# testing each point query vs each point query"
for ta, qa in write_point_queries:
# point vs point contention
for tb, qb in write_point_queries:
print "# testing conflict \"%s\" vs. \"%s\"" % (ta, tb)
print "connection conn1;"
print "begin;"
print "# about to do qa.."
qa("a", "1", "b", "100")
print "connection conn2;"
for k in range(1, 5):
if k == 1:
print "--error ER_LOCK_WAIT_TIMEOUT"
qb("a", k, "b", "100")
# point write lock vs read query
print "# make sure we can't read that row, but can read others."
print "begin;"
print "--error ER_LOCK_WAIT_TIMEOUT"
mysqlgen_select_star()
print "--error ER_LOCK_WAIT_TIMEOUT"
mysqlgen_select_where("a", "=1")
mysqlgen_select_where("a", ">=2")
print "commit;"
# Always check in the end that a commit
# allows the other transaction full access
print "connection conn1;"
print "commit;"
print "connection conn2;"
qb("a", "1", "b", "100")
print "begin;"
mysqlgen_select_star()
print "commit;"
print "connection conn1;"
print ""
# test early commit
if timeout > 0:
print "# check that an early commit allows a blocked"
print "# transaction to complete"
print "connection conn1;"
print "begin;"
qa("a", "1", "b", "150")
print "connection conn2;"
# this makes the query asynchronous, so we can jump back
# to the conn1 connection and commit it.
print "send ",
qb("a", "1", "b", "175")
print "connection conn1;"
print "commit;"
print "connection conn2;"
print "reap;"
# point vs range contention
for rt, rq in write_range_queries:
print "# testing range query \"%s\" vs \"%s\"" % (rt, ta)
print "connection conn1;"
print "begin;"
print ""
qa("a", "1", "b", "100")
print "connection conn2;"
print "--error ER_LOCK_WAIT_TIMEOUT"
rq("a", "b", "<=2")
print "--error ER_LOCK_WAIT_TIMEOUT"
rq("a", "b", ">=0")
rq("a", "b", ">2")
# write range lock vs read query
print "# make sure we can't read that row, but can read others."
print "begin;"
print "--error ER_LOCK_WAIT_TIMEOUT"
mysqlgen_select_star()
print "--error ER_LOCK_WAIT_TIMEOUT"
mysqlgen_select_where("a", "=1")
mysqlgen_select_where("a", ">=2")
print "commit;"
# Always check in the end that a commit
# allows the other transaction full access
print "connection conn1;"
print "commit;"
print "connection conn2;"
rq("a", "b", "<=2")
rq("a", "b", ">=0")
print "begin;"
mysqlgen_select_star()
print "commit;"
print "connection conn1;"
print ""
# test early commit
if timeout > 0:
print "# check that an early commit allows a blocked"
print "# transaction to complete"
print "connection conn1;"
print "begin;"
qa("a", "1", "b", "150")
print "connection conn2;"
# this makes the query asynchronous, so we can jump back
# to the conn1 connection and commit it.
print "send ",
rq("a", "b", "<=2")
print "connection conn1;"
print "commit;"
print "connection conn2;"
print "reap;"
for rt, rq in write_range_queries:
for rtb, rqb in write_range_queries:
print "# testing range query \"%s\" vs range query \"%s\"" % (rt, rtb)
print "connection conn1;"
print "begin;"
print ""
rq("a", "b", ">=2 and a<=4")
print "connection conn2;"
print "--error ER_LOCK_WAIT_TIMEOUT"
rqb("a", "b", ">=0 and a<=3")
print "--error ER_LOCK_WAIT_TIMEOUT"
rqb("a", "b", ">=3 and a<=6")
print "--error ER_LOCK_WAIT_TIMEOUT"
rqb("a", "b", "<=2")
rqb("a", "b", ">=5")
# point write lock vs read query
print "# make sure we can't read that row, but can read others."
print "begin;"
print "--error ER_LOCK_WAIT_TIMEOUT"
mysqlgen_select_star()
print "--error ER_LOCK_WAIT_TIMEOUT"
mysqlgen_select_where("a", "=2")
print "--error ER_LOCK_WAIT_TIMEOUT"
mysqlgen_select_where("a", ">=3 and a<=5")
mysqlgen_select_where("a", ">=5")
print "commit;"
# Always check in the end that a commit
# allows the other transaction full access
print "connection conn1;"
print "commit;"
print "connection conn2;"
rqb("a", "b", ">=0 and a<=3")
rqb("a", "b", ">=3 and a<=6")
rqb("a", "b", "<=2")
print "begin;"
mysqlgen_select_star()
print "commit;"
print "connection conn1;"
print ""
# test early commit
if timeout > 0:
print "# check that an early commit allows a blocked"
print "# transaction to complete"
print "connection conn1;"
print "begin;"
rq("a", "b", ">=2 and a<=4")
print "connection conn2;"
# this makes the query asynchronous, so we can jump back
# to the conn1 connection and commit it.
print "send ",
rqb("a", "b", ">=0 and a<=3")
print "connection conn1;"
print "commit;"
print "connection conn2;"
print "reap;"
mysqlgen_cleanup()
|
tkaitchuck/nupic | refs/heads/master | external/darwin64/lib/python2.6/site-packages/Crypto/PublicKey/ElGamal.py | 13 | #
# ElGamal.py : ElGamal encryption/decryption and signatures
#
# Part of the Python Cryptography Toolkit
#
# Originally written by: A.M. Kuchling
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
__revision__ = "$Id$"
from Crypto.PublicKey.pubkey import *
from Crypto.Util import number
class error (Exception):
pass
# Generate an ElGamal key with N bits
def generate(bits, randfunc, progress_func=None):
"""generate(bits:int, randfunc:callable, progress_func:callable)
Generate an ElGamal key of length 'bits', using 'randfunc' to get
random data and 'progress_func', if present, to display
the progress of the key generation.
"""
obj=ElGamalobj()
# Generate prime p
if progress_func:
progress_func('p\n')
obj.p=bignum(getPrime(bits, randfunc))
# Generate random number g
if progress_func:
progress_func('g\n')
size=bits-1-(ord(randfunc(1)) & 63) # g will be from 1--64 bits smaller than p
if size<1:
size=bits-1
while (1):
obj.g=bignum(getPrime(size, randfunc))
if obj.g < obj.p:
break
size=(size+1) % bits
if size==0:
size=4
# Generate random number x
if progress_func:
progress_func('x\n')
while (1):
size=bits-1-ord(randfunc(1)) # x will be from 1 to 256 bits smaller than p
if size>2:
break
while (1):
obj.x=bignum(getPrime(size, randfunc))
if obj.x < obj.p:
break
size = (size+1) % bits
if size==0:
size=4
if progress_func:
progress_func('y\n')
obj.y = pow(obj.g, obj.x, obj.p)
return obj
def construct(tuple):
"""construct(tuple:(long,long,long,long)|(long,long,long,long,long)))
: ElGamalobj
Construct an ElGamal key from a 3- or 4-tuple of numbers.
"""
obj=ElGamalobj()
if len(tuple) not in [3,4]:
raise ValueError('argument for construct() wrong length')
for i in range(len(tuple)):
field = obj.keydata[i]
setattr(obj, field, tuple[i])
return obj
class ElGamalobj(pubkey):
keydata=['p', 'g', 'y', 'x']
def _encrypt(self, M, K):
a=pow(self.g, K, self.p)
b=( M*pow(self.y, K, self.p) ) % self.p
return ( a,b )
def _decrypt(self, M):
if (not hasattr(self, 'x')):
raise TypeError('Private key not available in this object')
ax=pow(M[0], self.x, self.p)
plaintext=(M[1] * inverse(ax, self.p ) ) % self.p
return plaintext
def _sign(self, M, K):
if (not hasattr(self, 'x')):
raise TypeError('Private key not available in this object')
p1=self.p-1
if (GCD(K, p1)!=1):
raise ValueError('Bad K value: GCD(K,p-1)!=1')
a=pow(self.g, K, self.p)
t=(M-self.x*a) % p1
while t<0: t=t+p1
b=(t*inverse(K, p1)) % p1
return (a, b)
def _verify(self, M, sig):
v1=pow(self.y, sig[0], self.p)
v1=(v1*pow(sig[0], sig[1], self.p)) % self.p
v2=pow(self.g, M, self.p)
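# Standard ElGamal check: the signature (a, b) is valid iff
# y^a * a^b == g^M (mod p).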
if v1==v2:
return 1
return 0
def size(self):
"Return the maximum number of bits that can be handled by this key."
return number.size(self.p) - 1
def has_private(self):
"""Return a Boolean denoting whether the object contains
private components."""
if hasattr(self, 'x'):
return 1
else:
return 0
def publickey(self):
"""Return a new key object containing only the public information."""
return construct((self.p, self.g, self.y))
object=ElGamalobj
|
unaizalakain/django | refs/heads/master | django/core/mail/backends/base.py | 577 | """Base email backend class."""
class BaseEmailBackend(object):
"""
Base class for email backend implementations.
Subclasses must at least overwrite send_messages().
open() and close() can be called indirectly by using a backend object as a
context manager:
with backend as connection:
# do something with connection
pass
"""
def __init__(self, fail_silently=False, **kwargs):
self.fail_silently = fail_silently
def open(self):
"""Open a network connection.
This method can be overwritten by backend implementations to
open a network connection.
It's up to the backend implementation to track the status of
a network connection if it's needed by the backend.
This method can be called by applications to force a single
network connection to be used when sending mails. See the
send_messages() method of the SMTP backend for a reference
implementation.
The default implementation does nothing.
"""
pass
def close(self):
"""Close a network connection."""
pass
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of email
messages sent.
"""
raise NotImplementedError('subclasses of BaseEmailBackend must override send_messages() method')
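# Minimal subclass sketch (illustrative only; the class name is invented):
#
#   class PrintingBackend(BaseEmailBackend):
#       def send_messages(self, email_messages):
#           for message in email_messages:
#               print(message.message().as_string())
#           return len(email_messages)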
|
oscartorresco/financial | refs/heads/master | financiero/sells/views.py | 1 | # from ajax_view import ajax_view
from django.shortcuts import render_to_response
from django.template import RequestContext
def nueva_venta(request):
return render_to_response('template_base.html', request.session, context_instance=RequestContext(request))
|
bealdav/OpenUpgrade | refs/heads/8.0 | addons/resource/tests/test_resource.py | 243 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp.addons.resource.tests.common import TestResourceCommon
class TestResource(TestResourceCommon):
def test_00_intervals(self):
intervals = [
(
datetime.strptime('2013-02-04 09:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 11:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 12:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-04 11:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-04 18:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 19:00:00', '%Y-%m-%d %H:%M:%S')
)
]
# Test: interval cleaning
cleaned_intervals = self.resource_calendar.interval_clean(intervals)
self.assertEqual(len(cleaned_intervals), 3, 'resource_calendar: wrong interval cleaning')
# First interval: 03, unchanged
self.assertEqual(cleaned_intervals[0][0], datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
self.assertEqual(cleaned_intervals[0][1], datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
# Second interval: 04, 08-14, combining 08-12 and 11-14, 09-11 being inside 08-12
self.assertEqual(cleaned_intervals[1][0], datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
self.assertEqual(cleaned_intervals[1][1], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
# Third interval: 04, 17-21, 18-19 being inside 17-21
self.assertEqual(cleaned_intervals[2][0], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
self.assertEqual(cleaned_intervals[2][1], datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
# Test: disjoint removal
working_interval = (datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2013-02-04 18:00:00', '%Y-%m-%d %H:%M:%S'))
result = self.resource_calendar.interval_remove_leaves(working_interval, intervals)
self.assertEqual(len(result), 1, 'resource_calendar: wrong leave removal from interval')
# First interval: 04, 14-17
self.assertEqual(result[0][0], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
self.assertEqual(result[0][1], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
# Test: schedule hours on intervals
result = self.resource_calendar.interval_schedule_hours(cleaned_intervals, 5.5)
self.assertEqual(len(result), 2, 'resource_calendar: wrong hours scheduling in interval')
# First interval: 03, 8-10 untouched
self.assertEqual(result[0][0], datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong hours scheduling in interval')
self.assertEqual(result[0][1], datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong hours scheduling in interval')
# Second interval: 04, 08-11:30
self.assertEqual(result[1][0], datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong hours scheduling in interval')
self.assertEqual(result[1][1], datetime.strptime('2013-02-04 11:30:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong hours scheduling in interval')
# Test: schedule hours on intervals, backwards
cleaned_intervals.reverse()
result = self.resource_calendar.interval_schedule_hours(cleaned_intervals, 5.5, remove_at_end=False)
self.assertEqual(len(result), 2, 'resource_calendar: wrong hours scheduling in interval')
# First interval: 04, 17-21, untouched
self.assertEqual(result[0][0], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong hours scheduling in interval')
self.assertEqual(result[0][1], datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong hours scheduling in interval')
# Second interval: 04, 12:30-14:00
self.assertEqual(result[1][0], datetime.strptime('2013-02-04 12:30:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong hours scheduling in interval')
self.assertEqual(result[1][1], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong hours scheduling in interval')
def test_10_calendar_basics(self):
""" Testing basic method of resource.calendar """
cr, uid = self.cr, self.uid
# --------------------------------------------------
# Test1: get_next_day
# --------------------------------------------------
# Test: next day: next day after day1 is day4
date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date1.date())
self.assertEqual(date, self.date2.date(), 'resource_calendar: wrong next day computing')
# Test: next day: next day after day4 is (day1+7)
date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date2.date())
self.assertEqual(date, self.date1.date() + relativedelta(days=7), 'resource_calendar: wrong next day computing')
# Test: next day: next day after day4+1 is (day1+7)
date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date2.date() + relativedelta(days=1))
self.assertEqual(date, self.date1.date() + relativedelta(days=7), 'resource_calendar: wrong next day computing')
# Test: next day: next day after day1-1 is day1
date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date1.date() + relativedelta(days=-1))
self.assertEqual(date, self.date1.date(), 'resource_calendar: wrong next day computing')
# --------------------------------------------------
# Test2: get_previous_day
# --------------------------------------------------
# Test: previous day: previous day before day1 is (day4-7)
date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date1.date())
self.assertEqual(date, self.date2.date() + relativedelta(days=-7), 'resource_calendar: wrong previous day computing')
# Test: previous day: previous day before day4 is day1
date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date2.date())
self.assertEqual(date, self.date1.date(), 'resource_calendar: wrong previous day computing')
# Test: previous day: previous day before day4+1 is day4
date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date2.date() + relativedelta(days=1))
self.assertEqual(date, self.date2.date(), 'resource_calendar: wrong previous day computing')
# Test: previous day: previous day before day1-1 is (day4-7)
date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date1.date() + relativedelta(days=-1))
self.assertEqual(date, self.date2.date() + relativedelta(days=-7), 'resource_calendar: wrong previous day computing')
# --------------------------------------------------
# Test3: misc
# --------------------------------------------------
weekdays = self.resource_calendar.get_weekdays(cr, uid, self.calendar_id)
self.assertEqual(weekdays, [1, 4], 'resource_calendar: wrong weekdays computing')
attendances = self.resource_calendar.get_attendances_for_weekdays(cr, uid, self.calendar_id, [2, 3, 4, 5])
self.assertEqual(set([att.id for att in attendances]), set([self.att2_id, self.att3_id]),
'resource_calendar: wrong attendances filtering by weekdays computing')
def test_20_calendar_working_intervals(self):
""" Testing working intervals computing method of resource.calendar """
cr, uid = self.cr, self.uid
_format = '%Y-%m-%d %H:%M:%S'
# Test: day0 without leaves: 1 interval
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1)
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 09:08:07', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working intervals')
# Test: day3 without leaves: 2 interval
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date2)
self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-15 10:11:12', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-15 13:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[1][0], datetime.strptime('2013-02-15 16:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[1][1], datetime.strptime('2013-02-15 23:00:00', _format), 'resource_calendar: wrong working intervals')
# Test: day0 with leaves outside range: 1 interval
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=0), compute_leaves=True)
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 08:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working intervals')
# Test: day0 with leaves: 2 intervals because of leave between 9 ans 12, ending at 15:45:30
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=8) + relativedelta(days=7),
end_dt=self.date1.replace(hour=15, minute=45, second=30) + relativedelta(days=7),
compute_leaves=True)
self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:08:07', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 09:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[1][0], datetime.strptime('2013-02-19 12:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[1][1], datetime.strptime('2013-02-19 15:45:30', _format), 'resource_calendar: wrong working intervals')
def test_30_calendar_working_days(self):
""" Testing calendar hours computation on a working day """
cr, uid = self.cr, self.uid
_format = '%Y-%m-%d %H:%M:%S'
# Test: day1, beginning at 10:30 -> work from 10:30 (arrival) until 16:00
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=10, minute=30, second=0))
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 10:30:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
# Test: hour computation for same interval, should give 5.5
wh = self.resource_calendar.get_working_hours_of_date(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=10, minute=30, second=0))
self.assertEqual(wh, 5.5, 'resource_calendar: wrong working interval / day time computing')
# Test: day1+7 on leave, without leave computation
intervals = self.resource_calendar.get_working_intervals_of_day(
cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=7)
)
# Result: day1 (08->16)
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval/day computing')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
# Test: day1+7 on leave, with generic leave computation
intervals = self.resource_calendar.get_working_intervals_of_day(
cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=7),
compute_leaves=True
)
# Result: day1 (08->09 + 12->16)
self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working interval/day computing')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 09:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[1][0], datetime.strptime('2013-02-19 12:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[1][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
# Test: day1+14 on leave, with generic leave computation
intervals = self.resource_calendar.get_working_intervals_of_day(
cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=14),
compute_leaves=True
)
# Result: day1 (08->16)
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval/day computing')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-26 08:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-26 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
# Test: day1+14 on leave, with resource leave computation
intervals = self.resource_calendar.get_working_intervals_of_day(
cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=14),
compute_leaves=True,
resource_id=self.resource1_id
)
# Result: nothing, because on leave
self.assertEqual(len(intervals), 0, 'resource_calendar: wrong working interval/day computing')
def test_40_calendar_hours_scheduling(self):
""" Testing calendar hours scheduling """
cr, uid = self.cr, self.uid
_format = '%Y-%m-%d %H:%M:%S'
# --------------------------------------------------
# Test0: schedule hours backwards (old interval_min_get)
# Done without calendar
# --------------------------------------------------
# Done without calendar
# res = self.resource_calendar.interval_min_get(cr, uid, None, self.date1, 40, resource=False)
# res: (datetime.datetime(2013, 2, 7, 9, 8, 7), datetime.datetime(2013, 2, 12, 9, 8, 7))
# --------------------------------------------------
# Test1: schedule hours backwards (old interval_min_get)
# --------------------------------------------------
# res = self.resource_calendar.interval_min_get(cr, uid, self.calendar_id, self.date1, 40, resource=False)
# (datetime.datetime(2013, 1, 29, 9, 0), datetime.datetime(2013, 1, 29, 16, 0))
# (datetime.datetime(2013, 2, 1, 8, 0), datetime.datetime(2013, 2, 1, 13, 0))
# (datetime.datetime(2013, 2, 1, 16, 0), datetime.datetime(2013, 2, 1, 23, 0))
# (datetime.datetime(2013, 2, 5, 8, 0), datetime.datetime(2013, 2, 5, 16, 0))
# (datetime.datetime(2013, 2, 8, 8, 0), datetime.datetime(2013, 2, 8, 13, 0))
# (datetime.datetime(2013, 2, 8, 16, 0), datetime.datetime(2013, 2, 8, 23, 0))
# (datetime.datetime(2013, 2, 12, 8, 0), datetime.datetime(2013, 2, 12, 9, 0))
res = self.resource_calendar.schedule_hours(cr, uid, self.calendar_id, -40, day_dt=self.date1.replace(minute=0, second=0))
# current day, limited at 09:00 because of day_dt specified -> 1 hour
self.assertEqual(res[-1][0], datetime.strptime('2013-02-12 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-1][1], datetime.strptime('2013-02-12 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
# previous days: 5+7 hours / 8 hours / 5+7 hours -> 32 hours
self.assertEqual(res[-2][0], datetime.strptime('2013-02-08 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-2][1], datetime.strptime('2013-02-08 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-3][0], datetime.strptime('2013-02-08 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-3][1], datetime.strptime('2013-02-08 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-4][0], datetime.strptime('2013-02-05 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-4][1], datetime.strptime('2013-02-05 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-5][0], datetime.strptime('2013-02-01 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-5][1], datetime.strptime('2013-02-01 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-6][0], datetime.strptime('2013-02-01 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-6][1], datetime.strptime('2013-02-01 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
# 7 hours remaining
self.assertEqual(res[-7][0], datetime.strptime('2013-01-29 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-7][1], datetime.strptime('2013-01-29 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
# Compute scheduled hours
td = timedelta()
for item in res:
td += item[1] - item[0]
self.assertEqual(seconds(td) / 3600.0, 40.0, 'resource_calendar: wrong hours scheduling')
# --------------------------------------------------
# Test2: schedule hours forward (old interval_get)
# --------------------------------------------------
# res = self.resource_calendar.interval_get(cr, uid, self.calendar_id, self.date1, 40, resource=False, byday=True)
# (datetime.datetime(2013, 2, 12, 9, 0), datetime.datetime(2013, 2, 12, 16, 0))
# (datetime.datetime(2013, 2, 15, 8, 0), datetime.datetime(2013, 2, 15, 13, 0))
# (datetime.datetime(2013, 2, 15, 16, 0), datetime.datetime(2013, 2, 15, 23, 0))
# (datetime.datetime(2013, 2, 22, 8, 0), datetime.datetime(2013, 2, 22, 13, 0))
# (datetime.datetime(2013, 2, 22, 16, 0), datetime.datetime(2013, 2, 22, 23, 0))
# (datetime.datetime(2013, 2, 26, 8, 0), datetime.datetime(2013, 2, 26, 16, 0))
# (datetime.datetime(2013, 3, 1, 8, 0), datetime.datetime(2013, 3, 1, 9, 0))
res = self.resource_calendar.schedule_hours(
cr, uid, self.calendar_id, 40,
day_dt=self.date1.replace(minute=0, second=0)
)
self.assertEqual(res[0][0], datetime.strptime('2013-02-12 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[1][0], datetime.strptime('2013-02-15 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[1][1], datetime.strptime('2013-02-15 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[2][0], datetime.strptime('2013-02-15 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[2][1], datetime.strptime('2013-02-15 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[3][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[3][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[4][0], datetime.strptime('2013-02-22 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[4][1], datetime.strptime('2013-02-22 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[5][0], datetime.strptime('2013-02-22 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[5][1], datetime.strptime('2013-02-22 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[6][0], datetime.strptime('2013-02-26 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[6][1], datetime.strptime('2013-02-26 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
td = timedelta()
for item in res:
td += item[1] - item[0]
self.assertEqual(seconds(td) / 3600.0, 40.0, 'resource_calendar: wrong hours scheduling')
# res = self.resource_calendar.interval_get(cr, uid, self.calendar_id, self.date1, 40, resource=self.resource1_id, byday=True)
# (datetime.datetime(2013, 2, 12, 9, 0), datetime.datetime(2013, 2, 12, 16, 0))
# (datetime.datetime(2013, 2, 15, 8, 0), datetime.datetime(2013, 2, 15, 13, 0))
# (datetime.datetime(2013, 2, 15, 16, 0), datetime.datetime(2013, 2, 15, 23, 0))
# (datetime.datetime(2013, 3, 1, 8, 0), datetime.datetime(2013, 3, 1, 13, 0))
# (datetime.datetime(2013, 3, 1, 16, 0), datetime.datetime(2013, 3, 1, 23, 0))
# (datetime.datetime(2013, 3, 5, 8, 0), datetime.datetime(2013, 3, 5, 16, 0))
# (datetime.datetime(2013, 3, 8, 8, 0), datetime.datetime(2013, 3, 8, 9, 0))
res = self.resource_calendar.schedule_hours(
cr, uid, self.calendar_id, 40,
day_dt=self.date1.replace(minute=0, second=0),
compute_leaves=True,
resource_id=self.resource1_id
)
self.assertEqual(res[0][0], datetime.strptime('2013-02-12 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[1][0], datetime.strptime('2013-02-15 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[1][1], datetime.strptime('2013-02-15 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[2][0], datetime.strptime('2013-02-15 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[2][1], datetime.strptime('2013-02-15 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[3][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[3][1], datetime.strptime('2013-02-19 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[4][0], datetime.strptime('2013-02-19 12:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[4][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[5][0], datetime.strptime('2013-02-22 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[5][1], datetime.strptime('2013-02-22 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[6][0], datetime.strptime('2013-02-22 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[6][1], datetime.strptime('2013-02-22 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[7][0], datetime.strptime('2013-03-01 11:30:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[7][1], datetime.strptime('2013-03-01 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[8][0], datetime.strptime('2013-03-01 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[8][1], datetime.strptime('2013-03-01 22:30:00', _format), 'resource_calendar: wrong hours scheduling')
td = timedelta()
for item in res:
td += item[1] - item[0]
self.assertEqual(seconds(td) / 3600.0, 40.0, 'resource_calendar: wrong hours scheduling')
# --------------------------------------------------
# Test3: working hours (old _interval_hours_get)
# --------------------------------------------------
# old API: resource without leaves
# res: 2 weeks -> 40 hours
res = self.resource_calendar._interval_hours_get(
cr, uid, self.calendar_id,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
resource_id=self.resource1_id, exclude_leaves=True)
self.assertEqual(res, 40.0, 'resource_calendar: wrong _interval_hours_get compatibility computation')
# new API: resource without leaves
# res: 2 weeks -> 40 hours
res = self.resource_calendar.get_working_hours(
cr, uid, self.calendar_id,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
compute_leaves=False, resource_id=self.resource1_id)
self.assertEqual(res, 40.0, 'resource_calendar: wrong get_working_hours computation')
# old API: resource and leaves
# res: 2 weeks -> 40 hours - (3+4) leave hours
res = self.resource_calendar._interval_hours_get(
cr, uid, self.calendar_id,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
resource_id=self.resource1_id, exclude_leaves=False)
self.assertEqual(res, 33.0, 'resource_calendar: wrong _interval_hours_get compatibility computation')
# new API: resource and leaves
# res: 2 weeks -> 40 hours - (3+4) leave hours
res = self.resource_calendar.get_working_hours(
cr, uid, self.calendar_id,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
compute_leaves=True, resource_id=self.resource1_id)
self.assertEqual(res, 33.0, 'resource_calendar: wrong get_working_hours computation')
# --------------------------------------------------
# Test4: misc
# --------------------------------------------------
# Test without calendar and default_interval
res = self.resource_calendar.get_working_hours(
cr, uid, None,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0),
compute_leaves=True, resource_id=self.resource1_id,
default_interval=(8, 16))
self.assertEqual(res, 32.0, 'resource_calendar: wrong get_working_hours computation')
def test_50_calendar_schedule_days(self):
""" Testing calendar days scheduling """
cr, uid = self.cr, self.uid
_format = '%Y-%m-%d %H:%M:%S'
# --------------------------------------------------
# Test1: with calendar
# --------------------------------------------------
res = self.resource_calendar.schedule_days_get_date(cr, uid, self.calendar_id, 5, day_date=self.date1)
        self.assertEqual(res.date(), datetime.strptime('2013-02-26 00:00:00', _format).date(), 'resource_calendar: wrong days scheduling')
res = self.resource_calendar.schedule_days_get_date(
cr, uid, self.calendar_id, 5, day_date=self.date1,
compute_leaves=True, resource_id=self.resource1_id)
        self.assertEqual(res.date(), datetime.strptime('2013-03-01 00:00:00', _format).date(), 'resource_calendar: wrong days scheduling')
# --------------------------------------------------
# Test2: misc
# --------------------------------------------------
# Without calendar, should only count days -> 12 -> 16, 5 days with default intervals
res = self.resource_calendar.schedule_days_get_date(cr, uid, None, 5, day_date=self.date1, default_interval=(8, 16))
self.assertEqual(res, datetime.strptime('2013-02-16 16:00:00', _format), 'resource_calendar: wrong days scheduling')
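
# Helper used by the scheduling tests above; equivalent to
# timedelta.total_seconds(), spelled out for Python versions older than 2.7.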
def seconds(td):
assert isinstance(td, timedelta)
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10.**6
|
Comunitea/CMNT_004_15 | refs/heads/11.0 | project-addons/crm_claim_rma/tests/test_lp_1282584.py | 1 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Vaucher
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
class test_lp_1282584(common.TransactionCase):
""" Test wizard open the right type of view
The wizard can generate picking.in and picking.out
Let's ensure it open the right view for each picking type
"""
def setUp(self):
super(test_lp_1282584, self).setUp()
cr, uid = self.cr, self.uid
self.WizardMakePicking = self.registry('claim_make_picking.wizard')
ClaimLine = self.registry('claim.line')
Claim = self.registry('crm.claim')
        customer_type = self.ref('crm_claim_type.crm_claim_type_customer')
self.product_id = self.ref('product.product_product_4')
self.partner_id = self.ref('base.res_partner_12')
# Create the claim with a claim line
self.claim_id = Claim.create(
cr, uid,
{
'name': 'TEST CLAIM',
'number': 'TEST CLAIM',
'claim_type': customer_type,
'delivery_address_id': self.partner_id,
})
claim = Claim.browse(cr, uid, self.claim_id)
self.warehouse_id = claim.warehouse_id.id
self.claim_line_id = ClaimLine.create(
cr, uid,
{
'name': 'TEST CLAIM LINE',
'claim_origine': 'none',
'product_id': self.product_id,
'claim_id': self.claim_id,
'location_dest_id': claim.warehouse_id.lot_stock_id.id
})
def test_00(self):
"""Test wizard opened view model for a new product return
"""
cr, uid = self.cr, self.uid
wiz_context = {
'active_id': self.claim_id,
'partner_id': self.partner_id,
'warehouse_id': self.warehouse_id,
'picking_type': 'in',
}
wizard_id = self.WizardMakePicking.create(cr, uid, {
}, context=wiz_context)
res = self.WizardMakePicking.action_create_picking(
cr, uid, [wizard_id], context=wiz_context)
self.assertEquals(res.get('res_model'), 'stock.picking.in', "Wrong model defined")
def test_01(self):
"""Test wizard opened view model for a new delivery
"""
cr, uid = self.cr, self.uid
WizardChangeProductQty = self.registry('stock.change.product.qty')
wiz_context = {'active_id': self.product_id}
wizard_chg_qty_id = WizardChangeProductQty.create(cr, uid, {
'product_id': self.product_id,
'new_quantity': 12})
WizardChangeProductQty.change_product_qty(cr, uid, [wizard_chg_qty_id], context=wiz_context)
wiz_context = {
'active_id': self.claim_id,
'partner_id': self.partner_id,
'warehouse_id': self.warehouse_id,
'picking_type': 'out',
}
wizard_id = self.WizardMakePicking.create(cr, uid, {
}, context=wiz_context)
res = self.WizardMakePicking.action_create_picking(
cr, uid, [wizard_id], context=wiz_context)
self.assertEquals(res.get('res_model'), 'stock.picking.out', "Wrong model defined")
|
kbukin1/pnotify-linux-4.1.6 | refs/heads/master | tools/perf/util/setup.py | 766 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
libapikfs = getenv('LIBAPI')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
extra_objects = [libtraceevent, libapikfs],
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
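
# Note: this setup script is driven by the perf build system, which is
# expected to export PYTHON_EXTBUILD_LIB, PYTHON_EXTBUILD_TMP, LIBTRACEEVENT
# and LIBAPI before invoking it; run standalone, the getenv() calls above
# would return None and the extension build would fail.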
|
SexualHealthInnovations/django-wizard-builder | refs/heads/master | wizard_builder/tests/urls.py | 2 | from django.conf.urls import include, url
from django.contrib import admin
from .. import views
urlpatterns = [
url(r'^$',
views.NewWizardView.as_view(),
),
url(r'^new/$',
views.NewWizardView.as_view(),
name='wizard_new',
),
url(r'^step/(?P<step>.+)/$',
views.WizardView.as_view(),
name='wizard_update',
),
url(r'^nested_admin/', include('nested_admin.urls')),
url(r'^admin/', admin.site.urls),
]
|
hpcuantwerpen/easybuild-framework | refs/heads/develop | easybuild/toolchains/iiqmpi.py | 2 | ##
# Copyright 2012-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for iiqmpi compiler toolchain (includes Intel compilers, QLogicMPI).
:author: Kenneth Hoste (Ghent University)
"""
from easybuild.toolchains.iccifort import IccIfort
from easybuild.toolchains.mpi.qlogicmpi import QLogicMPI
class Iiqmpi(IccIfort, QLogicMPI):
"""Compiler toolchain with Intel compilers and QLogic MPI."""
NAME = 'iiqmpi'
SUBTOOLCHAIN = IccIfort.NAME
|
ntiufalara/openerp7 | refs/heads/master | openerp/addons/membership/__init__.py | 441 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import membership
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Pythonify/awesome | refs/heads/master | venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/filewrapper.py | 346 | from io import BytesIO
class CallbackFileWrapper(object):
"""
Small wrapper around a fp object which will tee everything read into a
buffer, and when that file is closed it will execute a callback with the
contents of that buffer.
All attributes are proxied to the underlying file object.
This class uses members with a double underscore (__) leading prefix so as
not to accidentally shadow an attribute.
"""
def __init__(self, fp, callback):
self.__buf = BytesIO()
self.__fp = fp
self.__callback = callback
def __getattr__(self, name):
        # The vagaries of garbage collection mean that self.__fp is
        # not always set. Using __getattribute__ with the mangled
        # private name [0] looks up the attribute value directly and
        # raises an AttributeError when it doesn't exist. This stops
        # things from infinitely recursing into getattr in the case
        # where self.__fp hasn't been set.
#
# [0] https://docs.python.org/2/reference/expressions.html#atom-identifiers
fp = self.__getattribute__('_CallbackFileWrapper__fp')
return getattr(fp, name)
def __is_fp_closed(self):
try:
return self.__fp.fp is None
except AttributeError:
pass
try:
return self.__fp.closed
except AttributeError:
pass
# We just don't cache it then.
# TODO: Add some logging here...
return False
def _close(self):
if self.__callback:
self.__callback(self.__buf.getvalue())
        # We assign this to None here, because otherwise we can get into
        # really tricky problems where the CPython interpreter deadlocks
        # because the callback is holding a reference to something which
        # has a __del__ method. Setting this to None breaks the cycle
        # and allows the garbage collector to do its thing normally.
self.__callback = None
def read(self, amt=None):
data = self.__fp.read(amt)
self.__buf.write(data)
if self.__is_fp_closed():
self._close()
return data
def _safe_read(self, amt):
data = self.__fp._safe_read(amt)
if amt == 2 and data == b'\r\n':
# urllib executes this read to toss the CRLF at the end
# of the chunk.
return data
self.__buf.write(data)
if self.__is_fp_closed():
self._close()
return data
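

# A minimal usage sketch (hypothetical stand-in response object; the real
# callers live in cachecontrol's caching layer): the callback fires exactly
# once, after the wrapped fp is exhausted, with everything that was read.
if __name__ == '__main__':
    import io

    class _FakeResponse(object):
        """Mimic an httplib response: fp becomes None once the body is read."""

        def __init__(self, data):
            self.fp = io.BytesIO(data)

        def read(self, amt=None):
            data = self.fp.read(amt)
            if not data:
                self.fp = None
            return data

    seen = []
    wrapped = CallbackFileWrapper(_FakeResponse(b'hello world'), seen.append)
    while wrapped.read(4):
        pass
    assert seen == [b'hello world']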
|
jimcunderwood/MissionPlanner | refs/heads/master | Lib/email/encoders.py | 61 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Encodings and related functions."""
__all__ = [
'encode_7or8bit',
'encode_base64',
'encode_noop',
'encode_quopri',
]
import base64
from quopri import encodestring as _encodestring
def _qencode(s):
enc = _encodestring(s, quotetabs=True)
# Must encode spaces, which quopri.encodestring() doesn't do
return enc.replace(' ', '=20')
def _bencode(s):
# We can't quite use base64.encodestring() since it tacks on a "courtesy
# newline". Blech!
if not s:
return s
hasnewline = (s[-1] == '\n')
value = base64.encodestring(s)
if not hasnewline and value[-1] == '\n':
return value[:-1]
return value
def encode_base64(msg):
"""Encode the message's payload in Base64.
Also, add an appropriate Content-Transfer-Encoding header.
"""
orig = msg.get_payload()
encdata = _bencode(orig)
msg.set_payload(encdata)
msg['Content-Transfer-Encoding'] = 'base64'
def encode_quopri(msg):
"""Encode the message's payload in quoted-printable.
Also, add an appropriate Content-Transfer-Encoding header.
"""
orig = msg.get_payload()
encdata = _qencode(orig)
msg.set_payload(encdata)
msg['Content-Transfer-Encoding'] = 'quoted-printable'
def encode_7or8bit(msg):
"""Set the Content-Transfer-Encoding header to 7bit or 8bit."""
orig = msg.get_payload()
if orig is None:
# There's no payload. For backwards compatibility we use 7bit
msg['Content-Transfer-Encoding'] = '7bit'
return
# We play a trick to make this go fast. If encoding to ASCII succeeds, we
# know the data must be 7bit, otherwise treat it as 8bit.
try:
orig.encode('ascii')
except UnicodeError:
msg['Content-Transfer-Encoding'] = '8bit'
else:
msg['Content-Transfer-Encoding'] = '7bit'
def encode_noop(msg):
"""Do nothing."""
|
snowcloud/raretunes | refs/heads/master | raretunes/apps/recordings/urlshortener.py | 1 | # Generate a [0-9a-zA-Z] string
ALPHABET = map(str,range(0, 10)) + map(chr, range(97, 123) + range(65, 91))
def encode_id(id_number, alphabet=ALPHABET):
"""Convert an integer to a string."""
if id_number == 0:
return alphabet[0]
alphabet_len = len(alphabet) # Cache
result = ''
while id_number > 0:
id_number, mod = divmod(id_number, alphabet_len)
result = alphabet[mod] + result
return result
def decode_id(id_string, alphabet=ALPHABET):
"""Convert a string to an integer."""
alphabet_len = len(alphabet) # Cache
return sum([alphabet.index(char) * pow(alphabet_len, power) for power, char in enumerate(reversed(id_string))])
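

if __name__ == '__main__':
    # Round-trip sanity check (illustrative): encode_id and decode_id are
    # inverses over the base-62 alphabet above.
    for number in (0, 1, 61, 62, 123456789):
        assert decode_id(encode_id(number)) == number
    print encode_id(123456789)  # -> '8m0Kx'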
|
samliu/servo | refs/heads/master | tests/wpt/harness/wptrunner/wptmanifest/tests/test_static.py | 57 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
from cStringIO import StringIO
from ..backends import static
# There aren't many tests here because it turns out to be way more convenient to
# use test_serializer for the majority of cases
class TestStatic(unittest.TestCase):
def compile(self, input_text, input_data):
return static.compile(input_text, input_data)
def test_get_0(self):
data = """
key: value
[Heading 1]
other_key:
if a == 1: value_1
if a == 2: value_2
value_3
"""
manifest = self.compile(data, {"a": 2})
self.assertEquals(manifest.get("key"), "value")
children = list(item for item in manifest.iterchildren())
self.assertEquals(len(children), 1)
section = children[0]
self.assertEquals(section.name, "Heading 1")
self.assertEquals(section.get("other_key"), "value_2")
self.assertEquals(section.get("key"), "value")
def test_get_1(self):
data = """
key: value
[Heading 1]
other_key:
if a == 1: value_1
if a == 2: value_2
value_3
"""
manifest = self.compile(data, {"a": 3})
children = list(item for item in manifest.iterchildren())
section = children[0]
self.assertEquals(section.get("other_key"), "value_3")
def test_get_3(self):
data = """key:
if a == "1": value_1
if a[0] == "ab"[0]: value_2
"""
manifest = self.compile(data, {"a": "1"})
self.assertEquals(manifest.get("key"), "value_1")
manifest = self.compile(data, {"a": "ac"})
self.assertEquals(manifest.get("key"), "value_2")
def test_get_4(self):
data = """key:
if not a: value_1
value_2
"""
manifest = self.compile(data, {"a": True})
self.assertEquals(manifest.get("key"), "value_2")
manifest = self.compile(data, {"a": False})
self.assertEquals(manifest.get("key"), "value_1")
def test_api(self):
data = """key:
if a == 1.5: value_1
value_2
key_1: other_value
"""
manifest = self.compile(data, {"a": 1.5})
self.assertFalse(manifest.is_empty)
self.assertEquals(manifest.root, manifest)
self.assertTrue(manifest.has_key("key_1"))
self.assertFalse(manifest.has_key("key_2"))
self.assertEquals(set(manifest.iterkeys()), set(["key", "key_1"]))
self.assertEquals(set(manifest.itervalues()), set(["value_1", "other_value"]))
def test_is_empty_1(self):
data = """
[Section]
[Subsection]
"""
manifest = self.compile(data, {})
self.assertTrue(manifest.is_empty)
|
microcom/odoo | refs/heads/9.0 | openerp/addons/base/tests/test_res_partner_bank.py | 42 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
from openerp.tests.common import TransactionCase
class TestResPartnerBank(TransactionCase):
"""Tests acc_number
"""
def test_sanitized_acc_number(self):
partner_bank_model = self.env['res.partner.bank']
acc_number = " BE-001 2518823 03 "
vals = partner_bank_model.search([('acc_number', '=', acc_number)])
self.assertEquals(0, len(vals))
partner_bank = partner_bank_model.create({
'acc_number': acc_number,
'partner_id': self.ref('base.res_partner_2'),
'acc_type': 'bank',
})
vals = partner_bank_model.search([('acc_number', '=', acc_number)])
self.assertEquals(1, len(vals))
self.assertEquals(partner_bank, vals[0])
vals = partner_bank_model.search([('acc_number', 'in', [acc_number])])
self.assertEquals(1, len(vals))
self.assertEquals(partner_bank, vals[0])
self.assertEqual(partner_bank.acc_number, acc_number)
        # search using the sanitized acc_number
sanitized_acc_number = 'BE001251882303'
vals = partner_bank_model.search(
[('acc_number', '=', sanitized_acc_number)])
self.assertEquals(1, len(vals))
self.assertEquals(partner_bank, vals[0])
vals = partner_bank_model.search(
[('acc_number', 'in', [sanitized_acc_number])])
self.assertEquals(1, len(vals))
self.assertEquals(partner_bank, vals[0])
self.assertEqual(partner_bank.sanitized_acc_number,
sanitized_acc_number)
# search is case insensitive
vals = partner_bank_model.search(
[('acc_number', '=', sanitized_acc_number.lower())])
self.assertEquals(1, len(vals))
vals = partner_bank_model.search(
[('acc_number', '=', acc_number.lower())])
self.assertEquals(1, len(vals))
|
heke123/chromium-crosswalk | refs/heads/master | build/linux/install-chromeos-fonts.py | 18 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Script to install the Chrome OS fonts on Linux.
# This script can be run manually (as root), but is also run as part of
# install-build-deps.sh.
import os
import shutil
import subprocess
import sys
URL_TEMPLATE = ('https://commondatastorage.googleapis.com/chromeos-localmirror/'
'distfiles/%(name)s-%(version)s.tar.bz2')
# Taken from the media-fonts/<name> ebuilds in chromiumos-overlay.
SOURCES = [
{
'name': 'notofonts',
'version': '20150706'
}, {
'name': 'robotofonts',
'version': '20150625'
}
]
URLS = sorted([URL_TEMPLATE % d for d in SOURCES])
FONTS_DIR = '/usr/local/share/fonts'
def main(args):
if not sys.platform.startswith('linux'):
print "Error: %s must be run on Linux." % __file__
return 1
if os.getuid() != 0:
print "Error: %s must be run as root." % __file__
return 1
if not os.path.isdir(FONTS_DIR):
print "Error: Destination directory does not exist: %s" % FONTS_DIR
return 1
dest_dir = os.path.join(FONTS_DIR, 'chromeos')
stamp = os.path.join(dest_dir, ".stamp02")
if os.path.exists(stamp):
with open(stamp) as s:
if s.read() == '\n'.join(URLS):
print "Chrome OS fonts already up-to-date in %s." % dest_dir
return 0
if os.path.isdir(dest_dir):
shutil.rmtree(dest_dir)
os.mkdir(dest_dir)
os.chmod(dest_dir, 0755)
print "Installing Chrome OS fonts to %s." % dest_dir
for url in URLS:
tarball = os.path.join(dest_dir, os.path.basename(url))
subprocess.check_call(['curl', '-L', url, '-o', tarball])
subprocess.check_call(['tar', '--no-same-owner', '--no-same-permissions',
'-xf', tarball, '-C', dest_dir])
os.remove(tarball)
readme = os.path.join(dest_dir, "README")
with open(readme, 'w') as s:
s.write("This directory and its contents are auto-generated.\n")
s.write("It may be deleted and recreated. Do not modify.\n")
s.write("Script: %s\n" % __file__)
with open(stamp, 'w') as s:
s.write('\n'.join(URLS))
for base, dirs, files in os.walk(dest_dir):
for dir in dirs:
os.chmod(os.path.join(base, dir), 0755)
for file in files:
os.chmod(os.path.join(base, file), 0644)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
alimony/django | refs/heads/master | tests/model_forms/test_uuid.py | 90 | from django import forms
from django.core.exceptions import ValidationError
from django.test import TestCase
from .models import UUIDPK
class UUIDPKForm(forms.ModelForm):
class Meta:
model = UUIDPK
fields = '__all__'
class ModelFormBaseTest(TestCase):
def test_create_save_error(self):
form = UUIDPKForm({})
self.assertFalse(form.is_valid())
msg = "The UUIDPK could not be created because the data didn't validate."
with self.assertRaisesMessage(ValueError, msg):
form.save()
def test_update_save_error(self):
obj = UUIDPK.objects.create(name='foo')
form = UUIDPKForm({}, instance=obj)
self.assertFalse(form.is_valid())
msg = "The UUIDPK could not be changed because the data didn't validate."
with self.assertRaisesMessage(ValueError, msg):
form.save()
def test_model_multiple_choice_field_uuid_pk(self):
f = forms.ModelMultipleChoiceField(UUIDPK.objects.all())
with self.assertRaisesMessage(ValidationError, "'invalid_uuid' is not a valid UUID."):
f.clean(['invalid_uuid'])
|
zerolab/wagtail | refs/heads/main | wagtail/core/migrations/0033_remove_golive_expiry_help_text.py | 24 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-31 14:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0032_add_bulk_delete_page_permission'),
]
operations = [
migrations.AlterField(
model_name='page',
name='expire_at',
field=models.DateTimeField(blank=True, null=True, verbose_name='expiry date/time'),
),
migrations.AlterField(
model_name='page',
name='go_live_at',
field=models.DateTimeField(blank=True, null=True, verbose_name='go live date/time'),
),
]
|
yoer/hue | refs/heads/master | desktop/core/ext-py/tablib-0.10.0/tablib/packages/odf/draw.py | 93 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import DRAWNS, STYLENS, PRESENTATIONNS
from element import Element
def StyleRefElement(stylename=None, classnames=None, **args):
qattrs = {}
if stylename is not None:
f = stylename.getAttrNS(STYLENS, 'family')
if f == 'graphic':
qattrs[(DRAWNS,u'style-name')]= stylename
elif f == 'presentation':
qattrs[(PRESENTATIONNS,u'style-name')]= stylename
else:
raise ValueError, "Style's family must be either 'graphic' or 'presentation'"
if classnames is not None:
f = classnames[0].getAttrNS(STYLENS, 'family')
if f == 'graphic':
qattrs[(DRAWNS,u'class-names')]= classnames
elif f == 'presentation':
qattrs[(PRESENTATIONNS,u'class-names')]= classnames
else:
raise ValueError, "Style's family must be either 'graphic' or 'presentation'"
return Element(qattributes=qattrs, **args)
def DrawElement(name=None, **args):
e = Element(name=name, **args)
if not args.has_key('displayname'):
e.setAttrNS(DRAWNS,'display-name', name)
return e
# Autogenerated
def A(**args):
return Element(qname = (DRAWNS,'a'), **args)
def Applet(**args):
return Element(qname = (DRAWNS,'applet'), **args)
def AreaCircle(**args):
return Element(qname = (DRAWNS,'area-circle'), **args)
def AreaPolygon(**args):
return Element(qname = (DRAWNS,'area-polygon'), **args)
def AreaRectangle(**args):
return Element(qname = (DRAWNS,'area-rectangle'), **args)
def Caption(**args):
return StyleRefElement(qname = (DRAWNS,'caption'), **args)
def Circle(**args):
return StyleRefElement(qname = (DRAWNS,'circle'), **args)
def Connector(**args):
return StyleRefElement(qname = (DRAWNS,'connector'), **args)
def ContourPath(**args):
return Element(qname = (DRAWNS,'contour-path'), **args)
def ContourPolygon(**args):
return Element(qname = (DRAWNS,'contour-polygon'), **args)
def Control(**args):
return StyleRefElement(qname = (DRAWNS,'control'), **args)
def CustomShape(**args):
return StyleRefElement(qname = (DRAWNS,'custom-shape'), **args)
def Ellipse(**args):
return StyleRefElement(qname = (DRAWNS,'ellipse'), **args)
def EnhancedGeometry(**args):
return Element(qname = (DRAWNS,'enhanced-geometry'), **args)
def Equation(**args):
return Element(qname = (DRAWNS,'equation'), **args)
def FillImage(**args):
return DrawElement(qname = (DRAWNS,'fill-image'), **args)
def FloatingFrame(**args):
return Element(qname = (DRAWNS,'floating-frame'), **args)
def Frame(**args):
return StyleRefElement(qname = (DRAWNS,'frame'), **args)
def G(**args):
return StyleRefElement(qname = (DRAWNS,'g'), **args)
def GluePoint(**args):
return Element(qname = (DRAWNS,'glue-point'), **args)
def Gradient(**args):
return DrawElement(qname = (DRAWNS,'gradient'), **args)
def Handle(**args):
return Element(qname = (DRAWNS,'handle'), **args)
def Hatch(**args):
return DrawElement(qname = (DRAWNS,'hatch'), **args)
def Image(**args):
return Element(qname = (DRAWNS,'image'), **args)
def ImageMap(**args):
return Element(qname = (DRAWNS,'image-map'), **args)
def Layer(**args):
return Element(qname = (DRAWNS,'layer'), **args)
def LayerSet(**args):
return Element(qname = (DRAWNS,'layer-set'), **args)
def Line(**args):
return StyleRefElement(qname = (DRAWNS,'line'), **args)
def Marker(**args):
return DrawElement(qname = (DRAWNS,'marker'), **args)
def Measure(**args):
return StyleRefElement(qname = (DRAWNS,'measure'), **args)
def Object(**args):
return Element(qname = (DRAWNS,'object'), **args)
def ObjectOle(**args):
return Element(qname = (DRAWNS,'object-ole'), **args)
def Opacity(**args):
return DrawElement(qname = (DRAWNS,'opacity'), **args)
def Page(**args):
return Element(qname = (DRAWNS,'page'), **args)
def PageThumbnail(**args):
return StyleRefElement(qname = (DRAWNS,'page-thumbnail'), **args)
def Param(**args):
return Element(qname = (DRAWNS,'param'), **args)
def Path(**args):
return StyleRefElement(qname = (DRAWNS,'path'), **args)
def Plugin(**args):
return Element(qname = (DRAWNS,'plugin'), **args)
def Polygon(**args):
return StyleRefElement(qname = (DRAWNS,'polygon'), **args)
def Polyline(**args):
return StyleRefElement(qname = (DRAWNS,'polyline'), **args)
def Rect(**args):
return StyleRefElement(qname = (DRAWNS,'rect'), **args)
def RegularPolygon(**args):
return StyleRefElement(qname = (DRAWNS,'regular-polygon'), **args)
def StrokeDash(**args):
return DrawElement(qname = (DRAWNS,'stroke-dash'), **args)
def TextBox(**args):
return Element(qname = (DRAWNS,'text-box'), **args)
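
# A minimal sketch of how these factories combine (assumes the sibling
# ``style`` module, as in odfpy, where Style(name=..., family=...) builds the
# style element referenced here):
#
#     from style import Style
#     graphic_style = Style(name='gr1', family='graphic')
#     shape = Rect(stylename=graphic_style, width='10cm', height='5cm')
#
# Because the style's family is 'graphic', StyleRefElement records the
# reference as a draw:style-name attribute; a 'presentation' family would be
# recorded as presentation:style-name instead.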
|
bluerover/6lbr | refs/heads/develop | examples/6lbr/test/coojagen/src/generators.py | 2 | import math
import random
def gengrid(step, xcount, ycount):
step = int(step)
xcount = int(xcount)
ycount = int(ycount)
points = []
for xcoord in range(xcount):
for ycoord in range(ycount):
point = (xcoord*step,ycoord*step, 0)
points.append(point)
return(points)
def gengrid_ratio(step, ratio, count):
step = int(step)
count = int(count)
ratio_xy = ratio.split(':')
ratio = float(ratio_xy[0])/float(ratio_xy[1])
xcount = round(math.sqrt(ratio*count))
ycount = round(xcount / ratio)
return gengrid(step, xcount, ycount)
def gengrid_square(step, count):
side = round(math.sqrt(count))
return gengrid(step, side, side)
def genrandom(xmin,xmax,ymin,ymax,count):
count = int(count)
xmin = int(xmin)
ymin = int(ymin)
xmax = int(xmax)
ymax = int(ymax)
random.seed()
points = []
for a in range(count):
        points.append((random.uniform(xmin, xmax), random.uniform(ymin, ymax), 0))
return(points)
def genline(step, count):
step = float(step)
count = int(count)
points = []
for xcoord in range(count):
point = (xcoord*step, 0, 0)
points.append(point)
return(points)
def hasattrs(_object, _attributes):
for attr in _attributes:
if not hasattr(_object, attr):
print("Error: %s topology requires a '%s' attribute" %(_object.topology, attr))
return False
return True
def gen(config_simgen, mote_count):
if not hasattr(config_simgen, 'topology'):
print("Error: no 'topology' found in sim generation config file")
return None
if config_simgen.topology == 'line':
        if not hasattrs(config_simgen, ['step',]):
            return None
return genline(config_simgen.step, mote_count)
elif config_simgen.topology == 'grid':
if not hasattrs(config_simgen, ['step', 'xcount', 'ycount']):
return None
return gengrid(config_simgen.step, config_simgen.xcount, config_simgen.ycount)
elif config_simgen.topology == 'grid_square':
if not hasattrs(config_simgen, ['step',]):
return None
return gengrid_square(config_simgen.step, mote_count)
elif config_simgen.topology == 'grid_ratio':
if not hasattrs(config_simgen, ['step', 'ratio']):
return None
return gengrid_ratio(config_simgen.step, config_simgen.ratio, mote_count)
def load_preset(preset_data_path):
points = []
preset_data_file = open(preset_data_path, 'r')
for line in preset_data_file:
line = line.rstrip()
currpoints = []
positions = line.split(';')
for position in positions:
xy = position.split(',')
x = xy[0]
y = xy[1]
currpoints.append((int(x),int(y),0))
points.append(currpoints)
preset_data_file.close()
return(points)
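

if __name__ == '__main__':
    # Small smoke test of the generators (illustrative; the simulation
    # generator that imports this module is the real entry point).
    assert len(gengrid(10, 3, 3)) == 9    # 3x3 grid, 10 m spacing
    assert len(genline(5, 4)) == 4        # 4 motes on a line
    assert gengrid_square(10, 9) == gengrid(10, 3, 3)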
|
Learningtribes/edx-platform | refs/heads/master | common/lib/xmodule/xmodule/tests/test_fields.py | 78 | """Tests for classes defined in fields.py."""
import datetime
import unittest
from django.utils.timezone import UTC
from xmodule.fields import Date, Timedelta, RelativeTime
from xmodule.timeinfo import TimeInfo
class DateTest(unittest.TestCase):
date = Date()
def compare_dates(self, dt1, dt2, expected_delta):
self.assertEqual(
dt1 - dt2,
expected_delta,
str(dt1) + "-" + str(dt2) + "!=" + str(expected_delta)
)
def test_from_json(self):
"""Test conversion from iso compatible date strings to struct_time"""
self.compare_dates(
DateTest.date.from_json("2013-01-01"),
DateTest.date.from_json("2012-12-31"),
datetime.timedelta(days=1)
)
self.compare_dates(
DateTest.date.from_json("2013-01-01T00"),
DateTest.date.from_json("2012-12-31T23"),
datetime.timedelta(hours=1)
)
self.compare_dates(
DateTest.date.from_json("2013-01-01T00:00"),
DateTest.date.from_json("2012-12-31T23:59"),
datetime.timedelta(minutes=1)
)
self.compare_dates(
DateTest.date.from_json("2013-01-01T00:00:00"),
DateTest.date.from_json("2012-12-31T23:59:59"),
datetime.timedelta(seconds=1)
)
self.compare_dates(
DateTest.date.from_json("2013-01-01T00:00:00Z"),
DateTest.date.from_json("2012-12-31T23:59:59Z"),
datetime.timedelta(seconds=1)
)
self.compare_dates(
DateTest.date.from_json("2012-12-31T23:00:01-01:00"),
DateTest.date.from_json("2013-01-01T00:00:00+01:00"),
datetime.timedelta(hours=1, seconds=1)
)
def test_enforce_type(self):
self.assertEqual(DateTest.date.enforce_type(None), None)
self.assertEqual(DateTest.date.enforce_type(""), None)
self.assertEqual(
DateTest.date.enforce_type("2012-12-31T23:00:01"),
datetime.datetime(2012, 12, 31, 23, 0, 1, tzinfo=UTC())
)
self.assertEqual(
DateTest.date.enforce_type(1234567890000),
datetime.datetime(2009, 2, 13, 23, 31, 30, tzinfo=UTC())
)
self.assertEqual(
DateTest.date.enforce_type(datetime.datetime(2014, 5, 9, 21, 1, 27, tzinfo=UTC())),
datetime.datetime(2014, 5, 9, 21, 1, 27, tzinfo=UTC())
)
with self.assertRaises(TypeError):
DateTest.date.enforce_type([1])
def test_return_None(self):
self.assertIsNone(DateTest.date.from_json(""))
self.assertIsNone(DateTest.date.from_json(None))
with self.assertRaises(TypeError):
DateTest.date.from_json(['unknown value'])
def test_old_due_date_format(self):
current = datetime.datetime.today()
self.assertEqual(
datetime.datetime(current.year, 3, 12, 12, tzinfo=UTC()),
DateTest.date.from_json("March 12 12:00")
)
self.assertEqual(
datetime.datetime(current.year, 12, 4, 16, 30, tzinfo=UTC()),
DateTest.date.from_json("December 4 16:30")
)
self.assertIsNone(DateTest.date.from_json("12 12:00"))
def test_non_std_from_json(self):
"""
Test the non-standard args being passed to from_json
"""
now = datetime.datetime.now(UTC())
delta = now - datetime.datetime.fromtimestamp(0, UTC())
self.assertEqual(
DateTest.date.from_json(delta.total_seconds() * 1000),
now
)
        yesterday = datetime.datetime.now(UTC()) - datetime.timedelta(days=1)
self.assertEqual(DateTest.date.from_json(yesterday), yesterday)
def test_to_json(self):
"""
Test converting time reprs to iso dates
"""
self.assertEqual(
DateTest.date.to_json(datetime.datetime.strptime("2012-12-31T23:59:59Z", "%Y-%m-%dT%H:%M:%SZ")),
"2012-12-31T23:59:59Z"
)
self.assertEqual(
DateTest.date.to_json(DateTest.date.from_json("2012-12-31T23:59:59Z")),
"2012-12-31T23:59:59Z"
)
self.assertEqual(
DateTest.date.to_json(DateTest.date.from_json("2012-12-31T23:00:01-01:00")),
"2012-12-31T23:00:01-01:00"
)
with self.assertRaises(TypeError):
DateTest.date.to_json('2012-12-31T23:00:01-01:00')
class TimedeltaTest(unittest.TestCase):
delta = Timedelta()
def test_from_json(self):
self.assertEqual(
TimedeltaTest.delta.from_json('1 day 12 hours 59 minutes 59 seconds'),
datetime.timedelta(days=1, hours=12, minutes=59, seconds=59)
)
self.assertEqual(
TimedeltaTest.delta.from_json('1 day 46799 seconds'),
datetime.timedelta(days=1, seconds=46799)
)
def test_enforce_type(self):
self.assertEqual(TimedeltaTest.delta.enforce_type(None), None)
self.assertEqual(
TimedeltaTest.delta.enforce_type(datetime.timedelta(days=1, seconds=46799)),
datetime.timedelta(days=1, seconds=46799)
)
self.assertEqual(
TimedeltaTest.delta.enforce_type('1 day 46799 seconds'),
datetime.timedelta(days=1, seconds=46799)
)
with self.assertRaises(TypeError):
TimedeltaTest.delta.enforce_type([1])
def test_to_json(self):
self.assertEqual(
'1 days 46799 seconds',
TimedeltaTest.delta.to_json(datetime.timedelta(days=1, hours=12, minutes=59, seconds=59))
)
class TimeInfoTest(unittest.TestCase):
def test_time_info(self):
due_date = datetime.datetime(2000, 4, 14, 10, tzinfo=UTC())
grace_pd_string = '1 day 12 hours 59 minutes 59 seconds'
timeinfo = TimeInfo(due_date, grace_pd_string)
self.assertEqual(
timeinfo.close_date,
due_date + Timedelta().from_json(grace_pd_string)
)
class RelativeTimeTest(unittest.TestCase):
delta = RelativeTime()
def test_from_json(self):
self.assertEqual(
RelativeTimeTest.delta.from_json('0:05:07'),
datetime.timedelta(seconds=307)
)
self.assertEqual(
RelativeTimeTest.delta.from_json(100.0),
datetime.timedelta(seconds=100)
)
self.assertEqual(
RelativeTimeTest.delta.from_json(None),
datetime.timedelta(seconds=0)
)
with self.assertRaises(TypeError):
RelativeTimeTest.delta.from_json(1234) # int
with self.assertRaises(ValueError):
RelativeTimeTest.delta.from_json("77:77:77")
def test_enforce_type(self):
self.assertEqual(RelativeTimeTest.delta.enforce_type(None), None)
self.assertEqual(
RelativeTimeTest.delta.enforce_type(datetime.timedelta(days=1, seconds=46799)),
datetime.timedelta(days=1, seconds=46799)
)
self.assertEqual(
RelativeTimeTest.delta.enforce_type('0:05:07'),
datetime.timedelta(seconds=307)
)
with self.assertRaises(TypeError):
RelativeTimeTest.delta.enforce_type([1])
def test_to_json(self):
self.assertEqual(
"01:02:03",
RelativeTimeTest.delta.to_json(datetime.timedelta(seconds=3723))
)
self.assertEqual(
"00:00:00",
RelativeTimeTest.delta.to_json(None)
)
self.assertEqual(
"00:01:40",
RelativeTimeTest.delta.to_json(100.0)
)
error_msg = "RelativeTime max value is 23:59:59=86400.0 seconds, but 90000.0 seconds is passed"
with self.assertRaisesRegexp(ValueError, error_msg):
RelativeTimeTest.delta.to_json(datetime.timedelta(seconds=90000))
with self.assertRaises(TypeError):
RelativeTimeTest.delta.to_json("123")
def test_str(self):
self.assertEqual(
"01:02:03",
RelativeTimeTest.delta.to_json(datetime.timedelta(seconds=3723))
)
self.assertEqual(
"11:02:03",
RelativeTimeTest.delta.to_json(datetime.timedelta(seconds=39723))
)
|
fdupoux/ansible-modules-extras | refs/heads/fdevel | cloud/amazon/ec2_eni_facts.py | 5 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_eni_facts
short_description: Gather facts about ec2 ENI interfaces in AWS
description:
- Gather facts about ec2 ENI interfaces in AWS
version_added: "2.0"
author: "Rob White (@wimnat)"
options:
eni_id:
description:
- The ID of the ENI. Pass this option to gather facts about a particular ENI, otherwise, all ENIs are returned.
required: false
default: null
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all ENIs
- ec2_eni_facts:
# Gather facts about a particular ENI
- ec2_eni_facts:
eni_id: eni-xxxxxxx
'''
import xml.etree.ElementTree as ET
try:
import boto.ec2
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def get_error_message(xml_string):
root = ET.fromstring(xml_string)
for message in root.findall('.//Message'):
return message.text
def get_eni_info(interface):
interface_info = {'id': interface.id,
'subnet_id': interface.subnet_id,
'vpc_id': interface.vpc_id,
'description': interface.description,
'owner_id': interface.owner_id,
'status': interface.status,
'mac_address': interface.mac_address,
'private_ip_address': interface.private_ip_address,
'source_dest_check': interface.source_dest_check,
'groups': dict((group.id, group.name) for group in interface.groups),
}
if interface.attachment is not None:
interface_info['attachment'] = {'attachment_id': interface.attachment.id,
'instance_id': interface.attachment.instance_id,
'device_index': interface.attachment.device_index,
'status': interface.attachment.status,
'attach_time': interface.attachment.attach_time,
'delete_on_termination': interface.attachment.delete_on_termination,
}
return interface_info
def list_eni(connection, module):
eni_id = module.params.get("eni_id")
interface_dict_array = []
try:
all_eni = connection.get_all_network_interfaces(eni_id)
except BotoServerError as e:
module.fail_json(msg=get_error_message(e.args[2]))
for interface in all_eni:
interface_dict_array.append(get_eni_info(interface))
module.exit_json(interfaces=interface_dict_array)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
eni_id = dict(default=None)
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
list_eni(connection, module)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
|
lecaoquochung/ddnb.django | refs/heads/master | django/contrib/sitemaps/tests/test_https.py | 13 | from __future__ import unicode_literals
from datetime import date
import warnings
from django.test import override_settings
from django.utils.deprecation import RemovedInDjango20Warning
from .base import SitemapTestsBase
@override_settings(ROOT_URLCONF='django.contrib.sitemaps.tests.urls.https')
class HTTPSSitemapTests(SitemapTestsBase):
protocol = 'https'
def test_secure_sitemap_index(self):
"A secure sitemap index can be rendered"
response = self.client.get('/secure/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/secure/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
def test_secure_sitemap_section(self):
"A secure sitemap section can be rendered"
response = self.client.get('/secure/sitemap-simple.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
@override_settings(SECURE_PROXY_SSL_HEADER=False)
class HTTPSDetectionSitemapTests(SitemapTestsBase):
extra = {'wsgi.url_scheme': 'https'}
def test_sitemap_index_with_https_request(self):
"A sitemap index requested in HTTPS is rendered with HTTPS links"
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RemovedInDjango20Warning)
# The URL for views.sitemap in tests/urls/http.py has been updated
# with a name but since reversing by Python path is tried first
# before reversing by name and works since we're giving
# name='django.contrib.sitemaps.views.sitemap', we need to silence
# the erroneous warning until reversing by dotted path is removed.
# The test will work without modification when it's removed.
response = self.client.get('/simple/index.xml', **self.extra)
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url.replace('http://', 'https://')
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
def test_sitemap_section_with_https_request(self):
"A sitemap section requested in HTTPS is rendered with HTTPS links"
response = self.client.get('/simple/sitemap-simple.xml', **self.extra)
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url.replace('http://', 'https://'), date.today())
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
|
isandlaTech/cohorte-devtools | refs/heads/master | qualifier/deploy/cohorte-home/repo/cohorte/repositories/java/manifest.py | 2 | #!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Utility module to handle Java Manifest.mf files
:author: Thomas Calmant
:license: Apache Software License 2.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import contextlib
import shlex
import sys
PYTHON3 = (sys.version_info[0] == 3)
if PYTHON3:
# Python 3
import io
StringIO = io.StringIO
else:
# Python 2
import StringIO
StringIO = StringIO.StringIO
# ------------------------------------------------------------------------------
# Bundle version
import cohorte.version
__version__ = cohorte.version.__version__
# ------------------------------------------------------------------------------
# iPOJO components description key
IPOJO_COMPONENTS_KEY = 'iPOJO-Components'
# ------------------------------------------------------------------------------
class Manifest(object):
"""
Java Manifest parser
"""
def __init__(self):
"""
Sets up the parser
"""
# Manifest entries
self.entries = {}
# get() shortcut
self.get = self.entries.get
def extract_packages_list(self, manifest_key):
"""
Retrieves a list of packages and their attributes
:param manifest_key: Name of the package list in the manifest
:return: A dictionary: package -> dictionary of attributes
"""
parsed_list = {}
packages_list = self.entries.get(manifest_key, '').strip()
if packages_list:
# Use shlex to handle quotes
parser = shlex.shlex(packages_list, posix=True)
parser.whitespace = ','
parser.whitespace_split = True
for package_str in parser:
# Extract import values
package_info = package_str.strip().split(';')
name = package_info[0]
attributes = {}
for value in package_info[1:]:
if value:
attr_name, attr_value = value.split('=', 1)
if attr_name[-1] == ':':
# Remove the ':' of ':=' in some attributes
attr_name = attr_name[:-1].strip()
attributes[attr_name] = attr_value.strip()
parsed_list[name] = attributes
return parsed_list
def format(self):
"""
Formats the entries to be Manifest format compliant
"""
# First line: Manifest version
lines = [': '.join(('Manifest-Version',
self.entries.get('Manifest-Version', '1.0')))]
# Sort keys, except the version
keys = [key.strip() for key in self.entries.keys()
if key != 'Manifest-Version']
keys.sort()
# Wrap values
for key in keys:
line = ': '.join((key, self.entries[key].strip()))
lines.extend(self._wrap_line(line))
return '\n'.join(lines)
def parse(self, manifest):
"""
Parses the given Manifest file content to fill this Manifest
representation
:param manifest: The content of a Manifest file
"""
# Clear current entries
self.entries.clear()
if PYTHON3 and not isinstance(manifest, str):
# Python 3 doesn't like bytes
manifest = str(manifest, 'UTF-8')
# Read the manifest, line by line
with contextlib.closing(StringIO(manifest)) as manifest_io:
key = None
for line in manifest_io.readlines():
if key is not None and line[0] == ' ':
# Line continuation
self.entries[key] += line.strip()
else:
# Strip the line
line = line.strip()
if not line:
# Empty line
key = None
continue
# We have a key
key, value = line.split(':', 1)
# Strip values
self.entries[key] = value.strip()
@staticmethod
def _wrap_line(line):
"""
Wraps a line, Manifest style
:param line: The line to wrap
:return: The wrapped line
"""
# 70 chars for the first line
lines = [line[:70]]
# space + 69 chars for the others
chunk = line[70:]
while chunk:
lines.append(' ' + chunk[:69])
chunk = chunk[69:]
return lines
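

if __name__ == '__main__':
    # Round-trip sanity check (illustrative only): parse a small manifest,
    # read a package list out of it, then re-format it.
    SAMPLE = ('Manifest-Version: 1.0\n'
              'Import-Package: org.osgi.framework;version="1.5",'
              'org.example.api\n')
    manifest = Manifest()
    manifest.parse(SAMPLE)
    imports = manifest.extract_packages_list('Import-Package')
    assert set(imports) == {'org.osgi.framework', 'org.example.api'}
    assert imports['org.osgi.framework'] == {'version': '1.5'}
    print(manifest.format())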
|
Kamekameha/crunchy-xml-decoder | refs/heads/master | crunchy-xml-decoder.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import argparse
import os
import subprocess
from getpass import getpass
sys.path.append('crunchy-xml-decoder')
import functtest
import ultimate
import login
import decode
import altfuncs
import re, urllib2
from collections import deque
import time
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#(autocatch)#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def autocatch():
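    """Scrape every episode link from a Crunchyroll series page (URL read
    from standard input) and write the URLs to queue.txt, reversed so the
    oldest episode comes first."""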
    url = raw_input('Enter the series URL: ')
mykey = urllib2.urlopen(url)
take = open("queue_.txt", "w")
for text in mykey:
match = re.search('<a href="/(.+?)" title=', text)
if match:
print >> take, 'http://www.crunchyroll.com/'+match.group(1)
take.close()
with open('queue_.txt') as f, open('queue.txt', 'w') as fout:
fout.writelines(reversed(f.readlines()))
os.remove('queue_.txt')
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#(CHECKING)#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
if not os.path.exists("export"):
os.makedirs("export")
iquality = 'highest'
ilang1 = 'English'
ilang2 = 'English'
iforcesub = False
iforceusa = False
ilocalizecookies = False
ionlymainsub = False
def defaultsettings(vvquality, vlang1, vlang2, vforcesub, vforceusa, vlocalizecookies, onlymainsub):
dsettings='''[SETTINGS]
# Set this to the preferred quality. Possible values are: "android" (hard-subbed), "360p", "480p", "720p", "1080p", or "highest" for highest available.
# Note that any quality higher than 360p still requires premium, unless it's available that way for free (some first episodes).
# We're not miracle workers.
video_quality = '''+vvquality+'''
# Set this to the desired subtitle language. If the subtitles aren't available in that language, it reverts to the second language option (below).
# Available languages: English, Espanol, Espanol_Espana, Francais, Portugues, Turkce, Italiano, Arabic, Deutsch
language = '''+vlang1+'''
# If the first language isn't available, which language should be used as a backup? If neither is found, it falls back to English as the default
language2 = '''+vlang2+'''
# Set this if you want to use --forced-track rather than --default-track for subtitle
forcesubtitle = '''+str(vforcesub)+'''
# Set this if you want to use a US session ID
forceusa = '''+str(vforceusa)+'''
# Set this if you want to localize the cookies (this option is under testing, may cause problems, and will not work with the -forceusa- option)
localizecookies = '''+str(vlocalizecookies)+'''
# Set this if you want to mux only one subtitle (this makes it easier for some devices, like TVs, to play the subtitle)
onlymainsub='''+str(onlymainsub)+'''
'''
open('.\\settings.ini', 'w').write(dsettings.encode('utf-8'))
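# For reference, with the defaults above the generated settings.ini contains
# (comment lines omitted):
#   [SETTINGS]
#   video_quality = highest
#   language = English
#   language2 = English
#   forcesubtitle = False
#   forceusa = False
#   localizecookies = False
#   onlymainsub=False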
if not os.path.exists(".\\settings.ini"):
defaultsettings(iquality, ilang1, ilang2, iforcesub, iforceusa, ilocalizecookies, ionlymainsub)
if not os.path.exists(".\\cookies"):
if raw_input(u'Do you have an account [Y/N]?').lower() == 'y':
username = raw_input(u'Username: ')
        password = getpass('Password (input is hidden): ')
login.login(username, password)
else:
login.login('', '')
else:
userstatus = login.getuserstatus()
print 'User Name='+userstatus[1]
print 'Membership Type='+userstatus[0]
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#(Argument Parser)#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
parser = argparse.ArgumentParser()
parser.add_argument("-u","--url", type=str,help="Crunchyroll Anime Link. if you get an error, try using double quotation marks (\")")
parser.add_argument("-sn","--season-number", metavar='#', type=int, nargs = 1, help="Crunchyroll Anime Season Number,it's optional option you can ignore")
parser.add_argument("-en","--episode-number", metavar='#', type=int, nargs = 1, help="Crunchyroll Anime Episode Number,it's optional option you can ignore")
parser.add_argument("-l","--login", metavar=('Username','Password'), nargs = 2, help="Crunchyroll login: -l User password. if your password has a blank, use double quotation marks (\"). Example: \"This is a password.\"")
parser.add_argument("-g","--guest", action='store_true', help="Crunchyroll login as guest")
parser.add_argument("-s","--subs-only", action='store_true', help="Download Crunchyroll Anime Subtitle only. if you get an error, try using double quotation marks (\")")
parser.add_argument("-q","--queue", type=str, nargs = '?', metavar='Queue Directory', const='.\\queue.txt', help="Run List of Crunchyroll Anime Link in queue file")
parser.add_argument("-d","--debug", action='store_true', help="Run crunchy-xml-decoder in Debug Mode")
parser.add_argument("-ds","--default-settings", action='store_true', help="Restore default settings")
arg = parser.parse_args()
sys.argv=[]
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#( )#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def queueu(queuepath):
if not os.path.exists(queuepath):
        open(queuepath, 'w').write(u'# any line that starts with a hash (#) will be skipped\n')
subprocess.call('notepad.exe '+queuepath)
lines = open(queuepath).readlines()
for line in lines:
        # only process lines that contain no '#' (already-done links are commented out)
        if line.rstrip('\n') == ''.join(line.rstrip('\n').split('#', 1)):
            ultimate.ultimate(line.rstrip('\n'), '', '')
for i in range(0, len(lines)):
if lines[i]== line:
lines[i]='#'+lines[i]
    # write the queue back, with completed links now commented out
    open(queuepath, 'w').write(''.join(lines))
def Languages_(Varname_):
seleccion = 0
if Varname_ == 'slang1':
print '''Set this to the desired subtitle language. If the subtitles aren\'t available in that language, it reverts to the Secondary language option'''
if Varname_ == 'slang2':
        print '''If the Primary language isn't available, which language should be used as a backup? If neither is found, it falls back to English as the default'''
print '''Available Languages:
0.- English
1.- Espanol
2.- Espanol (Espana)
3.- Francais
4.- Portugues
5.- Turkce
6.- Italiano
7.- Arabic
8.- Deutsch
'''
try:
seleccion = int(input("> "))
except:
print "ERROR: Invalid option."
        return Languages_(Varname_)
if seleccion == 1 :
return 'Espanol'
elif seleccion == 2 :
return 'Espanol_Espana'
elif seleccion == 3 :
return 'Francais'
elif seleccion == 4 :
return 'Portugues'
elif seleccion == 5 :
return 'Turkce'
elif seleccion == 6 :
return 'Italiano'
elif seleccion == 7 :
return 'Arabic'
elif seleccion == 8 :
return 'Deutsch'
elif seleccion == 0 :
return 'English'
else:
print "ERROR: Invalid option."
        return Languages_(Varname_)
def videoquality_():
slang1, slang2, sforcesub, sforceusa, slocalizecookies, vquality, vonlymainsub = altfuncs.config()
seleccion = 5
print '''Set This To The Preferred Quality:
0.- android (hard-subbed)
1.- 360p
2.- 480p
3.- 720p
4.- 1080p
5.- highest
Note: Any Quality Higher Than 360p Still Requires Premium, Unless It's Available That Way For Free (Some First Episodes).
We're Not Miracle Workers.
'''
try:
seleccion = int(input("> "))
except:
print "ERROR: Invalid option."
        return videoquality_()
if seleccion == 0 :
return 'android'
elif seleccion == 1 :
return '360p'
elif seleccion == 2 :
return '480p'
elif seleccion == 3 :
return '720p'
elif seleccion == 4 :
return '1080p'
elif seleccion == 5 :
return 'highest'
else:
print "ERROR: Invalid option."
        return videoquality_()
def settings_():
slang1, slang2, sforcesub, sforceusa, slocalizecookies, vquality, vonlymainsub = altfuncs.config()
slang1 = {u'Español (Espana)' : 'Espanol_Espana', u'Français (France)' : 'Francais', u'Português (Brasil)' : 'Portugues',
u'English' : 'English', u'Español' : 'Espanol', u'Türkçe' : 'Turkce', u'Italiano' : 'Italiano',
u'العربية' : 'Arabic', u'Deutsch' : 'Deutsch'}[slang1]
slang2 = {u'Español (Espana)' : 'Espanol_Espana', u'Français (France)' : 'Francais', u'Português (Brasil)' : 'Portugues',
u'English' : 'English', u'Español' : 'Espanol', u'Türkçe' : 'Turkce', u'Italiano' : 'Italiano',
u'العربية' : 'Arabic', u'Deutsch' : 'Deutsch'}[slang2]
if slang1 == 'Espanol_Espana':
slang1_ = 'Espanol (Espana)'
else:
slang1_ = slang1
if slang2 == 'Espanol_Espana':
slang2_ = 'Espanol (Espana)'
else:
slang2_ = slang2
seleccion = 0
print '''Options:
0.- Exit
1.- Video Quality = '''+vquality+'''
2.- Primary Language = '''+slang1_+'''
3.- Secondary Language = '''+slang2_+'''
4.- Force Subtitle = '''+str(sforcesub)+''' #Use --forced-track in Subtitle
5.- USA Proxy = '''+str(sforceusa)+''' #use a US session ID
6.- Localize cookies = '''+str(slocalizecookies)+''' #Localize the cookies (Experiment)
7.- Only One Subtitle = '''+str(vonlymainsub)+''' #Only download Primary Language
8.- Restore Default Settings
'''
try:
seleccion = int(input("> "))
except:
print "ERROR: Invalid option."
        return settings_()
if seleccion == 1 :
vquality = videoquality_()
defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub)
settings_()
elif seleccion == 2 :
slang1 = Languages_('slang1')
defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub)
settings_()
elif seleccion == 3 :
slang2 = Languages_('slang2')
defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub)
settings_()
elif seleccion == 4 :
if sforcesub:
sforcesub = False
else:
sforcesub = True
defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub)
settings_()
elif seleccion == 5 :
if sforceusa:
sforceusa = False
else:
sforceusa = True
defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub)
settings_()
elif seleccion == 6 :
if slocalizecookies:
slocalizecookies = False
else:
slocalizecookies = True
defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub)
settings_()
elif seleccion == 7 :
if vonlymainsub:
vonlymainsub = False
else:
vonlymainsub = True
defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub)
settings_()
elif seleccion == 8 :
defaultsettings(iquality, ilang1, ilang2, iforcesub, iforceusa, ilocalizecookies, ionlymainsub)
settings_()
elif seleccion == 0 :
pass
else:
print "ERROR: Invalid option."
settings_()
def makechoise():
seleccion = 0
print '''Options:
0.- Exit
1.- Download Anime
2.- Download Subtitle only
3.- Login
4.- Login As Guest
5.- Download an entire Anime (Autocatch links)
6.- Run Queue
7.- Settings
'''
try:
seleccion = int(input("> "))
except:
        # clear the screen (cls on Windows, clear elsewhere)
        os.system('cls' if os.name == 'nt' else 'clear')
print "ERROR: Invalid option."
        return makechoise()
if seleccion == 1 :
ultimate.ultimate(raw_input('Please enter Crunchyroll video URL:\n'), '', '')
elif seleccion == 2 :
decode.decode(raw_input('Please enter Crunchyroll video URL:\n'))
elif seleccion == 3 :
username = raw_input(u'Username: ')
        password = getpass('Password (input is hidden): ')
login.login(username, password)
makechoise()
elif seleccion == 4 :
login.login('', '')
makechoise()
elif seleccion == 5 :
autocatch()
queueu('.\\queue.txt')
elif seleccion == 6 :
queueu('.\\queue.txt')
elif seleccion == 7 :
settings_()
makechoise()
elif seleccion == 8 :
import debug
elif seleccion == 0 :
sys.exit()
else:
        # clear the screen (cls on Windows, clear elsewhere)
        os.system('cls' if os.name == 'nt' else 'clear')
print "ERROR: Invalid option."
        return makechoise()
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#( )#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
if arg.url:
page_url = arg.url
if arg.season_number:
seasonnum = arg.season_number[0]
else:
seasonnum = ''
if arg.episode_number:
epnum = arg.episode_number[0]
else:
epnum = ''
if arg.guest:
login.login('', '')
if arg.login:
username = arg.login[0]
password = arg.login[1]
login.login(username, password)
if arg.debug:
import debug
sys.exit()
if arg.subs_only:
if arg.url:
decode.decode(page_url)
else:
decode.decode(raw_input('Please enter Crunchyroll video URL:\n'))
sys.exit()
if arg.default_settings:
    defaultsettings(iquality, ilang1, ilang2, iforcesub, iforceusa, ilocalizecookies, ionlymainsub)
sys.exit()
if arg.queue:
queueu(arg.queue)
if arg.url and not arg.subs_only:
ultimate.ultimate(page_url, seasonnum, epnum)
else:
makechoise()
#print 'username'
#print 'password'
#print 'page_url'
#print 'seasonnum'
#import ultimate
|
mattrobenolt/pip | refs/heads/develop | tests/data/src/simplewheel-1.0/simple/__init__.py | 9480 | #
|
VigTech/Vigtech-Services | refs/heads/master | env/lib/python2.7/site-packages/setuptools/command/install_scripts.py | 505 | from distutils import log
import distutils.command.install_scripts as orig
import os
from pkg_resources import Distribution, PathMetadata, ensure_directory
class install_scripts(orig.install_scripts):
"""Do normal script install, plus any egg_info wrapper scripts"""
def initialize_options(self):
orig.install_scripts.initialize_options(self)
self.no_ep = False
def run(self):
import setuptools.command.easy_install as ei
self.run_command("egg_info")
if self.distribution.scripts:
orig.install_scripts.run(self) # run first to set up self.outfiles
else:
self.outfiles = []
if self.no_ep:
# don't install entry point scripts into .egg file!
return
ei_cmd = self.get_finalized_command("egg_info")
dist = Distribution(
ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
ei_cmd.egg_name, ei_cmd.egg_version,
)
bs_cmd = self.get_finalized_command('build_scripts')
exec_param = getattr(bs_cmd, 'executable', None)
bw_cmd = self.get_finalized_command("bdist_wininst")
is_wininst = getattr(bw_cmd, '_is_running', False)
writer = ei.ScriptWriter
if is_wininst:
exec_param = "python.exe"
writer = ei.WindowsScriptWriter
# resolve the writer to the environment
writer = writer.best()
cmd = writer.command_spec_class.best().from_param(exec_param)
for args in writer.get_args(dist, cmd.as_header()):
self.write_script(*args)
def write_script(self, script_name, contents, mode="t", *ignored):
"""Write an executable file to the scripts directory"""
from setuptools.command.easy_install import chmod, current_umask
log.info("Installing %s script to %s", script_name, self.install_dir)
target = os.path.join(self.install_dir, script_name)
self.outfiles.append(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
f = open(target, "w" + mode)
f.write(contents)
f.close()
chmod(target, 0o777 - mask)
|
akhmadMizkat/odoo | refs/heads/master | addons/note/note.py | 41 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import _, SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools import html2plaintext
class note_stage(osv.osv):
""" Category of Note """
_name = "note.stage"
_description = "Note Stage"
_columns = {
'name': fields.char('Stage Name', translate=True, required=True),
'sequence': fields.integer('Sequence', help="Used to order the note stages"),
'user_id': fields.many2one('res.users', 'Owner', help="Owner of the note stage.", required=True, ondelete='cascade'),
'fold': fields.boolean('Folded by Default'),
}
_order = 'sequence asc'
_defaults = {
'fold': 0,
'user_id': lambda self, cr, uid, ctx: uid,
'sequence' : 1,
}
class note_tag(osv.osv):
_name = "note.tag"
_description = "Note Tag"
_columns = {
'name': fields.char('Tag Name', required=True),
'color': fields.integer('Color Index'),
}
_sql_constraints = [
('name_uniq', 'unique (name)', "Tag name already exists !"),
]
class note_note(osv.osv):
""" Note """
_name = 'note.note'
_inherit = ['mail.thread']
_description = "Note"
#writing method (no modification of values)
def name_create(self, cr, uid, name, context=None):
rec_id = self.create(cr, uid, {'memo': name}, context=context)
return self.name_get(cr, uid, [rec_id], context)[0]
    #read the first line (convert html into text)
def _get_note_first_line(self, cr, uid, ids, name="", args={}, context=None):
res = {}
for note in self.browse(cr, uid, ids, context=context):
res[note.id] = (note.memo and html2plaintext(note.memo) or "").strip().replace('*','').split("\n")[0]
return res
def onclick_note_is_done(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'open': False, 'date_done': fields.date.today()}, context=context)
def onclick_note_not_done(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'open': True}, context=context)
#return the default stage for the uid user
def _get_default_stage_id(self,cr,uid,context=None):
ids = self.pool.get('note.stage').search(cr,uid,[('user_id','=',uid)], context=context)
return ids and ids[0] or False
def _set_stage_per_user(self, cr, uid, id, name, value, args=None, context=None):
note = self.browse(cr, uid, id, context=context)
if not value: return False
stage_ids = [value] + [stage.id for stage in note.stage_ids if stage.user_id.id != uid ]
return self.write(cr, uid, [id], {'stage_ids': [(6, 0, set(stage_ids))]}, context=context)
def _get_stage_per_user(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for record in self.browse(cr, uid, ids, context=context):
for stage in record.stage_ids:
if stage.user_id.id == uid:
result[record.id] = stage.id
return result
_columns = {
'name': fields.function(_get_note_first_line,
string='Note Summary',
type='text', store=True),
'user_id': fields.many2one('res.users', 'Owner'),
'memo': fields.html('Note Content'),
'sequence': fields.integer('Sequence'),
'stage_id': fields.function(_get_stage_per_user,
fnct_inv=_set_stage_per_user,
string='Stage',
type='many2one',
relation='note.stage'),
'stage_ids': fields.many2many('note.stage','note_stage_rel','note_id','stage_id','Stages of Users'),
'open': fields.boolean('Active', track_visibility='onchange'),
'date_done': fields.date('Date done'),
'color': fields.integer('Color Index'),
'tag_ids' : fields.many2many('note.tag','note_tags_rel','note_id','tag_id','Tags'),
}
_defaults = {
'user_id': lambda self, cr, uid, ctx=None: uid,
'open' : 1,
'stage_id' : _get_default_stage_id,
}
_order = 'sequence'
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
if groupby and groupby[0]=="stage_id":
#search all stages
current_stage_ids = self.pool.get('note.stage').search(cr,uid,[('user_id','=',uid)], context=context)
if current_stage_ids: #if the user have some stages
stages = self.pool['note.stage'].browse(cr, uid, current_stage_ids, context=context)
result = [{ #notes by stage for stages user
'__context': {'group_by': groupby[1:]},
'__domain': domain + [('stage_ids.id', '=', stage.id)],
'stage_id': (stage.id, stage.name),
'stage_id_count': self.search(cr,uid, domain+[('stage_ids', '=', stage.id)], context=context, count=True),
'__fold': stage.fold,
} for stage in stages]
#note without user's stage
nb_notes_ws = self.search(cr,uid, domain+[('stage_ids', 'not in', current_stage_ids)], context=context, count=True)
if nb_notes_ws:
# add note to the first column if it's the first stage
dom_not_in = ('stage_ids', 'not in', current_stage_ids)
if result and result[0]['stage_id'][0] == current_stage_ids[0]:
dom_in = result[0]['__domain'].pop()
result[0]['__domain'] = domain + ['|', dom_in, dom_not_in]
result[0]['stage_id_count'] += nb_notes_ws
else:
# add the first stage column
result = [{
'__context': {'group_by': groupby[1:]},
'__domain': domain + [dom_not_in],
'stage_id': (stages[0].id, stages[0].name),
'stage_id_count':nb_notes_ws,
                            '__fold': stages[0].fold,
}] + result
else: # if stage_ids is empty
#note without user's stage
nb_notes_ws = self.search(cr,uid, domain, context=context, count=True)
if nb_notes_ws:
result = [{ #notes for unknown stage
'__context': {'group_by': groupby[1:]},
'__domain': domain,
'stage_id': False,
'stage_id_count':nb_notes_ws
}]
else:
result = []
return result
else:
return super(note_note, self).read_group(cr, uid, domain, fields, groupby,
offset=offset, limit=limit, context=context, orderby=orderby,lazy=lazy)
def _notification_get_recipient_groups(self, cr, uid, ids, message, recipients, context=None):
res = super(note_note, self)._notification_get_recipient_groups(cr, uid, ids, message, recipients, context=context)
new_action_id = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'note.action_note_note')
new_action = self._notification_link_helper(cr, uid, ids, 'new', context=context, action_id=new_action_id)
res['user'] = {
'actions': [{'url': new_action, 'title': _('New Note')}]
}
return res
class res_users(osv.Model):
_name = 'res.users'
_inherit = ['res.users']
def create(self, cr, uid, data, context=None):
user_id = super(res_users, self).create(cr, uid, data, context=context)
note_obj = self.pool['note.stage']
data_obj = self.pool['ir.model.data']
is_employee = self.has_group(cr, user_id, 'base.group_user')
if is_employee:
for n in range(5):
xmlid = 'note_stage_%02d' % (n,)
try:
_model, stage_id = data_obj.get_object_reference(cr, SUPERUSER_ID, 'note', xmlid)
except ValueError:
continue
note_obj.copy(cr, SUPERUSER_ID, stage_id, default={'user_id': user_id}, context=context)
return user_id
|
nbcesar/sabergrade | refs/heads/master | lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/eucjpprober.py | 2918 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCJPSMModel)
self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
self._mContextAnalyzer = EUCJPContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return "EUC-JP"
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
# PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar, charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
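# Hedged usage sketch (not part of the original chardet source): a prober is
# normally driven by chardet's UniversalDetector, but it can also be fed a
# byte buffer directly. `eucjp_bytes` below is a hypothetical EUC-JP buffer.
#   >>> prober = EUCJPProber()
#   >>> state = prober.feed(eucjp_bytes)
#   >>> prober.get_charset_name()
#   'EUC-JP'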
|
superchilli/webapp | refs/heads/master | venv/lib/python2.7/site-packages/pip/_vendor/requests/utils.py | 319 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import re
import socket
import struct
import warnings
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass, urlunparse,
basestring)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL, InvalidHeader, FileModeWarning
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
"""Returns an internal sequence dictionary update."""
if hasattr(d, 'items'):
d = d.items()
return d
def super_len(o):
total_length = 0
current_position = 0
if hasattr(o, '__len__'):
total_length = len(o)
elif hasattr(o, 'len'):
total_length = o.len
elif hasattr(o, 'getvalue'):
# e.g. BytesIO, cStringIO.StringIO
total_length = len(o.getvalue())
elif hasattr(o, 'fileno'):
try:
fileno = o.fileno()
except io.UnsupportedOperation:
pass
else:
total_length = os.fstat(fileno).st_size
# Having used fstat to determine the file length, we need to
# confirm that this file was opened up in binary mode.
if 'b' not in o.mode:
warnings.warn((
"Requests has determined the content-length for this "
"request using the binary size of the file: however, the "
"file has been opened in text mode (i.e. without the 'b' "
"flag in the mode). This may lead to an incorrect "
"content-length. In Requests 3.0, support will be removed "
"for files in text mode."),
FileModeWarning
)
if hasattr(o, 'tell'):
try:
current_position = o.tell()
except (OSError, IOError):
# This can happen in some weird situations, such as when the file
# is actually a special file descriptor like stdin. In this
# instance, we don't know what the length is, so set it to zero and
# let requests chunk it instead.
current_position = total_length
return max(0, total_length - current_position)
def get_netrc_auth(url, raise_errors=False):
"""Returns the Requests tuple auth for a given url from netrc."""
try:
from netrc import netrc, NetrcParseError
netrc_path = None
for f in NETRC_FILES:
try:
loc = os.path.expanduser('~/{0}'.format(f))
except KeyError:
# os.path.expanduser can fail when $HOME is undefined and
# getpwuid fails. See http://bugs.python.org/issue20164 &
# https://github.com/kennethreitz/requests/issues/1846
return
if os.path.exists(loc):
netrc_path = loc
break
# Abort early if there isn't one.
if netrc_path is None:
return
ri = urlparse(url)
        # Strip port numbers from netloc. This weird ``if...encode`` dance is
        # used for Python 3.2, which doesn't support unicode literals.
splitstr = b':'
if isinstance(url, str):
splitstr = splitstr.decode('ascii')
host = ri.netloc.split(splitstr)[0]
try:
_netrc = netrc(netrc_path).authenticators(host)
if _netrc:
# Return with login / password
login_i = (0 if _netrc[0] else 1)
return (_netrc[login_i], _netrc[2])
except (NetrcParseError, IOError):
# If there was a parsing error or a permissions issue reading the file,
# we'll just skip netrc auth unless explicitly asked to raise errors.
if raise_errors:
raise
# AppEngine hackiness.
except (ImportError, AttributeError):
pass
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
if (name and isinstance(name, basestring) and name[0] != '<' and
name[-1] != '>'):
return os.path.basename(name)
def from_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. Unless it can not be represented as such, return an
OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
ValueError: need more than 1 value to unpack
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
:rtype: OrderedDict
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
return OrderedDict(value)
def to_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. If it can be, return a list of tuples, e.g.,
::
>>> to_key_val_list([('key', 'val')])
[('key', 'val')]
>>> to_key_val_list({'key': 'val'})
[('key', 'val')]
>>> to_key_val_list('string')
ValueError: cannot encode objects that are not 2-tuples.
:rtype: list
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
if isinstance(value, collections.Mapping):
value = value.items()
return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
:rtype: list
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
:rtype: dict
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
:rtype: str
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
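# Hedged usage sketch (not part of the original requests source):
#   >>> unquote_header_value('"token"')
#   'token'
#   >>> unquote_header_value('plain')
#   'plain'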
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
:rtype: dict
"""
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
:rtype: CookieJar
"""
cj2 = cookiejar_from_dict(cookie_dict)
cj.update(cj2)
return cj
def get_encodings_from_content(content):
"""Returns encodings from given content string.
:param content: bytestring to extract encodings from.
"""
warnings.warn((
'In requests 3.0, get_encodings_from_content will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
return (charset_re.findall(content) +
pragma_re.findall(content) +
xml_re.findall(content))
def get_encoding_from_headers(headers):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
:rtype: str
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = cgi.parse_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
"""Stream decodes a iterator."""
if r.encoding is None:
for item in iterator:
yield item
return
decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode(b'', final=True)
if rv:
yield rv
def iter_slices(string, slice_length):
"""Iterate over slices of a string."""
pos = 0
if slice_length is None or slice_length <= 0:
slice_length = len(string)
while pos < len(string):
yield string[pos:pos + slice_length]
pos += slice_length
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
2. fall back and replace all unicode characters
:rtype: str
"""
warnings.warn((
'In requests 3.0, get_unicode_from_response will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ "0123456789-._~")
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
:rtype: str
"""
parts = uri.split('%')
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
try:
c = chr(int(h, 16))
except ValueError:
raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = '%' + parts[i]
else:
parts[i] = '%' + parts[i]
return ''.join(parts)
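# Hedged usage sketch (not part of the original requests source): unreserved
# characters are unescaped; reserved ones (here %2F, i.e. '/') are kept:
#   >>> unquote_unreserved('%7Euser/%2Fpath')
#   '~user/%2Fpath'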
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
:rtype: str
"""
safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
safe_without_percent = "!#$&'()*+,/:;=?@[]~"
try:
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved,
# unreserved, or '%')
return quote(unquote_unreserved(uri), safe=safe_with_percent)
except InvalidURL:
# We couldn't unquote the given URI, so let's try quoting it, but
# there may be unquoted '%'s in the URI. We need to make sure they're
# properly quoted so they do not cause issues elsewhere.
return quote(uri, safe=safe_without_percent)
def address_in_network(ip, net):
"""This function allows you to check if on IP belongs to a network subnet
Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
:rtype: bool
"""
ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
netaddr, bits = net.split('/')
netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
"""Converts mask from /xx format to xxx.xxx.xxx.xxx
Example: if mask is 24 function returns 255.255.255.0
:rtype: str
"""
bits = 0xffffffff ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(struct.pack('>I', bits))
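# Hedged usage sketch (not part of the original requests source):
#   >>> dotted_netmask(24)
#   '255.255.255.0'
#   >>> address_in_network('192.168.1.1', '192.168.1.0/24')
#   True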
def is_ipv4_address(string_ip):
"""
:rtype: bool
"""
try:
socket.inet_aton(string_ip)
except socket.error:
return False
return True
def is_valid_cidr(string_network):
"""
Very simple check of the cidr format in no_proxy variable.
:rtype: bool
"""
if string_network.count('/') == 1:
try:
mask = int(string_network.split('/')[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split('/')[0])
except socket.error:
return False
else:
return False
return True
def should_bypass_proxies(url):
"""
Returns whether we should bypass proxies or not.
:rtype: bool
"""
get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
# First check whether no_proxy is defined. If it is, check that the URL
# we're getting isn't in the no_proxy list.
no_proxy = get_proxy('no_proxy')
netloc = urlparse(url).netloc
if no_proxy:
# We need to check whether we match here. We need to see if we match
# the end of the netloc, both with and without the port.
no_proxy = (
host for host in no_proxy.replace(' ', '').split(',') if host
)
ip = netloc.split(':')[0]
if is_ipv4_address(ip):
for proxy_ip in no_proxy:
if is_valid_cidr(proxy_ip):
if address_in_network(ip, proxy_ip):
return True
elif ip == proxy_ip:
# If no_proxy ip was defined in plain IP notation instead of cidr notation &
# matches the IP of the index
return True
else:
for host in no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return True
# If the system proxy settings indicate that this URL should be bypassed,
# don't proxy.
# The proxy_bypass function is incredibly buggy on macOS in early versions
# of Python 2.6, so allow this call to fail. Only catch the specific
# exceptions we've seen, though: this call failing in other ways can reveal
# legitimate problems.
try:
bypass = proxy_bypass(netloc)
except (TypeError, socket.gaierror):
bypass = False
if bypass:
return True
return False
def get_environ_proxies(url):
"""
Return a dict of environment proxies.
:rtype: dict
"""
if should_bypass_proxies(url):
return {}
else:
return getproxies()
def select_proxy(url, proxies):
"""Select a proxy for the url, if applicable.
:param url: The url being for the request
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
"""
proxies = proxies or {}
urlparts = urlparse(url)
if urlparts.hostname is None:
return proxies.get('all', proxies.get(urlparts.scheme))
proxy_keys = [
'all://' + urlparts.hostname,
'all',
urlparts.scheme + '://' + urlparts.hostname,
urlparts.scheme,
]
proxy = None
for proxy_key in proxy_keys:
if proxy_key in proxies:
proxy = proxies[proxy_key]
break
return proxy
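# Hedged usage sketch (not part of the original requests source): the most
# specific matching key wins, falling back to the bare scheme:
#   >>> select_proxy('http://example.com/', {'http': 'http://10.0.0.1:3128'})
#   'http://10.0.0.1:3128'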
def default_user_agent(name="python-requests"):
"""
Return a string representing the default user agent.
:rtype: str
"""
return '%s/%s' % (name, __version__)
def default_headers():
"""
:rtype: requests.structures.CaseInsensitiveDict
"""
return CaseInsensitiveDict({
'User-Agent': default_user_agent(),
'Accept-Encoding': ', '.join(('gzip', 'deflate')),
'Accept': '*/*',
'Connection': 'keep-alive',
})
def parse_header_links(value):
"""Return a dict of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
:rtype: list
"""
links = []
replace_chars = ' \'"'
for val in re.split(', *<', value):
try:
url, params = val.split(';', 1)
except ValueError:
url, params = val, ''
link = {'url': url.strip('<> \'"')}
for param in params.split(';'):
try:
key, value = param.split('=')
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
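# Hedged usage sketch (not part of the original requests source; dict key
# order in the printed result may vary):
#   >>> parse_header_links('<http://example.com/page2>; rel="next"')
#   [{'url': 'http://example.com/page2', 'rel': 'next'}]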
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
"""
:rtype: str
"""
# JSON always starts with two ASCII characters, so detection is as
# easy as counting the nulls and from their location and count
# determine the encoding. Also detect a BOM, if present.
sample = data[:4]
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
return 'utf-32' # BOM included
if sample[:3] == codecs.BOM_UTF8:
return 'utf-8-sig' # BOM included, MS style (discouraged)
if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
return 'utf-16' # BOM included
nullcount = sample.count(_null)
if nullcount == 0:
return 'utf-8'
if nullcount == 2:
if sample[::2] == _null2: # 1st and 3rd are null
return 'utf-16-be'
if sample[1::2] == _null2: # 2nd and 4th are null
return 'utf-16-le'
# Did not detect 2 valid UTF-16 ascii-range characters
if nullcount == 3:
if sample[:3] == _null3:
return 'utf-32-be'
if sample[1:] == _null3:
return 'utf-32-le'
# Did not detect a valid UTF-32 ascii-range character
return None
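# Hedged usage sketch (not part of the original requests source):
#   >>> guess_json_utf(b'{"a": 1}')
#   'utf-8'
#   >>> guess_json_utf('{"a": 1}'.encode('utf-16'))  # BOM included
#   'utf-16'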
def prepend_scheme_if_needed(url, new_scheme):
"""Given a URL that may or may not have a scheme, prepend the given scheme.
Does not replace a present scheme with the one provided as an argument.
:rtype: str
"""
scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
# urlparse is a finicky beast, and sometimes decides that there isn't a
# netloc present. Assume that it's being over-cautious, and switch netloc
# and path if urlparse decided there was no netloc.
if not netloc:
netloc, path = path, netloc
return urlunparse((scheme, netloc, path, params, query, fragment))
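# Hedged usage sketch (not part of the original requests source):
#   >>> prepend_scheme_if_needed('example.com/path', 'http')
#   'http://example.com/path'
#   >>> prepend_scheme_if_needed('https://example.com/path', 'http')
#   'https://example.com/path'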
def get_auth_from_url(url):
"""Given a url with authentication components, extract them into a tuple of
username,password.
:rtype: (str,str)
"""
parsed = urlparse(url)
try:
auth = (unquote(parsed.username), unquote(parsed.password))
except (AttributeError, TypeError):
auth = ('', '')
return auth
def to_native_string(string, encoding='ascii'):
"""Given a string object, regardless of type, returns a representation of
that string in the native string type, encoding and decoding where
necessary. This assumes ASCII unless told otherwise.
"""
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out
# Moved outside of function to avoid recompile every call
_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$')
_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$')
def check_header_validity(header):
"""Verifies that header value is a string which doesn't contain
leading whitespace or return characters. This prevents unintended
header injection.
:param header: tuple, in the format (name, value).
"""
name, value = header
if isinstance(value, bytes):
pat = _CLEAN_HEADER_REGEX_BYTE
else:
pat = _CLEAN_HEADER_REGEX_STR
try:
if not pat.match(value):
raise InvalidHeader("Invalid return character or leading space in header: %s" % name)
except TypeError:
raise InvalidHeader("Header value %s must be of type str or bytes, "
"not %s" % (value, type(value)))
def urldefragauth(url):
"""
Given a url remove the fragment and the authentication part.
:rtype: str
"""
scheme, netloc, path, params, query, fragment = urlparse(url)
# see func:`prepend_scheme_if_needed`
if not netloc:
netloc, path = path, netloc
netloc = netloc.rsplit('@', 1)[-1]
return urlunparse((scheme, netloc, path, params, query, ''))
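# Hedged usage sketch (not part of the original requests source):
#   >>> urldefragauth('http://user:pass@example.com/path#frag')
#   'http://example.com/path'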
|
Xeleste/namebench | refs/heads/master | nb_third_party/jinja2/lexer.py | 211 | # -*- coding: utf-8 -*-
"""
jinja2.lexer
~~~~~~~~~~~~
This module implements a Jinja / Python combination lexer. The
`Lexer` class provided by this module is used to do some preprocessing
for Jinja.
On the one hand it filters out invalid operators like the bitshift
operators we don't allow in templates. On the other hand it separates
template code and python code in expressions.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
from operator import itemgetter
from collections import deque
from jinja2.exceptions import TemplateSyntaxError
from jinja2.utils import LRUCache, next
# cache for the lexers. Exists in order to be able to have multiple
# environments with the same lexer
_lexer_cache = LRUCache(50)
# static regular expressions
whitespace_re = re.compile(r'\s+', re.U)
string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
integer_re = re.compile(r'\d+')
# we use the unicode identifier rule if this python version is able
# to handle unicode identifiers, otherwise the standard ASCII one.
try:
compile('föö', '<unknown>', 'eval')
except SyntaxError:
name_re = re.compile(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b')
else:
from jinja2 import _stringdefs
name_re = re.compile(r'[%s][%s]*' % (_stringdefs.xid_start,
_stringdefs.xid_continue))
float_re = re.compile(r'(?<!\.)\d+\.\d+')
newline_re = re.compile(r'(\r\n|\r|\n)')
# intern the tokens and keep references to them
TOKEN_ADD = intern('add')
TOKEN_ASSIGN = intern('assign')
TOKEN_COLON = intern('colon')
TOKEN_COMMA = intern('comma')
TOKEN_DIV = intern('div')
TOKEN_DOT = intern('dot')
TOKEN_EQ = intern('eq')
TOKEN_FLOORDIV = intern('floordiv')
TOKEN_GT = intern('gt')
TOKEN_GTEQ = intern('gteq')
TOKEN_LBRACE = intern('lbrace')
TOKEN_LBRACKET = intern('lbracket')
TOKEN_LPAREN = intern('lparen')
TOKEN_LT = intern('lt')
TOKEN_LTEQ = intern('lteq')
TOKEN_MOD = intern('mod')
TOKEN_MUL = intern('mul')
TOKEN_NE = intern('ne')
TOKEN_PIPE = intern('pipe')
TOKEN_POW = intern('pow')
TOKEN_RBRACE = intern('rbrace')
TOKEN_RBRACKET = intern('rbracket')
TOKEN_RPAREN = intern('rparen')
TOKEN_SEMICOLON = intern('semicolon')
TOKEN_SUB = intern('sub')
TOKEN_TILDE = intern('tilde')
TOKEN_WHITESPACE = intern('whitespace')
TOKEN_FLOAT = intern('float')
TOKEN_INTEGER = intern('integer')
TOKEN_NAME = intern('name')
TOKEN_STRING = intern('string')
TOKEN_OPERATOR = intern('operator')
TOKEN_BLOCK_BEGIN = intern('block_begin')
TOKEN_BLOCK_END = intern('block_end')
TOKEN_VARIABLE_BEGIN = intern('variable_begin')
TOKEN_VARIABLE_END = intern('variable_end')
TOKEN_RAW_BEGIN = intern('raw_begin')
TOKEN_RAW_END = intern('raw_end')
TOKEN_COMMENT_BEGIN = intern('comment_begin')
TOKEN_COMMENT_END = intern('comment_end')
TOKEN_COMMENT = intern('comment')
TOKEN_LINESTATEMENT_BEGIN = intern('linestatement_begin')
TOKEN_LINESTATEMENT_END = intern('linestatement_end')
TOKEN_LINECOMMENT_BEGIN = intern('linecomment_begin')
TOKEN_LINECOMMENT_END = intern('linecomment_end')
TOKEN_LINECOMMENT = intern('linecomment')
TOKEN_DATA = intern('data')
TOKEN_INITIAL = intern('initial')
TOKEN_EOF = intern('eof')
# bind operators to token types
operators = {
'+': TOKEN_ADD,
'-': TOKEN_SUB,
'/': TOKEN_DIV,
'//': TOKEN_FLOORDIV,
'*': TOKEN_MUL,
'%': TOKEN_MOD,
'**': TOKEN_POW,
'~': TOKEN_TILDE,
'[': TOKEN_LBRACKET,
']': TOKEN_RBRACKET,
'(': TOKEN_LPAREN,
')': TOKEN_RPAREN,
'{': TOKEN_LBRACE,
'}': TOKEN_RBRACE,
'==': TOKEN_EQ,
'!=': TOKEN_NE,
'>': TOKEN_GT,
'>=': TOKEN_GTEQ,
'<': TOKEN_LT,
'<=': TOKEN_LTEQ,
'=': TOKEN_ASSIGN,
'.': TOKEN_DOT,
':': TOKEN_COLON,
'|': TOKEN_PIPE,
',': TOKEN_COMMA,
';': TOKEN_SEMICOLON
}
reverse_operators = dict([(v, k) for k, v in operators.iteritems()])
assert len(operators) == len(reverse_operators), 'operators dropped'
operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in
sorted(operators, key=lambda x: -len(x))))
ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT,
TOKEN_COMMENT_END, TOKEN_WHITESPACE,
TOKEN_WHITESPACE, TOKEN_LINECOMMENT_BEGIN,
TOKEN_LINECOMMENT_END, TOKEN_LINECOMMENT])
ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA,
TOKEN_COMMENT, TOKEN_LINECOMMENT])
def _describe_token_type(token_type):
if token_type in reverse_operators:
return reverse_operators[token_type]
return {
TOKEN_COMMENT_BEGIN: 'begin of comment',
TOKEN_COMMENT_END: 'end of comment',
TOKEN_COMMENT: 'comment',
TOKEN_LINECOMMENT: 'comment',
TOKEN_BLOCK_BEGIN: 'begin of statement block',
TOKEN_BLOCK_END: 'end of statement block',
TOKEN_VARIABLE_BEGIN: 'begin of print statement',
TOKEN_VARIABLE_END: 'end of print statement',
TOKEN_LINESTATEMENT_BEGIN: 'begin of line statement',
TOKEN_LINESTATEMENT_END: 'end of line statement',
TOKEN_DATA: 'template data / text',
TOKEN_EOF: 'end of template'
}.get(token_type, token_type)
def describe_token(token):
"""Returns a description of the token."""
if token.type == 'name':
return token.value
return _describe_token_type(token.type)
def describe_token_expr(expr):
"""Like `describe_token` but for token expressions."""
if ':' in expr:
type, value = expr.split(':', 1)
if type == 'name':
return value
else:
type = expr
return _describe_token_type(type)
def count_newlines(value):
"""Count the number of newline characters in the string. This is
useful for extensions that filter a stream.
"""
return len(newline_re.findall(value))
def compile_rules(environment):
"""Compiles all the rules from the environment into a list of rules."""
e = re.escape
rules = [
(len(environment.comment_start_string), 'comment',
e(environment.comment_start_string)),
(len(environment.block_start_string), 'block',
e(environment.block_start_string)),
(len(environment.variable_start_string), 'variable',
e(environment.variable_start_string))
]
if environment.line_statement_prefix is not None:
rules.append((len(environment.line_statement_prefix), 'linestatement',
r'^\s*' + e(environment.line_statement_prefix)))
if environment.line_comment_prefix is not None:
rules.append((len(environment.line_comment_prefix), 'linecomment',
r'(?:^|(?<=\S))[^\S\r\n]*' +
e(environment.line_comment_prefix)))
return [x[1:] for x in sorted(rules, reverse=True)]
class Failure(object):
"""Class that raises a `TemplateSyntaxError` if called.
Used by the `Lexer` to specify known errors.
"""
def __init__(self, message, cls=TemplateSyntaxError):
self.message = message
self.error_class = cls
def __call__(self, lineno, filename):
raise self.error_class(self.message, lineno, filename)
class Token(tuple):
"""Token class."""
__slots__ = ()
lineno, type, value = (property(itemgetter(x)) for x in range(3))
def __new__(cls, lineno, type, value):
return tuple.__new__(cls, (lineno, intern(str(type)), value))
def __str__(self):
if self.type in reverse_operators:
return reverse_operators[self.type]
elif self.type == 'name':
return self.value
return self.type
def test(self, expr):
"""Test a token against a token expression. This can either be a
token type or ``'token_type:token_value'``. This can only test
against string values and types.
"""
        # here we do a regular string equality check as test_any is usually
        # passed an iterable of non-interned strings.
if self.type == expr:
return True
elif ':' in expr:
return expr.split(':', 1) == [self.type, self.value]
return False
def test_any(self, *iterable):
"""Test against multiple token expressions."""
for expr in iterable:
if self.test(expr):
return True
return False
def __repr__(self):
return 'Token(%r, %r, %r)' % (
self.lineno,
self.type,
self.value
)
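# Hedged usage sketch (not part of the original jinja2 source): token
# expressions are either a bare type or 'type:value':
#   >>> tok = Token(1, 'name', 'if')
#   >>> tok.test('name:if'), tok.test('string')
#   (True, False)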
class TokenStreamIterator(object):
"""The iterator for tokenstreams. Iterate over the stream
until the eof token is reached.
"""
def __init__(self, stream):
self.stream = stream
def __iter__(self):
return self
def next(self):
token = self.stream.current
if token.type is TOKEN_EOF:
self.stream.close()
raise StopIteration()
next(self.stream)
return token
class TokenStream(object):
"""A token stream is an iterable that yields :class:`Token`\s. The
parser however does not iterate over it but calls :meth:`next` to go
one token ahead. The current active token is stored as :attr:`current`.
"""
def __init__(self, generator, name, filename):
self._next = iter(generator).next
self._pushed = deque()
self.name = name
self.filename = filename
self.closed = False
self.current = Token(1, TOKEN_INITIAL, '')
next(self)
def __iter__(self):
return TokenStreamIterator(self)
def __nonzero__(self):
return bool(self._pushed) or self.current.type is not TOKEN_EOF
eos = property(lambda x: not x, doc="Are we at the end of the stream?")
def push(self, token):
"""Push a token back to the stream."""
self._pushed.append(token)
def look(self):
"""Look at the next token."""
old_token = next(self)
result = self.current
self.push(result)
self.current = old_token
return result
def skip(self, n=1):
"""Got n tokens ahead."""
for x in xrange(n):
next(self)
def next_if(self, expr):
"""Perform the token test and return the token if it matched.
Otherwise the return value is `None`.
"""
if self.current.test(expr):
return next(self)
def skip_if(self, expr):
"""Like :meth:`next_if` but only returns `True` or `False`."""
return self.next_if(expr) is not None
def next(self):
"""Go one token ahead and return the old one"""
rv = self.current
if self._pushed:
self.current = self._pushed.popleft()
elif self.current.type is not TOKEN_EOF:
try:
self.current = self._next()
except StopIteration:
self.close()
return rv
def close(self):
"""Close the stream."""
self.current = Token(self.current.lineno, TOKEN_EOF, '')
self._next = None
self.closed = True
def expect(self, expr):
"""Expect a given token type and return it. This accepts the same
argument as :meth:`jinja2.lexer.Token.test`.
"""
if not self.current.test(expr):
expr = describe_token_expr(expr)
if self.current.type is TOKEN_EOF:
raise TemplateSyntaxError('unexpected end of template, '
'expected %r.' % expr,
self.current.lineno,
self.name, self.filename)
raise TemplateSyntaxError("expected token %r, got %r" %
(expr, describe_token(self.current)),
self.current.lineno,
self.name, self.filename)
try:
return self.current
finally:
next(self)
def get_lexer(environment):
"""Return a lexer which is probably cached."""
key = (environment.block_start_string,
environment.block_end_string,
environment.variable_start_string,
environment.variable_end_string,
environment.comment_start_string,
environment.comment_end_string,
environment.line_statement_prefix,
environment.line_comment_prefix,
environment.trim_blocks,
environment.newline_sequence)
lexer = _lexer_cache.get(key)
if lexer is None:
lexer = Lexer(environment)
_lexer_cache[key] = lexer
return lexer
class Lexer(object):
"""Class that implements a lexer for a given environment. Automatically
created by the environment class, usually you don't have to do that.
Note that the lexer is not automatically bound to an environment.
Multiple environments can share the same lexer.
"""
def __init__(self, environment):
# shortcuts
c = lambda x: re.compile(x, re.M | re.S)
e = re.escape
# lexing rules for tags
tag_rules = [
(whitespace_re, TOKEN_WHITESPACE, None),
(float_re, TOKEN_FLOAT, None),
(integer_re, TOKEN_INTEGER, None),
(name_re, TOKEN_NAME, None),
(string_re, TOKEN_STRING, None),
(operator_re, TOKEN_OPERATOR, None)
]
        # assemble the root lexing rule. because "|" is ungreedy
# we have to sort by length so that the lexer continues working
# as expected when we have parsing rules like <% for block and
# <%= for variables. (if someone wants asp like syntax)
# variables are just part of the rules if variable processing
# is required.
root_tag_rules = compile_rules(environment)
# block suffix if trimming is enabled
block_suffix_re = environment.trim_blocks and '\\n?' or ''
self.newline_sequence = environment.newline_sequence
# global lexing rules
self.rules = {
'root': [
# directives
(c('(.*?)(?:%s)' % '|'.join(
[r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*%s)' % (
e(environment.block_start_string),
e(environment.block_start_string),
e(environment.block_end_string)
)] + [
r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, r)
for n, r in root_tag_rules
])), (TOKEN_DATA, '#bygroup'), '#bygroup'),
# data
(c('.+'), TOKEN_DATA, None)
],
# comments
TOKEN_COMMENT_BEGIN: [
(c(r'(.*?)((?:\-%s\s*|%s)%s)' % (
e(environment.comment_end_string),
e(environment.comment_end_string),
block_suffix_re
)), (TOKEN_COMMENT, TOKEN_COMMENT_END), '#pop'),
(c('(.)'), (Failure('Missing end of comment tag'),), None)
],
# blocks
TOKEN_BLOCK_BEGIN: [
(c('(?:\-%s\s*|%s)%s' % (
e(environment.block_end_string),
e(environment.block_end_string),
block_suffix_re
)), TOKEN_BLOCK_END, '#pop'),
] + tag_rules,
# variables
TOKEN_VARIABLE_BEGIN: [
(c('\-%s\s*|%s' % (
e(environment.variable_end_string),
e(environment.variable_end_string)
)), TOKEN_VARIABLE_END, '#pop')
] + tag_rules,
# raw block
TOKEN_RAW_BEGIN: [
(c('(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
e(environment.block_start_string),
e(environment.block_start_string),
e(environment.block_end_string),
e(environment.block_end_string),
block_suffix_re
)), (TOKEN_DATA, TOKEN_RAW_END), '#pop'),
(c('(.)'), (Failure('Missing end of raw directive'),), None)
],
# line statements
TOKEN_LINESTATEMENT_BEGIN: [
(c(r'\s*(\n|$)'), TOKEN_LINESTATEMENT_END, '#pop')
] + tag_rules,
# line comments
TOKEN_LINECOMMENT_BEGIN: [
(c(r'(.*?)()(?=\n|$)'), (TOKEN_LINECOMMENT,
TOKEN_LINECOMMENT_END), '#pop')
]
}
def _normalize_newlines(self, value):
"""Called for strings and template data to normlize it to unicode."""
return newline_re.sub(self.newline_sequence, value)
def tokenize(self, source, name=None, filename=None, state=None):
"""Calls tokeniter + tokenize and wraps it in a token stream.
"""
stream = self.tokeniter(source, name, filename, state)
return TokenStream(self.wrap(stream, name, filename), name, filename)
def wrap(self, stream, name=None, filename=None):
"""This is called with the stream as returned by `tokenize` and wraps
every token in a :class:`Token` and converts the value.
"""
for lineno, token, value in stream:
if token in ignored_tokens:
continue
elif token == 'linestatement_begin':
token = 'block_begin'
elif token == 'linestatement_end':
token = 'block_end'
# we are not interested in those tokens in the parser
elif token in ('raw_begin', 'raw_end'):
continue
elif token == 'data':
value = self._normalize_newlines(value)
elif token == 'keyword':
token = value
elif token == 'name':
value = str(value)
elif token == 'string':
# try to unescape string
try:
value = self._normalize_newlines(value[1:-1]) \
.encode('ascii', 'backslashreplace') \
.decode('unicode-escape')
except Exception, e:
msg = str(e).split(':')[-1].strip()
raise TemplateSyntaxError(msg, lineno, name, filename)
# if we can express it as bytestring (ascii only)
# we do that for support of semi broken APIs
# as datetime.datetime.strftime. On python 3 this
# call becomes a noop thanks to 2to3
try:
value = str(value)
except UnicodeError:
pass
elif token == 'integer':
value = int(value)
elif token == 'float':
value = float(value)
elif token == 'operator':
token = operators[value]
yield Token(lineno, token, value)
def tokeniter(self, source, name, filename=None, state=None):
"""This method tokenizes the text and returns the tokens in a
generator. Use this method if you just want to tokenize a template.
"""
source = '\n'.join(unicode(source).splitlines())
pos = 0
lineno = 1
stack = ['root']
if state is not None and state != 'root':
assert state in ('variable', 'block'), 'invalid state'
stack.append(state + '_begin')
else:
state = 'root'
statetokens = self.rules[stack[-1]]
source_length = len(source)
balancing_stack = []
while 1:
# tokenizer loop
for regex, tokens, new_state in statetokens:
m = regex.match(source, pos)
# if no match we try again with the next rule
if m is None:
continue
# we only match blocks and variables if braces / parentheses
# are balanced. continue parsing with the lower rule which
# is the operator rule. do this only if the end tags look
# like operators
if balancing_stack and \
tokens in ('variable_end', 'block_end',
'linestatement_end'):
continue
# tuples support more options
if isinstance(tokens, tuple):
for idx, token in enumerate(tokens):
# failure group
if token.__class__ is Failure:
raise token(lineno, filename)
# bygroup is a bit more complex: in that case we
# yield, for the current token, the first named
# group that matched
elif token == '#bygroup':
for key, value in m.groupdict().iteritems():
if value is not None:
yield lineno, key, value
lineno += value.count('\n')
break
else:
raise RuntimeError('%r wanted to resolve '
'the token dynamically'
' but no group matched'
% regex)
# normal group
else:
data = m.group(idx + 1)
if data or token not in ignore_if_empty:
yield lineno, token, data
lineno += data.count('\n')
# strings as tokens are just yielded as-is.
else:
data = m.group()
# update brace/parentheses balance
if tokens == 'operator':
if data == '{':
balancing_stack.append('}')
elif data == '(':
balancing_stack.append(')')
elif data == '[':
balancing_stack.append(']')
elif data in ('}', ')', ']'):
if not balancing_stack:
raise TemplateSyntaxError('unexpected \'%s\'' %
data, lineno, name,
filename)
expected_op = balancing_stack.pop()
if expected_op != data:
raise TemplateSyntaxError('unexpected \'%s\', '
'expected \'%s\'' %
(data, expected_op),
lineno, name,
filename)
# yield items
if data or tokens not in ignore_if_empty:
yield lineno, tokens, data
lineno += data.count('\n')
# fetch new position into new variable so that we can check
# if there is an internal parsing error which would result
# in an infinite loop
pos2 = m.end()
# handle state changes
if new_state is not None:
# remove the uppermost state
if new_state == '#pop':
stack.pop()
# resolve the new state by group checking
elif new_state == '#bygroup':
for key, value in m.groupdict().iteritems():
if value is not None:
stack.append(key)
break
else:
raise RuntimeError('%r wanted to resolve the '
'new state dynamically but'
' no group matched' %
regex)
# direct state name given
else:
stack.append(new_state)
statetokens = self.rules[stack[-1]]
# we are still at the same position and no stack change.
# this means a loop without break condition, avoid that and
# raise error
elif pos2 == pos:
raise RuntimeError('%r yielded empty string without '
'stack change' % regex)
# advance to the new position and start again
pos = pos2
break
# if the loop terminated without break we haven't found a single match
# either we are at the end of the file or we have a problem
else:
# end of text
if pos >= source_length:
return
# something went wrong
raise TemplateSyntaxError('unexpected char %r at %d' %
(source[pos], pos), lineno,
name, filename)
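# Usage sketch (not part of the upstream module; assumes `Environment` is
# importable from jinja2). Shows how get_lexer() and Lexer.tokenize() fit
# together: the returned stream yields Token objects with lineno/type/value.
if __name__ == '__main__':
    from jinja2 import Environment
    lexer = get_lexer(Environment())
    for tok in lexer.tokenize(u'Hello {{ name }}!'):
        print tok.lineno, tok.type, tok.value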
|
jarv/cmdchallenge-site | refs/heads/master | lambda_src/runcmd/docker/utils/decorators.py | 6 | import functools
from .. import errors
from . import utils
def check_resource(f):
@functools.wraps(f)
def wrapped(self, resource_id=None, *args, **kwargs):
if resource_id is None:
if kwargs.get('container'):
resource_id = kwargs.pop('container')
elif kwargs.get('image'):
resource_id = kwargs.pop('image')
if isinstance(resource_id, dict):
resource_id = resource_id.get('Id', resource_id.get('ID'))
if not resource_id:
raise errors.NullResource(
'image or container param is undefined'
)
return f(self, resource_id, *args, **kwargs)
return wrapped
def minimum_version(version):
def decorator(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
if utils.version_lt(self._version, version):
raise errors.InvalidVersion(
'{0} is not available for version < {1}'.format(
f.__name__, version
)
)
return f(self, *args, **kwargs)
return wrapper
return decorator
def update_headers(f):
def inner(self, *args, **kwargs):
if 'HttpHeaders' in self._auth_configs:
if not kwargs.get('headers'):
kwargs['headers'] = self._auth_configs['HttpHeaders']
else:
kwargs['headers'].update(self._auth_configs['HttpHeaders'])
return f(self, *args, **kwargs)
return inner
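# Usage sketch (not part of the upstream file; the relative imports above mean
# this module is not runnable standalone). `_DemoClient` is a made-up stand-in
# for the real APIClient, which provides the `_version` attribute.
if __name__ == '__main__':
    class _DemoClient(object):
        _version = '1.24'

        @minimum_version('1.21')
        @check_resource
        def inspect(self, resource_id):
            return resource_id

    # check_resource pulls the id out of a dict passed as `container`
    print(_DemoClient().inspect(container={'Id': 'abc123'}))  # -> abc123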
|
rosmo/ansible | refs/heads/devel | lib/ansible/modules/cloud/cloudstack/cs_instance_nic_secondaryip.py | 26 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2017, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_instance_nic_secondaryip
short_description: Manages secondary IPs of an instance on Apache CloudStack based clouds.
description:
- Add and remove secondary IPs to and from a NIC of an instance.
version_added: '2.4'
author: René Moser (@resmo)
options:
vm:
description:
- Name of instance.
type: str
required: true
aliases: [ name ]
network:
description:
- Name of the network.
- Required to find the NIC if instance has multiple networks assigned.
type: str
vm_guest_ip:
description:
- Secondary IP address to be added to the instance nic.
- If not set, the API always returns a new IP address and idempotency is not guaranteed.
type: str
aliases: [ secondary_ip ]
vpc:
description:
- Name of the VPC the I(vm) is related to.
type: str
domain:
description:
- Domain the instance is related to.
type: str
account:
description:
- Account the instance is related to.
type: str
project:
description:
- Name of the project the instance is deployed in.
type: str
zone:
description:
- Name of the zone in which the instance is deployed.
- If not set, default zone is used.
type: str
state:
description:
- State of the ipaddress.
type: str
default: present
choices: [ present, absent ]
poll_async:
description:
- Poll async jobs until job has finished.
type: bool
default: yes
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: Assign a specific IP to the default NIC of the VM
cs_instance_nic_secondaryip:
vm: customer_xy
vm_guest_ip: 10.10.10.10
delegate_to: localhost
# Note: If vm_guest_ip is not set, you will get a new IP address on every run.
- name: Assign an IP to the default NIC of the VM
cs_instance_nic_secondaryip:
vm: customer_xy
delegate_to: localhost
- name: Remove a specific IP from the default NIC
cs_instance_nic_secondaryip:
vm: customer_xy
vm_guest_ip: 10.10.10.10
state: absent
delegate_to: localhost
'''
RETURN = '''
---
id:
description: UUID of the NIC.
returned: success
type: str
sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
vm:
description: Name of the VM.
returned: success
type: str
sample: web-01
ip_address:
description: Primary IP of the NIC.
returned: success
type: str
sample: 10.10.10.10
netmask:
description: Netmask of the NIC.
returned: success
type: str
sample: 255.255.255.0
mac_address:
description: MAC address of the NIC.
returned: success
type: str
sample: 02:00:33:31:00:e4
vm_guest_ip:
description: Secondary IP of the NIC.
returned: success
type: str
sample: 10.10.10.10
network:
description: Name of the network if not default.
returned: success
type: str
sample: sync network
domain:
description: Domain the VM is related to.
returned: success
type: str
sample: example domain
account:
description: Account the VM is related to.
returned: success
type: str
sample: example account
project:
description: Name of project the VM is related to.
returned: success
type: str
sample: Production
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackInstanceNicSecondaryIp(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackInstanceNicSecondaryIp, self).__init__(module)
self.vm_guest_ip = self.module.params.get('vm_guest_ip')
self.nic = None
self.returns = {
'ipaddress': 'ip_address',
'macaddress': 'mac_address',
'netmask': 'netmask',
}
def get_nic(self):
if self.nic:
return self.nic
args = {
'virtualmachineid': self.get_vm(key='id'),
'networkid': self.get_network(key='id'),
}
nics = self.query_api('listNics', **args)
if nics:
self.nic = nics['nic'][0]
return self.nic
self.fail_json(msg="NIC for VM %s in network %s not found" % (self.get_vm(key='name'), self.get_network(key='name')))
def get_secondary_ip(self):
nic = self.get_nic()
if self.vm_guest_ip:
secondary_ips = nic.get('secondaryip') or []
for secondary_ip in secondary_ips:
if secondary_ip['ipaddress'] == self.vm_guest_ip:
return secondary_ip
return None
def present_nic_ip(self):
nic = self.get_nic()
if not self.get_secondary_ip():
self.result['changed'] = True
args = {
'nicid': nic['id'],
'ipaddress': self.vm_guest_ip,
}
if not self.module.check_mode:
res = self.query_api('addIpToNic', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
nic = self.poll_job(res, 'nicsecondaryip')
# Save result for RETURNS
self.vm_guest_ip = nic['ipaddress']
return nic
def absent_nic_ip(self):
nic = self.get_nic()
secondary_ip = self.get_secondary_ip()
if secondary_ip:
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('removeIpFromNic', id=secondary_ip['id'])
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res, 'nicsecondaryip')
return nic
def get_result(self, nic):
super(AnsibleCloudStackInstanceNicSecondaryIp, self).get_result(nic)
if nic and not self.module.params.get('network'):
self.module.params['network'] = nic.get('networkid')
self.result['network'] = self.get_network(key='name')
self.result['vm'] = self.get_vm(key='name')
self.result['vm_guest_ip'] = self.vm_guest_ip
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
vm=dict(required=True, aliases=['name']),
vm_guest_ip=dict(aliases=['secondary_ip']),
network=dict(),
vpc=dict(),
state=dict(choices=['present', 'absent'], default='present'),
domain=dict(),
account=dict(),
project=dict(),
zone=dict(),
poll_async=dict(type='bool', default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True,
required_if=([
('state', 'absent', ['vm_guest_ip'])
])
)
acs_instance_nic_secondaryip = AnsibleCloudStackInstanceNicSecondaryIp(module)
state = module.params.get('state')
if state == 'absent':
nic = acs_instance_nic_secondaryip.absent_nic_ip()
else:
nic = acs_instance_nic_secondaryip.present_nic_ip()
result = acs_instance_nic_secondaryip.get_result(nic)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
flar2/ElementalX-evita-8.0 | refs/heads/master | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
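# Note (illustrative, not upstream): us_to_px and px_to_us above are inverse
# mappings between microseconds and pixels, scaled by self.zoom. With the
# default zoom of 0.5, a 10000 us interval maps to 10000 / 10**3 * 0.5 = 5 px,
# and 5 px maps back to (5 / 0.5) * 10**3 = 10000 us.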
|
eestay/edx-platform | refs/heads/master | openedx/core/djangoapps/course_groups/cohorts.py | 14 | """
This file contains the logic for cohorts, as exposed internally to the
forums, and to the cohort admin views.
"""
import logging
import random
from django.db import transaction
from django.db.models.signals import post_save, m2m_changed
from django.dispatch import receiver
from django.http import Http404
from django.utils.translation import ugettext as _
from courseware import courses
from eventtracking import tracker
from request_cache.middleware import RequestCache
from student.models import get_user_by_username_or_email
from .models import CourseUserGroup, CourseCohort, CourseCohortsSettings, CourseUserGroupPartitionGroup
log = logging.getLogger(__name__)
@receiver(post_save, sender=CourseUserGroup)
def _cohort_added(sender, **kwargs):
"""Emits a tracking log event each time a cohort is created"""
instance = kwargs["instance"]
if kwargs["created"] and instance.group_type == CourseUserGroup.COHORT:
tracker.emit(
"edx.cohort.created",
{"cohort_id": instance.id, "cohort_name": instance.name}
)
@receiver(m2m_changed, sender=CourseUserGroup.users.through)
def _cohort_membership_changed(sender, **kwargs):
"""Emits a tracking log event each time cohort membership is modified"""
def get_event_iter(user_id_iter, cohort_iter):
return (
{"cohort_id": cohort.id, "cohort_name": cohort.name, "user_id": user_id}
for user_id in user_id_iter
for cohort in cohort_iter
)
action = kwargs["action"]
instance = kwargs["instance"]
pk_set = kwargs["pk_set"]
reverse = kwargs["reverse"]
if action == "post_add":
event_name = "edx.cohort.user_added"
elif action in ["post_remove", "pre_clear"]:
event_name = "edx.cohort.user_removed"
else:
return
if reverse:
user_id_iter = [instance.id]
if action == "pre_clear":
cohort_iter = instance.course_groups.filter(group_type=CourseUserGroup.COHORT)
else:
cohort_iter = CourseUserGroup.objects.filter(pk__in=pk_set, group_type=CourseUserGroup.COHORT)
else:
cohort_iter = [instance] if instance.group_type == CourseUserGroup.COHORT else []
if action == "pre_clear":
user_id_iter = (user.id for user in instance.users.all())
else:
user_id_iter = pk_set
for event in get_event_iter(user_id_iter, cohort_iter):
tracker.emit(event_name, event)
# A 'default cohort' is an auto-cohort that is automatically created for a course if no cohort with automatic
# assignment has been specified. It is intended to be used in a cohorted course for users who have yet to be assigned
# to a cohort.
# Translation Note: We are NOT translating this string since it is the constant identifier for the "default group"
# and needed across product boundaries.
DEFAULT_COHORT_NAME = "Default Group"
# tl;dr: global state is bad. capa reseeds random every time a problem is loaded. Even
# if and when that's fixed, it's a good idea to have a local generator to avoid any other
# code that messes with the global random module.
_local_random = None
def local_random():
"""
Get the local random number generator. In a function so that we don't run
random.Random() at import time.
"""
# ironic, isn't it?
global _local_random
if _local_random is None:
_local_random = random.Random()
return _local_random
def is_course_cohorted(course_key):
"""
Given a course key, return a boolean for whether or not the course is
cohorted.
Raises:
Http404 if the course doesn't exist.
"""
return get_course_cohort_settings(course_key).is_cohorted
def get_cohort_id(user, course_key, use_cached=False):
"""
Given a course key and a user, return the id of the cohort that user is
assigned to in that course. If they don't have a cohort, return None.
"""
cohort = get_cohort(user, course_key, use_cached=use_cached)
return None if cohort is None else cohort.id
def is_commentable_cohorted(course_key, commentable_id):
"""
Args:
course_key: CourseKey
commentable_id: string
Returns:
Bool: is this commentable cohorted?
Raises:
Http404 if the course doesn't exist.
"""
course = courses.get_course_by_id(course_key)
course_cohort_settings = get_course_cohort_settings(course_key)
if not course_cohort_settings.is_cohorted:
# this is the easy case :)
ans = False
elif (
commentable_id in course.top_level_discussion_topic_ids or
course_cohort_settings.always_cohort_inline_discussions is False
):
# top level discussions have to be manually configured as cohorted
# (default is not).
# Same thing for inline discussions if the default is explicitly set to False in settings
ans = commentable_id in course_cohort_settings.cohorted_discussions
else:
# inline discussions are cohorted by default
ans = True
log.debug(u"is_commentable_cohorted({0}, {1}) = {2}".format(
course_key, commentable_id, ans
))
return ans
def get_cohorted_commentables(course_key):
"""
Given a course_key return a set of strings representing cohorted commentables.
"""
course_cohort_settings = get_course_cohort_settings(course_key)
if not course_cohort_settings.is_cohorted:
# this is the easy case :)
ans = set()
else:
ans = set(course_cohort_settings.cohorted_discussions)
return ans
@transaction.commit_on_success
def get_cohort(user, course_key, assign=True, use_cached=False):
"""Returns the user's cohort for the specified course.
The cohort for the user is cached for the duration of a request. Pass
use_cached=True to use the cached value instead of fetching from the
database.
Arguments:
user: a Django User object.
course_key: CourseKey
assign (bool): if False then we don't assign a group to user
use_cached (bool): Whether to use the cached value or fetch from database.
Returns:
A CourseUserGroup object if the course is cohorted and the User has a
cohort, else None.
Raises:
ValueError if the CourseKey doesn't exist.
"""
request_cache = RequestCache.get_request_cache()
cache_key = u"cohorts.get_cohort.{}.{}".format(user.id, course_key)
if use_cached and cache_key in request_cache.data:
return request_cache.data[cache_key]
request_cache.data.pop(cache_key, None)
# First check whether the course is cohorted (users shouldn't be in a cohort
# in non-cohorted courses, but settings can change after course starts)
course_cohort_settings = get_course_cohort_settings(course_key)
if not course_cohort_settings.is_cohorted:
return request_cache.data.setdefault(cache_key, None)
# If course is cohorted, check if the user already has a cohort.
try:
cohort = CourseUserGroup.objects.get(
course_id=course_key,
group_type=CourseUserGroup.COHORT,
users__id=user.id,
)
return request_cache.data.setdefault(cache_key, cohort)
except CourseUserGroup.DoesNotExist:
# Didn't find the group. If we do not want to assign, return here.
if not assign:
# Do not cache the cohort here, because in the next call assign
# may be True, and we will have to assign the user a cohort.
return None
# Otherwise assign the user a cohort.
course = courses.get_course(course_key)
cohorts = get_course_cohorts(course, assignment_type=CourseCohort.RANDOM)
if cohorts:
cohort = local_random().choice(cohorts)
else:
cohort = CourseCohort.create(
cohort_name=DEFAULT_COHORT_NAME,
course_id=course_key,
assignment_type=CourseCohort.RANDOM
).course_user_group
user.course_groups.add(cohort)
return request_cache.data.setdefault(cache_key, cohort)
def migrate_cohort_settings(course):
"""
Migrate all the cohort settings associated with this course from modulestore to mysql.
After that we will never touch modulestore for any cohort related settings.
"""
cohort_settings, created = CourseCohortsSettings.objects.get_or_create(
course_id=course.id,
defaults={
'is_cohorted': course.is_cohorted,
'cohorted_discussions': list(course.cohorted_discussions),
'always_cohort_inline_discussions': course.always_cohort_inline_discussions
}
)
# Add the new and update the existing cohorts
if created:
# Update the manual cohorts already present in CourseUserGroup
manual_cohorts = CourseUserGroup.objects.filter(
course_id=course.id,
group_type=CourseUserGroup.COHORT
).exclude(name__in=course.auto_cohort_groups)
for cohort in manual_cohorts:
CourseCohort.create(course_user_group=cohort)
for group_name in course.auto_cohort_groups:
CourseCohort.create(cohort_name=group_name, course_id=course.id, assignment_type=CourseCohort.RANDOM)
return cohort_settings
def get_course_cohorts(course, assignment_type=None):
"""
Get a list of all the cohorts in the given course. This will include auto cohorts,
regardless of whether or not the auto cohorts include any users.
Arguments:
course: the course for which cohorts should be returned
assignment_type: cohort assignment type
Returns:
A list of CourseUserGroup objects. Empty if there are no cohorts. Does
not check whether the course is cohorted.
"""
# Migrate cohort settings for this course
migrate_cohort_settings(course)
query_set = CourseUserGroup.objects.filter(
course_id=course.location.course_key,
group_type=CourseUserGroup.COHORT
)
query_set = query_set.filter(cohort__assignment_type=assignment_type) if assignment_type else query_set
return list(query_set)
### Helpers for cohort management views
def get_cohort_by_name(course_key, name):
"""
Return the CourseUserGroup object for the given cohort. Raises DoesNotExist
if it isn't present.
"""
return CourseUserGroup.objects.get(
course_id=course_key,
group_type=CourseUserGroup.COHORT,
name=name
)
def get_cohort_by_id(course_key, cohort_id):
"""
Return the CourseUserGroup object for the given cohort. Raises DoesNotExist
if it isn't present. Uses the course_key for extra validation.
"""
return CourseUserGroup.objects.get(
course_id=course_key,
group_type=CourseUserGroup.COHORT,
id=cohort_id
)
def add_cohort(course_key, name, assignment_type):
"""
Add a cohort to a course. Raises ValueError if a cohort of the same name already
exists.
"""
log.debug("Adding cohort %s to %s", name, course_key)
if is_cohort_exists(course_key, name):
raise ValueError(_("You cannot create two cohorts with the same name"))
try:
course = courses.get_course_by_id(course_key)
except Http404:
raise ValueError("Invalid course_key")
cohort = CourseCohort.create(
cohort_name=name,
course_id=course.id,
assignment_type=assignment_type
).course_user_group
tracker.emit(
"edx.cohort.creation_requested",
{"cohort_name": cohort.name, "cohort_id": cohort.id}
)
return cohort
def is_cohort_exists(course_key, name):
"""
Check if a cohort already exists.
"""
return CourseUserGroup.objects.filter(course_id=course_key, group_type=CourseUserGroup.COHORT, name=name).exists()
def add_user_to_cohort(cohort, username_or_email):
"""
Look up the given user, and if successful, add them to the specified cohort.
Arguments:
cohort: CourseUserGroup
username_or_email: string. Treated as email if has '@'
Returns:
Tuple of User object and string (or None) indicating previous cohort
Raises:
User.DoesNotExist if the user can't be found.
ValueError if user already present in this cohort.
"""
user = get_user_by_username_or_email(username_or_email)
previous_cohort_name = None
previous_cohort_id = None
course_cohorts = CourseUserGroup.objects.filter(
course_id=cohort.course_id,
users__id=user.id,
group_type=CourseUserGroup.COHORT
)
if course_cohorts.exists():
if course_cohorts[0] == cohort:
raise ValueError("User {user_name} already present in cohort {cohort_name}".format(
user_name=user.username,
cohort_name=cohort.name
))
else:
previous_cohort = course_cohorts[0]
previous_cohort.users.remove(user)
previous_cohort_name = previous_cohort.name
previous_cohort_id = previous_cohort.id
tracker.emit(
"edx.cohort.user_add_requested",
{
"user_id": user.id,
"cohort_id": cohort.id,
"cohort_name": cohort.name,
"previous_cohort_id": previous_cohort_id,
"previous_cohort_name": previous_cohort_name,
}
)
cohort.users.add(user)
return (user, previous_cohort_name)
def get_group_info_for_cohort(cohort, use_cached=False):
"""
Get the ids of the group and partition to which this cohort has been linked
as a tuple of (int, int).
If the cohort has not been linked to any group/partition, both values in the
tuple will be None.
The partition group info is cached for the duration of a request. Pass
use_cached=True to use the cached value instead of fetching from the
database.
"""
request_cache = RequestCache.get_request_cache()
cache_key = u"cohorts.get_group_info_for_cohort.{}".format(cohort.id)
if use_cached and cache_key in request_cache.data:
return request_cache.data[cache_key]
request_cache.data.pop(cache_key, None)
try:
partition_group = CourseUserGroupPartitionGroup.objects.get(course_user_group=cohort)
return request_cache.data.setdefault(cache_key, (partition_group.group_id, partition_group.partition_id))
except CourseUserGroupPartitionGroup.DoesNotExist:
pass
return request_cache.data.setdefault(cache_key, (None, None))
def set_assignment_type(user_group, assignment_type):
"""
Set assignment type for cohort.
"""
course_cohort = user_group.cohort
if is_default_cohort(user_group) and course_cohort.assignment_type != assignment_type:
raise ValueError(_("There must be one cohort to which students can automatically be assigned."))
course_cohort.assignment_type = assignment_type
course_cohort.save()
def get_assignment_type(user_group):
"""
Get assignment type for cohort.
"""
course_cohort = user_group.cohort
return course_cohort.assignment_type
def is_default_cohort(user_group):
"""
Check if a cohort is default.
"""
random_cohorts = CourseUserGroup.objects.filter(
course_id=user_group.course_id,
group_type=CourseUserGroup.COHORT,
cohort__assignment_type=CourseCohort.RANDOM
)
return len(random_cohorts) == 1 and random_cohorts[0].name == user_group.name
def set_course_cohort_settings(course_key, **kwargs):
"""
Set cohort settings for a course.
Arguments:
course_key: CourseKey
is_cohorted (bool): If the course should be cohorted.
always_cohort_inline_discussions (bool): If inline discussions should always be cohorted.
cohorted_discussions (list): List of discussion ids.
Returns:
A CourseCohortSettings object.
Raises:
Http404 if course_key is invalid.
"""
fields = {'is_cohorted': bool, 'always_cohort_inline_discussions': bool, 'cohorted_discussions': list}
course_cohort_settings = get_course_cohort_settings(course_key)
for field, field_type in fields.items():
if field in kwargs:
if not isinstance(kwargs[field], field_type):
raise ValueError("Incorrect field type for `{}`. Type must be `{}`".format(field, field_type.__name__))
setattr(course_cohort_settings, field, kwargs[field])
course_cohort_settings.save()
return course_cohort_settings
def get_course_cohort_settings(course_key):
"""
Return cohort settings for a course.
Arguments:
course_key: CourseKey
Returns:
A CourseCohortSettings object.
Raises:
Http404 if course_key is invalid.
"""
try:
course_cohort_settings = CourseCohortsSettings.objects.get(course_id=course_key)
except CourseCohortsSettings.DoesNotExist:
course = courses.get_course_by_id(course_key)
course_cohort_settings = migrate_cohort_settings(course)
return course_cohort_settings
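# Illustrative sketch (not part of this module; the Django imports above mean
# it is not runnable standalone). The field/type validation pattern used by
# set_course_cohort_settings(), reduced to plain Python:
#
#     fields = {'is_cohorted': bool, 'cohorted_discussions': list}
#     kwargs = {'is_cohorted': 'yes'}  # wrong type on purpose
#     for field, field_type in fields.items():
#         if field in kwargs and not isinstance(kwargs[field], field_type):
#             raise ValueError(
#                 "Incorrect field type for `{}`. Type must be `{}`".format(
#                     field, field_type.__name__))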
|
stackforge/tacker | refs/heads/master | tacker/api/validation/__init__.py | 2 | # Copyright (C) 2019 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Request Body validating middleware.
"""
import functools
import webob
from tacker.api.validation import validators
from tacker.common import exceptions
def schema(request_body_schema):
"""Register a schema to validate request body.
Registered schema will be used for validating request body just before
the API method executes.
:param dict request_body_schema: a schema to validate request body
"""
def add_validator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
schema_validator = validators._SchemaValidator(
request_body_schema)
try:
schema_validator.validate(kwargs['body'])
except KeyError:
raise webob.exc.HTTPBadRequest(
explanation=_("Malformed request body"))
return func(*args, **kwargs)
return wrapper
return add_validator
def query_schema(query_params_schema):
"""Register a schema to validate request query parameters.
Registered schema will be used for validating request query params just
before API method executing.
:param query_params_schema: A dict, the JSON-Schema for validating the
query parameters.
"""
def add_validator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
# NOTE(tpatil): The second argument of the method
# calling this method should always be 'request'.
if 'request' in kwargs:
req = kwargs['request']
else:
req = args[1]
try:
req.GET.dict_of_lists()
except UnicodeDecodeError:
msg = _('Query string is not UTF-8 encoded')
raise exceptions.ValidationError(msg)
query_opts = {}
query_opts.update(req.GET)
schema_validator = validators._SchemaValidator(
query_params_schema)
schema_validator.validate(query_opts)
return func(*args, **kwargs)
return wrapper
return add_validator
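# Usage sketch (assumption, not part of the original file): attaching the
# schema decorator to a controller method so the body is validated before
# the method runs. The schema and controller names below are illustrative.
#
#     example_schema = {
#         'type': 'object',
#         'properties': {'name': {'type': 'string'}},
#         'required': ['name'],
#         'additionalProperties': False,
#     }
#
#     class ExampleController(object):
#         @schema(example_schema)
#         def create(self, request, body):
#             return body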
|
yephper/django | refs/heads/master | tests/postgres_tests/array_index_migrations/__init__.py | 12133432 | |
dittos/graphqllib | refs/heads/master | tests_py35/__init__.py | 12133432 | |
bingopodcast/bingos | refs/heads/master | bingo_emulator/bahama_beach/__init__.py | 12133432 | |
omaraboumrad/djanground | refs/heads/master | backend/dryorm/routing.py | 1 | from channels.routing import route
from dryorm.consumers import ws_message
channel_routing = [
route('websocket.receive', ws_message),
]
|
langyapojun/shadowsocks | refs/heads/master | tests/test_udp_src.py | 1009 | #!/usr/bin/python
import socket
import socks
SERVER_IP = '127.0.0.1'
SERVER_PORT = 1081
if __name__ == '__main__':
# Test 1: same source port IPv4
sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT)
sock_out.bind(('127.0.0.1', 9000))
sock_in1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_in2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_in1.bind(('127.0.0.1', 9001))
sock_in2.bind(('127.0.0.1', 9002))
sock_out.sendto(b'data', ('127.0.0.1', 9001))
result1 = sock_in1.recvfrom(8)
sock_out.sendto(b'data', ('127.0.0.1', 9002))
result2 = sock_in2.recvfrom(8)
sock_out.close()
sock_in1.close()
sock_in2.close()
# make sure they're from the same source port
assert result1 == result2
# Test 2: same source port IPv6
# try again from the same port but IPv6
sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT)
sock_out.bind(('127.0.0.1', 9000))
sock_in1 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_in2 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_in1.bind(('::1', 9001))
sock_in2.bind(('::1', 9002))
sock_out.sendto(b'data', ('::1', 9001))
result1 = sock_in1.recvfrom(8)
sock_out.sendto(b'data', ('::1', 9002))
result2 = sock_in2.recvfrom(8)
sock_out.close()
sock_in1.close()
sock_in2.close()
# make sure they're from the same source port
assert result1 == result2
# Test 3: different source ports IPv6
sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT)
sock_out.bind(('127.0.0.1', 9003))
sock_in1 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_in1.bind(('::1', 9001))
sock_out.sendto(b'data', ('::1', 9001))
result3 = sock_in1.recvfrom(8)
# make sure they're from different source ports
assert result1 != result3
sock_out.close()
sock_in1.close()
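# Note (illustrative, not upstream): taken together, the three assertions
# check full cone NAT behaviour of the UDP relay -- one client source port is
# always relayed from one mapped server port, across IPv4 and IPv6 targets,
# while a different client source port yields a different mapping.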
|
jiangzhuo/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/ctypes/test/test_slicing.py | 85 | import unittest
from ctypes import *
from ctypes.test import need_symbol
import _ctypes_test
class SlicesTestCase(unittest.TestCase):
def test_getslice_cint(self):
a = (c_int * 100)(*range(1100, 1200))
b = list(range(1100, 1200))
self.assertEqual(a[0:2], b[0:2])
self.assertEqual(a[0:2:], b[0:2:])
self.assertEqual(len(a), len(b))
self.assertEqual(a[5:7], b[5:7])
self.assertEqual(a[5:7:], b[5:7:])
self.assertEqual(a[-1], b[-1])
self.assertEqual(a[:], b[:])
self.assertEqual(a[::], b[::])
self.assertEqual(a[10::-1], b[10::-1])
self.assertEqual(a[30:20:-1], b[30:20:-1])
self.assertEqual(a[:12:6], b[:12:6])
self.assertEqual(a[2:6:4], b[2:6:4])
a[0:5] = range(5, 10)
self.assertEqual(a[0:5], list(range(5, 10)))
self.assertEqual(a[0:5:], list(range(5, 10)))
self.assertEqual(a[4::-1], list(range(9, 4, -1)))
def test_setslice_cint(self):
a = (c_int * 100)(*range(1100, 1200))
b = list(range(1100, 1200))
a[32:47] = list(range(32, 47))
self.assertEqual(a[32:47], list(range(32, 47)))
a[32:47] = range(132, 147)
self.assertEqual(a[32:47:], list(range(132, 147)))
a[46:31:-1] = range(232, 247)
self.assertEqual(a[32:47:1], list(range(246, 231, -1)))
a[32:47] = range(1132, 1147)
self.assertEqual(a[:], b)
a[32:47:7] = range(3)
b[32:47:7] = range(3)
self.assertEqual(a[:], b)
a[33::-3] = range(12)
b[33::-3] = range(12)
self.assertEqual(a[:], b)
from operator import setitem
# TypeError: int expected instead of str instance
self.assertRaises(TypeError, setitem, a, slice(0, 5), "abcde")
# TypeError: int expected instead of str instance
self.assertRaises(TypeError, setitem, a, slice(0, 5),
["a", "b", "c", "d", "e"])
# TypeError: int expected instead of float instance
self.assertRaises(TypeError, setitem, a, slice(0, 5),
[1, 2, 3, 4, 3.14])
# ValueError: Can only assign sequence of same size
self.assertRaises(ValueError, setitem, a, slice(0, 5), range(32))
def test_char_ptr(self):
s = b"abcdefghijklmnopqrstuvwxyz"
dll = CDLL(_ctypes_test.__file__)
dll.my_strdup.restype = POINTER(c_char)
dll.my_free.restype = None
res = dll.my_strdup(s)
self.assertEqual(res[:len(s)], s)
self.assertEqual(res[:3], s[:3])
self.assertEqual(res[:len(s):], s)
self.assertEqual(res[len(s)-1:-1:-1], s[::-1])
self.assertEqual(res[len(s)-1:5:-7], s[:5:-7])
self.assertEqual(res[0:-1:-1], s[0::-1])
import operator
self.assertRaises(ValueError, operator.getitem,
res, slice(None, None, None))
self.assertRaises(ValueError, operator.getitem,
res, slice(0, None, None))
self.assertRaises(ValueError, operator.getitem,
res, slice(None, 5, -1))
self.assertRaises(ValueError, operator.getitem,
res, slice(-5, None, None))
self.assertRaises(TypeError, operator.setitem,
res, slice(0, 5), "abcde")
dll.my_free(res)
dll.my_strdup.restype = POINTER(c_byte)
res = dll.my_strdup(s)
self.assertEqual(res[:len(s)], list(range(ord("a"), ord("z")+1)))
self.assertEqual(res[:len(s):], list(range(ord("a"), ord("z")+1)))
dll.my_free(res)
def test_char_ptr_with_free(self):
dll = CDLL(_ctypes_test.__file__)
s = b"abcdefghijklmnopqrstuvwxyz"
class allocated_c_char_p(c_char_p):
pass
dll.my_free.restype = None
def errcheck(result, func, args):
retval = result.value
dll.my_free(result)
return retval
dll.my_strdup.restype = allocated_c_char_p
dll.my_strdup.errcheck = errcheck
try:
res = dll.my_strdup(s)
self.assertEqual(res, s)
finally:
del dll.my_strdup.errcheck
def test_char_array(self):
s = b"abcdefghijklmnopqrstuvwxyz\0"
p = (c_char * 27)(*s)
self.assertEqual(p[:], s)
self.assertEqual(p[::], s)
self.assertEqual(p[::-1], s[::-1])
self.assertEqual(p[5::-2], s[5::-2])
self.assertEqual(p[2:5:-3], s[2:5:-3])
@need_symbol('c_wchar')
def test_wchar_ptr(self):
s = "abcdefghijklmnopqrstuvwxyz\0"
dll = CDLL(_ctypes_test.__file__)
dll.my_wcsdup.restype = POINTER(c_wchar)
dll.my_wcsdup.argtypes = POINTER(c_wchar),
dll.my_free.restype = None
res = dll.my_wcsdup(s)
self.assertEqual(res[:len(s)], s)
self.assertEqual(res[:len(s):], s)
self.assertEqual(res[len(s)-1:-1:-1], s[::-1])
self.assertEqual(res[len(s)-1:5:-7], s[:5:-7])
import operator
self.assertRaises(TypeError, operator.setitem,
res, slice(0, 5), "abcde")
dll.my_free(res)
if sizeof(c_wchar) == sizeof(c_short):
dll.my_wcsdup.restype = POINTER(c_short)
elif sizeof(c_wchar) == sizeof(c_int):
dll.my_wcsdup.restype = POINTER(c_int)
elif sizeof(c_wchar) == sizeof(c_long):
dll.my_wcsdup.restype = POINTER(c_long)
else:
self.skipTest('Pointers to c_wchar are not supported')
res = dll.my_wcsdup(s)
tmpl = list(range(ord("a"), ord("z")+1))
self.assertEqual(res[:len(s)-1], tmpl)
self.assertEqual(res[:len(s)-1:], tmpl)
self.assertEqual(res[len(s)-2:-1:-1], tmpl[::-1])
self.assertEqual(res[len(s)-2:5:-7], tmpl[:5:-7])
dll.my_free(res)
################################################################
if __name__ == "__main__":
unittest.main()
|
ksanchezcld/volatility | refs/heads/master | volatility/plugins/overlays/native_types.py | 58 | import copy
## The following is a conversion of basic C99 types to python struct
## format strings. NOTE: since volatility is analysing images which
## are not necessarily the same bit size as the currently running
## platform you may not use platform specific format specifiers here
## like l or L - you must use i or I.
x86_native_types = {
'int' : [4, '<i'],
'long': [4, '<i'],
'unsigned long' : [4, '<I'],
'unsigned int' : [4, '<I'],
'address' : [4, '<I'],
'char' : [1, '<c'],
'unsigned char' : [1, '<B'],
'unsigned short int' : [2, '<H'],
'unsigned short' : [2, '<H'],
'unsigned be short' : [2, '>H'],
'short' : [2, '<h'],
'long long' : [8, '<q'],
'unsigned long long' : [8, '<Q'],
}
x64_native_types = copy.deepcopy(x86_native_types)
x64_native_types['address'] = [8, '<Q']
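# Illustrative sketch (not part of the upstream file): the [size, format]
# pairs above are meant to be consumed with the struct module.
if __name__ == '__main__':
    import struct
    size, fmt = x86_native_types['unsigned int']
    packed = struct.pack(fmt, 0xDEADBEEF)
    assert len(packed) == size
    print(struct.unpack(fmt, packed)[0])  # -> 3735928559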
|
polyval/CNC | refs/heads/master | flask/Lib/site-packages/flask_wtf/form.py | 89 | # coding: utf-8
import werkzeug.datastructures
from jinja2 import Markup, escape
from flask import request, session, current_app
from wtforms.fields import HiddenField
from wtforms.widgets import HiddenInput
from wtforms.validators import ValidationError
from wtforms.ext.csrf.form import SecureForm
from ._compat import text_type, string_types
from .csrf import generate_csrf, validate_csrf
try:
from .i18n import translations
except ImportError:
translations = None # babel not installed
class _Auto():
'''Placeholder for unspecified variables that should be set to defaults.
Used when None is a valid option and should not be replaced by a default.
'''
pass
def _is_hidden(field):
"""Detect if the field is hidden."""
if isinstance(field, HiddenField):
return True
if isinstance(field.widget, HiddenInput):
return True
return False
class Form(SecureForm):
"""
Flask-specific subclass of WTForms **SecureForm** class.
If formdata is not specified, this will use flask.request.form.
Explicitly pass formdata = None to prevent this.
:param csrf_context: a session or dict-like object to use when making
CSRF tokens. Default: flask.session.
:param secret_key: a secret key for building CSRF tokens. If this isn't
specified, the form will take the first of these
that is defined:
* SECRET_KEY attribute on this class
* WTF_CSRF_SECRET_KEY config of flask app
* SECRET_KEY config of flask app
* session secret key
:param csrf_enabled: whether to use CSRF protection. If False, all
csrf behavior is suppressed.
Default: WTF_CSRF_ENABLED config value
"""
SECRET_KEY = None
TIME_LIMIT = None
def __init__(self, formdata=_Auto, obj=None, prefix='', csrf_context=None,
secret_key=None, csrf_enabled=None, *args, **kwargs):
if csrf_enabled is None:
csrf_enabled = current_app.config.get('WTF_CSRF_ENABLED', True)
self.csrf_enabled = csrf_enabled
if formdata is _Auto:
if self.is_submitted():
formdata = request.form
if request.files:
formdata = formdata.copy()
formdata.update(request.files)
elif request.json:
formdata = werkzeug.datastructures.MultiDict(request.json)
else:
formdata = None
if self.csrf_enabled:
if csrf_context is None:
csrf_context = session
if secret_key is None:
# It wasn't passed in, check if the class has a SECRET_KEY
secret_key = getattr(self, "SECRET_KEY", None)
self.SECRET_KEY = secret_key
else:
csrf_context = {}
self.SECRET_KEY = ''
super(Form, self).__init__(formdata, obj, prefix,
csrf_context=csrf_context,
*args, **kwargs)
def generate_csrf_token(self, csrf_context=None):
if not self.csrf_enabled:
return None
return generate_csrf(self.SECRET_KEY, self.TIME_LIMIT)
def validate_csrf_token(self, field):
if not self.csrf_enabled:
return True
if hasattr(request, 'csrf_valid') and request.csrf_valid:
# this is validated by CsrfProtect
return True
if not validate_csrf(field.data, self.SECRET_KEY, self.TIME_LIMIT):
raise ValidationError(field.gettext('CSRF token missing'))
def validate_csrf_data(self, data):
"""Check if the csrf data is valid.
.. versionadded: 0.9.0
:param data: the csrf string to be validated.
"""
return validate_csrf(data, self.SECRET_KEY, self.TIME_LIMIT)
def is_submitted(self):
"""
Checks if form has been submitted. The default case is if the HTTP
method is **PUT** or **POST**.
"""
return request and request.method in ("PUT", "POST")
def hidden_tag(self, *fields):
"""
Wraps hidden fields in a hidden DIV tag, in order to keep XHTML
compliance.
.. versionadded:: 0.3
:param fields: list of hidden field names. If not provided will render
all hidden fields, including the CSRF field.
"""
if not fields:
fields = [f for f in self if _is_hidden(f)]
name = current_app.config.get('WTF_HIDDEN_TAG', 'div')
attrs = current_app.config.get(
'WTF_HIDDEN_TAG_ATTRS', {'style': 'display:none;'})
tag_attrs = u' '.join(
u'%s="%s"' % (escape(k), escape(v)) for k, v in attrs.items())
tag_start = u'<%s %s>' % (escape(name), tag_attrs)
tag_end = u'</%s>' % escape(name)
rv = [tag_start]
for field in fields:
if isinstance(field, string_types):
field = getattr(self, field)
rv.append(text_type(field))
rv.append(tag_end)
return Markup(u"".join(rv))
def validate_on_submit(self):
"""
Checks if form has been submitted and if so runs validate. This is
a shortcut, equivalent to ``form.is_submitted() and form.validate()``
"""
return self.is_submitted() and self.validate()
def _get_translations(self):
if not current_app.config.get('WTF_I18N_ENABLED', True):
return None
return translations
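if __name__ == '__main__':
    # Usage sketch (not part of the upstream module); the field, route and
    # secret key below are illustrative only.
    from flask import Flask
    from wtforms import StringField
    from wtforms.validators import DataRequired

    class NameForm(Form):
        name = StringField('name', validators=[DataRequired()])

    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'dev'

    @app.route('/', methods=['GET', 'POST'])
    def index():
        form = NameForm()
        if form.validate_on_submit():
            return form.name.data
        return form.hidden_tag()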
|
dreki/GitSavvy | refs/heads/master | core/interfaces/rebase.py | 4 | import os
import sublime
from sublime_plugin import WindowCommand, TextCommand
from ...common import ui
from ..git_command import GitCommand
from ...common import util
class GsShowRebaseCommand(WindowCommand, GitCommand):
"""
Open a status view for the active git repository.
"""
def run(self):
RebaseInterface(repo_path=self.repo_path)
class RebaseInterface(ui.Interface, GitCommand):
"""
Status dashboard.
"""
interface_type = "rebase"
read_only = True
syntax_file = "Packages/GitSavvy/syntax/rebase.tmLanguage"
word_wrap = False
CARET = "▸"
SUCCESS = "✔"
CONFLICT = "✕"
UNKNOWN = "·"
template = """\
REBASE: {active_branch} --> {base_ref} ({base_commit})
STATUS: {status}
┳ ({base_commit})
┃
{diverged_commits}
┃
┻
** All actions take immediate effect, but can be undone. **
######################## ############
## MANIPULATE COMMITS ## ## REBASE ##
######################## ############
[q] squash commit with next [f] define base ref for dashboard
[Q] squash all commits [r] rebase branch on top of...
[e] edit commit message [c] continue rebase
[d] move commit down (after next) [k] skip commit during rebase
[u] move commit up (before previous) [A] abort rebase
[w] show commit
[{super_key}-Z] undo previous action
[{super_key}-Y] redo action
{conflicts_bindings}
-
"""
conflicts_keybindings = """
###############
## CONFLICTS ##
###############
[o] open file
[s] stage file in current state
[y] use version from your commit
[b] use version from new base
[M] launch external merge tool
"""
separator = "\n ┃\n"
commit = " {caret} {status} {commit_hash} {commit_summary}{conflicts}"
conflict = " ┃ conflict: {path}"
_base_commit = None
def __init__(self, *args, **kwargs):
self.conflicts_keybindings = \
"\n".join(line[2:] for line in self.conflicts_keybindings.split("\n"))
super().__init__(*args, **kwargs)
def title(self):
return "REBASE: {}".format(os.path.basename(self.repo_path))
def pre_render(self):
self._in_rebase = self.in_rebase()
self.view.settings().set("git_savvy.in_rebase", self._in_rebase)
cached_pre_rebase_state = self.view.settings().get("git_savvy.rebase_in_progress")
if cached_pre_rebase_state:
branch_state, target_branch = cached_pre_rebase_state
self.complete_action(
branch_state,
True,
"rebased on top of {}".format(target_branch)
)
self.view.settings().set("git_savvy.rebase_in_progress", None)
@ui.partial("active_branch")
def render_active_branch(self):
return (self.rebase_branch_name()
if self._in_rebase else
self.get_current_branch_name())
@ui.partial("base_ref")
def render_base_ref(self):
return self.base_ref()
@ui.partial("base_commit")
def render_base_commit(self):
return self.base_commit()[:7]
@ui.partial("status")
def render_status(self):
if self._in_rebase:
return "Rebase halted due to CONFLICT."
log = self.view.settings().get("git_savvy.rebase_log") or []
log_len = len(log)
saved_cursor = self.view.settings().get("git_savvy.rebase_log_cursor")
cursor = saved_cursor if saved_cursor is not None else log_len - 1
if cursor < 0 and log_len > 0:
return "Redo available."
try:
cursor_entry = log[cursor]
except IndexError:
return "Ready."
if cursor == log_len - 1:
return "Successfully {}. Undo available.".format(cursor_entry["description"])
return "Successfully {}. Undo/redo available.".format(cursor_entry["description"])
@ui.partial("diverged_commits")
def render_diverged_commits(self):
commits_info = self.get_diverged_commits_info(
start=self.base_commit(),
end=self.rebase_orig_head() if self._in_rebase else "HEAD"
)
return self.separator.join(self.commit.format(**commit_info) for commit_info in commits_info)
@ui.partial("super_key")
def render_super_key(self):
return util.super_key
def get_diverged_commits_info(self, start, end):
self.entries = self.log(start_end=(start, end), reverse=True)
return (self._get_diverged_in_rebase()
if self._in_rebase else
self._get_diverged_outside_rebase())
def _get_diverged_in_rebase(self):
conflict_commit = self.rebase_conflict_at()
rewritten = dict(self.rebase_rewritten())
commits_info = []
for entry in self.entries:
was_rewritten = entry.long_hash in rewritten
new_hash = rewritten[entry.long_hash][:7] if was_rewritten else None
is_conflict = entry.long_hash == conflict_commit
if is_conflict:
conflict_paths = self._get_conflicts_in_rebase()
conflicts = (
"" if not conflict_paths else
"\n" + "\n".join(" ┃ ! {}".format(file_path)
for file_path in conflict_paths)
)
commits_info.append({
"caret": self.CARET if is_conflict else " ",
"status": (self.SUCCESS if was_rewritten else
self.CONFLICT if is_conflict else
self.UNKNOWN),
"commit_hash": new_hash if was_rewritten else entry.short_hash,
"commit_summary": ("(was {}) {}".format(entry.short_hash, entry.summary)
if was_rewritten else
entry.summary),
"conflicts": conflicts if is_conflict else ""
})
return commits_info
def _get_conflicts_in_rebase(self):
return [
entry.path
for entry in self.get_status()
if entry.index_status == "U" and entry.working_status == "U"
]
def _get_diverged_outside_rebase(self):
return [{"caret": " ",
"status": self.UNKNOWN,
"commit_hash": entry.short_hash,
"commit_summary": entry.summary,
"conflicts": ""}
for entry in self.entries]
@ui.partial("conflicts_bindings")
def render_conflicts_bindings(self):
return self.conflicts_keybindings if self._in_rebase else ""
def base_ref(self):
base_ref = self.view.settings().get("git_savvy.rebase.base_ref")
if not base_ref:
project_settings = sublime.active_window().project_data().get('settings', {})
base_ref = project_settings.get("rebase_default_base_ref", "master")
branches = list(self.get_branches())
# Check that the base_ref we return is a valid branch
if base_ref not in [branch.name_with_remote for branch in branches]:
# base_ref isn't a valid branch, so we'll try to pick a sensible alternative
local_branches = [branch for branch in branches if not branch.remote]
inactive_local_branches = [branch for branch in local_branches if not branch.active]
if inactive_local_branches:
base_ref = inactive_local_branches[0].name_with_remote
elif local_branches:
base_ref = local_branches[0].name_with_remote
else:
base_ref = "HEAD"
self.view.settings().set("git_savvy.rebase.base_ref", base_ref)
return base_ref
def base_commit(self):
if self._in_rebase:
return self.rebase_onto_commit()
base_ref = self.base_ref()
self._base_commit = self.git("merge-base", "HEAD", base_ref).strip()
return self._base_commit
def get_branch_ref(self, branch_name):
stdout = self.git("show-ref", "refs/heads/" + branch_name)
return stdout.strip().split(" ")[0]
def get_branch_state(self):
branch_name = self.get_current_branch_name()
ref = self.get_branch_ref(branch_name)
return branch_name, ref
def complete_action(self, branch_state, success, description):
log = self.view.settings().get("git_savvy.rebase_log") or []
cursor = self.view.settings().get("git_savvy.rebase_log_cursor") or (len(log) - 1)
log = log[:cursor+1]
branch_name, ref_before = branch_state
log.append({
"description": description,
"branch_name": branch_name,
"ref_before": ref_before,
"ref_after": self.get_branch_ref(branch_name),
"success": success
})
cursor = len(log) - 1
self.set_log(log, cursor)
def get_log(self):
settings = self.view.settings()
return settings.get("git_savvy.rebase_log"), settings.get("git_savvy.rebase_log_cursor")
def set_log(self, log, cursor):
self.view.settings().set("git_savvy.rebase_log", log)
self.view.settings().set("git_savvy.rebase_log_cursor", cursor)
class GsRebaseUndoCommand(TextCommand, GitCommand):
"""
Revert branch HEAD to point to commit prior to previous action.
"""
def run(self, edit):
self.interface = ui.get_interface(self.view.id())
sublime.set_timeout_async(self.run_async, 0)
def run_async(self):
log, cursor = self.interface.get_log()
if log is None or cursor is None or cursor == -1:
return
branch_name, ref = self.interface.get_branch_state()
current = log[cursor]
if current["branch_name"] != branch_name:
sublime.error_message("Current branch does not match expected. Cannot undo.")
return
try:
self.checkout_ref(current["ref_before"])
self.git("branch", "-f", branch_name, "HEAD")
cursor -= 1
except Exception as e:
sublime.error_message("Error encountered. Cannot undo.")
raise e
finally:
self.checkout_ref(branch_name)
self.interface.set_log(log, cursor)
util.view.refresh_gitsavvy(self.view)
class GsRebaseRedoCommand(TextCommand, GitCommand):
"""
If an undo action was taken, set branch HEAD to point to commit of
un-done action.
"""
def run(self, edit):
self.interface = ui.get_interface(self.view.id())
sublime.set_timeout_async(self.run_async, 0)
def run_async(self):
log, cursor = self.interface.get_log()
if log is None or cursor is None or cursor == len(log) - 1:
return
branch_name, ref = self.interface.get_branch_state()
undone_action = log[cursor+1]
if undone_action["branch_name"] != branch_name:
sublime.error_message("Current branch does not match expected. Cannot redo.")
return
try:
self.checkout_ref(undone_action["ref_after"])
self.git("branch", "-f", branch_name, "HEAD")
cursor += 1
except Exception as e:
sublime.error_message("Error encountered. Cannot redo.")
raise e
finally:
self.checkout_ref(branch_name)
self.interface.set_log(log, cursor)
util.view.refresh_gitsavvy(self.view)
ui.register_listeners(RebaseInterface)
class RewriteBase(TextCommand, GitCommand):
"""
Base class for all commit manipulation actions.
"""
def run(self, edit):
self.interface = ui.get_interface(self.view.id())
sublime.set_timeout_async(self.run_async, 0)
def get_selected_short_hash(self):
sels = self.view.sel()
if len(sels) > 1 or not sels or sels[0].a != sels[0].b:
return
line = self.view.line(sels[0])
line_str = self.view.substr(line)
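# Entry lines are fixed-width; the 7-character short hash occupies columns 7-13.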
return line_str[7:14]
def make_changes(self, commit_chain, description):
branch_state = self.interface.get_branch_state()
success = True
try:
self.rewrite_active_branch(
base_commit=self.interface.base_commit(),
commit_chain=commit_chain
)
except Exception as e:
success = False
raise e
finally:
self.interface.complete_action(branch_state, success, description)
util.view.refresh_gitsavvy(self.view)
class GsRebaseSquashCommand(RewriteBase):
def run_async(self):
short_hash = self.get_selected_short_hash()
if not short_hash:
return
# Cannot squash last commit.
if self.interface.entries[-1].short_hash == short_hash:
sublime.status_message("Unable to squash most recent commit.")
return
# Generate identical change templates with author/date metadata
# intact. In case of commit-to-squash, indicate that the changes
# should be rolled over into the next change's commit.
commit_chain = [
self.ChangeTemplate(orig_hash=entry.long_hash,
do_commit=entry.short_hash != short_hash,
msg=entry.raw_body,
datetime=entry.datetime,
author="{} <{}>".format(entry.author, entry.email))
for entry in self.interface.entries
]
# Take the commit message from the commit-to-squash and append
# it to the next commit's message.
for idx, commit in enumerate(commit_chain):
if not commit.do_commit:
commit_chain[idx+1].msg += "\n\n" + commit.msg
commit.msg = None
self.make_changes(commit_chain, "squashed " + short_hash)
class GsRebaseSquashAllCommand(RewriteBase):
def run_async(self):
# Generate identical change templates with author/date metadata
# intact. However, set do_commit to false for all but the last change,
# in order for diffs to be rolled into that final commit.
last_commit_idx = len(self.interface.entries) - 1
commit_chain = [
self.ChangeTemplate(orig_hash=entry.long_hash,
do_commit=idx == last_commit_idx,
msg=entry.raw_body,
datetime=entry.datetime,
author="{} <{}>".format(entry.author, entry.email))
for idx, entry in enumerate(self.interface.entries)
]
# Take the commit message from the commit-to-squash and append
# it to the next commit's message.
for idx, commit in enumerate(commit_chain):
if not commit.do_commit:
commit_chain[idx+1].msg += "\n\n" + commit.msg
commit.msg = None
self.make_changes(commit_chain, "squashed all commits")
class GsRebaseEditCommand(RewriteBase):
def run(self, edit):
self.interface = ui.get_interface(self.view.id())
short_hash = self.get_selected_short_hash()
for entry in self.interface.entries:
if entry.short_hash == short_hash:
break
else:
return
ui.EditView(content=entry.raw_body,
repo_path=self.repo_path,
window=self.view.window(),
on_done=lambda commit_msg: self.do_edit(entry, commit_msg))
def do_edit(self, entry_to_edit, commit_msg):
# Generate identical change templates with author/date metadata
# intact. For the edited entry, replace the message with
# the content from the temporary edit view.
commit_chain = [
self.ChangeTemplate(orig_hash=entry.long_hash,
do_commit=True,
msg=commit_msg if entry == entry_to_edit else entry.raw_body,
datetime=entry.datetime,
author="{} <{}>".format(entry.author, entry.email))
for entry in self.interface.entries
]
self.make_changes(commit_chain, "edited " + entry_to_edit.short_hash)
class GsRebaseMoveUpCommand(RewriteBase):
def run_async(self):
short_hash = self.get_selected_short_hash()
if not short_hash:
return
if self.interface.entries[0].short_hash == short_hash:
sublime.status_message("Unable to move first commit up.")
commit_chain = [
self.ChangeTemplate(orig_hash=entry.long_hash,
move=entry.short_hash == short_hash,
do_commit=True,
msg=entry.raw_body,
datetime=entry.datetime,
author="{} <{}>".format(entry.author, entry.email))
for entry in self.interface.entries
]
# Take the change to move and swap it with the one before.
for idx, commit in enumerate(commit_chain):
if commit.move:
commit_chain[idx], commit_chain[idx-1] = commit_chain[idx-1], commit_chain[idx]
break
try:
self.make_changes(commit_chain, "moved " + short_hash + " up")
except Exception:
sublime.message_dialog("Unable to move commit, most likely due to a conflict.")
class GsRebaseMoveDownCommand(RewriteBase):
def run_async(self):
short_hash = self.get_selected_short_hash()
if not short_hash:
return
if self.interface.entries[-1].short_hash == short_hash:
sublime.status_message("Unable to move last commit down.")
return
commit_chain = [
self.ChangeTemplate(orig_hash=entry.long_hash,
move=entry.short_hash == short_hash,
do_commit=True,
msg=entry.raw_body,
datetime=entry.datetime,
author="{} <{}>".format(entry.author, entry.email))
for entry in self.interface.entries
]
# Take the change to move and swap it with the one following.
for idx, commit in enumerate(commit_chain):
if commit.move:
commit_chain[idx], commit_chain[idx+1] = commit_chain[idx+1], commit_chain[idx]
break
try:
self.make_changes(commit_chain, "moved " + short_hash + " down")
except Exception:
sublime.message_dialog("Unable to move commit, most likely due to a conflict.")
class GsRebaseShowCommitCommand(RewriteBase):
def run_async(self):
short_hash = self.get_selected_short_hash()
if not short_hash:
return
long_hash = None
for entry in self.interface.entries:
if entry.short_hash == short_hash:
long_hash = entry.long_hash
if not long_hash:
return
self.view.window().run_command("gs_show_commit", {"commit_hash": long_hash})
class GsRebaseOpenFileCommand(TextCommand, GitCommand):
def run(self, edit):
sublime.set_timeout_async(self.run_async, 0)
def run_async(self):
sels = self.view.sel()
line_regions = [self.view.line(sel) for sel in sels]
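# Rendered status lines are fixed-width; the file path starts at column 18.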
abs_paths = [os.path.join(self.repo_path, line[18:])
for reg in line_regions
for line in self.view.substr(reg).split("\n") if line]
for path in abs_paths:
self.view.window().open_file(path)
class GsRebaseStageFileCommand(TextCommand, GitCommand):
def run(self, edit):
sublime.set_timeout_async(self.run_async, 0)
def run_async(self):
sels = self.view.sel()
line_regions = [self.view.line(sel) for sel in sels]
paths = (line[18:]
for reg in line_regions
for line in self.view.substr(reg).split("\n") if line)
for path in paths:
self.stage_file(path)
util.view.refresh_gitsavvy(self.view)
class GsRebaseUseCommitVersionCommand(TextCommand, GitCommand):
def run(self, edit):
sublime.set_timeout_async(self.run_async, 0)
def run_async(self):
sels = self.view.sel()
line_regions = [self.view.line(sel) for sel in sels]
paths = (line[18:]
for reg in line_regions
for line in self.view.substr(reg).split("\n") if line)
for path in paths:
self.git("checkout", "--theirs", "--", path)
self.stage_file(path)
util.view.refresh_gitsavvy(self.view)
class GsRebaseUseBaseVersionCommand(TextCommand, GitCommand):
def run(self, edit):
sublime.set_timeout_async(self.run_async, 0)
def run_async(self):
sels = self.view.sel()
line_regions = [self.view.line(sel) for sel in sels]
paths = (line[18:]
for reg in line_regions
for line in self.view.substr(reg).split("\n") if line)
for path in paths:
self.git("checkout", "--ours", "--", path)
self.stage_file(path)
util.view.refresh_gitsavvy(self.view)
class GsRebaseLaunchMergeToolCommand(TextCommand, GitCommand):
def run(self, edit):
sublime.set_timeout_async(self.run_async, 0)
def run_async(self):
sels = self.view.sel()
line_regions = [self.view.line(sel) for sel in sels]
paths = [os.path.join(self.repo_path, line[18:])
for reg in line_regions
for line in self.view.substr(reg).split("\n") if line]
if len(paths) > 1:
sublime.error_message("You can only launch merge tool for a single file at a time.")
return
self.launch_tool_for_file(paths[0])
class GsRebaseDefineBaseRefCommand(TextCommand, GitCommand):
def run(self, edit):
sublime.set_timeout_async(self.run_async, 0)
def run_async(self):
self.entries = [branch.name_with_remote
for branch in self.get_branches()
if not branch.active]
self.view.window().show_quick_panel(
self.entries,
self.on_selection
)
def on_selection(self, idx):
if idx == -1:
return
self.view.settings().set("git_savvy.rebase.base_ref", self.entries[idx])
util.view.refresh_gitsavvy(self.view)
class GsRebaseOnTopOfCommand(TextCommand, GitCommand):
def run(self, edit):
sublime.set_timeout_async(self.run_async, 0)
def run_async(self):
self.entries = [branch.name_with_remote
for branch in self.get_branches()
if not branch.active]
self.view.window().show_quick_panel(
self.entries,
self.on_selection
)
def on_selection(self, idx):
if idx == -1:
return
selection = self.entries[idx]
interface = ui.get_interface(self.view.id())
branch_state = interface.get_branch_state()
self.view.settings().set("git_savvy.rebase_in_progress", (branch_state, selection))
self.view.settings().set("git_savvy.rebase.base_ref", selection)
self.git("rebase", selection)
util.view.refresh_gitsavvy(self.view)
class GsRebaseAbortCommand(TextCommand, GitCommand):
def run(self, edit):
sublime.set_timeout_async(self.run_async, 0)
def run_async(self):
try:
self.git("rebase", "--abort")
finally:
util.view.refresh_gitsavvy(self.view)
class GsRebaseContinueCommand(TextCommand, GitCommand):
def run(self, edit):
sublime.set_timeout_async(self.run_async, 0)
def run_async(self):
try:
self.git("rebase", "--continue")
finally:
util.view.refresh_gitsavvy(self.view)
class GsRebaseSkipCommand(TextCommand, GitCommand):
def run(self, edit):
sublime.set_timeout_async(self.run_async, 0)
def run_async(self):
try:
self.git("rebase", "--skip")
finally:
util.view.refresh_gitsavvy(self.view)
|
bohlian/frappe | refs/heads/develop | frappe/core/report/document_share_report/__init__.py | 12133432 | |
leki75/ansible | refs/heads/devel | lib/ansible/modules/remote_management/ipmi/__init__.py | 12133432 | |
waheedahmed/edx-platform | refs/heads/master | common/djangoapps/monitoring/__init__.py | 12133432 | |
subutai/NAB | refs/heads/master | nab/detectors/htmjava/__init__.py | 12133432 | |
minyoungg/selfconsistency | refs/heads/master | models/exif/exif_solver.py | 1 | import os, sys, numpy as np, time
import init_paths
import tensorflow as tf
import tensorflow.contrib.slim as slim
from utils import ops, io
import traceback
from collections import deque
class ExifSolver(object):
def __init__(self, checkpoint=None, use_exif_summary=True, exp_name='no_name', init_summary=True):
"""
Args
checkpoint: .ckpt file to initialize weights from
use_exif_summary: if True, per-tag EXIF accuracy summaries are logged
exp_name: prefix used for ckpt and TensorBoard names
init_summary: if True, creates TB files; overrides the use_exif_summary arg
"""
self.checkpoint = None if checkpoint in ['', None] else checkpoint
self.exp_name = exp_name
self._batch_size = 128
self.use_exif_summary = use_exif_summary
self.init_summary = init_summary
self.ckpt_path = os.path.join('./ckpt', exp_name, exp_name)
io.make_dir(self.ckpt_path)
self.train_iterations = 10000000
self.test_init = True
self.show_iter = 20
self.test_iter = 2000
self.save_iter = 10000
self.train_timer = deque(maxlen=10)
return
def setup_net(self, net):
""" Links and setup loss and summary """
# Link network
self.net = net
# Initialize some basic things
self.sess = tf.Session(config=ops.config(self.net.use_gpu))
if self.init_summary:
self.train_writer = tf.summary.FileWriter(os.path.join('./tb', self.exp_name + '_train'), self.sess.graph)
self.test_writer = tf.summary.FileWriter(os.path.join('./tb', self.exp_name + '_test'))
self.setup_summary()
self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=None)
# Try to load checkpoint
if self.checkpoint is not None:
assert os.path.exists(self.checkpoint) or os.path.exists(self.checkpoint + '.index'), 'checkpoint does not exist'
try:
self.saver.restore(self.sess, self.checkpoint)
self.i = io.parse_checkpoint(self.checkpoint)
print 'Successfully resuming from %s' % self.checkpoint
except Exception:
print traceback.format_exc()
try:
print 'Model and checkpoint did not match, attempting to restore only weights'
variables_to_restore = ops.get_variables(self.checkpoint, exclude_scopes=['Adam'])
restorer = tf.train.Saver(variables_to_restore)
restorer.restore(self.sess, self.checkpoint)
except Exception:
print 'Model and checkpoint did not match, attempting to partially restore'
self.sess.run(tf.global_variables_initializer())
# Make sure you correctly set exclude_scopes if you are finetuning models or extending them
variables_to_restore = ops.get_variables(self.checkpoint, exclude_scopes=['classify']) #'resnet_v2_50/logits/', 'predict',
restorer = tf.train.Saver(variables_to_restore)
restorer.restore(self.sess, self.checkpoint)
print 'Variables initializing from scratch'
for var in tf.trainable_variables():
if var not in variables_to_restore:
print var
print 'Successfully restored %i variables' % len(variables_to_restore)
self.i = 0
else:
print 'Initializing from scratch'
self.i = 0
self.sess.run(tf.global_variables_initializer())
self.start_i = self.i
if self.net.use_tf_threading:
self.coord = tf.train.Coordinator()
self.net.train_runner.start_p_threads(self.sess)
tf.train.start_queue_runners(sess=self.sess, coord=self.coord)
return
def setup_summary(self):
""" Setup summary """
max_num_out = 2
self.summary = [
tf.summary.image('input_a', self.net.im_a, max_outputs=max_num_out),
tf.summary.image('input_b', self.net.im_b, max_outputs=max_num_out),
tf.summary.scalar('total_loss', self.net.total_loss),
tf.summary.scalar('learning_rate', self.net._opt._lr)
]
if not self.net.freeze_base:
self.summary.extend([tf.summary.scalar('exif_loss', self.net.loss),
tf.summary.scalar('exif_accuracy', self.net.accuracy)])
if self.net.train_classifcation:
self.summary.extend([tf.summary.scalar('cls_loss', self.net.cls_loss),
tf.summary.scalar('cls_accuracy', self.net.cls_accuracy)])
if self.use_exif_summary:
self.tag_holder = {tag:tf.placeholder(tf.float32) for tag in self.net.train_runner.tags}
self.individual_summary = {tag:tf.summary.scalar('individual/' + tag, self.tag_holder[tag]) for tag in self.net.train_runner.tags}
return
def setup_data(self, data, data_fn=None):
assert not self.net.use_tf_threading, "setup_data() cannot be used with the TF queue runner"
self.data = data
if data_fn is not None:
self.data_fn = data_fn
else:
try:
self.data_fn = self.data.exif_balanced_nextbatch
except AttributeError:
self.data_fn = self.data.nextbatch
assert self.data_fn is not None
return
def get_data(self, batch_size, split='train'):
""" Make sure to pass None even if not using final classification """
assert self.data is not None
if batch_size is None:
batch_size = self._batch_size
data_dict = self.data_fn(batch_size, split=split)
args = {self.net.im_a:data_dict['im_a'],
self.net.im_b:data_dict['im_b']}
if 'cls_lbl' in data_dict:
args[self.net.cls_label] = data_dict['cls_lbl']
if 'exif_lbl' in data_dict:
args[self.net.label] = data_dict['exif_lbl']
return args
def train(self):
print 'Started training'
while self.i < self.train_iterations:
if self.test_init and self.i == self.start_i:
print('Testing initialization')
self.test(writer=self.test_writer)
self._train()
self.i += 1
if self.i % self.show_iter == 0:
self.show(writer=self.train_writer, phase='train')
if self.i % self.test_iter == 0:
self.test(writer=self.test_writer)
if self.i % self.save_iter == 0 and self.i != self.start_i:
io.make_ckpt(self.saver, self.sess, self.ckpt_path, self.i)
return
def _train(self):
start_time = time.time()
if self.net.use_tf_threading:
self.sess.run(self.net.opt)
else:
args = self.get_data(self.net.batch_size, 'train')
self.sess.run(self.net.opt, feed_dict=args)
self.train_timer.append(time.time() - start_time)
return
def show(self, writer, phase='train'):
if self.net.use_tf_threading:
summary = self.sess.run(self.summary)
else:
args = self.get_data(self.net.batch_size, phase)
summary = self.sess.run(self.summary, feed_dict=args)
io.add_summary(writer, summary, self.i)
io.show([['Train time', np.mean(list(self.train_timer))]],
phase=phase, iter=self.i)
return
def test(self, writer):
if self.use_exif_summary:
exif_start = time.time()
test_queue = self.net.train_runner.get_random_test(batch_size=self.net.batch_size)
to_print = []
for i, (im_a_batch, im_b_batch, label_batch) in enumerate(test_queue):
tag = self.net.train_runner.tags[i]
output = self.sess.run(self.net.pred, feed_dict={self.net.im_a:im_a_batch,
self.net.im_b:im_b_batch,
self.net.label:label_batch,
self.net.is_training:False})
tag_acc = 100.0 * (np.sum(np.round(output[:, i]) == label_batch[:, i])/float(self.net.batch_size))
summary = self.sess.run(self.individual_summary[tag], feed_dict={self.tag_holder[tag]:tag_acc})
io.add_summary(writer, [summary], self.i)
to_print.append([tag, tag_acc])
io.show(to_print, phase='test', iter=self.i)
print('EXIF test accuracy evaluation took %.2f seconds' % (time.time() - exif_start))
return
def initialize(args):
return ExifSolver(**args)
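# Typical usage (a sketch, not from the original source; assumes `net` exposes
# the attributes referenced above, e.g. im_a, im_b, opt and batch_size):
#
# solver = initialize({'exp_name': 'exif_run', 'checkpoint': None})
# solver.setup_net(net)
# solver.setup_data(data) # only when not using the TF queue runner
# solver.train()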
|
kutenai/django | refs/heads/master | tests/sitemaps_tests/urls/__init__.py | 12133432 | |
phoebusliang/parallel-lettuce | refs/heads/master | tests/integration/lib/Django-1.3/tests/modeltests/many_to_one_null/__init__.py | 12133432 | |
kswiat/django | refs/heads/master | django/conf/locale/kn/__init__.py | 12133432 | |
anandpdoshi/frappe | refs/heads/develop | frappe/website/doctype/blog_settings/__init__.py | 12133432 | |
MediaSapiens/autonormix | refs/heads/master | django/views/decorators/__init__.py | 12133432 | |
joeyjojo/django_offline | refs/heads/master | src/django/conf/locale/km/__init__.py | 12133432 | |
roninio/gae-boilerplate | refs/heads/Fix-Issue-253 | boilerplate/external/wtforms/ext/django/i18n.py | 119 | from django.utils.translation import ugettext, ungettext
from wtforms import form
class DjangoTranslations(object):
"""
A translations object for WTForms that gets its messages from django's
translations providers.
"""
def gettext(self, string):
return ugettext(string)
def ngettext(self, singular, plural, n):
return ungettext(singular, plural, n)
class Form(form.Form):
"""
A Form derivative which uses the translations engine from django.
"""
_django_translations = DjangoTranslations()
def _get_translations(self):
return self._django_translations
|
40223139/203739test | refs/heads/master | static/Brython3.1.0-20150301-090019/Lib/colorsys.py | 1066 | """Conversion functions between RGB and other color systems.
This module provides two functions for each color system ABC:
rgb_to_abc(r, g, b) --> a, b, c
abc_to_rgb(a, b, c) --> r, g, b
All inputs and outputs are triples of floats in the range [0.0...1.0]
(with the exception of I and Q, which cover a slightly larger range).
Inputs outside the valid range may cause exceptions or invalid outputs.
Supported color systems:
RGB: Red, Green, Blue components
YIQ: Luminance, Chrominance (used by composite video signals)
HLS: Hue, Luminance, Saturation
HSV: Hue, Saturation, Value
"""
# References:
# http://en.wikipedia.org/wiki/YIQ
# http://en.wikipedia.org/wiki/HLS_color_space
# http://en.wikipedia.org/wiki/HSV_color_space
__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb",
"rgb_to_hsv","hsv_to_rgb"]
# Some floating point constants
ONE_THIRD = 1.0/3.0
ONE_SIXTH = 1.0/6.0
TWO_THIRD = 2.0/3.0
# YIQ: used by composite video signals (linear combinations of RGB)
# Y: perceived grey level (0.0 == black, 1.0 == white)
# I, Q: color components
def rgb_to_yiq(r, g, b):
y = 0.30*r + 0.59*g + 0.11*b
i = 0.60*r - 0.28*g - 0.32*b
q = 0.21*r - 0.52*g + 0.31*b
return (y, i, q)
def yiq_to_rgb(y, i, q):
r = y + 0.948262*i + 0.624013*q
g = y - 0.276066*i - 0.639810*q
b = y - 1.105450*i + 1.729860*q
if r < 0.0:
r = 0.0
if g < 0.0:
g = 0.0
if b < 0.0:
b = 0.0
if r > 1.0:
r = 1.0
if g > 1.0:
g = 1.0
if b > 1.0:
b = 1.0
return (r, g, b)
# HLS: Hue, Luminance, Saturation
# H: position in the spectrum
# L: color lightness
# S: color saturation
def rgb_to_hls(r, g, b):
maxc = max(r, g, b)
minc = min(r, g, b)
# XXX Can optimize (maxc+minc) and (maxc-minc)
l = (minc+maxc)/2.0
if minc == maxc:
return 0.0, l, 0.0
if l <= 0.5:
s = (maxc-minc) / (maxc+minc)
else:
s = (maxc-minc) / (2.0-maxc-minc)
rc = (maxc-r) / (maxc-minc)
gc = (maxc-g) / (maxc-minc)
bc = (maxc-b) / (maxc-minc)
if r == maxc:
h = bc-gc
elif g == maxc:
h = 2.0+rc-bc
else:
h = 4.0+gc-rc
h = (h/6.0) % 1.0
return h, l, s
def hls_to_rgb(h, l, s):
if s == 0.0:
return l, l, l
if l <= 0.5:
m2 = l * (1.0+s)
else:
m2 = l+s-(l*s)
m1 = 2.0*l - m2
return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD))
def _v(m1, m2, hue):
hue = hue % 1.0
if hue < ONE_SIXTH:
return m1 + (m2-m1)*hue*6.0
if hue < 0.5:
return m2
if hue < TWO_THIRD:
return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0
return m1
# HSV: Hue, Saturation, Value
# H: position in the spectrum
# S: color saturation ("purity")
# V: color brightness
def rgb_to_hsv(r, g, b):
maxc = max(r, g, b)
minc = min(r, g, b)
v = maxc
if minc == maxc:
return 0.0, 0.0, v
s = (maxc-minc) / maxc
rc = (maxc-r) / (maxc-minc)
gc = (maxc-g) / (maxc-minc)
bc = (maxc-b) / (maxc-minc)
if r == maxc:
h = bc-gc
elif g == maxc:
h = 2.0+rc-bc
else:
h = 4.0+gc-rc
h = (h/6.0) % 1.0
return h, s, v
def hsv_to_rgb(h, s, v):
if s == 0.0:
return v, v, v
i = int(h*6.0) # XXX assume int() truncates!
f = (h*6.0) - i
p = v*(1.0 - s)
q = v*(1.0 - s*f)
t = v*(1.0 - s*(1.0-f))
i = i%6
if i == 0:
return v, t, p
if i == 1:
return q, v, p
if i == 2:
return p, v, t
if i == 3:
return p, q, v
if i == 4:
return t, p, v
if i == 5:
return v, p, q
# Cannot get here
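# Example round trip (a minimal sanity sketch, not part of the original module):
#
# >>> h, s, v = rgb_to_hsv(0.2, 0.4, 0.4)
# >>> hsv_to_rgb(h, s, v)
# (0.2, 0.4, 0.4)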
|
tersmitten/ansible | refs/heads/devel | test/units/modules/network/f5/test_bigip_software_install.py | 16 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_software_install import ApiParameters
from library.modules.bigip_software_install import ModuleParameters
from library.modules.bigip_software_install import ModuleManager
from library.modules.bigip_software_install import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_software_install import ApiParameters
from ansible.modules.network.f5.bigip_software_install import ModuleParameters
from ansible.modules.network.f5.bigip_software_install import ModuleManager
from ansible.modules.network.f5.bigip_software_install import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
volume='HD1.2',
image='BIGIP-13.0.0.0.0.1645.iso',
)
p = ModuleParameters(params=args)
assert p.volume == 'HD1.2'
assert p.image == 'BIGIP-13.0.0.0.0.1645.iso'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create(self, *args):
set_module_args(dict(
image='BIGIP-13.0.0.0.0.1645.iso',
volume='HD1.2',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = ApiParameters()
current.read_image_from_device = Mock(
side_effect=[
['BIGIP-13.0.0.0.0.1645.iso'],
['BIGIP-12.1.3.4-0.0.2.iso'],
]
)
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.have = current
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
mm.volume_exists = Mock(return_value=True)
mm.update_on_device = Mock(return_value=True)
mm.wait_for_device_reboot = Mock(return_value=True)
mm.wait_for_software_install_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
|
azatoth/scons | refs/heads/master | test/CXX/SHCXXFLAGS.py | 5 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that $SHCXXFLAGS settings are used to build shared object files.
"""
import os
import sys
import TestSCons
_obj = TestSCons._obj
if os.name == 'posix':
os.environ['LD_LIBRARY_PATH'] = '.'
if sys.platform.find('irix') > -1:
os.environ['LD_LIBRARYN32_PATH'] = '.'
test = TestSCons.TestSCons()
e = test.Environment()
test.write('SConstruct', """
foo = Environment(WINDOWS_INSERT_DEF=1)
foo.Append(SHCXXFLAGS = '-DFOO')
bar = Environment(WINDOWS_INSERT_DEF=1)
bar.Append(SHCXXFLAGS = '-DBAR')
foo_obj = foo.SharedObject(target = 'foo%(_obj)s', source = 'prog.cpp')
bar_obj = bar.SharedObject(target = 'bar%(_obj)s', source = 'prog.cpp')
foo.SharedLibrary(target = 'foo', source = foo_obj)
bar.SharedLibrary(target = 'bar', source = bar_obj)
fooMain = foo.Clone(LIBS='foo', LIBPATH='.')
foo_obj = fooMain.Object(target='foomain', source='main.c')
fooMain.Program(target='fooprog', source=foo_obj)
barMain = bar.Clone(LIBS='bar', LIBPATH='.')
bar_obj = barMain.Object(target='barmain', source='main.c')
barMain.Program(target='barprog', source=bar_obj)
""" % locals())
test.write('foo.def', r"""
LIBRARY "foo"
DESCRIPTION "Foo Shared Library"
EXPORTS
doIt
""")
test.write('bar.def', r"""
LIBRARY "bar"
DESCRIPTION "Bar Shared Library"
EXPORTS
doIt
""")
test.write('prog.cpp', r"""
#include <stdio.h>
extern "C" void
doIt()
{
#ifdef FOO
printf("prog.cpp: FOO\n");
#endif
#ifdef BAR
printf("prog.cpp: BAR\n");
#endif
}
""")
test.write('main.c', r"""
void doIt();
int
main(int argc, char* argv[])
{
doIt();
return 0;
}
""")
test.run(arguments = '.')
test.run(program = test.workpath('fooprog'), stdout = "prog.cpp: FOO\n")
test.run(program = test.workpath('barprog'), stdout = "prog.cpp: BAR\n")
test.write('SConstruct', """
bar = Environment(WINDOWS_INSERT_DEF=1)
bar.Append(SHCXXFLAGS = '-DBAR')
foo_obj = bar.SharedObject(target = 'foo%(_obj)s', source = 'prog.cpp')
bar_obj = bar.SharedObject(target = 'bar%(_obj)s', source = 'prog.cpp')
bar.SharedLibrary(target = 'foo', source = foo_obj)
bar.SharedLibrary(target = 'bar', source = bar_obj)
barMain = bar.Clone(LIBS='bar', LIBPATH='.')
foo_obj = barMain.Object(target='foomain', source='main.c')
bar_obj = barMain.Object(target='barmain', source='main.c')
barMain.Program(target='barprog', source=foo_obj)
barMain.Program(target='fooprog', source=bar_obj)
""" % locals())
test.run(arguments = '.')
test.run(program = test.workpath('fooprog'), stdout = "prog.cpp: BAR\n")
test.run(program = test.workpath('barprog'), stdout = "prog.cpp: BAR\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
UpOut/objectify | refs/heads/master | objectify/model/base.py | 1 | # coding: utf-8
# In order of performance, fastest first
try:
import ujson as json
except ImportError:
try:
import simplejson as json
except ImportError:
import json
from ..base import ObjectifyObject
class ObjectifyModel(ObjectifyObject):
__fetch_attr__ = None
__serializer__ = json.dumps
__deserializer__ = json.loads
__key_name__ = None
__fetch_attrs__ = None
__fetch_key__ = None
def __init__(self,name=None,fetch_key=False,fetch_attrs=[],
serializer=None,deserializer=None,**kwargs):
super(ObjectifyModel, self).__init__(
name=name,
fetch_key=fetch_key,
fetch_attrs=fetch_attrs,
serializer=serializer,
deserializer=deserializer,**kwargs
)
self.__key_name__ = name
self.__fetch_attrs__ = set(fetch_attrs)
self.__fetch_key__ = fetch_key
if serializer is not None:
self.__serializer__ = serializer
if deserializer is not None:
self.__deserializer__ = deserializer
default = kwargs.get("default",None)
if default:
self.from_collection(default)
def fetch_key_value(self):
return getattr(self,self.__fetch_attr__)
def set_fetch_key_value(self,val):
return setattr(self,self.__fetch_attr__,val)
def serialize(self):
return self.__serializer__(self.to_collection())
def deserialize(self,val):
return self.from_collection(
self.__deserializer__(val)
)
def copy_inited(self,keep_name=True):
if keep_name:
self.__init_kwargs__['name'] = self.__key_name__
return self.__class__(
*self.__init_args__,
**self.__init_kwargs__
)
def example_value(self):
raise NotImplementedError() |
ROB-Seismology/oq-hazardlib | refs/heads/rob-hazardlib | openquake/hazardlib/site.py | 1 | # The Hazard Library
# Copyright (C) 2012 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module :mod:`openquake.hazardlib.site` defines :class:`Site`.
"""
import numpy
from openquake.hazardlib.geo.mesh import Mesh
from openquake.hazardlib.slots import with_slots
@with_slots
class Site(object):
"""
Site object represents a geographical location defined by its position
as well as its soil characteristics.
:param location:
Instance of :class:`~openquake.hazardlib.geo.point.Point` representing
where the site is located.
:param vs30:
Average shear wave velocity in the top 30 m, in m/s.
:param vs30measured:
Boolean value, ``True`` if ``vs30`` was measured on that location
and ``False`` if it was inferred.
:param z1pt0:
Vertical distance from earth surface to the layer where seismic waves
start to propagate with a speed above 1.0 km/sec, in meters.
:param z2pt5:
Vertical distance from earth surface to the layer where seismic waves
start to propagate with a speed above 2.5 km/sec, in km.
:param kappa:
Kappa value in seconds
:param id:
Optional parameter with default None. If given, it should be an
integer identifying the site univocally.
:raises ValueError:
If any of ``vs30``, ``z1pt0`` or ``z2pt5`` is zero or negative.
.. note::
:class:`Sites <Site>` are pickleable
"""
__slots__ = 'location vs30 vs30measured z1pt0 z2pt5 kappa id'.split()
def __init__(self, location, vs30, vs30measured, z1pt0, z2pt5, kappa=0., id=None):
if not vs30 > 0:
raise ValueError('vs30 must be positive')
if not z1pt0 > 0:
raise ValueError('z1pt0 must be positive')
if not z2pt5 > 0:
raise ValueError('z2pt5 must be positive')
self.location = location
self.vs30 = vs30
self.vs30measured = vs30measured
self.z1pt0 = z1pt0
self.z2pt5 = z2pt5
self.kappa = kappa
self.id = id
def __str__(self):
"""
>>> import openquake.hazardlib
>>> loc = openquake.hazardlib.geo.point.Point(1, 2, 3)
>>> str(Site(loc, 760.0, True, 100.0, 5.0))
'<Location=<Latitude=2.000000, Longitude=1.000000, Depth=3.0000>, \
Vs30=760.0000, Vs30Measured=True, Depth1.0km=100.0000, Depth2.5km=5.0000>'
"""
return (
"<Location=%s, Vs30=%.4f, Vs30Measured=%r, Depth1.0km=%.4f, "
"Depth2.5km=%.4f, kappa=%.3f>") % (
self.location, self.vs30, self.vs30measured, self.z1pt0,
self.z2pt5, self.kappa)
def __repr__(self):
"""
>>> import openquake.hazardlib
>>> loc = openquake.hazardlib.geo.point.Point(1, 2, 3)
>>> site = Site(loc, 760.0, True, 100.0, 5.0)
>>> str(site) == repr(site)
True
"""
return self.__str__()
class SiteCollection(object):
"""
A collection of :class:`sites <Site>`.
Instances of this class are intended to represent a large collection
of sites in a most efficient way in terms of memory usage.
.. note::
Because calculations assume that :class:`Sites <Site>` are on the
Earth's surface, all `depth` information in a :class:`SiteCollection`
is discarded. The collection `mesh` will only contain lon and lat. So
even if a :class:`SiteCollection` is created from sites containing
`depth` in their geometry, iterating over the collection will yield
:class:`Sites <Site>` with a reference depth of 0.0.
:param sites:
A list of instances of :class:`Site` class.
"""
def __init__(self, sites):
self.indices = None
self.vs30 = numpy.zeros(len(sites))
self.vs30measured = numpy.zeros(len(sites), dtype=bool)
self.z1pt0 = self.vs30.copy()
self.z2pt5 = self.vs30.copy()
self.kappa = self.vs30.copy()
lons = self.vs30.copy()
lats = self.vs30.copy()
depths = self.vs30.copy()
for i in xrange(len(sites)):
self.vs30[i] = sites[i].vs30
self.vs30measured[i] = sites[i].vs30measured
self.z1pt0[i] = sites[i].z1pt0
self.z2pt5[i] = sites[i].z2pt5
self.kappa[i] = sites[i].kappa
lons[i] = sites[i].location.longitude
lats[i] = sites[i].location.latitude
depths[i] = sites[i].location.depth
#self.mesh = Mesh(lons, lats, depths=None)
self.mesh = Mesh(lons, lats, depths=depths)
# protect arrays from being accidentally changed. it is useful
# because we pass these arrays directly to a GMPE through
# a SiteContext object and if a GMPE is implemented poorly it could
# modify the site values, thereby corrupting site and all the
# subsequent calculation. note that this doesn't protect arrays from
# being changed by calling itemset()
for arr in (self.vs30, self.vs30measured, self.z1pt0, self.z2pt5,
self.kappa, self.mesh.lons, self.mesh.lats):
arr.flags.writeable = False
def __iter__(self):
"""
Iterate through all :class:`sites <Site>` in the collection, yielding
one at a time.
"""
for i, location in enumerate(self.mesh):
yield Site(location, self.vs30[i], self.vs30measured[i],
self.z1pt0[i], self.z2pt5[i], self.kappa[i])
def expand(self, data, total_sites, placeholder):
"""
Expand an array that was created for a filtered site collection
with respect to indices of the sites that were :meth:`filtered
<filter>`.
The typical workflow is the following: there is a whole site
collection, the one that has an information about all the sites.
Then it gets filtered for performing some calculation on a limited
set of sites (like for instance filtering sites by their proximity
to a rupture). That filtering process can be repeated arbitrary
number of times, i.e. a collection that is already filtered can
be filtered for further limiting the set of sites to compute on.
Then the (supposedly expensive) computation is done on a limited
set of sites which still appears as just a :class:`SiteCollection`
instance, so that computation code doesn't need to worry about
filtering, it just needs to handle site collection objects. The
calculation result comes in a form of 1d or 2d numpy array (that
is, either one value per site or one 1d array per site) with length
equal to number of sites in a filtered collection. That result
needs to be expanded to an array of similar structure but the one
that holds values for all the sites in the original (unfiltered)
collection. This is what :meth:`expand` is for. It creates a result
array of ``total_sites`` length and puts values from ``data`` into
appropriate places in it remembering indices of sites that were
chosen for actual calculation and leaving ``placeholder`` value
everywhere else.
:param data:
1d or 2d numpy array with first dimension representing values
computed for site from this collection.
:param total_sites:
Integer number representing a total number of sites in
a collection this one was created from.
:param placeholder:
A scalar value to be put in result array for those sites that
were filtered out and no real calculation was performed for them.
:returns:
Array of length ``total_sites`` with values from ``data``
distributed in the appropriate places.
"""
num_sites_computed = data.shape[0]
assert num_sites_computed == len(self)
if self.indices is None:
assert total_sites == num_sites_computed
# nothing to expand: this sites collection was not filtered
return data
assert num_sites_computed < total_sites
assert self.indices[-1] < total_sites
if data.ndim == 1:
# single-dimensional array
result = numpy.empty(total_sites)
result.fill(placeholder)
result.put(self.indices, data)
return result
assert data.ndim == 2
# two-dimensional array
num_values = data.shape[1]
result = numpy.empty((total_sites, num_values))
result.fill(placeholder)
for i in xrange(num_values):
result[:, i].put(self.indices, data[:, i])
return result
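# Worked example of the filter/expand round trip described above (a sketch
# with made-up values; `sites` is assumed to hold four Site objects):
#
# >>> full = SiteCollection(sites)
# >>> subset = full.filter(numpy.array([True, False, True, False]))
# >>> subset.expand(numpy.array([1.0, 2.0]), total_sites=4, placeholder=0)
# array([ 1., 0., 2., 0.])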
def filter(self, mask):
"""
Create a new collection with only a subset of sites from this one.
:param mask:
Numpy array of boolean values of the same length as this sites
collection. ``True`` values should indicate that site with that
index should be included into the filtered collection.
:returns:
A new :class:`SiteCollection` instance, unless all the values
in ``mask`` are ``True``, in which case this site collection
is returned, or if all the values in ``mask`` are ``False``,
in which case method returns ``None``. New collection has data
of only those sites that were marked for inclusion in mask.
See also :meth:`expand`.
"""
assert len(mask) == len(self)
if mask.all():
# all sites satisfy the filter, return
# this collection unchanged
return self
if not mask.any():
# no sites pass the filter, return None
return None
col = object.__new__(SiteCollection)
# extract indices of Trues from the mask
[indices] = mask.nonzero()
# take only needed values from this collection
# to a new one
col.vs30 = self.vs30.take(indices)
col.vs30measured = self.vs30measured.take(indices)
col.z1pt0 = self.z1pt0.take(indices)
col.z2pt5 = self.z2pt5.take(indices)
col.kappa = self.kappa.take(indices)
col.mesh = Mesh(self.mesh.lons.take(indices),
self.mesh.lats.take(indices),
depths=None)
if self.indices is not None:
# if this collection was already a subset of some other
# collection (a result of :meth:`filter` itself) than mask's
# indices represent values in a filtered collection, but
# we need to keep track of original indices in the whole
# (unfiltered) collection. here we save original indices
# of sites in this double- (or more times) filtered
# collection
col.indices = self.indices.take(indices)
else:
col.indices = indices
# do the same as in the constructor
for arr in (col.vs30, col.vs30measured, col.z1pt0, col.z2pt5,
col.kappa, col.mesh.lons, col.mesh.lats):
arr.flags.writeable = False
return col
def __len__(self):
"""
Return a number of sites in a collection.
"""
return len(self.mesh)
|
ekiwi/tinyos-1.x | refs/heads/master | contrib/handhelds/apps/EKG/ekg.py | 2 | #!/usr/bin/python
#
# Copyright (c) 2004,2005 Hewlett-Packard Company
# All rights reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Hewlett-Packard Company nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ekg.py
#
# Subscribe to a SIPLite ekg server and display data stream.
from twisted.internet import gtk2reactor
gtk2reactor.install()
import gobject, gtk
from gtk import glade
import sys,struct,time,re
import pango
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
from twisted.python import util
import sys
###########################################################################################
class HeartData:
'Unpack a struct EKGData from the ekg sensor'
def __init__(self,data):
self.min = 65535
self.max = -65535
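# Packet header: a native uint32 sequence number plus an int16 sample size (6 bytes).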
(self.number, sample_size) = struct.unpack('Ih', data[:6])
self.hr = 512
self.samples = []
if config['verbose'] > 2: print [ord(x) for x in data[6:]]
if config['verbose'] > 1: print 'sample_size=', sample_size
if sample_size == 2:
for i in range(6,len(data),2):
(sample,) = struct.unpack('h', data[i:i+2])
if sample < self.min: self.min = sample
if sample > self.max: self.max = sample
self.samples.append(sample)
else:
for i in range(6,len(data)):
(sample,) = struct.unpack('b', data[i:i+1])
if sample < self.min: self.min = sample
if sample > self.max: self.max = sample
self.samples.append(sample)
class SIPLiteTestReceiver( DatagramProtocol ):
'Set up an SIPLite receiver'
def __init__(self,mw):
self.mw = mw
self.mw.min = 65535
self.mw.max = -65535
def datagramReceived(self, datagram, addr):
hd = HeartData( datagram )
if hd.min < self.mw.min: self.mw.min = hd.min
if hd.max > self.mw.max: self.mw.max = hd.max
if hd.hr < 511: hrText = "%d" % hd.hr
else: hrText = "N/A"
if config['verbose']:
hrText = 'minv=%d maxv=%d' % (self.mw.min, self.mw.max)
else:
hrText = ''
self.mw._heartRateLabel.set_text( hrText )
self.mw.add_pleth_data( hd.number, hd.samples )
class SIPLiteTestClient( DatagramProtocol ):
def __init__(self,host,remote_port,protocol,mw):
self.host = host
self.remote_port = remote_port
self.timeout = 20
self.protocol = protocol
receiver = SIPLiteTestReceiver(mw)
reactor.listenUDP( 0, receiver )
self.local_port = receiver.transport.getHost().port
def startProtocol(self):
self.transport.connect(self.host, self.remote_port)
self.sendDatagram()
def sendDatagram(self):
msglist = ["INVITE SIPLITE/1.0",
"Expires: %d" % self.timeout,
"Call-ID: FOOBAR",
"",
"m=%d %d" % (self.local_port, self.protocol) ]
msg = "\r\n".join(msglist) + "\r\n"
print "\nSEND INVITE expires %d" % self.timeout
try:
self.transport.write(msg)
except:
sys.exit(0)
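# Refresh the session before it expires by re-sending the INVITE at 60% of the timeout.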
if self.timeout:
reactor.callLater( self.timeout * 0.6, self.sendDatagram )
def datagramReceived(self, datagram, addr):
print '\nCOMMAND:', repr(datagram), "from", repr(addr)
expires = re.search( r'Expires: (\d+)', datagram).group(1)
if expires and int(expires) == 0:
reactor.stop()
def terminate(self):
self.timeout = 0
self.sendDatagram()
###########################################################################################
class MainWindow:
def __init__(self):
gladefile = util.sibpath(__file__, 'ekg.glade' )
self.glade = glade.XML(gladefile)
self.glade.signal_autoconnect(self)
self.setWidgetsFromGladefile()
self._mainWindow.show()
self.index = 0
self.datalen = 225
self.pdata = [0] * self.datalen
self.gc = None
self.widget = None
def setWidgetsFromGladefile(self):
widgets = ( 'mainWindow', 'heartRateLabel',
'spo2_graph', 'statusBar' )
gw = self.glade.get_widget
for widgetName in widgets:
setattr(self, "_" + widgetName, gw(widgetName))
self._statusContext = self._statusBar.get_context_id("Main Window")
self._heartRateLabel.modify_font(pango.FontDescription("sans 20"))
def statusMsg(self,text):
self._statusBar.push(self._statusContext,text)
def add_pleth_data(self,seqno,pleth_list):
self.statusMsg( "Message: %d" % seqno )
for p in pleth_list:
self.pdata[self.index] = p
self.index = (self.index + 1) % self.datalen
if self.widget:
self.widget.queue_draw()
def on_spo2_graph_expose_event(self, widget, event):
if not self.gc:
self.widget = widget
self.gc = widget.window.new_gc()
self.gc.set_line_attributes(1, gtk.gdk.LINE_SOLID,
gtk.gdk.CAP_ROUND, gtk.gdk.JOIN_ROUND)
self.red_gc = widget.window.new_gc()
self.yellow_gc = widget.window.new_gc()
self.green_gc = widget.window.new_gc()
colormap = widget.get_colormap()
red = colormap.alloc_color( 65535, 0, 0 )
yellow = colormap.alloc_color( 65535, 65535, 0 )
green = colormap.alloc_color( 0, 65536, 0 )
self.red_gc.set_foreground( red )
self.yellow_gc.set_foreground( yellow )
self.green_gc.set_foreground( green )
r = widget.get_allocation()
xscale = float(r.width) / (len(self.pdata) - 1)
yscale = float(r.height) / (self.max - self.min)
ymax = self.max
ymin = self.min
for i in range(0,self.datalen - 1):
widget.window.draw_line(self.gc,
int(xscale * i),
int(yscale * (self.pdata[(self.index + i) % self.datalen] - ymin)),
int(xscale * (i+1)),
int(yscale * (self.pdata[(self.index + 1 + i) % self.datalen] - ymin)))
def on_mainWindow_delete_event(self, widget, data):
gtk.main_quit()
def on_mainWindow_destroy_event(self, widget):
gtk.main_quit()
###########################################################################################
def parse_host(host,default_port=5062):
'Parse strings of the form HOST[:PORT]'
port = default_port
if ':' in host:
host, port = host.split(':')
port = int(port)
return host, port
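# Examples (illustrative hostname):
# parse_host('idp.example.org:5060') -> ('idp.example.org', 5060)
# parse_host('idp.example.org') -> ('idp.example.org', 5062)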
def usage():
print """
Usage: ekg.py [OPTIONS] HOST[:PORT]
Remote port defaults to 5062
Valid options are:
-v, --verbose May be repeated for more detail
-p, --protocol=NUM Protocol to use (1=fast)
-h, --help This help
"""
sys.exit(0)
if __name__ == '__main__':
import getopt
config = { 'verbose' : 0,
'remote_port' : 5062,
'protocol' : 1 }
try:
(options, argv) = getopt.getopt(sys.argv[1:], 'vhp:',
['verbose', 'help', 'protocol='])
except Exception, e:
print e
usage()
for (k,v) in options:
if k in ('-v', '--verbose'):
config['verbose'] += 1
elif k in ('-h', '--help'):
usage()
elif k in ('-p', '--protocol'):
config['protocol'] = int(v)
else:
print "I didn't understand that"
usage()
if len(argv) != 1:
print "must supply a host and an optional port"
usage()
host, remote_port = parse_host(argv[0],config['remote_port'])
if config['verbose']:
print 'Connecting to', host, remote_port, "protocol", config['protocol']
mw = MainWindow()
rtpc = SIPLiteTestClient(host,remote_port,config['protocol'],mw)
reactor.listenUDP( 0, rtpc )
reactor.run()
|
saurabh6790/tru_app_back | refs/heads/master | hr/doctype/salary_slip_deduction/salary_slip_deduction.py | 1946 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
class DocType:
def __init__(self, d, dl):
self.doc, self.doclist = d, dl |
rishig/zulip | refs/heads/master | zerver/lib/profile.py | 17 |
import cProfile
from functools import wraps
from typing import Any, TypeVar, Callable
ReturnT = TypeVar('ReturnT')
def profiled(func: Callable[..., ReturnT]) -> Callable[..., ReturnT]:
"""
This decorator should obviously be used only in a dev environment.
It works best when surrounding a function that you expect to be
called once. One strategy is to write a backend test and wrap the
test case with the profiled decorator.
You can run a single test case like this:
# edit zerver/tests/test_external.py and place @profiled above the test case below
./tools/test-backend zerver.tests.test_external.RateLimitTests.test_ratelimit_decrease
Then view the results like this:
./tools/show-profile-results test_ratelimit_decrease.profile
"""
@wraps(func)
def wrapped_func(*args: Any, **kwargs: Any) -> ReturnT:
fn = func.__name__ + ".profile"
prof = cProfile.Profile()
retval = prof.runcall(func, *args, **kwargs) # type: ReturnT
prof.dump_stats(fn)
return retval
return wrapped_func
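# Example (hypothetical function name; mirrors the workflow in the docstring):
#
# @profiled
# def populate_test_data():
# ...
#
# Calling populate_test_data() once writes populate_test_data.profile to disk.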
|
franek/weboob | refs/heads/master | modules/dresdenwetter/backend.py | 1 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2013 Romain Bignon, Florent Fourcot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .browser import DresdenWetterBrowser
from weboob.capabilities.gauge import ICapGauge, GaugeSensor, Gauge,\
SensorNotFound
from weboob.tools.backend import BaseBackend
__all__ = ['DresdenWetterBackend']
class DresdenWetterBackend(BaseBackend, ICapGauge):
NAME = 'dresdenwetter'
MAINTAINER = u'Florent Fourcot'
EMAIL = 'weboob@flo.fourcot.fr'
VERSION = '0.f'
LICENSE = 'AGPLv3+'
DESCRIPTION = u"Private wetter station Dresden"
BROWSER = DresdenWetterBrowser
def iter_gauges(self, pattern=None):
if pattern is None or pattern.lower() in u"dresden"\
or pattern.lower() in "weather":
gauge = Gauge("private-dresden")
gauge.name = u"Private Wetterstation Dresden"
gauge.city = u"Dresden"
gauge.object = u"Weather"
gauge.sensors = self.browser.get_sensors_list()
yield gauge
def _get_gauge_by_id(self, id):
for gauge in self.iter_gauges():
if id == gauge.id:
return gauge
return None
def _get_sensor_by_id(self, id):
for gauge in self.iter_gauges():
for sensor in gauge.sensors:
if id == sensor.id:
return sensor
return None
def iter_sensors(self, gauge, pattern=None):
if not isinstance(gauge, Gauge):
gauge = self._get_gauge_by_id(gauge)
if pattern is None:
for sensor in gauge.sensors:
yield sensor
else:
lowpattern = pattern.lower()
for sensor in gauge.sensors:
if lowpattern in sensor.name.lower():
yield sensor
# Not in the website
def iter_gauge_history(self, sensor):
raise NotImplementedError()
def get_last_measure(self, sensor):
if not isinstance(sensor, GaugeSensor):
sensor = self._get_sensor_by_id(sensor)
if sensor is None:
raise SensorNotFound()
return sensor.lastvalue
|
Dolphman/lazyns | refs/heads/master | tests/test_api.py | 386048 | |
modulexcite/PTVS | refs/heads/master | Python/Tests/TestData/Grammar/FromImportStmt.py | 18 | from sys import winver
from sys import winver as baz
from sys.fob import winver
from sys.fob import winver as baz
from ...fob import oar
from ....fob import oar
from ......fob import oar
from .......fob import oar
from fob import (fob as oar, baz as quox) |
NeovaHealth/odoo | refs/heads/8.0 | addons/edi/controllers/main.py | 374 | import werkzeug.urls
import openerp
import openerp.addons.web.controllers.main as webmain
class EDI(openerp.http.Controller):
@openerp.http.route('/edi/import_url', type='http', auth='none')
def import_url(self, url):
# http://hostname:8069/edi/import_url?url=URIEncodedURL
req = openerp.http.request
# `url` may contain a full URL with a valid query string, we basically want to watch out for XML brackets and double-quotes
safe_url = werkzeug.urls.url_quote_plus(url, ':/?&;=')
values = dict(init='s.edi.edi_import("%s");' % safe_url)
return req.render('web.webclient_bootstrap', values)
@openerp.http.route('/edi/import_edi_url', type='json', auth='none')
def import_edi_url(self, url):
req = openerp.http.request
result = req.session.proxy('edi').import_edi_url(req.session._db, req.session._uid, req.session._password, url)
if len(result) == 1:
return {"action": webmain.clean_action(req, result[0][2])}
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
bulldy80/gyp_unofficial | refs/heads/master | test/win/gyptest-link-tsaware.py | 269 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure tsaware setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('tsaware.gyp', chdir=CHDIR)
test.build('tsaware.gyp', test.ALL, chdir=CHDIR)
def GetHeaders(exe):
return test.run_dumpbin('/headers', test.built_file_path(exe, chdir=CHDIR))
# Explicitly off, should not be marked Terminal Server aware.
if 'Terminal Server Aware' in GetHeaders('test_tsaware_no.exe'):
test.fail_test()
# Explicitly on.
if 'Terminal Server Aware' not in GetHeaders('test_tsaware_yes.exe'):
test.fail_test()
test.pass_test()
|
VaneCloud/horizon | refs/heads/stable/kilo | openstack_dashboard/dashboards/project/images/panel.py | 46 | # Copyright 2012 Nebula, Inc.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.project import dashboard
class Images(horizon.Panel):
name = _("Images")
slug = 'images'
permissions = ('openstack.services.image',)
dashboard.Project.register(Images)
|
mhils/countershape | refs/heads/master | countershape/analysis.py | 2 | import sys
import os
from . import blog
def nicepath(b):
cwdl = len(os.getcwd())
return "." + b.src[cwdl:]
def blog_tags(d, fp=sys.stdout):
b = blog.find_blog(d)
histogram = dict()
for i in b.blogdir.sortedPosts():
for t in i.tags:
histogram[t] = histogram.get(t, 0) + 1
vals = [(v, k) for (k, v) in histogram.items()]
vals.sort(reverse=True)
for i in vals:
print >> fp, "%5i"%i[0], i[1]
def blog_notags(d, fp=sys.stdout):
b = blog.find_blog(d)
for i in b.blogdir.sortedPosts():
if not i.tags:
print >> fp, nicepath(i)
def blog_has_option(d, option, fp=sys.stdout):
cwdl = len(os.getcwd())
b = blog.find_blog(d)
for i in b.blogdir.sortedPosts():
if option in i.options:
print >> fp, nicepath(i)
def blog_has_no_option(d, option, fp=sys.stdout):
cwdl = len(os.getcwd())
b = blog.find_blog(d)
for i in b.blogdir.sortedPosts():
if option not in i.options:
print >> fp, nicepath(i)
|
knehez/edx-platform | refs/heads/memooc | common/djangoapps/third_party_auth/tests/specs/test_testshib.py | 48 | """
Third_party_auth integration tests using a mock version of the TestShib provider
"""
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
import httpretty
from mock import patch
from student.tests.factories import UserFactory
from third_party_auth.tasks import fetch_saml_metadata
from third_party_auth.tests import testutil
import unittest
TESTSHIB_ENTITY_ID = 'https://idp.testshib.org/idp/shibboleth'
TESTSHIB_METADATA_URL = 'https://mock.testshib.org/metadata/testshib-providers.xml'
TESTSHIB_SSO_URL = 'https://idp.testshib.org/idp/profile/SAML2/Redirect/SSO'
TPA_TESTSHIB_LOGIN_URL = '/auth/login/tpa-saml/?auth_entry=login&next=%2Fdashboard&idp=testshib'
TPA_TESTSHIB_REGISTER_URL = '/auth/login/tpa-saml/?auth_entry=register&next=%2Fdashboard&idp=testshib'
TPA_TESTSHIB_COMPLETE_URL = '/auth/complete/tpa-saml/'
@unittest.skipUnless(testutil.AUTH_FEATURE_ENABLED, 'third_party_auth not enabled')
class TestShibIntegrationTest(testutil.SAMLTestCase):
"""
TestShib provider Integration Test, to test SAML functionality
"""
def setUp(self):
super(TestShibIntegrationTest, self).setUp()
self.login_page_url = reverse('signin_user')
self.register_page_url = reverse('register_user')
self.enable_saml(
private_key=self._get_private_key(),
public_key=self._get_public_key(),
entity_id="https://saml.example.none",
)
# Mock out HTTP requests that may be made to TestShib:
httpretty.enable()
def metadata_callback(_request, _uri, headers):
""" Return a cached copy of TestShib's metadata by reading it from disk """
return (200, headers, self._read_data_file('testshib_metadata.xml'))
httpretty.register_uri(httpretty.GET, TESTSHIB_METADATA_URL, content_type='text/xml', body=metadata_callback)
self.addCleanup(httpretty.disable)
self.addCleanup(httpretty.reset)
# Configure the SAML library to use the same request ID for every request.
# Doing this and freezing the time allows us to play back recorded request/response pairs
uid_patch = patch('onelogin.saml2.utils.OneLogin_Saml2_Utils.generate_unique_id', return_value='TESTID')
uid_patch.start()
self.addCleanup(uid_patch.stop)
def test_login_before_metadata_fetched(self):
self._configure_testshib_provider(fetch_metadata=False)
# The user goes to the login page, and sees a button to login with TestShib:
self._check_login_page()
# The user clicks on the TestShib button:
try_login_response = self.client.get(TPA_TESTSHIB_LOGIN_URL)
# The user should be redirected to back to the login page:
self.assertEqual(try_login_response.status_code, 302)
self.assertEqual(try_login_response['Location'], self.url_prefix + self.login_page_url)
# When loading the login page, the user will see an error message:
response = self.client.get(self.login_page_url)
self.assertEqual(response.status_code, 200)
self.assertIn('Authentication with TestShib is currently unavailable.', response.content)
def test_register(self):
self._configure_testshib_provider()
self._freeze_time(timestamp=1434326820) # This is the time when the saved request/response was recorded.
# The user goes to the register page, and sees a button to register with TestShib:
self._check_register_page()
# The user clicks on the TestShib button:
try_login_response = self.client.get(TPA_TESTSHIB_REGISTER_URL)
# The user should be redirected to TestShib:
self.assertEqual(try_login_response.status_code, 302)
self.assertTrue(try_login_response['Location'].startswith(TESTSHIB_SSO_URL))
# Now the user will authenticate with the SAML provider
testshib_response = self._fake_testshib_login_and_return()
# We should be redirected to the register screen since this account is not linked to an edX account:
self.assertEqual(testshib_response.status_code, 302)
self.assertEqual(testshib_response['Location'], self.url_prefix + self.register_page_url)
register_response = self.client.get(self.register_page_url)
# We'd now like to see if the "You've successfully signed into TestShib" message is
# shown, but it's managed by a JavaScript runtime template, and we can't run JS in this
# type of test, so we just check for the variable that triggers that message.
self.assertIn('"currentProvider": "TestShib"', register_response.content)
self.assertIn('"errorMessage": null', register_response.content)
# Now do a crude check that the data (e.g. email) from the provider is displayed in the form:
self.assertIn('"defaultValue": "myself@testshib.org"', register_response.content)
self.assertIn('"defaultValue": "Me Myself And I"', register_response.content)
# Now complete the form:
ajax_register_response = self.client.post(
reverse('user_api_registration'),
{
'email': 'myself@testshib.org',
'name': 'Myself',
'username': 'myself',
'honor_code': True,
}
)
self.assertEqual(ajax_register_response.status_code, 200)
# Then the AJAX will finish the third party auth:
continue_response = self.client.get(TPA_TESTSHIB_COMPLETE_URL)
# And we should be redirected to the dashboard:
self.assertEqual(continue_response.status_code, 302)
self.assertEqual(continue_response['Location'], self.url_prefix + reverse('dashboard'))
# Now check that we can login again:
self.client.logout()
self._verify_user_email('myself@testshib.org')
self._test_return_login()
def test_login(self):
self._configure_testshib_provider()
self._freeze_time(timestamp=1434326820) # This is the time when the saved request/response was recorded.
user = UserFactory.create()
# The user goes to the login page, and sees a button to login with TestShib:
self._check_login_page()
# The user clicks on the TestShib button:
try_login_response = self.client.get(TPA_TESTSHIB_LOGIN_URL)
# The user should be redirected to TestShib:
self.assertEqual(try_login_response.status_code, 302)
self.assertTrue(try_login_response['Location'].startswith(TESTSHIB_SSO_URL))
# Now the user will authenticate with the SAML provider
testshib_response = self._fake_testshib_login_and_return()
# We should be redirected to the login screen since this account is not linked to an edX account:
self.assertEqual(testshib_response.status_code, 302)
self.assertEqual(testshib_response['Location'], self.url_prefix + self.login_page_url)
login_response = self.client.get(self.login_page_url)
# We'd now like to see if the "You've successfully signed into TestShib" message is
# shown, but it's managed by a JavaScript runtime template, and we can't run JS in this
# type of test, so we just check for the variable that triggers that message.
self.assertIn('"currentProvider": "TestShib"', login_response.content)
self.assertIn('"errorMessage": null', login_response.content)
# Now the user enters their username and password.
# The AJAX on the page will log them in:
ajax_login_response = self.client.post(
reverse('user_api_login_session'),
{'email': user.email, 'password': 'test'}
)
self.assertEqual(ajax_login_response.status_code, 200)
# Then the AJAX will finish the third party auth:
continue_response = self.client.get(TPA_TESTSHIB_COMPLETE_URL)
# And we should be redirected to the dashboard:
self.assertEqual(continue_response.status_code, 302)
self.assertEqual(continue_response['Location'], self.url_prefix + reverse('dashboard'))
# Now check that we can login again:
self.client.logout()
self._test_return_login()
def _test_return_login(self):
""" Test logging in to an account that is already linked. """
# Make sure we're not logged in:
dashboard_response = self.client.get(reverse('dashboard'))
self.assertEqual(dashboard_response.status_code, 302)
# The user goes to the login page, and sees a button to login with TestShib:
self._check_login_page()
# The user clicks on the TestShib button:
try_login_response = self.client.get(TPA_TESTSHIB_LOGIN_URL)
# The user should be redirected to TestShib:
self.assertEqual(try_login_response.status_code, 302)
self.assertTrue(try_login_response['Location'].startswith(TESTSHIB_SSO_URL))
# Now the user will authenticate with the SAML provider
login_response = self._fake_testshib_login_and_return()
# There will be one weird redirect required to set the login cookie:
self.assertEqual(login_response.status_code, 302)
self.assertEqual(login_response['Location'], self.url_prefix + TPA_TESTSHIB_COMPLETE_URL)
# And then we should be redirected to the dashboard:
login_response = self.client.get(TPA_TESTSHIB_COMPLETE_URL)
self.assertEqual(login_response.status_code, 302)
self.assertEqual(login_response['Location'], self.url_prefix + reverse('dashboard'))
# Now we are logged in:
dashboard_response = self.client.get(reverse('dashboard'))
self.assertEqual(dashboard_response.status_code, 200)
def _freeze_time(self, timestamp):
""" Mock the current time for SAML, so we can replay canned requests/responses """
now_patch = patch('onelogin.saml2.utils.OneLogin_Saml2_Utils.now', return_value=timestamp)
now_patch.start()
self.addCleanup(now_patch.stop)
def _check_login_page(self):
""" Load the login form and check that it contains a TestShib button """
response = self.client.get(self.login_page_url)
self.assertEqual(response.status_code, 200)
self.assertIn("TestShib", response.content)
self.assertIn(TPA_TESTSHIB_LOGIN_URL.replace('&', '&'), response.content)
return response
def _check_register_page(self):
""" Load the login form and check that it contains a TestShib button """
response = self.client.get(self.register_page_url)
self.assertEqual(response.status_code, 200)
self.assertIn("TestShib", response.content)
self.assertIn(TPA_TESTSHIB_REGISTER_URL.replace('&', '&'), response.content)
return response
def _configure_testshib_provider(self, **kwargs):
""" Enable and configure the TestShib SAML IdP as a third_party_auth provider """
fetch_metadata = kwargs.pop('fetch_metadata', True)
kwargs.setdefault('name', 'TestShib')
kwargs.setdefault('enabled', True)
kwargs.setdefault('idp_slug', 'testshib')
kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)
kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)
kwargs.setdefault('icon_class', 'fa-university')
kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName
self.configure_saml_provider(**kwargs)
if fetch_metadata:
self.assertTrue(httpretty.is_enabled())
num_changed, num_failed, num_total = fetch_saml_metadata()
self.assertEqual(num_failed, 0)
self.assertEqual(num_changed, 1)
self.assertEqual(num_total, 1)
def _fake_testshib_login_and_return(self):
""" Mocked: the user logs in to TestShib and then gets redirected back """
# The SAML provider (TestShib) will authenticate the user, then get the browser to POST a response:
return self.client.post(
TPA_TESTSHIB_COMPLETE_URL,
content_type='application/x-www-form-urlencoded',
data=self._read_data_file('testshib_response.txt'),
)
def _verify_user_email(self, email):
""" Mark the user with the given email as verified """
user = User.objects.get(email=email)
user.is_active = True
user.save()
|
guoyu07/metagoofil | refs/heads/master | hachoir_parser/video/flv.py | 95 | """
FLV video parser.
Documentation:
- FLV File format: http://osflash.org/flv
- libavformat from ffmpeg project
- flashticle: Python project to read Flash (SWF and FLV with AMF metadata)
http://undefined.org/python/#flashticle
Author: Victor Stinner
Creation date: 4 November 2006
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet,
UInt8, UInt24, UInt32, NullBits, NullBytes,
Bit, Bits, String, RawBytes, Enum)
from hachoir_core.endian import BIG_ENDIAN
from hachoir_parser.audio.mpeg_audio import Frame
from hachoir_parser.video.amf import AMFObject
from hachoir_core.tools import createDict
SAMPLING_RATE = {
0: ( 5512, "5.5 kHz"),
1: (11025, "11 kHz"),
2: (22050, "22.1 kHz"),
3: (44100, "44.1 kHz"),
}
SAMPLING_RATE_VALUE = createDict(SAMPLING_RATE, 0)
SAMPLING_RATE_TEXT = createDict(SAMPLING_RATE, 1)
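# createDict(d, index) projects one column of the (value, text) pairs above,
# so SAMPLING_RATE_VALUE maps id -> rate in Hz (e.g. 3 -> 44100) and
# SAMPLING_RATE_TEXT maps id -> label (e.g. 3 -> "44.1 kHz").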
AUDIO_CODEC_MP3 = 2
AUDIO_CODEC_NAME = {
0: u"Uncompressed",
1: u"ADPCM",
2: u"MP3",
5: u"Nellymoser 8kHz mono",
6: u"Nellymoser",
}
VIDEO_CODEC_NAME = {
2: u"Sorensen H.263",
3: u"Screen video",
4: u"On2 VP6",
}
FRAME_TYPE = {
1: u"keyframe",
2: u"inter frame",
3: u"disposable inter frame",
}
class Header(FieldSet):
def createFields(self):
yield String(self, "signature", 3, "FLV format signature", charset="ASCII")
yield UInt8(self, "version")
yield NullBits(self, "reserved[]", 5)
yield Bit(self, "type_flags_audio")
yield NullBits(self, "reserved[]", 1)
yield Bit(self, "type_flags_video")
yield UInt32(self, "data_offset")
def parseAudio(parent, size):
yield Enum(Bits(parent, "codec", 4, "Audio codec"), AUDIO_CODEC_NAME)
yield Enum(Bits(parent, "sampling_rate", 2, "Sampling rate"), SAMPLING_RATE_TEXT)
yield Bit(parent, "is_16bit", "16-bit or 8-bit per sample")
yield Bit(parent, "is_stereo", "Stereo or mono channel")
size -= 1
if 0 < size:
if parent["codec"].value == AUDIO_CODEC_MP3 :
yield Frame(parent, "music_data", size=size*8)
else:
yield RawBytes(parent, "music_data", size)
def parseVideo(parent, size):
yield Enum(Bits(parent, "frame_type", 4, "Frame type"), FRAME_TYPE)
yield Enum(Bits(parent, "codec", 4, "Video codec"), VIDEO_CODEC_NAME)
if 1 < size:
yield RawBytes(parent, "data", size-1)
def parseAMF(parent, size):
while parent.current_size < parent.size:
yield AMFObject(parent, "entry[]")
class Chunk(FieldSet):
tag_info = {
8: ("audio[]", parseAudio, ""),
9: ("video[]", parseVideo, ""),
18: ("metadata", parseAMF, ""),
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
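# Total chunk length: an 11-byte header (1-byte tag + 3-byte size +
# 3-byte timestamp + 4 reserved bytes, see createFields below) plus the
# declared content size, converted to bits as hachoir expects.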
self._size = (11 + self["size"].value) * 8
tag = self["tag"].value
if tag in self.tag_info:
self._name, self.parser, self._description = self.tag_info[tag]
else:
self.parser = None
def createFields(self):
yield UInt8(self, "tag")
yield UInt24(self, "size", "Content size")
yield UInt24(self, "timestamp", "Timestamp in millisecond")
yield NullBytes(self, "reserved", 4)
size = self["size"].value
if size:
if self.parser:
for field in self.parser(self, size):
yield field
else:
yield RawBytes(self, "content", size)
def getSampleRate(self):
try:
return SAMPLING_RATE_VALUE[self["sampling_rate"].value]
except LookupError:
return None
class FlvFile(Parser):
PARSER_TAGS = {
"id": "flv",
"category": "video",
"file_ext": ("flv",),
"mime": (u"video/x-flv",),
"min_size": 9*4,
"magic": (
# Signature, version=1, flags=5 (video+audio), header size=9
("FLV\1\x05\0\0\0\x09", 0),
# Signature, version=1, flags=1 (video), header size=9
("FLV\1\x01\0\0\0\x09", 0),
),
"description": u"Macromedia Flash video"
}
endian = BIG_ENDIAN
def validate(self):
if self.stream.readBytes(0, 3) != "FLV":
return "Wrong file signature"
if self["header/data_offset"].value != 9:
return "Unknown data offset in main header"
return True
def createFields(self):
yield Header(self, "header")
yield UInt32(self, "prev_size[]", "Size of previous chunk")
while not self.eof:
yield Chunk(self, "chunk[]")
yield UInt32(self, "prev_size[]", "Size of previous chunk")
def createDescription(self):
return u"Macromedia Flash video version %s" % self["header/version"].value
|
team-mayes/md_utils | refs/heads/master | tests/test_wham_block.py | 2 | # coding=utf-8
"""
Tests for wham_block.
"""
import filecmp
import inspect
import logging
import unittest
import tempfile
import shutil
import os
import md_utils
from md_utils.wham import DEF_BASE_SUBMIT_TPL
from md_utils.wham import DEF_LINE_SUBMIT_TPL
from md_utils.wham_block import (pair_avg, rmsd_avg, block_average, main)
from tests.test_wham import (META_PATH, EVEN_DATA, ODD_DATA,
ODD_KEY, EVEN_KEY)
__author__ = 'cmayes'
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Directories #
DATA_DIR = os.path.join(os.path.dirname(__file__), 'test_data')
SUB_DATA_DIR = os.path.join(DATA_DIR, 'wham_test_data')
GOOD_OUT_DIR = os.path.join(SUB_DATA_DIR, 'good_block_out')
# The directory of the md_utils base package
MD_UTILS_BASE = os.path.dirname(inspect.getfile(md_utils))
SKEL_LOC = os.path.join(MD_UTILS_BASE, "skel")
TPL_LOC = os.path.join(SKEL_LOC, "tpl")
SUB_WHAM_BASE_TPL = os.path.join(TPL_LOC, DEF_BASE_SUBMIT_TPL)
SUB_WHAM_LINE_TPL = os.path.join(TPL_LOC, DEF_LINE_SUBMIT_TPL)
EVEN_PAIR_AVG = [1.2311619999999999, 1.220716, 1.2131370000000001,
1.1924375, 1.262987]
ODD_PAIR_AVG = [1.2474835, 1.2110025, 1.2557155, 1.1911665, 1.228554,
1.2435805, 1.25972]
TEST_RMSD = {EVEN_KEY: EVEN_DATA, ODD_KEY: ODD_DATA}
# Tests #
class TestAveraging(unittest.TestCase):
def testEvenPairs(self):
avg = pair_avg(EVEN_DATA)
self.assertAlmostEqual(EVEN_PAIR_AVG, avg)
def testOddPairs(self):
avg = pair_avg(ODD_DATA)
self.assertAlmostEqual(ODD_PAIR_AVG, avg)
class RmsdAverage(unittest.TestCase):
def testRmsdAveragePair(self):
avg = rmsd_avg(TEST_RMSD, pair_avg)
self.assertAlmostEqual(EVEN_PAIR_AVG, avg[EVEN_KEY])
self.assertAlmostEqual(ODD_PAIR_AVG, avg[ODD_KEY])
class BlockAverage(unittest.TestCase):
def testBlockAverage(self):
directory_name = None
try:
directory_name = tempfile.mkdtemp()
block_average(META_PATH, 12, tpl_dir=TPL_LOC, base_dir=directory_name)
dir_cmp = (filecmp.dircmp(directory_name, GOOD_OUT_DIR))
self.assertFalse(dir_cmp.diff_files)
finally:
shutil.rmtree(directory_name)
class TestMain(unittest.TestCase):
def testDefaults(self):
main([]) |
vlinhd11/vlinhd11-android-scripting | refs/heads/master | python-build/python-libs/ase/scripts/say_chat.py | 40 | """Say chat messages aloud as they are received."""
__author__ = 'Damon Kohler <damonkohler@gmail.com>'
__copyright__ = 'Copyright (c) 2009, Google Inc.'
__license__ = 'Apache License, Version 2.0'
import android
import xmpp
_SERVER = 'talk.google.com', 5223
def log(droid, message):
print message
droid.ttsSpeak(message)
class SayChat(object):
def __init__(self):
self.droid = android.Android()
username = self.droid.getInput('Username').result
password = self.droid.getInput('Password').result
jid = xmpp.protocol.JID(username)
self.client = xmpp.Client(jid.getDomain(), debug=[])
self.client.connect(server=_SERVER)
self.client.RegisterHandler('message', self.message_cb)
if not self.client:
log(self.droid, 'Connection failed!')
return
auth = self.client.auth(jid.getNode(), password, 'botty')
if not auth:
log(self.droid, 'Authentication failed!')
return
self.client.sendInitPresence()
def message_cb(self, session, message):
jid = xmpp.protocol.JID(message.getFrom())
username = jid.getNode()
text = message.getBody()
self.droid.ttsSpeak('%s says %s' % (username, text))
def run(self):
try:
while True:
self.client.Process(1)
except KeyboardInterrupt:
pass
saychat = SayChat()
saychat.run()
|
Gentux/imap-cli | refs/heads/master | imap_cli/const.py | 1 | # -*- coding: utf-8 -*-
"""Constant used by Imap_CLI packages """
# IMAP CLI version number
VERSION = 0.7
# IMAP Constant
#
# All those value are documented in RFC 3501
# http://tools.ietf.org/html/rfc3501#section-2.3.2
DEFAULT_DIRECTORY = 'INBOX'
DEFAULT_PORT = 143
DEFAULT_SSL_PORT = 993
STATUS_OK = 'OK'
IMAP_SPECIAL_FLAGS = [
'ANSWERED',
'DELETED',
'DRAFT',
'FLAGGED',
'RECENT',
'SEEN',
'UNSEEN',
]
FLAG_DELETED = r'\Deleted'
FLAG_SEEN = r'\Seen'
FLAG_ANSWERED = r'\Answered'
FLAG_FLAGGED = r'\Flagged'
FLAG_DRAFT = r'\Draft'
FLAG_RECENT = r'\Recent'
MESSAGE_PARTS = [
'BODY',
'BODYSTRUCTURE',
'ENVELOPE',
'FLAGS',
'INTERNALDATE',
'RFC822',
# NOTE(gentux) Functionally equivalent to BODY[], differing in the syntax
# of the resulting untagged FETCH data.
'RFC822.HEADER',
'RFC822.SIZE',
'RFC822.TEXT',
'UID',
]
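# Illustrative RFC 3501 FETCH using some of the parts above:
#   a001 FETCH 1 (FLAGS RFC822.SIZE BODY)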
SEARCH_CRITERION = [
'ALL',
'ANSWERED',
'BCC <string>',
'BEFORE <date>',
'BODY <string>',
'CC <string>',
'DELETED',
'DRAFT',
'FLAGGED',
'FROM <string>',
'HEADER <field-name> <string>',
'KEYWORD <flag>',
'LARGER <n>',
'NEW',
'NOT <search-key>',
'OLD',
'ON <date>',
'OR <search-key1> <search-key2>',
'RECENT',
'SEEN',
'SENTBEFORE <date>',
'SENTON <date>',
'SENTSINCE <date>',
'SINCE <date>',
'SMALLER <n>',
'SUBJECT <string>',
'TEXT <string>',
'TO <string>',
'UID <sequence set>',
'UNANSWERED',
'UNDELETED',
'UNDRAFT',
'UNFLAGGED',
'UNKEYWORD <flag>',
'UNSEEN',
]
# This SASL XOAUTH2 initial client response is documented in the Gmail API
# https://developers.google.com/gmail/imap/xoauth2-protocol#initial_client_response
SASL_XOAUTH2_IR = 'user={}\x01auth=Bearer {}\x01\x01'
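# Illustrative use (the token variable is assumed): the formatted string is
# sent, base64-encoded, as the initial response of AUTHENTICATE XOAUTH2, e.g.
#   SASL_XOAUTH2_IR.format('user@example.com', access_token)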
# CLI Constant
DEFAULT_CONFIG_FILE = '~/.config/imap-cli'
|
Jorge-Rodriguez/ansible | refs/heads/devel | lib/ansible/modules/network/fortios/fortios_firewall_proxy_policy.py | 24 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# The library uses Python logging; its output can be captured if the
# following is set in your Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_proxy_policy
short_description: Configure proxy policies in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure the firewall feature and proxy_policy category.
Examples include all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
firewall_proxy_policy:
description:
- Configure proxy policies.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
action:
description:
- Accept or deny traffic matching the policy parameters.
choices:
- accept
- deny
- redirect
application-list:
description:
- Name of an existing Application list. Source application.list.name.
av-profile:
description:
- Name of an existing Antivirus profile. Source antivirus.profile.name.
comments:
description:
- Optional comments.
disclaimer:
description:
- "Web proxy disclaimer setting: by domain, policy, or user."
choices:
- disable
- domain
- policy
- user
dlp-sensor:
description:
- Name of an existing DLP sensor. Source dlp.sensor.name.
dstaddr:
description:
- Destination address objects.
suboptions:
name:
description:
- Address name. Source firewall.address.name firewall.addrgrp.name firewall.proxy-address.name firewall.proxy-addrgrp.name
firewall.vip.name firewall.vipgrp.name firewall.vip46.name firewall.vipgrp46.name system.external-resource.name.
required: true
dstaddr-negate:
description:
- When enabled, destination addresses match against any address EXCEPT the specified destination addresses.
choices:
- enable
- disable
dstaddr6:
description:
- IPv6 destination address objects.
suboptions:
name:
description:
- Address name. Source firewall.address6.name firewall.addrgrp6.name firewall.vip6.name firewall.vipgrp6.name firewall.vip64.name
firewall.vipgrp64.name system.external-resource.name.
required: true
dstintf:
description:
- Destination interface names.
suboptions:
name:
description:
- Interface name. Source system.interface.name system.zone.name.
required: true
global-label:
description:
- Global web-based manager visible label.
groups:
description:
- Names of group objects.
suboptions:
name:
description:
- Group name. Source user.group.name.
required: true
http-tunnel-auth:
description:
- Enable/disable HTTP tunnel authentication.
choices:
- enable
- disable
icap-profile:
description:
- Name of an existing ICAP profile. Source icap.profile.name.
internet-service:
description:
- Enable/disable use of Internet Services for this policy. If enabled, destination address and service are not used.
choices:
- enable
- disable
internet-service-custom:
description:
- Custom Internet Service name.
suboptions:
name:
description:
- Custom name. Source firewall.internet-service-custom.name.
required: true
internet-service-id:
description:
- Internet Service ID.
suboptions:
id:
description:
- Internet Service ID. Source firewall.internet-service.id.
required: true
internet-service-negate:
description:
- When enabled, Internet Services match against any internet service EXCEPT the selected Internet Service.
choices:
- enable
- disable
ips-sensor:
description:
- Name of an existing IPS sensor. Source ips.sensor.name.
label:
description:
- VDOM-specific GUI visible label.
logtraffic:
description:
- Enable/disable logging traffic through the policy.
choices:
- all
- utm
- disable
logtraffic-start:
description:
- Enable/disable policy log traffic start.
choices:
- enable
- disable
policyid:
description:
- Policy ID.
required: true
poolname:
description:
- Name of IP pool object.
suboptions:
name:
description:
- IP pool name. Source firewall.ippool.name.
required: true
profile-group:
description:
- Name of profile group. Source firewall.profile-group.name.
profile-protocol-options:
description:
- Name of an existing Protocol options profile. Source firewall.profile-protocol-options.name.
profile-type:
description:
- Determine whether the firewall policy allows security profile groups or single profiles only.
choices:
- single
- group
proxy:
description:
- Type of explicit proxy.
choices:
- explicit-web
- transparent-web
- ftp
- ssh
- ssh-tunnel
- wanopt
redirect-url:
description:
- Redirect URL for further explicit web proxy processing.
replacemsg-override-group:
description:
- Authentication replacement message override group. Source system.replacemsg-group.name.
scan-botnet-connections:
description:
- Enable/disable scanning of connections to Botnet servers.
choices:
- disable
- block
- monitor
schedule:
description:
- Name of schedule object. Source firewall.schedule.onetime.name firewall.schedule.recurring.name firewall.schedule.group.name.
service:
description:
- Name of service objects.
suboptions:
name:
description:
- Service name. Source firewall.service.custom.name firewall.service.group.name.
required: true
service-negate:
description:
- When enabled, services match against any service EXCEPT the specified destination services.
choices:
- enable
- disable
session-ttl:
description:
- TTL in seconds for sessions accepted by this policy (0 means use the system default session TTL).
spamfilter-profile:
description:
- Name of an existing Spam filter profile. Source spamfilter.profile.name.
srcaddr:
description:
- Source address objects (must be set when using Web proxy).
suboptions:
name:
description:
- Address name. Source firewall.address.name firewall.addrgrp.name firewall.proxy-address.name firewall.proxy-addrgrp.name system
.external-resource.name.
required: true
srcaddr-negate:
description:
- When enabled, source addresses match against any address EXCEPT the specified source addresses.
choices:
- enable
- disable
srcaddr6:
description:
- IPv6 source address objects.
suboptions:
name:
description:
- Address name. Source firewall.address6.name firewall.addrgrp6.name system.external-resource.name.
required: true
srcintf:
description:
- Source interface names.
suboptions:
name:
description:
- Interface name. Source system.interface.name system.zone.name.
required: true
ssh-filter-profile:
description:
- Name of an existing SSH filter profile. Source ssh-filter.profile.name.
ssl-ssh-profile:
description:
- Name of an existing SSL SSH profile. Source firewall.ssl-ssh-profile.name.
status:
description:
- Enable/disable the active status of the policy.
choices:
- enable
- disable
transparent:
description:
- Enable to use the IP address of the client to connect to the server.
choices:
- enable
- disable
users:
description:
- Names of user objects.
suboptions:
name:
description:
- Group name. Source user.local.name.
required: true
utm-status:
description:
- Enable the use of UTM profiles/sensors/lists.
choices:
- enable
- disable
uuid:
description:
- Universally Unique Identifier (UUID; automatically assigned but can be manually reset).
waf-profile:
description:
- Name of an existing Web application firewall profile. Source waf.profile.name.
webcache:
description:
- Enable/disable web caching.
choices:
- enable
- disable
webcache-https:
description:
- Enable/disable web caching for HTTPS (Requires deep-inspection enabled in ssl-ssh-profile).
choices:
- disable
- enable
webfilter-profile:
description:
- Name of an existing Web filter profile. Source webfilter.profile.name.
webproxy-forward-server:
description:
- Name of web proxy forward server. Source web-proxy.forward-server.name web-proxy.forward-server-group.name.
webproxy-profile:
description:
- Name of web proxy profile. Source web-proxy.profile.name.
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure proxy policies.
fortios_firewall_proxy_policy:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
firewall_proxy_policy:
state: "present"
action: "accept"
application-list: "<your_own_value> (source application.list.name)"
av-profile: "<your_own_value> (source antivirus.profile.name)"
comments: "<your_own_value>"
disclaimer: "disable"
dlp-sensor: "<your_own_value> (source dlp.sensor.name)"
dstaddr:
-
name: "default_name_10 (source firewall.address.name firewall.addrgrp.name firewall.proxy-address.name firewall.proxy-addrgrp.name firewall.vip
.name firewall.vipgrp.name firewall.vip46.name firewall.vipgrp46.name system.external-resource.name)"
dstaddr-negate: "enable"
dstaddr6:
-
name: "default_name_13 (source firewall.address6.name firewall.addrgrp6.name firewall.vip6.name firewall.vipgrp6.name firewall.vip64.name firewall
.vipgrp64.name system.external-resource.name)"
dstintf:
-
name: "default_name_15 (source system.interface.name system.zone.name)"
global-label: "<your_own_value>"
groups:
-
name: "default_name_18 (source user.group.name)"
http-tunnel-auth: "enable"
icap-profile: "<your_own_value> (source icap.profile.name)"
internet-service: "enable"
internet-service-custom:
-
name: "default_name_23 (source firewall.internet-service-custom.name)"
internet-service-id:
-
id: "25 (source firewall.internet-service.id)"
internet-service-negate: "enable"
ips-sensor: "<your_own_value> (source ips.sensor.name)"
label: "<your_own_value>"
logtraffic: "all"
logtraffic-start: "enable"
policyid: "31"
poolname:
-
name: "default_name_33 (source firewall.ippool.name)"
profile-group: "<your_own_value> (source firewall.profile-group.name)"
profile-protocol-options: "<your_own_value> (source firewall.profile-protocol-options.name)"
profile-type: "single"
proxy: "explicit-web"
redirect-url: "<your_own_value>"
replacemsg-override-group: "<your_own_value> (source system.replacemsg-group.name)"
scan-botnet-connections: "disable"
schedule: "<your_own_value> (source firewall.schedule.onetime.name firewall.schedule.recurring.name firewall.schedule.group.name)"
service:
-
name: "default_name_43 (source firewall.service.custom.name firewall.service.group.name)"
service-negate: "enable"
session-ttl: "45"
spamfilter-profile: "<your_own_value> (source spamfilter.profile.name)"
srcaddr:
-
name: "default_name_48 (source firewall.address.name firewall.addrgrp.name firewall.proxy-address.name firewall.proxy-addrgrp.name system
.external-resource.name)"
srcaddr-negate: "enable"
srcaddr6:
-
name: "default_name_51 (source firewall.address6.name firewall.addrgrp6.name system.external-resource.name)"
srcintf:
-
name: "default_name_53 (source system.interface.name system.zone.name)"
ssh-filter-profile: "<your_own_value> (source ssh-filter.profile.name)"
ssl-ssh-profile: "<your_own_value> (source firewall.ssl-ssh-profile.name)"
status: "enable"
transparent: "enable"
users:
-
name: "default_name_59 (source user.local.name)"
utm-status: "enable"
uuid: "<your_own_value>"
waf-profile: "<your_own_value> (source waf.profile.name)"
webcache: "enable"
webcache-https: "disable"
webfilter-profile: "<your_own_value> (source webfilter.profile.name)"
webproxy-forward-server: "<your_own_value> (source web-proxy.forward-server.name web-proxy.forward-server-group.name)"
webproxy-profile: "<your_own_value> (source web-proxy.profile.name)"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_firewall_proxy_policy_data(json):
option_list = ['action', 'application-list', 'av-profile',
'comments', 'disclaimer', 'dlp-sensor',
'dstaddr', 'dstaddr-negate', 'dstaddr6',
'dstintf', 'global-label', 'groups',
'http-tunnel-auth', 'icap-profile', 'internet-service',
'internet-service-custom', 'internet-service-id', 'internet-service-negate',
'ips-sensor', 'label', 'logtraffic',
'logtraffic-start', 'policyid', 'poolname',
'profile-group', 'profile-protocol-options', 'profile-type',
'proxy', 'redirect-url', 'replacemsg-override-group',
'scan-botnet-connections', 'schedule', 'service',
'service-negate', 'session-ttl', 'spamfilter-profile',
'srcaddr', 'srcaddr-negate', 'srcaddr6',
'srcintf', 'ssh-filter-profile', 'ssl-ssh-profile',
'status', 'transparent', 'users',
'utm-status', 'uuid', 'waf-profile',
'webcache', 'webcache-https', 'webfilter-profile',
'webproxy-forward-server', 'webproxy-profile']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def firewall_proxy_policy(data, fos):
vdom = data['vdom']
firewall_proxy_policy_data = data['firewall_proxy_policy']
filtered_data = filter_firewall_proxy_policy_data(firewall_proxy_policy_data)
if firewall_proxy_policy_data['state'] == "present":
return fos.set('firewall',
'proxy-policy',
data=filtered_data,
vdom=vdom)
elif firewall_proxy_policy_data['state'] == "absent":
return fos.delete('firewall',
'proxy-policy',
mkey=filtered_data['policyid'],
vdom=vdom)
def fortios_firewall(data, fos):
login(data)
methodlist = ['firewall_proxy_policy']
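# Dispatch: call the first configuration handler (looked up by name) for
# which the playbook supplied data; this module defines only
# firewall_proxy_policy.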
for method in methodlist:
if data[method]:
resp = eval(method)(data, fos)
break
fos.logout()
return resp['status'] != "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"firewall_proxy_policy": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"action": {"required": False, "type": "str",
"choices": ["accept", "deny", "redirect"]},
"application-list": {"required": False, "type": "str"},
"av-profile": {"required": False, "type": "str"},
"comments": {"required": False, "type": "str"},
"disclaimer": {"required": False, "type": "str",
"choices": ["disable", "domain", "policy",
"user"]},
"dlp-sensor": {"required": False, "type": "str"},
"dstaddr": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"dstaddr-negate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dstaddr6": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"dstintf": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"global-label": {"required": False, "type": "str"},
"groups": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"http-tunnel-auth": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"icap-profile": {"required": False, "type": "str"},
"internet-service": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"internet-service-custom": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"internet-service-id": {"required": False, "type": "list",
"options": {
"id": {"required": True, "type": "int"}
}},
"internet-service-negate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ips-sensor": {"required": False, "type": "str"},
"label": {"required": False, "type": "str"},
"logtraffic": {"required": False, "type": "str",
"choices": ["all", "utm", "disable"]},
"logtraffic-start": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"policyid": {"required": True, "type": "int"},
"poolname": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"profile-group": {"required": False, "type": "str"},
"profile-protocol-options": {"required": False, "type": "str"},
"profile-type": {"required": False, "type": "str",
"choices": ["single", "group"]},
"proxy": {"required": False, "type": "str",
"choices": ["explicit-web", "transparent-web", "ftp",
"ssh", "ssh-tunnel", "wanopt"]},
"redirect-url": {"required": False, "type": "str"},
"replacemsg-override-group": {"required": False, "type": "str"},
"scan-botnet-connections": {"required": False, "type": "str",
"choices": ["disable", "block", "monitor"]},
"schedule": {"required": False, "type": "str"},
"service": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"service-negate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"session-ttl": {"required": False, "type": "int"},
"spamfilter-profile": {"required": False, "type": "str"},
"srcaddr": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"srcaddr-negate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"srcaddr6": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"srcintf": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"ssh-filter-profile": {"required": False, "type": "str"},
"ssl-ssh-profile": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"transparent": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"users": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"utm-status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"uuid": {"required": False, "type": "str"},
"waf-profile": {"required": False, "type": "str"},
"webcache": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"webcache-https": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"webfilter-profile": {"required": False, "type": "str"},
"webproxy-forward-server": {"required": False, "type": "str"},
"webproxy-profile": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_firewall(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
dednal/chromium.src | refs/heads/nw12 | chrome/common/extensions/docs/server2/github_file_system_test.py | 97 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import unittest
from appengine_wrappers import files
from fake_fetchers import ConfigureFakeFetchers
from github_file_system import GithubFileSystem
from object_store_creator import ObjectStoreCreator
from test_util import Server2Path
class GithubFileSystemTest(unittest.TestCase):
def setUp(self):
ConfigureFakeFetchers()
self._base_path = Server2Path('test_data', 'github_file_system')
self._file_system = GithubFileSystem.CreateChromeAppsSamples(
ObjectStoreCreator.ForTest())
def _ReadLocalFile(self, filename):
with open(os.path.join(self._base_path, filename), 'r') as f:
return f.read()
def testList(self):
self.assertEqual(json.loads(self._ReadLocalFile('expected_list.json')),
self._file_system.Read(['']).Get())
def testRead(self):
self.assertEqual(self._ReadLocalFile('expected_read.txt'),
self._file_system.ReadSingle('analytics/launch.js').Get())
def testStat(self):
self.assertEqual(0, self._file_system.Stat('zipball').version)
def testKeyGeneration(self):
self.assertEqual(0, len(files.GetBlobKeys()))
self._file_system.ReadSingle('analytics/launch.js').Get()
self.assertEqual(1, len(files.GetBlobKeys()))
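# A second read should be served from the already-fetched blob,
# so no new blob key is created.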
self._file_system.ReadSingle('analytics/main.css').Get()
self.assertEqual(1, len(files.GetBlobKeys()))
if __name__ == '__main__':
unittest.main()
|
keedio/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/tests/template_tests/views.py | 206 | # Fake views for testing url reverse lookup
from django.http import HttpResponse
from django.template.response import TemplateResponse
def index(request):
pass
def client(request, id):
pass
def client_action(request, id, action):
pass
def client2(request, tag):
pass
def template_response_view(request):
return TemplateResponse(request, 'response.html', {})
def snark(request):
return HttpResponse('Found him!')
|
pwarren/AGDeviceControl | refs/heads/master | agdevicecontrol/thirdparty/site-packages/darwin/twisted/test/ssl_helpers.py | 81 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.internet import ssl
from twisted.python.util import sibpath
from OpenSSL import SSL
class ClientTLSContext(ssl.ClientContextFactory):
isClient = 1
def getContext(self):
return SSL.Context(SSL.TLSv1_METHOD)
class ServerTLSContext:
isClient = 0
def __init__(self, filename = sibpath(__file__, 'server.pem')):
self.filename = filename
def getContext(self):
ctx = SSL.Context(SSL.TLSv1_METHOD)
ctx.use_certificate_file(self.filename)
ctx.use_privatekey_file(self.filename)
return ctx
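# Typical test usage (illustrative): pass ClientTLSContext/ServerTLSContext
# instances as the contextFactory argument of reactor.connectSSL/listenSSL.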
|
FrenchFriesKetchup/p2pool | refs/heads/master | nattraverso/pynupnp/upnpxml.py | 288 | """
This module parse an UPnP device's XML definition in an Object.
@author: Raphael Slinckx
@copyright: Copyright 2005
@license: LGPL
@contact: U{raphael@slinckx.net<mailto:raphael@slinckx.net>}
@version: 0.1.0
"""
__revision__ = "$id"
from xml.dom import minidom
import logging
# Allowed UPnP services to use when mapping ports/external addresses
WANSERVICES = ['urn:schemas-upnp-org:service:WANIPConnection:1',
'urn:schemas-upnp-org:service:WANPPPConnection:1']
class UPnPXml:
"""
This object parses the XML definition and stores the useful
results in attributes.
The device infos dictionary may contain the following keys:
- friendlyname: A friendly name to call the device.
- manufacturer: A manufacturer name for the device.
Here are the different attributes:
- deviceinfos: A dictionary of device infos as defined above.
- controlurl: The control url, this is the url to use when sending SOAP
requests to the device, relative to the base url.
- wanservice: The WAN service to be used, one of the L{WANSERVICES}
- urlbase: The base url to use when talking in SOAP to the device.
The full url to use is obtained by urljoin(urlbase, controlurl)
"""
def __init__(self, xml):
"""
Parse the given XML string for UPnP infos. This creates the attributes
when they are found, or None if no value was found.
@param xml: a xml string to parse
"""
logging.debug("Got UPNP Xml description:\n%s", xml)
doc = minidom.parseString(xml)
# Fetch various device info
self.deviceinfos = {}
try:
attributes = {
'friendlyname':'friendlyName',
'manufacturer' : 'manufacturer'
}
device = doc.getElementsByTagName('device')[0]
for name, tag in attributes.iteritems():
try:
self.deviceinfos[name] = device.getElementsByTagName(
tag)[0].firstChild.data.encode('utf-8')
except:
pass
except:
pass
# Fetch device control url
self.controlurl = None
self.wanservice = None
for service in doc.getElementsByTagName('service'):
try:
stype = service.getElementsByTagName(
'serviceType')[0].firstChild.data.encode('utf-8')
if stype in WANSERVICES:
self.controlurl = service.getElementsByTagName(
'controlURL')[0].firstChild.data.encode('utf-8')
self.wanservice = stype
break
except:
pass
# Find base url
self.urlbase = None
try:
self.urlbase = doc.getElementsByTagName(
'URLBase')[0].firstChild.data.encode('utf-8')
except:
pass
|