repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
openclimatedata/pymagicc | refs/heads/master | scripts/test_install.py | 1 | """Test that all of our modules can be imported
Thanks https://stackoverflow.com/a/25562415/10473080
"""
import importlib
import pkgutil
import pymagicc
def import_submodules(package_name):
    """Recursively import every submodule of *package_name*.

    Any broken module raises ImportError here, which is exactly what this
    install test wants to detect.
    Thanks https://stackoverflow.com/a/25562415/10473080
    """
    root = importlib.import_module(package_name)
    for _finder, child_name, child_is_pkg in pkgutil.walk_packages(root.__path__):
        qualified_name = "{}.{}".format(root.__name__, child_name)
        importlib.import_module(qualified_name)
        if child_is_pkg:
            import_submodules(qualified_name)
# Importing every submodule verifies the whole installed package tree;
# a broken install fails loudly with ImportError.
import_submodules("pymagicc")
print(pymagicc.__version__)
|
cboling/SDNdbg | refs/heads/master | docs/old-stuff/pydzcvr/doc/neutron/services/loadbalancer/drivers/embrane/db.py | 7 | # Copyright 2014 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db import models_v2 as nmodel
from neutron.services.loadbalancer.drivers.embrane import models
def add_pool_port(context, pool_id, port_id):
    """Persist a new PoolPort row linking *pool_id* to *port_id*."""
    with context.session.begin(subtransactions=True):
        record = models.PoolPort()
        record.pool_id = pool_id
        record.port_id = port_id
        context.session.add(record)
def get_pool_port(context, pool_id):
    """Return the first PoolPort for *pool_id*, or None if none exists."""
    query = context.session.query(models.PoolPort)
    return query.filter_by(pool_id=pool_id).first()
def delete_pool_backend(context, pool_id):
    """Delete every PoolPort (and its neutron Port) attached to *pool_id*."""
    pool_ports = context.session.query(models.PoolPort).filter_by(
        pool_id=pool_id)
    for pool_port in pool_ports:
        delete_pool_port(context, pool_port)
def delete_pool_port(context, backend_port):
    """Delete *backend_port* together with its underlying neutron Port.

    If the neutron Port no longer exists, nothing is deleted.
    """
    session = context.session
    with session.begin(subtransactions=True):
        neutron_port = (session.query(nmodel.Port)
                        .filter_by(id=backend_port['port_id'])
                        .first())
        if neutron_port:
            session.delete(backend_port)
            session.delete(neutron_port)
|
dut3062796s/PTVS | refs/heads/master | Python/Tests/TestData/Grammar/MixedWhitespace3.py | 18 | import wpf
from System.Windows import Application, Window
from System.Windows import Annotations
import array
class MyWindow(Window):
    # WPF window backed by the XAML layout. NOTE: this file lives in
    # TestData/Grammar, so it exists to exercise the parser -- keep the
    # code tokens unchanged.
    def __init__(self):
        wpf.LoadComponent(self, 'WpfApplication45.xaml')
if __name__ == '__main__':
    # Entry point when run under IronPython/WPF.
    print('hello')
    Application().Run(MyWindow())
|
1013553207/django | refs/heads/master | django/utils/ipv6.py | 208 | # This code was mostly based on ipaddr-py
# Copyright 2007 Google Inc. https://github.com/google/ipaddr-py
# Licensed under the Apache License, Version 2.0 (the "License").
from django.core.exceptions import ValidationError
from django.utils.six.moves import range
from django.utils.translation import ugettext_lazy as _
def clean_ipv6_address(ip_str, unpack_ipv4=False,
                       error_message=_("This is not a valid IPv6 address.")):
    """
    Cleans an IPv6 address string.

    Validity is checked by calling is_valid_ipv6_address() - if an
    invalid address is passed, ValidationError is raised.

    Replaces the longest continuous zero-sequence with "::" and
    removes leading zeroes and makes sure all hextets are lowercase.

    Args:
        ip_str: A valid IPv6 address.
        unpack_ipv4: if an IPv4-mapped address is found,
            return the plain IPv4 address (default=False).
        error_message: An error message used in the ValidationError.

    Returns:
        A compressed IPv6 address, or the same value.

    Raises:
        ValidationError: if ``ip_str`` is not a valid IPv6 address.
    """
    # Track the longest run of all-zero hextets found so far ("best_*")
    # and the run currently being scanned ("doublecolon_*").
    best_doublecolon_start = -1
    best_doublecolon_len = 0
    doublecolon_start = -1
    doublecolon_len = 0

    if not is_valid_ipv6_address(ip_str):
        raise ValidationError(error_message, code='invalid')

    # This algorithm can only handle fully exploded
    # IP strings
    ip_str = _explode_shorthand_ip_string(ip_str)
    ip_str = _sanitize_ipv4_mapping(ip_str)

    # If needed, unpack the IPv4 and return straight away
    # - no need in running the rest of the algorithm
    if unpack_ipv4:
        ipv4_unpacked = _unpack_ipv4(ip_str)
        if ipv4_unpacked:
            return ipv4_unpacked

    hextets = ip_str.split(":")

    for index in range(len(hextets)):
        # Remove leading zeroes (an all-zero hextet becomes '0').
        hextets[index] = hextets[index].lstrip('0')
        if not hextets[index]:
            hextets[index] = '0'

        # Determine best hextet to compress
        if hextets[index] == '0':
            doublecolon_len += 1
            if doublecolon_start == -1:
                # Start of a sequence of zeros.
                doublecolon_start = index
            if doublecolon_len > best_doublecolon_len:
                # This is the longest sequence of zeros so far.
                best_doublecolon_len = doublecolon_len
                best_doublecolon_start = doublecolon_start
        else:
            doublecolon_len = 0
            doublecolon_start = -1

    # Compress the most suitable hextet run; runs of length 1 are not
    # worth compressing per RFC 5952 style.
    if best_doublecolon_len > 1:
        best_doublecolon_end = (best_doublecolon_start +
                                best_doublecolon_len)
        # For zeros at the end of the address.
        if best_doublecolon_end == len(hextets):
            hextets += ['']
        hextets[best_doublecolon_start:best_doublecolon_end] = ['']
        # For zeros at the beginning of the address.
        if best_doublecolon_start == 0:
            hextets = [''] + hextets

    result = ":".join(hextets)

    return result.lower()
def _sanitize_ipv4_mapping(ip_str):
"""
Sanitize IPv4 mapping in an expanded IPv6 address.
This converts ::ffff:0a0a:0a0a to ::ffff:10.10.10.10.
If there is nothing to sanitize, returns an unchanged
string.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The sanitized output string, if applicable.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
# not an ipv4 mapping
return ip_str
hextets = ip_str.split(':')
if '.' in hextets[-1]:
# already sanitized
return ip_str
ipv4_address = "%d.%d.%d.%d" % (
int(hextets[6][0:2], 16),
int(hextets[6][2:4], 16),
int(hextets[7][0:2], 16),
int(hextets[7][2:4], 16),
)
result = ':'.join(hextets[0:6])
result += ':' + ipv4_address
return result
def _unpack_ipv4(ip_str):
"""
Unpack an IPv4 address that was mapped in a compressed IPv6 address.
This converts 0000:0000:0000:0000:0000:ffff:10.10.10.10 to 10.10.10.10.
If there is nothing to sanitize, returns None.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The unpacked IPv4 address, or None if there was nothing to unpack.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
return None
return ip_str.rsplit(':', 1)[1]
def is_valid_ipv6_address(ip_str):
    """
    Ensure we have a valid IPv6 address.

    Args:
        ip_str: A string, the IPv6 address.

    Returns:
        A boolean, True if this is a valid IPv6 address.
    """
    # Imported here, not at module level, to avoid a circular import.
    from django.core.validators import validate_ipv4_address

    # We need to have at least one ':'.
    if ':' not in ip_str:
        return False

    # We can only have one '::' shortener.
    if ip_str.count('::') > 1:
        return False

    # '::' should be encompassed by start, digits or end.
    if ':::' in ip_str:
        return False

    # A single colon can neither start nor end an address.
    if ((ip_str.startswith(':') and not ip_str.startswith('::')) or
            (ip_str.endswith(':') and not ip_str.endswith('::'))):
        return False

    # We can never have more than 7 ':' (1::2:3:4:5:6:7:8 is invalid)
    if ip_str.count(':') > 7:
        return False

    # If we have no concatenation, we need to have 8 fields with 7 ':'.
    if '::' not in ip_str and ip_str.count(':') != 7:
        # We might have an IPv4 mapped address.
        if ip_str.count('.') != 3:
            return False

    ip_str = _explode_shorthand_ip_string(ip_str)

    # Now that we have that all squared away, let's check that each of the
    # hextets are between 0x0 and 0xFFFF.
    for hextet in ip_str.split(':'):
        if hextet.count('.') == 3:
            # If we have an IPv4 mapped address, the IPv4 portion has to
            # be at the end of the IPv6 portion.
            if not ip_str.split(':')[-1] == hextet:
                return False
            try:
                validate_ipv4_address(hextet)
            except ValidationError:
                return False
        else:
            try:
                # a value error here means that we got a bad hextet,
                # something like 0xzzzz
                if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:
                    return False
            except ValueError:
                return False
    return True
def _explode_shorthand_ip_string(ip_str):
    """
    Expand a shortened IPv6 address.

    Args:
        ip_str: A string, the IPv6 address.

    Returns:
        A string, the expanded IPv6 address.
    """
    if not _is_shorthand_ip(ip_str):
        # Already in longhand form.
        return ip_str

    # A dotted quad in the last field means a v4-mapped address: only 7
    # colon-separated fields, the last one being the IPv4 part.
    fill_to = 7 if '.' in ip_str.split(':')[-1] else 8

    halves = ip_str.split('::')
    if len(halves) > 1:
        # Expand '::' with enough '0000' fields to reach fill_to.
        left = halves[0].split(':')
        right = halves[1].split(':')
        zeros_needed = fill_to - (len(left) + len(right))
        expanded = left + ['0000'] * zeros_needed + right
    else:
        expanded = ip_str.split(':')

    # Left-pad each hextet to 4 characters and normalise to lower case.
    return ':'.join(part.rjust(4, '0').lower() for part in expanded)
def _is_shorthand_ip(ip_str):
"""Determine if the address is shortened.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if the address is shortened.
"""
if ip_str.count('::') == 1:
return True
if any(len(x) < 4 for x in ip_str.split(':')):
return True
return False
|
OpenGov/og-python-utils | refs/heads/master | tests/text_regex_test.py | 1 | # This import fixes sys.path issues
from .parentpath import *
import unittest
import re
from ogutils.text import regex
class TextRegexTest(unittest.TestCase):
    """Tests for ogutils.text.regex.chain_sub_regexes.

    Fix: use assertEqual -- assertEquals is a deprecated alias removed in
    modern Python (it emits DeprecationWarning on 3.x).
    """

    def test_bad_input_chained_regex(self):
        # A lone string instead of a (pattern, replacement) pair, and a
        # non-string subject, must both be rejected.
        with self.assertRaises(ValueError):
            regex.chain_sub_regexes('', '.')
        with self.assertRaises(TypeError):
            regex.chain_sub_regexes(5, ('.', 'a'))
        # No substitutions: the input is returned unchanged.
        self.assertEqual(regex.chain_sub_regexes('foobar'), 'foobar')

    def test_single_chained_regex(self):
        # Patterns may be pre-compiled or plain strings.
        self.assertEqual(regex.chain_sub_regexes('foobar', (re.compile('[a]+'), 'o')), 'foobor')
        self.assertEqual(regex.chain_sub_regexes('foobar', ('na', 'o')), 'foobar')
        self.assertEqual(regex.chain_sub_regexes('', ('.', 'o')), '')
        self.assertEqual(regex.chain_sub_regexes('foobar', ('.', '1')), '111111')
        self.assertEqual(regex.chain_sub_regexes('foobar', ('.+', '1')), '1')

    def test_many_chained_regex(self):
        # Substitutions apply in order, each over the previous result.
        self.assertEqual(regex.chain_sub_regexes('foobar', ('[a]+', 'o'), ('o', 'a')), 'faabar')
        self.assertEqual(regex.chain_sub_regexes(
            'foobar',
            ('.$', 'z'),
            (re.compile('^.'), 'b')),
            'boobaz')
        self.assertEqual(regex.chain_sub_regexes(
            'foobar',
            ('a', 'b'),
            ('b', 'c'),
            ('c', 'd'),
            ('d', 'e'),
            ('e', 'f')),
            'fooffr')
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
ilendl2/wagtail-cookiecutter-foundation | refs/heads/master | {{cookiecutter.project_slug}}/people/migrations/0005_personindexpage_feed_image.py | 3 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-09 18:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 1.11.6): add the optional ``feed_image``
    foreign key on PersonIndexPage, pointing at a wagtailimages.Image."""

    dependencies = [
        ('wagtailimages', '0019_delete_filter'),
        ('people', '0004_personpage_email_2'),
    ]

    operations = [
        migrations.AddField(
            model_name='personindexpage',
            name='feed_image',
            # SET_NULL keeps the page alive when the image is deleted.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'),
        ),
    ]
|
jjas0nn/solvem | refs/heads/master | tensorflow/lib/python2.7/site-packages/numpy/distutils/npy_pkg_config.py | 66 | from __future__ import division, absolute_import, print_function
import sys
import re
import os
if sys.version_info[0] < 3:
from ConfigParser import RawConfigParser, NoOptionError
else:
from configparser import RawConfigParser, NoOptionError
# Public API of this module.
__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet',
           'read_config', 'parse_flags']

# Matches ${var} interpolation placeholders in .ini values.
_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}')
class FormatError(IOError):
    """
    Exception thrown when there is a problem parsing a configuration file.
    """

    def __init__(self, msg):
        # Deliberately no super().__init__ call: keeps args handling
        # identical to the historical behaviour.
        self.msg = msg

    def __str__(self):
        return self.msg
class PkgNotFound(IOError):
    """Exception raised when a package can not be located."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg
def parse_flags(line):
    """
    Parse a line from a config file containing compile flags.

    Parameters
    ----------
    line : str
        A single line containing one or more compile flags.

    Returns
    -------
    d : dict
        Dictionary of parsed flags, split into relevant categories.
        These categories are the keys of `d`:

        * 'include_dirs'
        * 'library_dirs'
        * 'libraries'
        * 'macros'
        * 'ignored'
    """
    parsed = {'include_dirs': [], 'library_dirs': [], 'libraries': [],
              'macros': [], 'ignored': []}

    # Recognised option prefixes mapped to their output category.
    categories = (('-I', 'include_dirs'), ('-L', 'library_dirs'),
                  ('-l', 'libraries'), ('-D', 'macros'))

    # Prepending a space lets ' -' act as the separator even for the first
    # flag on the line; the stripped '-' is re-attached to each token.
    for token in (' ' + line).split(' -'):
        token = '-' + token
        for prefix, key in categories:
            if token.startswith(prefix):
                parsed[key].append(token[2:].strip())
                break
        else:
            parsed['ignored'].append(token)

    return parsed
def _escape_backslash(val):
return val.replace('\\', '\\\\')
class LibraryInfo(object):
    """
    Object containing build information about a library.

    Parameters
    ----------
    name : str
        The library name.
    description : str
        Description of the library.
    version : str
        Version string.
    sections : dict
        The sections of the configuration file for the library. The keys are
        the section headers, the values the text under each header.
    vars : class instance
        A `VariableSet` instance, which contains ``(name, value)`` pairs for
        variables defined in the configuration file for the library.
    requires : sequence, optional
        The required libraries for the library to be installed.

    Notes
    -----
    All input parameters (except "sections" which is a method) are available as
    attributes of the same name.
    """
    def __init__(self, name, description, version, sections, vars, requires=None):
        self.name = name
        self.description = description
        if requires:
            self.requires = requires
        else:
            self.requires = []
        self.version = version
        self._sections = sections
        self.vars = vars

    def sections(self):
        """
        Return the section headers of the config file.

        Returns
        -------
        keys : list of str
            The list of section headers.
        """
        return list(self._sections.keys())

    def cflags(self, section="default"):
        """Return the interpolated, escaped compile flags for *section*."""
        val = self.vars.interpolate(self._sections[section]['cflags'])
        return _escape_backslash(val)

    def libs(self, section="default"):
        """Return the interpolated, escaped link flags for *section*."""
        val = self.vars.interpolate(self._sections[section]['libs'])
        return _escape_backslash(val)

    def __str__(self):
        m = ['Name: %s' % self.name, 'Description: %s' % self.description]
        # BUG FIX: the two branches were inverted -- the requirement list
        # was only joined when it was empty, so real requirements were
        # never printed.
        if self.requires:
            m.append('Requires: %s' % ",".join(self.requires))
        else:
            m.append('Requires:')
        m.append('Version: %s' % self.version)
        return "\n".join(m)
class VariableSet(object):
    """
    Container object for the variables defined in a config file.

    `VariableSet` can be used as a plain dictionary, with the variable names
    as keys.

    Parameters
    ----------
    d : dict
        Dict of items in the "variables" section of the configuration file.
    """
    def __init__(self, d):
        self._raw_data = dict(d.items())
        self._re = {}
        self._re_sub = {}
        self._init_parse()

    def _init_parse(self):
        # Pre-compile one ${name} pattern per variable.
        for name, value in self._raw_data.items():
            self._init_parse_var(name, value)

    def _init_parse_var(self, name, value):
        self._re[name] = re.compile(r'\$\{%s\}' % name)
        self._re_sub[name] = value

    def interpolate(self, value):
        # Keep substituting until no ${var} placeholder remains, or until a
        # full pass leaves the string unchanged (guards against unknown or
        # self-referential variables looping forever).
        def _substitute_all(text):
            for name, pattern in self._re.items():
                text = pattern.sub(self._re_sub[name], text)
            return text

        while _VAR.search(value):
            substituted = _substitute_all(value)
            if substituted == value:
                break
            value = substituted
        return value

    def variables(self):
        """
        Return the list of variable names.

        Returns
        -------
        names : list of str
            The names of all variables in the `VariableSet` instance.
        """
        return list(self._raw_data.keys())

    # Emulate a dict to set/get variables values
    def __getitem__(self, name):
        return self._raw_data[name]

    def __setitem__(self, name, value):
        self._raw_data[name] = value
        self._init_parse_var(name, value)
def parse_meta(config):
    """Extract and validate the [meta] section of *config*.

    Raises FormatError if the section or one of the mandatory options
    (name, description, version) is missing; 'requires' defaults to [].
    """
    if not config.has_section('meta'):
        raise FormatError("No meta section found !")

    meta = dict(config.items('meta'))
    for required in ('name', 'description', 'version'):
        if required not in meta:
            raise FormatError("Option %s (section [meta]) is mandatory, "
                              "but not found" % required)

    meta.setdefault('requires', [])
    return meta
def parse_variables(config):
    """Build a VariableSet from the [variables] section of *config*.

    Raises FormatError when the section is absent.
    """
    if not config.has_section('variables'):
        raise FormatError("No variables section found !")
    return VariableSet(dict(config.items("variables")))
def parse_sections(config):
    """Parse every non-special section of *config*.

    BUG FIX: the previous body was ``return meta_d, r`` with both names
    undefined, so any call raised NameError. Now mirrors the section
    handling done inline in parse_config.

    Returns
    -------
    sections : dict
        Mapping of section name to its {option: value} dict.
    requires : dict
        Mapping of section name to its raw 'requires' option, for
        sections that define one.
    """
    sections = {}
    requires = {}
    for s in config.sections():
        if s in ('meta', 'variables'):
            continue
        if config.has_option(s, "requires"):
            requires[s] = config.get(s, 'requires')
        sections[s] = dict(config.items(s))
    return sections, requires
def pkg_to_filename(pkg_name):
    """Map a package name to its npy-pkg-config file name."""
    return '{0}.ini'.format(pkg_name)
def parse_config(filename, dirs=None):
    """Read an npy-pkg-config file and return its raw parts.

    *filename* is looked up in each directory of *dirs* when given.
    Returns a (meta, vars, sections, requires) tuple; raises PkgNotFound
    when no candidate file could be read.
    """
    if dirs:
        candidates = [os.path.join(d, filename) for d in dirs]
    else:
        candidates = [filename]

    config = RawConfigParser()
    found = config.read(candidates)
    if len(found) < 1:
        raise PkgNotFound("Could not find file(s) %s" % str(candidates))

    # Parse meta and variables sections
    meta = parse_meta(config)

    vars = {}
    if config.has_section('variables'):
        for name, value in config.items("variables"):
            vars[name] = _escape_backslash(value)

    # Parse "normal" sections
    sections = {}
    requires = {}
    for s in config.sections():
        if s in ('meta', 'variables'):
            continue
        if config.has_option(s, "requires"):
            requires[s] = config.get(s, 'requires')
        sections[s] = dict(config.items(s))

    return meta, vars, sections, requires
def _read_config_imp(filenames, dirs=None):
    """Parse a config file and, recursively, every library it requires,
    folding all variables and sections into a single LibraryInfo."""
    def _read_config(f):
        meta, vars, sections, reqs = parse_config(f, dirs)
        # recursively add sections and variables of required libraries
        for rname, rvalue in reqs.items():
            nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue))

            # Update var dict for variables not in 'top' config file
            for k, v in nvars.items():
                if not k in vars:
                    vars[k] = v

            # Update sec dict: append the required library's options for
            # the same-named section onto ours.
            for oname, ovalue in nsections[rname].items():
                if ovalue:
                    sections[rname][oname] += ' %s' % ovalue

        return meta, vars, sections, reqs

    meta, vars, sections, reqs = _read_config(filenames)

    # FIXME: document this. If pkgname is defined in the variables section, and
    # there is no pkgdir variable defined, pkgdir is automatically defined to
    # the path of pkgname. This requires the package to be imported to work
    if not 'pkgdir' in vars and "pkgname" in vars:
        pkgname = vars["pkgname"]
        if not pkgname in sys.modules:
            raise ValueError("You should import %s to get information on %s" %
                             (pkgname, meta["name"]))

        mod = sys.modules[pkgname]
        vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__))

    return LibraryInfo(name=meta["name"], description=meta["description"],
                       version=meta["version"], sections=sections, vars=VariableSet(vars))
# Trivial cache for LibraryInfo instances, keyed by package name. To be
# really efficient the cache would have to live in parse_config, since the
# same file can be parsed many times outside LibraryInfo creation, but that
# is unlikely to be a problem in practice.
_CACHE = {}
def read_config(pkgname, dirs=None):
    """
    Return library info for a package from its configuration file.

    Results are memoized in the module-level ``_CACHE``, so each package is
    parsed at most once per process.

    Parameters
    ----------
    pkgname : str
        Name of the package (should match the name of the .ini file, without
        the extension, e.g. foo for the file foo.ini).
    dirs : sequence, optional
        If given, should be a sequence of directories - usually including
        the NumPy base directory - where to look for npy-pkg-config files.

    Returns
    -------
    pkginfo : LibraryInfo
        The `LibraryInfo` instance containing the build information.

    Raises
    ------
    PkgNotFound
        If the package is not found.

    See Also
    --------
    misc_util.get_info, misc_util.get_pkg_info
    """
    if pkgname not in _CACHE:
        _CACHE[pkgname] = _read_config_imp(pkg_to_filename(pkgname), dirs)
    return _CACHE[pkgname]
# TODO:
# - implements version comparison (modversion + atleast)
# pkg-config simple emulator - useful for debugging, and maybe later to query
# the system
if __name__ == '__main__':
    # Minimal pkg-config emulator, useful to debug .ini files from the
    # command line.
    import sys
    import os
    import glob
    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option("--cflags", dest="cflags", action="store_true",
                      help="output all preprocessor and compiler flags")
    parser.add_option("--libs", dest="libs", action="store_true",
                      help="output all linker flags")
    parser.add_option("--use-section", dest="section",
                      help="use this section instead of default for options")
    parser.add_option("--version", dest="version", action="store_true",
                      help="output version")
    parser.add_option("--atleast-version", dest="min_version",
                      help="Minimal version")
    # BUG FIX: help text was a copy-paste of --atleast-version's.
    parser.add_option("--list-all", dest="list_all", action="store_true",
                      help="list all packages found in the current directory")
    parser.add_option("--define-variable", dest="define_variable",
                      help="Replace variable with the given value")

    (options, args) = parser.parse_args(sys.argv)
    if len(args) < 2:
        raise ValueError("Expect package name on the command line:")

    if options.list_all:
        files = glob.glob("*.ini")
        for f in files:
            # BUG FIX: read_config expects the package name, not the .ini
            # filename (it appends '.ini' itself).
            info = read_config(os.path.splitext(f)[0])
            print("%s\t%s - %s" % (info.name, info.name, info.description))

    pkg_name = args[1]
    d = os.environ.get('NPY_PKG_CONFIG_PATH')
    if d:
        info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d])
    else:
        info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.'])

    if options.section:
        section = options.section
    else:
        section = "default"

    if options.define_variable:
        m = re.search(r'([\S]+)=([\S]+)', options.define_variable)
        if not m:
            raise ValueError("--define-variable option should be of "
                             "the form --define-variable=foo=bar")
        else:
            name = m.group(1)
            value = m.group(2)
            info.vars[name] = value

    if options.cflags:
        print(info.cflags(section))
    if options.libs:
        print(info.libs(section))
    if options.version:
        print(info.version)
    if options.min_version:
        print(info.version >= options.min_version)
|
pxmkuruc/usd-qt | refs/heads/master | pxr/usdQt/testenv/testUsdQtOpinionModel.py | 1 | #!/pxrpythonsubst
#
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from __future__ import print_function
import unittest2 as unittest
import os
import os.path
from pxr import Usd, UsdQt
from pxr.UsdQt._Qt import QtCore, QtWidgets
class TestOpinionModel(unittest.TestCase):
    """Exercise UsdQt.OpinionStandardModel over prims from simple.usda."""

    def setUp(self):
        # Resolve simple.usda from the CWD, falling back to a directory
        # named after this test file (testenv layout).
        stageFilePath = "simple.usda"
        stageFilePath = stageFilePath if os.path.isfile(stageFilePath) else \
            os.path.join(os.path.splitext(__file__)[0], stageFilePath)
        self.stage = Usd.Stage.Open(stageFilePath)

    def testProperties(self):
        # One model over four sibling prims; the proxy aggregates them.
        prims = [self.stage.GetPrimAtPath(path) for path in
                 ['/MyPrim1/Child1', '/MyPrim1/Child2', '/MyPrim1/Child3', '/MyPrim1/Child4']]
        model = UsdQt.OpinionStandardModel(prims)
        primIndex = model.index(0, 0, QtCore.QModelIndex())
        proxy = model.GetProxyForIndex(primIndex)
        self.assertEqual(proxy.GetNames(), [
            'Child1', 'Child2', 'Child3', 'Child4'])
        self.assertEqual(model.data(primIndex),
                         'Child1, Child2, Child3, Child4')
        # Rows 0/1/2 under a prim are the metadata / attribute /
        # relationship group rows respectively.
        metadataGroupIndex = model.index(0, 0, primIndex)
        attributeGroupIndex = model.index(1, 0, primIndex)
        relationshipGroupIndex = model.index(2, 0, primIndex)
        self.assertGreater(model.rowCount(metadataGroupIndex), 0)
        self.assertEqual(model.rowCount(attributeGroupIndex), 2)
        self.assertEqual(model.rowCount(relationshipGroupIndex), 1)
        # Columns: 0 = name, 1 = type, 2 = value (display and edit roles).
        self.assertEqual(model.index(0, 0, attributeGroupIndex).data(), "x")
        self.assertEqual(model.index(0, 1, attributeGroupIndex).data(), "")
        self.assertEqual(model.index(0, 2, attributeGroupIndex).data(
            QtCore.Qt.DisplayRole), "")
        self.assertEqual(model.index(0, 2, attributeGroupIndex).data(
            QtCore.Qt.EditRole), None)
        self.assertEqual(model.index(1, 0, attributeGroupIndex).data(), "y")
        self.assertEqual(model.index(1, 1, attributeGroupIndex).data(), "int")
        self.assertEqual(model.index(1, 2, attributeGroupIndex).data(
            QtCore.Qt.DisplayRole), "2")
        self.assertEqual(model.index(
            1, 2, attributeGroupIndex).data(QtCore.Qt.EditRole), 2)
        self.assertEqual(model.index(
            0, 0, relationshipGroupIndex).data(), "rel1")

    def testMetadata(self):
        # TODO: metadata assertions were never written; the model line
        # below was left disabled by the original author.
        prims = [self.stage.GetPrimAtPath(path) for path in
                 ['/MyPrim1', '/MyPrim2']]
        #model = UsdQt.OpinionStandardModel(prims)

    def testInvalidSetData(self):
        """Ensure that indices are properly cleaned when a bad setData occurs.

        This can end up triggering a very hard to track down deferred crash
        where persistent indices are created and not cleaned up."""
        pass
if __name__ == '__main__':
    # Verbose so individual test names are printed when run directly.
    unittest.main(verbosity=2)
|
amiramix/serna-free | refs/heads/master | serna/dist/plugins/dita-examples/open_topicref.py | 5 | ##
## Copyright(c) 2009 Syntext, Inc. All Rights Reserved.
## Contact: info@syntext.com, http://www.syntext.com
##
## This file is part of Syntext Serna XML Editor.
##
## COMMERCIAL USAGE
## Licensees holding valid Syntext Serna commercial licenses may use this file
## in accordance with the Syntext Serna Commercial License Agreement provided
## with the software, or, alternatively, in accorance with the terms contained
## in a written agreement between you and Syntext, Inc.
##
## GNU GENERAL PUBLIC LICENSE USAGE
## Alternatively, this file may be used under the terms of the GNU General
## Public License versions 2.0 or 3.0 as published by the Free Software
## Foundation and appearing in the file LICENSE.GPL included in the packaging
## of this file. In addition, as a special exception, Syntext, Inc. gives you
## certain additional rights, which are described in the Syntext, Inc. GPL
## Exception for Syntext Serna Free Edition, included in the file
## GPL_EXCEPTION.txt in this package.
##
## You should have received a copy of appropriate licenses along with this
## package. If not, see <http://www.syntext.com/legal/>. If you are unsure
## which license is appropriate for your use, please contact the sales
## department at sales@syntext.com.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#
# This plugin example opens the document if user double-clicks within the
# DITA topicref element.
#
from SernaApi import *
import os
class TopicClickWatcher(SimpleWatcher):
    """Double-click watcher: opens the document referenced by the href
    attribute of the DITA <topicref> element under the caret."""

    def __init__(self, dp):
        SimpleWatcher.__init__(self)
        # Callable handle on the owning DocumentPlugin (see OpenOnTopicRef,
        # which passes ref(self)).
        self.dp_ = dp

    def notifyChanged(self):
        # NOTE(review): the 1/0 return codes appear to mean "not handled" /
        # "handled" respectively -- confirm against the SimpleWatcher API.
        se = self.dp_().sernaDoc().structEditor()
        pos = se.getCheckedPos()
        if not pos or pos.node().nodeName() != "topicref":
            return 1
        hrefatt = pos.node().asGroveElement().attrs().getAttribute("href")
        if not hrefatt or hrefatt.value().isEmpty():
            return 1
        ptn = PropertyNode("doc-src-info")
        # use source document path as a base path instead of current directory
        path = os.path.dirname(str(se.sourceGrove().topSysid()))
        path = os.path.join(path, str(hrefatt.value()))
        # strip ID (fragment after '#') from the href target
        if path.find('#') > 0:
            path = path.split('#')[0]
        ptn.makeDescendant("doc-src").setString(path)
        self.dp_().executeCommandEvent("OpenDocumentWithDsi", ptn)
        return 0
class OpenOnTopicRef(DocumentPlugin):
    """Document plugin that installs a TopicClickWatcher on the editor."""

    def __init__(self, a1, a2):
        DocumentPlugin.__init__(self, a1, a2)

    def postInit(self):
        # ref() comes from the SernaApi star import; keep the watcher on
        # self so it is not garbage-collected while registered.
        self.clickWatcher_ = TopicClickWatcher(ref(self))
        self.sernaDoc().structEditor().setDoubleClickWatcher(self.clickWatcher_)
|
petecummings/django-cms | refs/heads/develop | cms/test_utils/project/pluginapp/plugins/manytomany_rel/migrations/0001_initial.py | 66 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration for the manytomany_rel test plugin app: creates
    Article, ArticlePluginModel and Section plus their relations."""

    dependencies = [
        ('cms', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
                ('title', models.CharField(max_length=50)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='ArticlePluginModel',
            # Plugin model: inherits from cms.CMSPlugin via the parent link.
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(primary_key=True, to='cms.CMSPlugin', auto_created=True, parent_link=True, serialize=False)),
                ('title', models.CharField(max_length=50)),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
        migrations.CreateModel(
            name='Section',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
                ('name', models.CharField(max_length=50)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='articlepluginmodel',
            name='sections',
            field=models.ManyToManyField(to='manytomany_rel.Section'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='article',
            name='section',
            field=models.ForeignKey(to='manytomany_rel.Section'),
            preserve_default=True,
        ),
    ]
|
albertoconnor/website | refs/heads/master | newsletter/templatetags/paths.py | 1 | # import re
from django import template
register = template.Library()
@register.filter
def paragraph_markup(text):
    """Strip literal ``<p>`` and ``</p>`` tags from *text* so the newsletter
    templates can re-wrap the content with inline-styled markup.

    Note: only the bare ``<p>`` form is removed; tags carrying attributes
    (e.g. ``<p class="x">``) are left untouched. Removed: an abandoned
    regex-based variant kept as commented-out code.
    """
    return text.replace('<p>', '').replace('</p>', '')
@register.simple_tag
def base_styles(tag):
    """Return the inline CSS reset applied to *tag* in HTML e-mail.

    Many e-mail clients drop <style> blocks, hence the per-tag inline
    rules. Unknown tags yield an empty string.
    """
    adjust = '-webkit-text-size-adjust: 100%; -ms-text-size-adjust: 100%;'
    fill = ('height: 100% !important; margin: 0; padding: 0; '
            'width: 100% !important;')
    mso = 'mso-table-lspace: 0pt; mso-table-rspace: 0pt;'
    inline_css = {
        'body': adjust + ' ' + fill,
        '#bodyTable': fill,
        '#bodyCell': fill,
        'table': adjust + ' border-collapse: collapse; ' + mso,
        'td': adjust + ' ' + mso,
        'a': adjust,
        'img': ('border: 0; outline: none; text-decoration: none; '
                '-ms-interpolation-mode: bicubic;'),
        'p': adjust,
        'li': adjust,
        'blockquote': adjust,
        'h1': adjust,
    }
    return inline_css.get(tag, "")
|
woltage/ansible-modules-core | refs/heads/devel | commands/command.py | 25 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import copy
import sys
import datetime
import glob
import traceback
import re
import shlex
import os
DOCUMENTATION = '''
---
module: command
version_added: historical
short_description: Executes a command on a remote node
description:
- The M(command) module takes the command name followed by a list of space-delimited arguments.
- The given command will be executed on all selected nodes. It will not be
processed through the shell, so variables like C($HOME) and operations
like C("<"), C(">"), C("|"), and C("&") will not work (use the M(shell)
module if you need these features).
options:
free_form:
description:
- the command module takes a free form command to run. There is no parameter actually named 'free form'.
See the examples!
required: true
default: null
aliases: []
creates:
description:
- a filename or glob pattern, when it already exists, this step will B(not) be run.
required: no
default: null
removes:
description:
- a filename or glob pattern, when it does not exist, this step will B(not) be run.
version_added: "0.8"
required: no
default: null
chdir:
description:
- cd into this directory before running the command
version_added: "0.6"
required: false
default: null
executable:
description:
- change the shell used to execute the command. Should be an absolute path to the executable.
required: false
default: null
version_added: "0.9"
warn:
version_added: "1.8"
default: yes
description:
- if command warnings are on in ansible.cfg, do not warn about this particular line if set to no/false.
required: false
default: True
notes:
- If you want to run a command through the shell (say you are using C(<),
C(>), C(|), etc), you actually want the M(shell) module instead. The
M(command) module is much more secure as it's not affected by the user's
environment.
- " C(creates), C(removes), and C(chdir) can be specified after the command. For instance, if you only want to run a command if a certain file does not exist, use this."
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = '''
# Example from Ansible Playbooks.
- command: /sbin/shutdown -t now
# Run the command if the specified file does not exist.
- command: /usr/bin/make_database.sh arg1 arg2 creates=/path/to/database
# You can also use the 'args' form to provide the options. This command
# will change the working directory to somedir/ and will only run when
# /path/to/database doesn't exist.
- command: /usr/bin/make_database.sh arg1 arg2
args:
chdir: somedir/
creates: /path/to/database
'''
# Dict of options and their defaults.
# These are the inline `key=value` parameters the command module strips out of
# the free-form command line (see PARAM_REGEX below).
# NOTE(review): 'NO_LOG' is upper-cased unlike the other keys — presumably to
# match the special NO_LOG handling in module_utils; confirm before renaming.
OPTIONS = {'chdir': None,
           'creates': None,
           'executable': None,
           'NO_LOG': None,
           'removes': None,
           'warn': True,
           }
# This is a pretty complex regex, which functions as follows:
#
# 1. (^|\s)
# ^ look for a space or the beginning of the line
# 2. ({options_list})=
# ^ expanded to (chdir|creates|executable...)=
# look for a valid param, followed by an '='
# 3. (?P<quote>[\'"])?
# ^ look for an optional quote character, which can either be
# a single or double quote character, and store it for later
# 4. (.*?)
# ^ match everything in a non-greedy manner until...
# 5. (?(quote)(?<!\\)(?P=quote))((?<!\\)(?=\s)|$)
# ^ a non-escaped space or a non-escaped quote of the same kind
# that was matched in the first 'quote' is found, or the end of
# the line is reached
OPTIONS_REGEX = '|'.join(OPTIONS.keys())
PARAM_REGEX = re.compile(
    r'(^|\s)(' + OPTIONS_REGEX +
    r')=(?P<quote>[\'"])?(.*?)(?(quote)(?<!\\)(?P=quote))((?<!\\)(?=\s)|$)'
)
def check_command(commandline):
    """Inspect a raw command line and suggest dedicated Ansible modules.

    :param commandline: the free-form command string the user supplied
    :return: list of advisory warning strings (empty when nothing applies)
    """
    # Binaries whose job the `file` module covers, mapped to the relevant
    # file-module argument.
    arguments = { 'chown': 'owner', 'chmod': 'mode', 'chgrp': 'group',
                  'ln': 'state=link', 'mkdir': 'state=directory',
                  'rmdir': 'state=absent', 'rm': 'state=absent', 'touch': 'state=touch' }
    # Binaries with a dedicated module equivalent.
    commands  = { 'git': 'git', 'hg': 'hg', 'curl': 'get_url', 'wget': 'get_url',
                  'svn': 'subversion', 'service': 'service',
                  'mount': 'mount', 'rpm': 'yum', 'yum': 'yum', 'apt-get': 'apt-get',
                  'tar': 'unarchive', 'unzip': 'unarchive', 'sed': 'template or lineinfile',
                  'rsync': 'synchronize' }
    # Only the executable name matters, not its path or arguments.
    executable = os.path.basename(commandline.split()[0])
    notices = list()
    if executable in arguments:
        notices.append("Consider using file module with %s rather than running %s" % (arguments[executable], executable))
    if executable in commands:
        notices.append("Consider using %s module rather than running %s" % (commands[executable], executable))
    return notices
def main():
    """Entry point: parse module params, honour creates/removes guards,
    run the command via AnsibleModule.run_command and report the result."""

    # the command module is the one ansible module that does not take key=value args
    # hence don't copy this one if you are looking to build others!
    module = AnsibleModule(
        argument_spec=dict(
          _raw_params = dict(),
          _uses_shell = dict(type='bool', default=False),
          chdir = dict(),
          executable = dict(),
          creates = dict(),
          removes = dict(),
          warn = dict(type='bool', default=True),
        )
    )

    shell = module.params['_uses_shell']
    chdir = module.params['chdir']
    executable = module.params['executable']
    # _raw_params holds the whole free-form command line.
    args = module.params['_raw_params']
    creates = module.params['creates']
    removes = module.params['removes']
    warn = module.params['warn']

    if args.strip() == '':
        module.fail_json(rc=256, msg="no command given")

    if chdir:
        # Normalise and switch directory before any creates/removes globbing,
        # so relative patterns are evaluated relative to chdir.
        chdir = os.path.abspath(os.path.expanduser(chdir))
        os.chdir(chdir)

    if creates:
        # do not run the command if the line contains creates=filename
        # and the filename already exists. This allows idempotence
        # of command executions.
        v = os.path.expanduser(creates)
        if glob.glob(v):
            module.exit_json(
                cmd=args,
                stdout="skipped, since %s exists" % v,
                changed=False,
                stderr=False,
                rc=0
            )

    if removes:
        # do not run the command if the line contains removes=filename
        # and the filename does not exist. This allows idempotence
        # of command executions.
        v = os.path.expanduser(removes)
        if not glob.glob(v):
            module.exit_json(
                cmd=args,
                stdout="skipped, since %s does not exist" % v,
                changed=False,
                stderr=False,
                rc=0
            )

    warnings = list()
    if warn:
        # Advisory only: suggests dedicated modules for well-known commands.
        warnings = check_command(args)

    if not shell:
        # Without a shell the command must be tokenised ourselves.
        args = shlex.split(args)
    startd = datetime.datetime.now()

    rc, out, err = module.run_command(args, executable=executable, use_unsafe_shell=shell)

    endd = datetime.datetime.now()
    delta = endd - startd

    if out is None:
        out = ''
    if err is None:
        err = ''

    # changed is always True here: the command was actually executed.
    module.exit_json(
        cmd      = args,
        stdout   = out.rstrip("\r\n"),
        stderr   = err.rstrip("\r\n"),
        rc       = rc,
        start    = str(startd),
        end      = str(endd),
        delta    = str(delta),
        changed  = True,
        warnings = warnings
    )
# import module snippets
# NOTE: the wildcard imports below inject AnsibleModule (used by main());
# Ansible's historical module layout requires them after the definitions.
from ansible.module_utils.basic import *
from ansible.module_utils.splitter import *

main()
|
pedro2d10/SickRage-FR | refs/heads/develop | lib/sqlalchemy/event/__init__.py | 79 | # event/__init__.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .api import CANCEL, NO_RETVAL, listen, listens_for, remove, contains
from .base import Events, dispatcher
from .attr import RefCollection
from .legacy import _legacy_signature
|
victorbriz/rethinkdb | refs/heads/next | external/v8_3.30.33.16/build/gyp/test/win/gyptest-link-force-symbol-reference.py | 237 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure ForceSymbolReference is translated properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('force-symbol-reference.gyp', chdir=CHDIR)
test.build('force-symbol-reference.gyp', test.ALL, chdir=CHDIR)
output = test.run_dumpbin(
'/disasm', test.built_file_path('test_force_reference.exe', chdir=CHDIR))
if '?x@@YAHXZ:' not in output or '?y@@YAHXZ:' not in output:
test.fail_test()
test.pass_test()
|
HarborYuan/cashier | refs/heads/master | env/Lib/site-packages/pip/_vendor/requests/utils.py | 319 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import re
import socket
import struct
import warnings
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass, urlunparse,
basestring)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL, InvalidHeader, FileModeWarning
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
    """Return *d* as a sequence of pairs, converting mappings via ``items()``.

    Non-mapping inputs are returned unchanged.
    """
    return d.items() if hasattr(d, 'items') else d
def super_len(o):
    """Best-effort remaining length (in items/bytes) of *o*.

    Tries, in order: ``len()``, a ``len`` attribute, ``getvalue()`` (in-memory
    streams), then ``fileno()``/``fstat``. The current ``tell()`` position, if
    available, is subtracted so partially-read files report what is left.
    """
    length = 0
    position = 0

    if hasattr(o, '__len__'):
        length = len(o)
    elif hasattr(o, 'len'):
        length = o.len
    elif hasattr(o, 'getvalue'):
        # e.g. BytesIO, cStringIO.StringIO
        length = len(o.getvalue())
    elif hasattr(o, 'fileno'):
        try:
            fd = o.fileno()
        except io.UnsupportedOperation:
            # Not a real OS-level file; leave length at 0.
            pass
        else:
            length = os.fstat(fd).st_size

            # Having used fstat to determine the file length, we need to
            # confirm that this file was opened up in binary mode.
            if 'b' not in o.mode:
                warnings.warn((
                    "Requests has determined the content-length for this "
                    "request using the binary size of the file: however, the "
                    "file has been opened in text mode (i.e. without the 'b' "
                    "flag in the mode). This may lead to an incorrect "
                    "content-length. In Requests 3.0, support will be removed "
                    "for files in text mode."),
                    FileModeWarning
                )

    if hasattr(o, 'tell'):
        try:
            position = o.tell()
        except (OSError, IOError):
            # Special descriptors (e.g. stdin) can refuse tell(); report zero
            # remaining so the caller falls back to chunked sending.
            position = length

    return max(0, length - position)
def get_netrc_auth(url, raise_errors=False):
    """Returns the Requests tuple auth for a given url from netrc.

    :param url: URL whose hostname is looked up in the user's netrc file.
    :param raise_errors: when True, re-raise netrc parse / permission errors
        instead of silently skipping netrc-based auth.
    :return: ``(login, password)`` tuple, or ``None`` when no entry applies.
    """
    try:
        from netrc import netrc, NetrcParseError

        netrc_path = None

        # First of '.netrc' / '_netrc' that exists in $HOME wins.
        for f in NETRC_FILES:
            try:
                loc = os.path.expanduser('~/{0}'.format(f))
            except KeyError:
                # os.path.expanduser can fail when $HOME is undefined and
                # getpwuid fails. See http://bugs.python.org/issue20164 &
                # https://github.com/kennethreitz/requests/issues/1846
                return

            if os.path.exists(loc):
                netrc_path = loc
                break

        # Abort early if there isn't one.
        if netrc_path is None:
            return

        ri = urlparse(url)

        # Strip port numbers from netloc. This weird `if...encode`` dance is
        # used for Python 3.2, which doesn't support unicode literals.
        splitstr = b':'
        if isinstance(url, str):
            splitstr = splitstr.decode('ascii')
        host = ri.netloc.split(splitstr)[0]

        try:
            _netrc = netrc(netrc_path).authenticators(host)
            if _netrc:
                # Return with login / password; netrc entries may put the
                # usable name in the account field (index 1) when login is empty.
                login_i = (0 if _netrc[0] else 1)
                return (_netrc[login_i], _netrc[2])
        except (NetrcParseError, IOError):
            # If there was a parsing error or a permissions issue reading the file,
            # we'll just skip netrc auth unless explicitly asked to raise errors.
            if raise_errors:
                raise

    # AppEngine hackiness: netrc import itself can blow up there.
    except (ImportError, AttributeError):
        pass
def guess_filename(obj):
    """Guess a filename from *obj*'s ``name`` attribute, if it looks real.

    Pseudo-names such as ``<stdin>`` (angle-bracketed) yield ``None``.
    """
    name = getattr(obj, 'name', None)
    if not name or not isinstance(name, basestring):
        return None
    if name[0] == '<' or name[-1] == '>':
        return None
    return os.path.basename(name)
def from_key_val_list(value):
    """Coerce *value* into an OrderedDict, if it is dict-representable.

    ::

        >>> from_key_val_list([('key', 'val')])
        OrderedDict([('key', 'val')])
        >>> from_key_val_list('string')
        ValueError: need more than 1 value to unpack
        >>> from_key_val_list({'key': 'val'})
        OrderedDict([('key', 'val')])

    :rtype: OrderedDict
    """
    if value is None:
        return None

    scalar_types = (str, bytes, bool, int)
    if isinstance(value, scalar_types):
        raise ValueError('cannot encode objects that are not 2-tuples')

    return OrderedDict(value)
def to_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. If it can be, return a list of tuples, e.g.,

    ::

        >>> to_key_val_list([('key', 'val')])
        [('key', 'val')]
        >>> to_key_val_list({'key': 'val'})
        [('key', 'val')]
        >>> to_key_val_list('string')
        ValueError: cannot encode objects that are not 2-tuples.

    :rtype: list
    """
    # FIX: `collections.Mapping` moved to `collections.abc` in Python 3.3 and
    # the old alias was removed in Python 3.10; fall back for Python 2.
    try:
        from collections.abc import Mapping
    except ImportError:  # Python 2
        from collections import Mapping

    if value is None:
        return None

    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')

    if isinstance(value, Mapping):
        value = value.items()

    return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
    """Parse a comma-separated list header (RFC 2068 Section 2).

    Elements may be quoted-strings (which may themselves contain commas);
    surrounding quotes are removed automatically after parsing. Unlike
    :func:`parse_set_header`, duplicates and case are preserved.

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    :param value: a string with a list header.
    :return: :class:`list`
    :rtype: list
    """
    return [
        unquote_header_value(item[1:-1]) if item[:1] == item[-1:] == '"' else item
        for item in _parse_list_header(value)
    ]
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse a ``key=value`` list header (RFC 2068 Section 2) into a dict.

    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]

    Keys without a value map to ``None``:

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    :param value: a string with a dict header.
    :return: :class:`dict`
    :rtype: dict
    """
    pairs = {}
    for item in _parse_list_header(value):
        if '=' not in item:
            pairs[item] = None
            continue
        name, raw = item.split('=', 1)
        # Strip surrounding quotes (browser-style, not strict RFC).
        if raw[:1] == raw[-1:] == '"':
            raw = unquote_header_value(raw[1:-1])
        pairs[name] = raw
    return pairs
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).

    This mimics what browsers actually do, not the strict RFC unquoting:
    IE, for example, uploads files with ``"C:\foo\bar.txt"`` as the filename.

    :param value: the header value to unquote.
    :rtype: str
    """
    if not (value and value[0] == value[-1] == '"'):
        return value

    inner = value[1:-1]

    # If this is a filename and the starting characters look like a UNC path,
    # return it untouched: collapsing the leading double backslash would
    # break _fix_ie_filename(). See #458.
    if is_filename and inner[:2] == '\\\\':
        return inner

    return inner.replace('\\\\', '\\').replace('\\"', '"')
def dict_from_cookiejar(cj):
    """Return a plain name->value dict of the cookies in *cj*.

    :param cj: CookieJar object to extract cookies from.
    :rtype: dict
    """
    return dict((cookie.name, cookie.value) for cookie in cj)
def add_dict_to_cookiejar(cj, cookie_dict):
    """Insert the key/value pairs of *cookie_dict* into CookieJar *cj*.

    :param cj: CookieJar to insert cookies into.
    :param cookie_dict: Dict of key/values to insert into CookieJar.
    :rtype: CookieJar
    """
    cj.update(cookiejar_from_dict(cookie_dict))
    return cj
def get_encodings_from_content(content):
    """Return candidate encodings found in an HTML/XML byte-string body.

    Scans, in order: ``<meta charset=...>``, the http-equiv pragma form, and
    the XML declaration.

    :param content: bytestring to extract encodings from.
    """
    warnings.warn((
        'In requests 3.0, get_encodings_from_content will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)

    patterns = (
        re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I),
        re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I),
        re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]'),
    )
    found = []
    for pattern in patterns:
        found.extend(pattern.findall(content))
    return found
def _parse_content_type_header(header):
    """Split a Content-Type header into (content-type, params-dict).

    Replacement for ``cgi.parse_header`` — the ``cgi`` module was deprecated
    by PEP 594 and removed in Python 3.13. Parameter names are lower-cased
    and surrounding quotes/spaces stripped from values, matching the old
    ``cgi.parse_header`` behaviour for well-formed headers. NOTE: unlike cgi,
    this does not honour ';' inside quoted parameter values (rare in the wild).
    """
    tokens = header.split(';')
    content_type, params = tokens[0].strip(), tokens[1:]
    params_dict = {}
    items_to_strip = "\"' "
    for param in params:
        param = param.strip()
        if param:
            key, value = param, True
            index_of_equals = param.find('=')
            if index_of_equals != -1:
                key = param[:index_of_equals].strip(items_to_strip)
                value = param[index_of_equals + 1:].strip(items_to_strip)
            params_dict[key.lower()] = value
    return content_type, params_dict


def get_encoding_from_headers(headers):
    """Returns encodings from given HTTP Header Dict.

    :param headers: dictionary to extract encoding from.
    :rtype: str
    """
    content_type = headers.get('content-type')

    if not content_type:
        return None

    content_type, params = _parse_content_type_header(content_type)

    if 'charset' in params:
        return params['charset'].strip("'\"")

    if 'text' in content_type:
        # RFC 2616 default for text/* when no charset is declared.
        return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
    """Incrementally decode byte chunks from *iterator* using *r*'s encoding.

    When the response declares no encoding, chunks are passed through as-is.
    """
    encoding = r.encoding

    if encoding is None:
        for chunk in iterator:
            yield chunk
        return

    decoder = codecs.getincrementaldecoder(encoding)(errors='replace')
    for chunk in iterator:
        decoded = decoder.decode(chunk)
        if decoded:
            yield decoded
    # Flush any bytes buffered by a partial multi-byte sequence.
    tail = decoder.decode(b'', final=True)
    if tail:
        yield tail
def iter_slices(string, slice_length):
    """Yield consecutive *slice_length*-sized slices of *string*.

    A ``None`` or non-positive length yields the whole string in one slice.
    """
    length = len(string)
    if slice_length is None or slice_length <= 0:
        slice_length = length
    # Guard against a zero step when the input itself is empty.
    if slice_length:
        for start in range(0, length, slice_length):
            yield string[start:start + slice_length]
def get_unicode_from_response(r):
    """Returns the requested content back in unicode.

    :param r: Response object to get unicode content from.

    Tried:

    1. charset from content-type

    2. fall back and replace all unicode characters

    :rtype: str
    """
    warnings.warn((
        'In requests 3.0, get_unicode_from_response will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)

    tried_encodings = []

    # Try charset from content-type
    encoding = get_encoding_from_headers(r.headers)

    if encoding:
        try:
            return str(r.content, encoding)
        except UnicodeError:
            tried_encodings.append(encoding)

    # Fall back:
    # NOTE(review): when no charset header exists, `encoding` is None here and
    # str(bytes, None, ...) raises TypeError, so raw bytes are returned —
    # presumably intentional as a last resort; confirm before changing.
    try:
        return str(r.content, encoding, errors='replace')
    except TypeError:
        return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    + "0123456789-._~")


def unquote_unreserved(uri):
    """Un-escape percent-escapes that encode unreserved characters.

    Reserved, illegal and non-ASCII bytes stay percent-encoded.

    :rtype: str
    """
    segments = uri.split('%')
    rebuilt = [segments[0]]
    for segment in segments[1:]:
        hex_pair = segment[0:2]
        if len(hex_pair) == 2 and hex_pair.isalnum():
            try:
                char = chr(int(hex_pair, 16))
            except ValueError:
                raise InvalidURL("Invalid percent-escape sequence: '%s'" % hex_pair)
            if char in UNRESERVED_SET:
                rebuilt.append(char + segment[2:])
            else:
                rebuilt.append('%' + segment)
        else:
            rebuilt.append('%' + segment)
    return ''.join(rebuilt)
def requote_uri(uri):
    """Re-quote the given URI.

    Passes the URI through an unquote/quote cycle so that it ends up fully
    and consistently quoted.

    :rtype: str
    """
    safe_chars_with_percent = "!#$%&'()*+,/:;=?@[]~"
    safe_chars_sans_percent = "!#$&'()*+,/:;=?@[]~"
    try:
        # Unquote only the unreserved characters, then quote only illegal
        # characters (do not quote reserved, unreserved, or '%').
        return quote(unquote_unreserved(uri), safe=safe_chars_with_percent)
    except InvalidURL:
        # The URI contained an invalid percent-escape, so there may be bare
        # '%'s in it; quote the URI as-is with '%' no longer marked safe.
        return quote(uri, safe=safe_chars_sans_percent)
def address_in_network(ip, net):
    """Return True if IPv4 address *ip* lies in CIDR network *net*.

    Example: 192.168.1.1 is in 192.168.1.0/24, but not in 192.168.100.0/24.

    :rtype: bool
    """
    def _as_long(dotted):
        # Dotted-quad string -> native-endian unsigned 32-bit integer.
        return struct.unpack('=L', socket.inet_aton(dotted))[0]

    netaddr, bits = net.split('/')
    netmask = _as_long(dotted_netmask(int(bits)))
    network = _as_long(netaddr) & netmask
    return (_as_long(ip) & netmask) == (network & netmask)
def dotted_netmask(mask):
    """Convert a prefix length (/xx form) to a dotted-quad netmask string.

    Example: a mask of 24 returns '255.255.255.0'.

    :rtype: str
    """
    # Shift ones in from the left, keeping only the low 32 bits.
    bits = (0xffffffff << (32 - mask)) & 0xffffffff
    return socket.inet_ntoa(struct.pack('>I', bits))
def is_ipv4_address(string_ip):
    """Return True when *string_ip* parses as an IPv4 address.

    :rtype: bool
    """
    try:
        socket.inet_aton(string_ip)
        return True
    except socket.error:
        return False
def is_valid_cidr(string_network):
    """Very simple check of the cidr format in the no_proxy variable.

    Requires exactly one '/', a mask in 1..32 and a parseable IPv4 address.

    :rtype: bool
    """
    if string_network.count('/') != 1:
        return False

    addr, _, mask_text = string_network.partition('/')

    try:
        mask = int(mask_text)
    except ValueError:
        return False

    if not 1 <= mask <= 32:
        return False

    try:
        socket.inet_aton(addr)
    except socket.error:
        return False

    return True
def should_bypass_proxies(url):
    """
    Returns whether we should bypass proxies or not.

    Consults the no_proxy environment variable (lower- or upper-case) and,
    failing that, the platform's proxy-bypass settings.

    :rtype: bool
    """
    # Check lower-case first, then upper-case, matching curl's behaviour.
    get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())

    # First check whether no_proxy is defined. If it is, check that the URL
    # we're getting isn't in the no_proxy list.
    no_proxy = get_proxy('no_proxy')
    netloc = urlparse(url).netloc

    if no_proxy:
        # We need to check whether we match here. We need to see if we match
        # the end of the netloc, both with and without the port.
        no_proxy = (
            host for host in no_proxy.replace(' ', '').split(',') if host
        )

        ip = netloc.split(':')[0]
        if is_ipv4_address(ip):
            for proxy_ip in no_proxy:
                if is_valid_cidr(proxy_ip):
                    if address_in_network(ip, proxy_ip):
                        return True
                elif ip == proxy_ip:
                    # If no_proxy ip was defined in plain IP notation instead of cidr notation &
                    # matches the IP of the index
                    return True
        else:
            for host in no_proxy:
                if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
                    # The URL does match something in no_proxy, so we don't want
                    # to apply the proxies on this URL.
                    return True

    # If the system proxy settings indicate that this URL should be bypassed,
    # don't proxy.
    # The proxy_bypass function is incredibly buggy on macOS in early versions
    # of Python 2.6, so allow this call to fail. Only catch the specific
    # exceptions we've seen, though: this call failing in other ways can reveal
    # legitimate problems.
    try:
        bypass = proxy_bypass(netloc)
    except (TypeError, socket.gaierror):
        bypass = False

    if bypass:
        return True

    return False
def get_environ_proxies(url):
    """Return the environment's proxy settings, honouring bypass rules.

    :rtype: dict
    """
    return {} if should_bypass_proxies(url) else getproxies()
def select_proxy(url, proxies):
    """Select a proxy for the url, if applicable.

    :param url: The url being for the request
    :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
    """
    proxies = proxies or {}
    parts = urlparse(url)

    if parts.hostname is None:
        return proxies.get('all', proxies.get(parts.scheme))

    # Most-specific key first; note 'all' outranks a bare scheme here.
    candidate_keys = (
        'all://' + parts.hostname,
        'all',
        parts.scheme + '://' + parts.hostname,
        parts.scheme,
    )
    for key in candidate_keys:
        if key in proxies:
            return proxies[key]
    return None
def default_user_agent(name="python-requests"):
    """Return the default User-Agent string, e.g. ``python-requests/2.x``.

    :rtype: str
    """
    return '{0}/{1}'.format(name, __version__)
def default_headers():
    """Build the default request headers.

    :rtype: requests.structures.CaseInsensitiveDict
    """
    headers = CaseInsensitiveDict()
    headers['User-Agent'] = default_user_agent()
    headers['Accept-Encoding'] = ', '.join(('gzip', 'deflate'))
    headers['Accept'] = '*/*'
    headers['Connection'] = 'keep-alive'
    return headers
def parse_header_links(value):
    """Parse an RFC 5988 style Link header into a list of dicts.

    i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"

    :rtype: list
    """
    links = []
    strip_chars = ' \'"'

    for entry in re.split(', *<', value):
        if ';' in entry:
            url, raw_params = entry.split(';', 1)
        else:
            url, raw_params = entry, ''

        link = {'url': url.strip('<> \'"')}

        for param in raw_params.split(';'):
            try:
                key, val = param.split('=')
            except ValueError:
                # Malformed (or empty) parameter: stop processing this entry.
                break
            link[key.strip(strip_chars)] = val.strip(strip_chars)

        links.append(link)

    return links
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii')  # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3


def guess_json_utf(data):
    """Guess which UTF flavour encodes the JSON byte-string *data*.

    :rtype: str
    """
    # JSON always starts with two ASCII characters, so detection is as
    # easy as counting the nulls and from their location and count
    # determine the encoding. Also detect a BOM, if present.
    sample = data[:4]
    # FIX: the second constant was codecs.BOM32_BE, which is a legacy alias
    # for the UTF-16-BE BOM, so a UTF-32-BE BOM was never recognised.
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
        return 'utf-32'     # BOM included
    if sample[:3] == codecs.BOM_UTF8:
        return 'utf-8-sig'  # BOM included, MS style (discouraged)
    if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
        return 'utf-16'     # BOM included
    nullcount = sample.count(_null)
    if nullcount == 0:
        return 'utf-8'
    if nullcount == 2:
        if sample[::2] == _null2:   # 1st and 3rd are null
            return 'utf-16-be'
        if sample[1::2] == _null2:  # 2nd and 4th are null
            return 'utf-16-le'
        # Did not detect 2 valid UTF-16 ascii-range characters
    if nullcount == 3:
        if sample[:3] == _null3:
            return 'utf-32-be'
        if sample[1:] == _null3:
            return 'utf-32-le'
        # Did not detect a valid UTF-32 ascii-range character
    return None
def prepend_scheme_if_needed(url, new_scheme):
    """Given a URL that may or may not have a scheme, prepend the given scheme.

    Does not replace a present scheme with the one provided as an argument.

    :rtype: str
    """
    parsed = urlparse(url, new_scheme)

    netloc, path = parsed.netloc, parsed.path
    # urlparse is a finicky beast, and sometimes decides that there isn't a
    # netloc present. Assume that it's being over-cautious, and switch netloc
    # and path if urlparse decided there was no netloc.
    if not netloc:
        netloc, path = path, netloc

    return urlunparse((parsed.scheme, netloc, path,
                       parsed.params, parsed.query, parsed.fragment))
def get_auth_from_url(url):
    """Extract the authentication components of *url* as (username, password).

    Missing components yield ``('', '')``.

    :rtype: (str,str)
    """
    parsed = urlparse(url)

    try:
        # unquote raises TypeError on None when no userinfo is present.
        return (unquote(parsed.username), unquote(parsed.password))
    except (AttributeError, TypeError):
        return ('', '')
def to_native_string(string, encoding='ascii'):
    """Coerce *string* to the native string type of this Python.

    Encodes on Python 2, decodes on Python 3; assumes ASCII unless told
    otherwise. Native strings pass through untouched.
    """
    if isinstance(string, builtin_str):
        return string
    if is_py2:
        return string.encode(encoding)
    return string.decode(encoding)
# Moved outside of function to avoid recompile every call
_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$')
_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$')


def check_header_validity(header):
    """Verify that a header value contains no leading whitespace or return
    characters, preventing unintended header injection.

    :param header: tuple, in the format (name, value).
    """
    name, value = header
    pattern = (_CLEAN_HEADER_REGEX_BYTE if isinstance(value, bytes)
               else _CLEAN_HEADER_REGEX_STR)
    try:
        if not pattern.match(value):
            raise InvalidHeader("Invalid return character or leading space in header: %s" % name)
    except TypeError:
        # A str pattern fed bytes (or vice versa / non-string) raises TypeError.
        raise InvalidHeader("Header value %s must be of type str or bytes, "
                            "not %s" % (value, type(value)))
def urldefragauth(url):
    """Strip the fragment and the authentication part from *url*.

    :rtype: str
    """
    parsed = urlparse(url)
    netloc, path = parsed.netloc, parsed.path

    # see func:`prepend_scheme_if_needed` — urlparse may misplace the netloc.
    if not netloc:
        netloc, path = path, netloc

    # Drop any 'user:pass@' prefix from the host portion.
    netloc = netloc.rsplit('@', 1)[-1]

    return urlunparse((parsed.scheme, netloc, path,
                       parsed.params, parsed.query, ''))
|
Morphux/installer | refs/heads/master | pkgs/tar_p1/__init__.py | 12133432 | |
mintoo/NetDim | refs/heads/master | pyNMS/graph_generation/__init__.py | 12133432 | |
lmorchard/django | refs/heads/master | tests/get_earliest_or_latest/__init__.py | 12133432 | |
pasqualguerrero/django | refs/heads/master | tests/select_for_update/__init__.py | 12133432 | |
disqus/django-old | refs/heads/master | django/conf/locale/nl/__init__.py | 12133432 | |
lz1988/django-web2015 | refs/heads/master | django/contrib/sitemaps/management/commands/__init__.py | 12133432 | |
akrherz/pyWWA | refs/heads/main | parsers/ffg_parser.py | 1 | """ FFG """
# Local
from pywwa.workflows.ffg_parser import main

# Script entry point: delegate to the shared workflow implementation.
if __name__ == "__main__":
    main()
|
jniediek/combinato | refs/heads/master | combinato/cluster/plot_temp.py | 1 | # JN 2015-01-13
from __future__ import absolute_import, print_function, division
import matplotlib.pyplot as mpl
from .. import options
def plot_temperatures(tree, used_points):
    """
    show which clusters were selected in temperature plot

    tree: SPC cluster tree array — column 1 is used as the temperature axis
        and columns 4..MaxClustersPerTemp+5 as per-cluster sizes
        (NOTE(review): inferred from the indexing below; confirm layout).
    used_points: iterable of (row, col, color) triples marking the selected
        clusters to highlight.
    Returns the created matplotlib figure.
    """
    # Plot only the first MaxClustersPerTemp cluster-size columns.
    upto_line = options['MaxClustersPerTemp'] + 5
    fig = mpl.figure(figsize=options['tempfigsize'])
    plot = fig.add_subplot(1, 1, 1)
    plot.grid(True)
    plot.plot(tree[:, 1], tree[:, 4:upto_line])
    # Cluster sizes span orders of magnitude; log scale keeps them readable.
    plot.set_yscale('log')
    plot.set_xlim((tree[0, 1], tree[-1, 1]))
    for row, col, color in used_points:
        x = tree[row, 1]
        y = tree[row, col]
        plot.scatter(x, y, color=color)
        # Annotate each selected point with its (integer) cluster size.
        plot.text(x, y, '{:.0f}'.format(y))
    return fig
|
Observer-Wu/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi.py | 139 | # Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file provides the opening handshake processor for the WebSocket
protocol (RFC 6455).
Specification:
http://tools.ietf.org/html/rfc6455
"""
# Note: request.connection.write is used in this module, even though mod_python
# document says that it should be used only in connection handlers.
# Unfortunately, we have no other options. For example, request.write is not
# suitable because it doesn't allow direct raw bytes writing.
import base64
import logging
import os
import re
from mod_pywebsocket import common
from mod_pywebsocket.extensions import get_extension_processor
from mod_pywebsocket.extensions import is_compression_extension
from mod_pywebsocket.handshake._base import check_request_line
from mod_pywebsocket.handshake._base import format_header
from mod_pywebsocket.handshake._base import get_mandatory_header
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake._base import parse_token_list
from mod_pywebsocket.handshake._base import validate_mandatory_header
from mod_pywebsocket.handshake._base import validate_subprotocol
from mod_pywebsocket.handshake._base import VersionException
from mod_pywebsocket.stream import Stream
from mod_pywebsocket.stream import StreamOptions
from mod_pywebsocket import util
# Used to validate the value in the Sec-WebSocket-Key header strictly. RFC 4648
# disallows non-zero padding, so the character right before == must be any of
# A, Q, g and w.
# (21 base64 characters, one of [AQgw], then '==' encode exactly the 16 bytes
# required of a key; see Handshaker._validate_key.)
_SEC_WEBSOCKET_KEY_REGEX = re.compile('^[+/0-9A-Za-z]{21}[AQgw]==$')

# Defining aliases for values used frequently.
_VERSION_HYBI08 = common.VERSION_HYBI08
_VERSION_HYBI08_STRING = str(_VERSION_HYBI08)
_VERSION_LATEST = common.VERSION_HYBI_LATEST
_VERSION_LATEST_STRING = str(_VERSION_LATEST)
# Protocol versions accepted by Handshaker._check_version.
_SUPPORTED_VERSIONS = [
    _VERSION_LATEST,
    _VERSION_HYBI08,
]
def compute_accept(key):
    """Compute the Sec-WebSocket-Accept header value for the given
    Sec-WebSocket-Key header value.

    Returns:
        A (base64_text, raw_digest) tuple: the base64-encoded SHA-1 digest
        that goes on the wire, and the raw digest bytes (useful for
        logging).
    """
    # Per the protocol: SHA-1 over the client key concatenated with the
    # fixed accept GUID, then base64-encoded.
    digest = util.sha1_hash(key + common.WEBSOCKET_ACCEPT_UUID).digest()
    return (base64.b64encode(digest), digest)
class Handshaker(object):
    """Opening handshake processor for the WebSocket protocol (RFC 6455)."""

    def __init__(self, request, dispatcher):
        """Construct an instance.

        Args:
            request: mod_python request.
            dispatcher: Dispatcher (dispatch.Dispatcher).

        Handshaker will add attributes such as ws_resource during handshake.
        """
        self._logger = util.get_class_logger(self)

        self._request = request
        self._dispatcher = dispatcher

    def _validate_connection_header(self):
        """Raise HandshakeException unless the Connection header contains
        the "Upgrade" token (compared case-insensitively).
        """
        connection = get_mandatory_header(
            self._request, common.CONNECTION_HEADER)

        try:
            connection_tokens = parse_token_list(connection)
        except HandshakeException, e:
            raise HandshakeException(
                'Failed to parse %s: %s' % (common.CONNECTION_HEADER, e))

        connection_is_valid = False
        for token in connection_tokens:
            if token.lower() == common.UPGRADE_CONNECTION_TYPE.lower():
                connection_is_valid = True
                break
        if not connection_is_valid:
            raise HandshakeException(
                '%s header doesn\'t contain "%s"' %
                (common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))

    def do_handshake(self):
        """Perform the opening handshake.

        Parses and validates the client's request, lets the handler's
        extra-handshake hook influence subprotocol/extension negotiation,
        and writes the 101 Switching Protocols response.

        Raises:
            HandshakeException: the client's request is invalid; e.status
                carries the HTTP status to respond with (400 by default).
            VersionException: the requested Sec-WebSocket-Version is not
                supported.
        """
        # Reset close-status attributes for this connection.
        self._request.ws_close_code = None
        self._request.ws_close_reason = None

        # Parsing.

        check_request_line(self._request)

        validate_mandatory_header(
            self._request,
            common.UPGRADE_HEADER,
            common.WEBSOCKET_UPGRADE_TYPE)

        self._validate_connection_header()

        self._request.ws_resource = self._request.uri

        # Host is mandatory, but its value is not used beyond validation.
        unused_host = get_mandatory_header(self._request, common.HOST_HEADER)

        self._request.ws_version = self._check_version()

        # This handshake must be based on latest hybi. We are responsible to
        # fallback to HTTP on handshake failure as latest hybi handshake
        # specifies.
        try:
            self._get_origin()
            self._set_protocol()
            self._parse_extensions()

            # Key validation, response generation.

            key = self._get_key()
            (accept, accept_binary) = compute_accept(key)
            self._logger.debug(
                '%s: %r (%s)',
                common.SEC_WEBSOCKET_ACCEPT_HEADER,
                accept,
                util.hexify(accept_binary))

            self._logger.debug('Protocol version is RFC 6455')

            # Setup extension processors.

            processors = []
            if self._request.ws_requested_extensions is not None:
                for extension_request in self._request.ws_requested_extensions:
                    processor = get_extension_processor(extension_request)
                    # Unknown extension requests are just ignored.
                    if processor is not None:
                        processors.append(processor)
            self._request.ws_extension_processors = processors

            # List of extra headers. The extra handshake handler may add header
            # data as name/value pairs to this list and pywebsocket appends
            # them to the WebSocket handshake.
            self._request.extra_headers = []

            # Extra handshake handler may modify/remove processors.
            self._dispatcher.do_extra_handshake(self._request)
            processors = filter(lambda processor: processor is not None,
                                self._request.ws_extension_processors)

            # Ask each processor if there are extensions on the request which
            # cannot co-exist. When processor decided other processors cannot
            # co-exist with it, the processor marks them (or itself) as
            # "inactive". The first extension processor has the right to
            # make the final call.
            for processor in reversed(processors):
                if processor.is_active():
                    processor.check_consistency_with_other_processors(
                        processors)
            processors = filter(lambda processor: processor.is_active(),
                                processors)

            accepted_extensions = []

            # We need to take into account of mux extension here.
            # If mux extension exists:
            # - Remove processors of extensions for logical channel,
            #   which are processors located before the mux processor
            # - Pass extension requests for logical channel to mux processor
            # - Attach the mux processor to the request. It will be referred
            #   by dispatcher to see whether the dispatcher should use mux
            #   handler or not.
            mux_index = -1
            for i, processor in enumerate(processors):
                if processor.name() == common.MUX_EXTENSION:
                    mux_index = i
                    break
            if mux_index >= 0:
                logical_channel_extensions = []
                for processor in processors[:mux_index]:
                    logical_channel_extensions.append(processor.request())
                    processor.set_active(False)
                self._request.mux_processor = processors[mux_index]
                self._request.mux_processor.set_extensions(
                    logical_channel_extensions)
                processors = filter(lambda processor: processor.is_active(),
                                    processors)

            stream_options = StreamOptions()
            for index, processor in enumerate(processors):
                if not processor.is_active():
                    continue

                extension_response = processor.get_extension_response()
                if extension_response is None:
                    # Rejected.
                    continue

                accepted_extensions.append(extension_response)

                processor.setup_stream_options(stream_options)

                if not is_compression_extension(processor.name()):
                    continue

                # Inactivate all of the following compression extensions.
                for j in xrange(index + 1, len(processors)):
                    if is_compression_extension(processors[j].name()):
                        processors[j].set_active(False)

            if len(accepted_extensions) > 0:
                self._request.ws_extensions = accepted_extensions
                self._logger.debug(
                    'Extensions accepted: %r',
                    map(common.ExtensionParameter.name, accepted_extensions))
            else:
                self._request.ws_extensions = None

            self._request.ws_stream = self._create_stream(stream_options)

            if self._request.ws_requested_protocols is not None:
                if self._request.ws_protocol is None:
                    raise HandshakeException(
                        'do_extra_handshake must choose one subprotocol from '
                        'ws_requested_protocols and set it to ws_protocol')
                validate_subprotocol(self._request.ws_protocol)

                self._logger.debug(
                    'Subprotocol accepted: %r',
                    self._request.ws_protocol)
            else:
                if self._request.ws_protocol is not None:
                    raise HandshakeException(
                        'ws_protocol must be None when the client didn\'t '
                        'request any subprotocol')

            self._send_handshake(accept)
        except HandshakeException, e:
            if not e.status:
                # Fallback to 400 bad request by default.
                e.status = common.HTTP_STATUS_BAD_REQUEST
            raise e

    def _get_origin(self):
        """Store the client's origin (if any) on request.ws_origin.

        HyBi 08 uses the Sec-WebSocket-Origin header; later versions use
        Origin. The header is optional; ws_origin is None when absent.
        """
        if self._request.ws_version is _VERSION_HYBI08:
            origin_header = common.SEC_WEBSOCKET_ORIGIN_HEADER
        else:
            origin_header = common.ORIGIN_HEADER
        origin = self._request.headers_in.get(origin_header)
        if origin is None:
            self._logger.debug('Client request does not have origin header')
        self._request.ws_origin = origin

    def _check_version(self):
        """Return the supported version requested via Sec-WebSocket-Version.

        Raises:
            HandshakeException: the header lists multiple versions.
            VersionException: the single requested version is unsupported
                (carries the list of supported versions for the response).
        """
        version = get_mandatory_header(self._request,
                                       common.SEC_WEBSOCKET_VERSION_HEADER)
        if version == _VERSION_HYBI08_STRING:
            return _VERSION_HYBI08
        if version == _VERSION_LATEST_STRING:
            return _VERSION_LATEST

        if version.find(',') >= 0:
            raise HandshakeException(
                'Multiple versions (%r) are not allowed for header %s' %
                (version, common.SEC_WEBSOCKET_VERSION_HEADER),
                status=common.HTTP_STATUS_BAD_REQUEST)
        raise VersionException(
            'Unsupported version %r for header %s' %
            (version, common.SEC_WEBSOCKET_VERSION_HEADER),
            supported_versions=', '.join(map(str, _SUPPORTED_VERSIONS)))

    def _set_protocol(self):
        """Parse Sec-WebSocket-Protocol into request.ws_requested_protocols
        (None when the header is absent). request.ws_protocol starts as
        None; do_extra_handshake is expected to select one (see
        do_handshake).
        """
        self._request.ws_protocol = None

        protocol_header = self._request.headers_in.get(
            common.SEC_WEBSOCKET_PROTOCOL_HEADER)

        if protocol_header is None:
            self._request.ws_requested_protocols = None
            return

        self._request.ws_requested_protocols = parse_token_list(
            protocol_header)
        self._logger.debug('Subprotocols requested: %r',
                           self._request.ws_requested_protocols)

    def _parse_extensions(self):
        """Parse Sec-WebSocket-Extensions into
        request.ws_requested_extensions (None when the header is absent).

        Raises:
            HandshakeException: the header value cannot be parsed.
        """
        extensions_header = self._request.headers_in.get(
            common.SEC_WEBSOCKET_EXTENSIONS_HEADER)
        if not extensions_header:
            self._request.ws_requested_extensions = None
            return

        # Quoted-string parameter values are only accepted for versions
        # newer than HyBi 08.
        if self._request.ws_version is common.VERSION_HYBI08:
            allow_quoted_string=False
        else:
            allow_quoted_string=True
        try:
            self._request.ws_requested_extensions = common.parse_extensions(
                extensions_header, allow_quoted_string=allow_quoted_string)
        except common.ExtensionParsingException, e:
            raise HandshakeException(
                'Failed to parse Sec-WebSocket-Extensions header: %r' % e)

        self._logger.debug(
            'Extensions requested: %r',
            map(common.ExtensionParameter.name,
                self._request.ws_requested_extensions))

    def _validate_key(self, key):
        """Validate a Sec-WebSocket-Key value and return its decoded form.

        Raises:
            HandshakeException: the key contains ',' (i.e. multiple header
                lines were joined) or is not a strict base64 encoding of
                exactly 16 bytes.
        """
        if key.find(',') >= 0:
            raise HandshakeException('Request has multiple %s header lines or '
                                     'contains illegal character \',\': %r' %
                                     (common.SEC_WEBSOCKET_KEY_HEADER, key))

        # Validate
        key_is_valid = False
        try:
            # Validate key by quick regex match before parsing by base64
            # module. Because base64 module skips invalid characters, we have
            # to do this in advance to make this server strictly reject illegal
            # keys.
            if _SEC_WEBSOCKET_KEY_REGEX.match(key):
                decoded_key = base64.b64decode(key)
                if len(decoded_key) == 16:
                    key_is_valid = True
        except TypeError, e:
            pass

        if not key_is_valid:
            raise HandshakeException(
                'Illegal value for header %s: %r' %
                (common.SEC_WEBSOCKET_KEY_HEADER, key))

        return decoded_key

    def _get_key(self):
        """Return the validated Sec-WebSocket-Key header value (the
        original base64 text, not the decoded bytes).
        """
        key = get_mandatory_header(
            self._request, common.SEC_WEBSOCKET_KEY_HEADER)

        decoded_key = self._validate_key(key)

        self._logger.debug(
            '%s: %r (%s)',
            common.SEC_WEBSOCKET_KEY_HEADER,
            key,
            util.hexify(decoded_key))

        return key

    def _create_stream(self, stream_options):
        """Create the frame-level Stream for this connection."""
        return Stream(self._request, stream_options)

    def _create_handshake_response(self, accept):
        """Build the complete 101 Switching Protocols response string,
        including negotiated subprotocol/extension headers and any extra
        headers the handler added to request.extra_headers.
        """
        response = []

        response.append('HTTP/1.1 101 Switching Protocols\r\n')

        # WebSocket headers
        response.append(format_header(
            common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE))
        response.append(format_header(
            common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
        response.append(format_header(
            common.SEC_WEBSOCKET_ACCEPT_HEADER, accept))
        if self._request.ws_protocol is not None:
            response.append(format_header(
                common.SEC_WEBSOCKET_PROTOCOL_HEADER,
                self._request.ws_protocol))
        if (self._request.ws_extensions is not None and
            len(self._request.ws_extensions) != 0):
            response.append(format_header(
                common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
                common.format_extensions(self._request.ws_extensions)))

        # Headers not specific for WebSocket
        for name, value in self._request.extra_headers:
            response.append(format_header(name, value))

        response.append('\r\n')

        return ''.join(response)

    def _send_handshake(self, accept):
        """Write the handshake response to the raw connection.

        request.connection.write is used deliberately for raw byte output;
        see the module-level comment.
        """
        raw_response = self._create_handshake_response(accept)
        self._request.connection.write(raw_response)
        self._logger.debug('Sent server\'s opening handshake: %r',
                           raw_response)
# vi:sts=4 sw=4 et
|
Simran-B/arangodb | refs/heads/docs_3.0 | 3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_new.py | 58 | import unittest
from test import test_support
import sys
new = test_support.import_module('new', deprecated=True)
class NewTest(unittest.TestCase):
    """Tests for the deprecated Python 2 `new` module: runtime creation of
    modules, classic classes, instances, bound methods, functions and code
    objects.
    """

    def test_spam(self):
        """Exercise new.module/classobj/instance/instancemethod together
        via a hand-crafted 'Spam' module and class hierarchy.
        """
        class Eggs:
            def get_yolks(self):
                return self.yolks

        m = new.module('Spam')
        m.Eggs = Eggs
        sys.modules['Spam'] = m
        import Spam

        def get_more_yolks(self):
            return self.yolks + 3

        # new.classobj()
        C = new.classobj('Spam', (Spam.Eggs,), {'get_more_yolks': get_more_yolks})

        # new.instance()
        c = new.instance(C, {'yolks': 3})

        o = new.instance(C)
        self.assertEqual(o.__dict__, {}, "new __dict__ should be empty")
        del o
        # An explicit None dict must also yield an empty __dict__.
        o = new.instance(C, None)
        self.assertEqual(o.__dict__, {}, "new __dict__ should be empty")
        del o

        def break_yolks(self):
            self.yolks = self.yolks - 2

        # new.instancemethod()
        im = new.instancemethod(break_yolks, c, C)

        self.assertEqual(c.get_yolks(), 3,
                         'Broken call of hand-crafted class instance')
        self.assertEqual(c.get_more_yolks(), 6,
                         'Broken call of hand-crafted class instance')

        im()
        self.assertEqual(c.get_yolks(), 1,
                         'Broken call of hand-crafted instance method')
        self.assertEqual(c.get_more_yolks(), 4,
                         'Broken call of hand-crafted instance method')
        # Bound method created without an explicit class argument.
        im = new.instancemethod(break_yolks, c)
        im()
        self.assertEqual(c.get_yolks(), -1)

        # Verify that dangerous instance method creation is forbidden
        self.assertRaises(TypeError, new.instancemethod, break_yolks, None)

        # Verify that instancemethod() doesn't allow keyword args
        self.assertRaises(TypeError, new.instancemethod, break_yolks, c, kw=1)

    def test_scope(self):
        """Check that new.function binds a module-scope code object to the
        supplied globals dict.
        """
        # It's unclear what the semantics should be for a code object compiled
        # at module scope, but bound and run in a function. In CPython, `c' is
        # global (by accident?) while in Jython, `c' is local. The intent of
        # the test clearly is to make `c' global, so let's be explicit about it.
        codestr = '''
        global c
        a = 1
        b = 2
        c = a + b
        '''

        codestr = "\n".join(l.strip() for l in codestr.splitlines())

        ccode = compile(codestr, '<string>', 'exec')
        # Jython doesn't have a __builtins__, so use a portable alternative
        import __builtin__
        g = {'c': 0, '__builtins__': __builtin__}

        # this test could be more robust
        func = new.function(ccode, g)
        func()
        self.assertEqual(g['c'], 3, 'Could not create a proper function object')

    def test_function(self):
        """Exercise the extended new.function signature: name, defaults
        tuple and closure handling, including the error cases.
        """
        # test the various extended flavors of function.new
        def f(x):
            def g(y):
                return x + y
            return g
        g = f(4)
        new.function(f.func_code, {}, "blah")
        g2 = new.function(g.func_code, {}, "blah", (2,), g.func_closure)
        self.assertEqual(g2(), 6)
        g3 = new.function(g.func_code, {}, "blah", None, g.func_closure)
        self.assertEqual(g3(5), 9)
        def test_closure(func, closure, exc):
            self.assertRaises(exc, new.function, func.func_code, {}, "", None, closure)
        test_closure(g, None, TypeError) # invalid closure
        test_closure(g, (1,), TypeError) # non-cell in closure
        test_closure(g, (1, 1), ValueError) # closure is wrong size
        test_closure(f, g.func_closure, ValueError) # no closure needed

    # Note: Jython will never have new.code()
    if hasattr(new, 'code'):
        def test_code(self):
            """Round-trip the attributes of a real code object through
            new.code and check its argument validation.
            """
            # bogus test of new.code()
            def f(a): pass

            c = f.func_code
            argcount = c.co_argcount
            nlocals = c.co_nlocals
            stacksize = c.co_stacksize
            flags = c.co_flags
            codestring = c.co_code
            constants = c.co_consts
            names = c.co_names
            varnames = c.co_varnames
            filename = c.co_filename
            name = c.co_name
            firstlineno = c.co_firstlineno
            lnotab = c.co_lnotab
            freevars = c.co_freevars
            cellvars = c.co_cellvars

            d = new.code(argcount, nlocals, stacksize, flags, codestring,
                         constants, names, varnames, filename, name,
                         firstlineno, lnotab, freevars, cellvars)

            # test backwards-compatibility version with no freevars or cellvars
            d = new.code(argcount, nlocals, stacksize, flags, codestring,
                         constants, names, varnames, filename, name,
                         firstlineno, lnotab)

            # negative co_argcount used to trigger a SystemError
            self.assertRaises(ValueError, new.code,
                -argcount, nlocals, stacksize, flags, codestring,
                constants, names, varnames, filename, name, firstlineno, lnotab)

            # negative co_nlocals used to trigger a SystemError
            self.assertRaises(ValueError, new.code,
                argcount, -nlocals, stacksize, flags, codestring,
                constants, names, varnames, filename, name, firstlineno, lnotab)

            # non-string co_name used to trigger a Py_FatalError
            self.assertRaises(TypeError, new.code,
                argcount, nlocals, stacksize, flags, codestring,
                constants, (5,), varnames, filename, name, firstlineno, lnotab)

            # new.code used to be a way to mutate a tuple...
            class S(str):
                pass
            t = (S("ab"),)
            d = new.code(argcount, nlocals, stacksize, flags, codestring,
                         constants, t, varnames, filename, name,
                         firstlineno, lnotab)
            self.assert_(type(t[0]) is S, "eek, tuple changed under us!")
def test_main():
    """Entry point used by regrtest: run all NewTest test cases."""
    test_support.run_unittest(NewTest)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_main()
|
ravenac95/virtstrap | refs/heads/develop | virtstrap-core/virtstrap/packages/simpleyaml/emitter.py | 388 |
# Emitter expects events obeying the following grammar:
# stream ::= STREAM-START document* STREAM-END
# document ::= DOCUMENT-START node DOCUMENT-END
# node ::= SCALAR | sequence | mapping
# sequence ::= SEQUENCE-START node* SEQUENCE-END
# mapping ::= MAPPING-START (node node)* MAPPING-END
__all__ = ['Emitter', 'EmitterError']
from error import YAMLError
from events import *
class EmitterError(YAMLError):
    """Raised when the emitter receives an event sequence that violates
    the event grammar (see the module-level comment) or data it cannot
    emit (e.g. an empty/invalid tag, tag handle, anchor or version).
    """
    pass
class ScalarAnalysis(object):
    """Result of analyzing a scalar value (see Emitter.analyze_scalar):
    the scalar text plus flags describing which output styles can legally
    represent it.
    """
    def __init__(self, scalar, empty, multiline,
            allow_flow_plain, allow_block_plain,
            allow_single_quoted, allow_double_quoted,
            allow_block):
        # The scalar text that was analyzed.
        self.scalar = scalar
        # True if the scalar is empty.
        self.empty = empty
        # True if the scalar spans multiple lines.
        self.multiline = multiline
        # Whether plain (unquoted) style is permitted in flow context.
        self.allow_flow_plain = allow_flow_plain
        # Whether plain (unquoted) style is permitted in block context.
        self.allow_block_plain = allow_block_plain
        # Whether single-quoted style can represent the scalar.
        self.allow_single_quoted = allow_single_quoted
        # Whether double-quoted style can represent the scalar.
        self.allow_double_quoted = allow_double_quoted
        # Whether literal/folded block styles are permitted.
        self.allow_block = allow_block
class Emitter(object):
DEFAULT_TAG_PREFIXES = {
u'!' : u'!',
u'tag:yaml.org,2002:' : u'!!',
}
def __init__(self, stream, canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None):
# The stream should have the methods `write` and possibly `flush`.
self.stream = stream
# Encoding can be overriden by STREAM-START.
self.encoding = None
# Emitter is a state machine with a stack of states to handle nested
# structures.
self.states = []
self.state = self.expect_stream_start
# Current event and the event queue.
self.events = []
self.event = None
# The current indentation level and the stack of previous indents.
self.indents = []
self.indent = None
# Flow level.
self.flow_level = 0
# Contexts.
self.root_context = False
self.sequence_context = False
self.mapping_context = False
self.simple_key_context = False
# Characteristics of the last emitted character:
# - current position.
# - is it a whitespace?
# - is it an indention character
# (indentation space, '-', '?', or ':')?
self.line = 0
self.column = 0
self.whitespace = True
self.indention = True
# Whether the document requires an explicit document indicator
self.open_ended = False
# Formatting details.
self.canonical = canonical
self.allow_unicode = allow_unicode
self.best_indent = 2
if indent and 1 < indent < 10:
self.best_indent = indent
self.best_width = 80
if width and width > self.best_indent*2:
self.best_width = width
self.best_line_break = u'\n'
if line_break in [u'\r', u'\n', u'\r\n']:
self.best_line_break = line_break
# Tag prefixes.
self.tag_prefixes = None
# Prepared anchor and tag.
self.prepared_anchor = None
self.prepared_tag = None
# Scalar analysis and style.
self.analysis = None
self.style = None
def dispose(self):
# Reset the state attributes (to clear self-references)
self.states = []
self.state = None
def emit(self, event):
self.events.append(event)
while not self.need_more_events():
self.event = self.events.pop(0)
self.state()
self.event = None
# In some cases, we wait for a few next events before emitting.
def need_more_events(self):
if not self.events:
return True
event = self.events[0]
if isinstance(event, DocumentStartEvent):
return self.need_events(1)
elif isinstance(event, SequenceStartEvent):
return self.need_events(2)
elif isinstance(event, MappingStartEvent):
return self.need_events(3)
else:
return False
def need_events(self, count):
level = 0
for event in self.events[1:]:
if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
level += 1
elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
level -= 1
elif isinstance(event, StreamEndEvent):
level = -1
if level < 0:
return False
return (len(self.events) < count+1)
def increase_indent(self, flow=False, indentless=False):
self.indents.append(self.indent)
if self.indent is None:
if flow:
self.indent = self.best_indent
else:
self.indent = 0
elif not indentless:
self.indent += self.best_indent
# States.
# Stream handlers.
def expect_stream_start(self):
if isinstance(self.event, StreamStartEvent):
if self.event.encoding and not getattr(self.stream, 'encoding', None):
self.encoding = self.event.encoding
self.write_stream_start()
self.state = self.expect_first_document_start
else:
raise EmitterError("expected StreamStartEvent, but got %s"
% self.event)
def expect_nothing(self):
raise EmitterError("expected nothing, but got %s" % self.event)
# Document handlers.
def expect_first_document_start(self):
return self.expect_document_start(first=True)
def expect_document_start(self, first=False):
if isinstance(self.event, DocumentStartEvent):
if (self.event.version or self.event.tags) and self.open_ended:
self.write_indicator(u'...', True)
self.write_indent()
if self.event.version:
version_text = self.prepare_version(self.event.version)
self.write_version_directive(version_text)
self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
if self.event.tags:
handles = self.event.tags.keys()
handles.sort()
for handle in handles:
prefix = self.event.tags[handle]
self.tag_prefixes[prefix] = handle
handle_text = self.prepare_tag_handle(handle)
prefix_text = self.prepare_tag_prefix(prefix)
self.write_tag_directive(handle_text, prefix_text)
implicit = (first and not self.event.explicit and not self.canonical
and not self.event.version and not self.event.tags
and not self.check_empty_document())
if not implicit:
self.write_indent()
self.write_indicator(u'---', True)
if self.canonical:
self.write_indent()
self.state = self.expect_document_root
elif isinstance(self.event, StreamEndEvent):
if self.open_ended:
self.write_indicator(u'...', True)
self.write_indent()
self.write_stream_end()
self.state = self.expect_nothing
else:
raise EmitterError("expected DocumentStartEvent, but got %s"
% self.event)
def expect_document_end(self):
if isinstance(self.event, DocumentEndEvent):
self.write_indent()
if self.event.explicit:
self.write_indicator(u'...', True)
self.write_indent()
self.flush_stream()
self.state = self.expect_document_start
else:
raise EmitterError("expected DocumentEndEvent, but got %s"
% self.event)
def expect_document_root(self):
self.states.append(self.expect_document_end)
self.expect_node(root=True)
# Node handlers.
def expect_node(self, root=False, sequence=False, mapping=False,
simple_key=False):
self.root_context = root
self.sequence_context = sequence
self.mapping_context = mapping
self.simple_key_context = simple_key
if isinstance(self.event, AliasEvent):
self.expect_alias()
elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
self.process_anchor(u'&')
self.process_tag()
if isinstance(self.event, ScalarEvent):
self.expect_scalar()
elif isinstance(self.event, SequenceStartEvent):
if self.flow_level or self.canonical or self.event.flow_style \
or self.check_empty_sequence():
self.expect_flow_sequence()
else:
self.expect_block_sequence()
elif isinstance(self.event, MappingStartEvent):
if self.flow_level or self.canonical or self.event.flow_style \
or self.check_empty_mapping():
self.expect_flow_mapping()
else:
self.expect_block_mapping()
else:
raise EmitterError("expected NodeEvent, but got %s" % self.event)
def expect_alias(self):
if self.event.anchor is None:
raise EmitterError("anchor is not specified for alias")
self.process_anchor(u'*')
self.state = self.states.pop()
def expect_scalar(self):
self.increase_indent(flow=True)
self.process_scalar()
self.indent = self.indents.pop()
self.state = self.states.pop()
# Flow sequence handlers.
def expect_flow_sequence(self):
self.write_indicator(u'[', True, whitespace=True)
self.flow_level += 1
self.increase_indent(flow=True)
self.state = self.expect_first_flow_sequence_item
def expect_first_flow_sequence_item(self):
if isinstance(self.event, SequenceEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
self.write_indicator(u']', False)
self.state = self.states.pop()
else:
if self.canonical or self.column > self.best_width:
self.write_indent()
self.states.append(self.expect_flow_sequence_item)
self.expect_node(sequence=True)
def expect_flow_sequence_item(self):
if isinstance(self.event, SequenceEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
if self.canonical:
self.write_indicator(u',', False)
self.write_indent()
self.write_indicator(u']', False)
self.state = self.states.pop()
else:
self.write_indicator(u',', False)
if self.canonical or self.column > self.best_width:
self.write_indent()
self.states.append(self.expect_flow_sequence_item)
self.expect_node(sequence=True)
# Flow mapping handlers.
def expect_flow_mapping(self):
self.write_indicator(u'{', True, whitespace=True)
self.flow_level += 1
self.increase_indent(flow=True)
self.state = self.expect_first_flow_mapping_key
def expect_first_flow_mapping_key(self):
if isinstance(self.event, MappingEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
self.write_indicator(u'}', False)
self.state = self.states.pop()
else:
if self.canonical or self.column > self.best_width:
self.write_indent()
if not self.canonical and self.check_simple_key():
self.states.append(self.expect_flow_mapping_simple_value)
self.expect_node(mapping=True, simple_key=True)
else:
self.write_indicator(u'?', True)
self.states.append(self.expect_flow_mapping_value)
self.expect_node(mapping=True)
def expect_flow_mapping_key(self):
if isinstance(self.event, MappingEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
if self.canonical:
self.write_indicator(u',', False)
self.write_indent()
self.write_indicator(u'}', False)
self.state = self.states.pop()
else:
self.write_indicator(u',', False)
if self.canonical or self.column > self.best_width:
self.write_indent()
if not self.canonical and self.check_simple_key():
self.states.append(self.expect_flow_mapping_simple_value)
self.expect_node(mapping=True, simple_key=True)
else:
self.write_indicator(u'?', True)
self.states.append(self.expect_flow_mapping_value)
self.expect_node(mapping=True)
def expect_flow_mapping_simple_value(self):
self.write_indicator(u':', False)
self.states.append(self.expect_flow_mapping_key)
self.expect_node(mapping=True)
def expect_flow_mapping_value(self):
if self.canonical or self.column > self.best_width:
self.write_indent()
self.write_indicator(u':', True)
self.states.append(self.expect_flow_mapping_key)
self.expect_node(mapping=True)
# Block sequence handlers.
def expect_block_sequence(self):
indentless = (self.mapping_context and not self.indention)
self.increase_indent(flow=False, indentless=indentless)
self.state = self.expect_first_block_sequence_item
def expect_first_block_sequence_item(self):
return self.expect_block_sequence_item(first=True)
def expect_block_sequence_item(self, first=False):
if not first and isinstance(self.event, SequenceEndEvent):
self.indent = self.indents.pop()
self.state = self.states.pop()
else:
self.write_indent()
self.write_indicator(u'-', True, indention=True)
self.states.append(self.expect_block_sequence_item)
self.expect_node(sequence=True)
# Block mapping handlers.
def expect_block_mapping(self):
self.increase_indent(flow=False)
self.state = self.expect_first_block_mapping_key
def expect_first_block_mapping_key(self):
return self.expect_block_mapping_key(first=True)
def expect_block_mapping_key(self, first=False):
if not first and isinstance(self.event, MappingEndEvent):
self.indent = self.indents.pop()
self.state = self.states.pop()
else:
self.write_indent()
if self.check_simple_key():
self.states.append(self.expect_block_mapping_simple_value)
self.expect_node(mapping=True, simple_key=True)
else:
self.write_indicator(u'?', True, indention=True)
self.states.append(self.expect_block_mapping_value)
self.expect_node(mapping=True)
def expect_block_mapping_simple_value(self):
self.write_indicator(u':', False)
self.states.append(self.expect_block_mapping_key)
self.expect_node(mapping=True)
def expect_block_mapping_value(self):
self.write_indent()
self.write_indicator(u':', True, indention=True)
self.states.append(self.expect_block_mapping_key)
self.expect_node(mapping=True)
# Checkers.
def check_empty_sequence(self):
return (isinstance(self.event, SequenceStartEvent) and self.events
and isinstance(self.events[0], SequenceEndEvent))
def check_empty_mapping(self):
return (isinstance(self.event, MappingStartEvent) and self.events
and isinstance(self.events[0], MappingEndEvent))
def check_empty_document(self):
if not isinstance(self.event, DocumentStartEvent) or not self.events:
return False
event = self.events[0]
return (isinstance(event, ScalarEvent) and event.anchor is None
and event.tag is None and event.implicit and event.value == u'')
def check_simple_key(self):
length = 0
if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
if self.prepared_anchor is None:
self.prepared_anchor = self.prepare_anchor(self.event.anchor)
length += len(self.prepared_anchor)
if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
and self.event.tag is not None:
if self.prepared_tag is None:
self.prepared_tag = self.prepare_tag(self.event.tag)
length += len(self.prepared_tag)
if isinstance(self.event, ScalarEvent):
if self.analysis is None:
self.analysis = self.analyze_scalar(self.event.value)
length += len(self.analysis.scalar)
return (length < 128 and (isinstance(self.event, AliasEvent)
or (isinstance(self.event, ScalarEvent)
and not self.analysis.empty and not self.analysis.multiline)
or self.check_empty_sequence() or self.check_empty_mapping()))
# Anchor, Tag, and Scalar processors.
def process_anchor(self, indicator):
if self.event.anchor is None:
self.prepared_anchor = None
return
if self.prepared_anchor is None:
self.prepared_anchor = self.prepare_anchor(self.event.anchor)
if self.prepared_anchor:
self.write_indicator(indicator+self.prepared_anchor, True)
self.prepared_anchor = None
def process_tag(self):
tag = self.event.tag
if isinstance(self.event, ScalarEvent):
if self.style is None:
self.style = self.choose_scalar_style()
if ((not self.canonical or tag is None) and
((self.style == '' and self.event.implicit[0])
or (self.style != '' and self.event.implicit[1]))):
self.prepared_tag = None
return
if self.event.implicit[0] and tag is None:
tag = u'!'
self.prepared_tag = None
else:
if (not self.canonical or tag is None) and self.event.implicit:
self.prepared_tag = None
return
if tag is None:
raise EmitterError("tag is not specified")
if self.prepared_tag is None:
self.prepared_tag = self.prepare_tag(tag)
if self.prepared_tag:
self.write_indicator(self.prepared_tag, True)
self.prepared_tag = None
def choose_scalar_style(self):
if self.analysis is None:
self.analysis = self.analyze_scalar(self.event.value)
if self.event.style == '"' or self.canonical:
return '"'
if not self.event.style and self.event.implicit[0]:
if (not (self.simple_key_context and
(self.analysis.empty or self.analysis.multiline))
and (self.flow_level and self.analysis.allow_flow_plain
or (not self.flow_level and self.analysis.allow_block_plain))):
return ''
if self.event.style and self.event.style in '|>':
if (not self.flow_level and not self.simple_key_context
and self.analysis.allow_block):
return self.event.style
if not self.event.style or self.event.style == '\'':
if (self.analysis.allow_single_quoted and
not (self.simple_key_context and self.analysis.multiline)):
return '\''
return '"'
def process_scalar(self):
if self.analysis is None:
self.analysis = self.analyze_scalar(self.event.value)
if self.style is None:
self.style = self.choose_scalar_style()
split = (not self.simple_key_context)
#if self.analysis.multiline and split \
# and (not self.style or self.style in '\'\"'):
# self.write_indent()
if self.style == '"':
self.write_double_quoted(self.analysis.scalar, split)
elif self.style == '\'':
self.write_single_quoted(self.analysis.scalar, split)
elif self.style == '>':
self.write_folded(self.analysis.scalar)
elif self.style == '|':
self.write_literal(self.analysis.scalar)
else:
self.write_plain(self.analysis.scalar, split)
self.analysis = None
self.style = None
# Analyzers.
def prepare_version(self, version):
major, minor = version
if major != 1:
raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
return u'%d.%d' % (major, minor)
def prepare_tag_handle(self, handle):
if not handle:
raise EmitterError("tag handle must not be empty")
if handle[0] != u'!' or handle[-1] != u'!':
raise EmitterError("tag handle must start and end with '!': %r"
% (handle.encode('utf-8')))
for ch in handle[1:-1]:
if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
or ch in u'-_'):
raise EmitterError("invalid character %r in the tag handle: %r"
% (ch.encode('utf-8'), handle.encode('utf-8')))
return handle
def prepare_tag_prefix(self, prefix):
if not prefix:
raise EmitterError("tag prefix must not be empty")
chunks = []
start = end = 0
if prefix[0] == u'!':
end = 1
while end < len(prefix):
ch = prefix[end]
if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
or ch in u'-;/?!:@&=+$,_.~*\'()[]':
end += 1
else:
if start < end:
chunks.append(prefix[start:end])
start = end = end+1
data = ch.encode('utf-8')
for ch in data:
chunks.append(u'%%%02X' % ord(ch))
if start < end:
chunks.append(prefix[start:end])
return u''.join(chunks)
def prepare_tag(self, tag):
if not tag:
raise EmitterError("tag must not be empty")
if tag == u'!':
return tag
handle = None
suffix = tag
prefixes = self.tag_prefixes.keys()
prefixes.sort()
for prefix in prefixes:
if tag.startswith(prefix) \
and (prefix == u'!' or len(prefix) < len(tag)):
handle = self.tag_prefixes[prefix]
suffix = tag[len(prefix):]
chunks = []
start = end = 0
while end < len(suffix):
ch = suffix[end]
if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
or ch in u'-;/?:@&=+$,_.~*\'()[]' \
or (ch == u'!' and handle != u'!'):
end += 1
else:
if start < end:
chunks.append(suffix[start:end])
start = end = end+1
data = ch.encode('utf-8')
for ch in data:
chunks.append(u'%%%02X' % ord(ch))
if start < end:
chunks.append(suffix[start:end])
suffix_text = u''.join(chunks)
if handle:
return u'%s%s' % (handle, suffix_text)
else:
return u'!<%s>' % suffix_text
def prepare_anchor(self, anchor):
if not anchor:
raise EmitterError("anchor must not be empty")
for ch in anchor:
if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
or ch in u'-_'):
raise EmitterError("invalid character %r in the anchor: %r"
% (ch.encode('utf-8'), anchor.encode('utf-8')))
return anchor
    def analyze_scalar(self, scalar):
        """Scan *scalar* once and return a ScalarAnalysis describing which
        output styles (flow/block plain, single/double quoted, block) may
        legally represent it.  The scan tracks indicator characters,
        line breaks, special/non-ASCII characters, and the whitespace
        combinations that restrict the style choice."""
        # Empty scalar is a special case.
        if not scalar:
            return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
                    allow_flow_plain=False, allow_block_plain=True,
                    allow_single_quoted=True, allow_double_quoted=True,
                    allow_block=False)
        # Indicators and special characters.
        block_indicators = False
        flow_indicators = False
        line_breaks = False
        special_characters = False
        # Important whitespace combinations.
        leading_space = False
        leading_break = False
        trailing_space = False
        trailing_break = False
        break_space = False
        space_break = False
        # Check document indicators.
        if scalar.startswith(u'---') or scalar.startswith(u'...'):
            block_indicators = True
            flow_indicators = True
        # First character or preceded by a whitespace.
        preceeded_by_whitespace = True
        # Last character or followed by a whitespace.
        followed_by_whitespace = (len(scalar) == 1 or
                scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
        # The previous character is a space.
        previous_space = False
        # The previous character is a break.
        previous_break = False
        index = 0
        while index < len(scalar):
            ch = scalar[index]
            # Check for indicators.
            if index == 0:
                # Leading indicators are special characters.
                if ch in u'#,[]{}&*!|>\'\"%@`':
                    flow_indicators = True
                    block_indicators = True
                if ch in u'?:':
                    flow_indicators = True
                    if followed_by_whitespace:
                        block_indicators = True
                if ch == u'-' and followed_by_whitespace:
                    flow_indicators = True
                    block_indicators = True
            else:
                # Some indicators cannot appear within a scalar as well.
                if ch in u',?[]{}':
                    flow_indicators = True
                if ch == u':':
                    flow_indicators = True
                    if followed_by_whitespace:
                        block_indicators = True
                if ch == u'#' and preceeded_by_whitespace:
                    flow_indicators = True
                    block_indicators = True
            # Check for line breaks, special, and unicode characters.
            if ch in u'\n\x85\u2028\u2029':
                line_breaks = True
            if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
                if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
                        or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF':
                    # NOTE(review): unicode_characters is set but never read
                    # (and never initialized on other paths) -- kept as-is.
                    unicode_characters = True
                    if not self.allow_unicode:
                        special_characters = True
                else:
                    special_characters = True
            # Detect important whitespace combinations.
            if ch == u' ':
                if index == 0:
                    leading_space = True
                if index == len(scalar)-1:
                    trailing_space = True
                if previous_break:
                    break_space = True
                previous_space = True
                previous_break = False
            elif ch in u'\n\x85\u2028\u2029':
                if index == 0:
                    leading_break = True
                if index == len(scalar)-1:
                    trailing_break = True
                if previous_space:
                    space_break = True
                previous_space = False
                previous_break = True
            else:
                previous_space = False
                previous_break = False
            # Prepare for the next character.
            index += 1
            preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029')
            followed_by_whitespace = (index+1 >= len(scalar) or
                    scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
        # Let's decide what styles are allowed.
        allow_flow_plain = True
        allow_block_plain = True
        allow_single_quoted = True
        allow_double_quoted = True
        allow_block = True
        # Leading and trailing whitespaces are bad for plain scalars.
        if (leading_space or leading_break
                or trailing_space or trailing_break):
            allow_flow_plain = allow_block_plain = False
        # We do not permit trailing spaces for block scalars.
        if trailing_space:
            allow_block = False
        # Spaces at the beginning of a new line are only acceptable for block
        # scalars.
        if break_space:
            allow_flow_plain = allow_block_plain = allow_single_quoted = False
        # Spaces followed by breaks, as well as special character are only
        # allowed for double quoted scalars.
        if space_break or special_characters:
            allow_flow_plain = allow_block_plain = \
            allow_single_quoted = allow_block = False
        # Although the plain scalar writer supports breaks, we never emit
        # multiline plain scalars.
        if line_breaks:
            allow_flow_plain = allow_block_plain = False
        # Flow indicators are forbidden for flow plain scalars.
        if flow_indicators:
            allow_flow_plain = False
        # Block indicators are forbidden for block plain scalars.
        if block_indicators:
            allow_block_plain = False
        return ScalarAnalysis(scalar=scalar,
                empty=False, multiline=line_breaks,
                allow_flow_plain=allow_flow_plain,
                allow_block_plain=allow_block_plain,
                allow_single_quoted=allow_single_quoted,
                allow_double_quoted=allow_double_quoted,
                allow_block=allow_block)
# Writers.
def flush_stream(self):
if hasattr(self.stream, 'flush'):
self.stream.flush()
def write_stream_start(self):
# Write BOM if needed.
if self.encoding and self.encoding.startswith('utf-16'):
self.stream.write(u'\uFEFF'.encode(self.encoding))
    def write_stream_end(self):
        # YAML streams need no terminator; just flush buffered output.
        self.flush_stream()
def write_indicator(self, indicator, need_whitespace,
whitespace=False, indention=False):
if self.whitespace or not need_whitespace:
data = indicator
else:
data = u' '+indicator
self.whitespace = whitespace
self.indention = self.indention and indention
self.column += len(data)
self.open_ended = False
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
    def write_indent(self):
        """Move the cursor to the indentation column of the current level,
        emitting a line break first if it is already at or past it."""
        indent = self.indent or 0
        if not self.indention or self.column > indent \
                or (self.column == indent and not self.whitespace):
            self.write_line_break()
        if self.column < indent:
            self.whitespace = True
            # Pad with spaces up to the indentation column.
            data = u' '*(indent-self.column)
            self.column = indent
            if self.encoding:
                data = data.encode(self.encoding)
            self.stream.write(data)
def write_line_break(self, data=None):
if data is None:
data = self.best_line_break
self.whitespace = True
self.indention = True
self.line += 1
self.column = 0
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
def write_version_directive(self, version_text):
data = u'%%YAML %s' % version_text
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_line_break()
def write_tag_directive(self, handle_text, prefix_text):
data = u'%%TAG %s %s' % (handle_text, prefix_text)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_line_break()
# Scalar streams.
    def write_single_quoted(self, text, split=True):
        """Write *text* as a single-quoted flow scalar.

        Embedded single quotes are doubled ('').  Runs of spaces may be
        folded into an indent when *split* is allowed; line breaks in the
        text are written out followed by a fresh indent.
        """
        self.write_indicator(u'\'', True)
        spaces = False
        breaks = False
        start = end = 0
        while end <= len(text):
            # ch is None on the final iteration, which flushes the last run.
            ch = None
            if end < len(text):
                ch = text[end]
            if spaces:
                if ch is None or ch != u' ':
                    if start+1 == end and self.column > self.best_width and split \
                            and start != 0 and end != len(text):
                        self.write_indent()
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            elif breaks:
                if ch is None or ch not in u'\n\x85\u2028\u2029':
                    if text[start] == u'\n':
                        self.write_line_break()
                    for br in text[start:end]:
                        if br == u'\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    self.write_indent()
                    start = end
            else:
                if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
                    if start < end:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                        start = end
            if ch == u'\'':
                # Escape a single quote by doubling it.
                data = u'\'\''
                self.column += 2
                if self.encoding:
                    data = data.encode(self.encoding)
                self.stream.write(data)
                start = end + 1
            if ch is not None:
                spaces = (ch == u' ')
                breaks = (ch in u'\n\x85\u2028\u2029')
            end += 1
        self.write_indicator(u'\'', False)
ESCAPE_REPLACEMENTS = {
u'\0': u'0',
u'\x07': u'a',
u'\x08': u'b',
u'\x09': u't',
u'\x0A': u'n',
u'\x0B': u'v',
u'\x0C': u'f',
u'\x0D': u'r',
u'\x1B': u'e',
u'\"': u'\"',
u'\\': u'\\',
u'\x85': u'N',
u'\xA0': u'_',
u'\u2028': u'L',
u'\u2029': u'P',
}
    def write_double_quoted(self, text, split=True):
        """Write *text* as a double-quoted flow scalar.

        Characters without a literal representation are escaped using
        ESCAPE_REPLACEMENTS or \\xXX/\\uXXXX/\\UXXXXXXXX forms.  When
        *split* is allowed, long lines are broken with a trailing
        backslash and continued on the next (indented) line.
        """
        self.write_indicator(u'"', True)
        start = end = 0
        while end <= len(text):
            # ch is None on the final iteration, which flushes the last run.
            ch = None
            if end < len(text):
                ch = text[end]
            if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
                    or not (u'\x20' <= ch <= u'\x7E'
                        or (self.allow_unicode
                            and (u'\xA0' <= ch <= u'\uD7FF'
                                or u'\uE000' <= ch <= u'\uFFFD'))):
                if start < end:
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end
                if ch is not None:
                    # Pick the shortest escape form for this character.
                    if ch in self.ESCAPE_REPLACEMENTS:
                        data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
                    elif ch <= u'\xFF':
                        data = u'\\x%02X' % ord(ch)
                    elif ch <= u'\uFFFF':
                        data = u'\\u%04X' % ord(ch)
                    else:
                        data = u'\\U%08X' % ord(ch)
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end+1
            if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \
                    and self.column+(end-start) > self.best_width and split:
                # Break a long line with a trailing backslash.
                data = text[start:end]+u'\\'
                if start < end:
                    start = end
                self.column += len(data)
                if self.encoding:
                    data = data.encode(self.encoding)
                self.stream.write(data)
                self.write_indent()
                self.whitespace = False
                self.indention = False
                if text[start] == u' ':
                    # A leading space on the continuation line must be escaped.
                    data = u'\\'
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
            end += 1
        self.write_indicator(u'"', False)
def determine_block_hints(self, text):
hints = u''
if text:
if text[0] in u' \n\x85\u2028\u2029':
hints += unicode(self.best_indent)
if text[-1] not in u'\n\x85\u2028\u2029':
hints += u'-'
elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
hints += u'+'
return hints
    def write_folded(self, text):
        """Write *text* as a folded block scalar ('>').

        Line breaks in the text become blank-line folds; long runs of
        text are re-wrapped at best_width on space boundaries.
        """
        hints = self.determine_block_hints(text)
        self.write_indicator(u'>'+hints, True)
        if hints[-1:] == u'+':
            # '+' chomping keeps trailing breaks; the document stays open.
            self.open_ended = True
        self.write_line_break()
        leading_space = True
        spaces = False
        breaks = True
        start = end = 0
        while end <= len(text):
            # ch is None on the final iteration, which flushes the last run.
            ch = None
            if end < len(text):
                ch = text[end]
            if breaks:
                if ch is None or ch not in u'\n\x85\u2028\u2029':
                    if not leading_space and ch is not None and ch != u' ' \
                            and text[start] == u'\n':
                        self.write_line_break()
                    leading_space = (ch == u' ')
                    for br in text[start:end]:
                        if br == u'\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    if ch is not None:
                        self.write_indent()
                    start = end
            elif spaces:
                if ch != u' ':
                    if start+1 == end and self.column > self.best_width:
                        self.write_indent()
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            else:
                if ch is None or ch in u' \n\x85\u2028\u2029':
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    if ch is None:
                        self.write_line_break()
                    start = end
            if ch is not None:
                breaks = (ch in u'\n\x85\u2028\u2029')
                spaces = (ch == u' ')
            end += 1
    def write_literal(self, text):
        """Write *text* as a literal block scalar ('|'), preserving all
        line breaks exactly as they appear in the text."""
        hints = self.determine_block_hints(text)
        self.write_indicator(u'|'+hints, True)
        if hints[-1:] == u'+':
            # '+' chomping keeps trailing breaks; the document stays open.
            self.open_ended = True
        self.write_line_break()
        breaks = True
        start = end = 0
        while end <= len(text):
            # ch is None on the final iteration, which flushes the last run.
            ch = None
            if end < len(text):
                ch = text[end]
            if breaks:
                if ch is None or ch not in u'\n\x85\u2028\u2029':
                    for br in text[start:end]:
                        if br == u'\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    if ch is not None:
                        self.write_indent()
                    start = end
            else:
                if ch is None or ch in u'\n\x85\u2028\u2029':
                    data = text[start:end]
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    if ch is None:
                        self.write_line_break()
                    start = end
            if ch is not None:
                breaks = (ch in u'\n\x85\u2028\u2029')
            end += 1
    def write_plain(self, text, split=True):
        """Write *text* as a plain (unquoted) scalar.

        A separating space is emitted if the cursor is not already after
        whitespace.  Long lines may be re-wrapped on space boundaries
        when *split* is allowed.
        """
        if self.root_context:
            # A plain scalar at the root may need a '...' end marker later.
            self.open_ended = True
        if not text:
            return
        if not self.whitespace:
            data = u' '
            self.column += len(data)
            if self.encoding:
                data = data.encode(self.encoding)
            self.stream.write(data)
        self.whitespace = False
        self.indention = False
        spaces = False
        breaks = False
        start = end = 0
        while end <= len(text):
            # ch is None on the final iteration, which flushes the last run.
            ch = None
            if end < len(text):
                ch = text[end]
            if spaces:
                if ch != u' ':
                    if start+1 == end and self.column > self.best_width and split:
                        self.write_indent()
                        self.whitespace = False
                        self.indention = False
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            elif breaks:
                if ch not in u'\n\x85\u2028\u2029':
                    if text[start] == u'\n':
                        self.write_line_break()
                    for br in text[start:end]:
                        if br == u'\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    self.write_indent()
                    self.whitespace = False
                    self.indention = False
                    start = end
            else:
                if ch is None or ch in u' \n\x85\u2028\u2029':
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end
            if ch is not None:
                spaces = (ch == u' ')
                breaks = (ch in u'\n\x85\u2028\u2029')
            end += 1
|
frankvdp/django | refs/heads/master | tests/max_lengths/models.py | 142 | from django.db import models
class PersonWithDefaultMaxLengths(models.Model):
    # Exercises the implicit max_length defaults of these Django field types.
    email = models.EmailField()
    vcard = models.FileField()
    homepage = models.URLField()
    avatar = models.FilePathField()
class PersonWithCustomMaxLengths(models.Model):
    # Same fields as PersonWithDefaultMaxLengths, but with an explicit
    # max_length override on each.
    email = models.EmailField(max_length=250)
    vcard = models.FileField(max_length=250)
    homepage = models.URLField(max_length=250)
    avatar = models.FilePathField(max_length=250)
|
showerst/openstates | refs/heads/master | openstates/ct/__init__.py | 1 | import datetime
import lxml.html
from .bills import CTBillScraper
from .legislators import CTLegislatorScraper
from .events import CTEventScraper
# Scraper tuning: throttle requests to 20 per minute.
settings = {
    'SCRAPELIB_RPM': 20
}
# Open States scraper metadata for Connecticut: chambers, terms,
# per-session details and supported feature flags.
metadata = {
    'name': 'Connecticut',
    'abbreviation': 'ct',
    'legislature_name': 'Connecticut General Assembly',
    'legislature_url': 'http://www.cga.ct.gov/',
    'capitol_timezone': 'America/New_York',
    'chambers': {
        'upper': {'name': 'Senate', 'title': 'Senator'},
        'lower': {'name': 'House', 'title': 'Representative'},
    },
    'terms': [
        {
            'name': '2011-2012',
            'start_year': 2011,
            'end_year': 2012,
            'sessions': ['2011', '2012'],
        },
        {
            'name': '2013-2014',
            'start_year': 2013,
            'end_year': 2014,
            'sessions': ['2013', '2014'],
        },
        {
            'name': '2015-2016',
            'start_year': 2015,
            'end_year': 2016,
            'sessions': ['2015', '2016'],
        },
    ],
    'session_details': {
        '2011': {
            'display_name': '2011 Regular Session',
            '_scraped_name': '2011',
        },
        '2012': {
            'display_name': '2012 Regular Session',
            '_scraped_name': '2012',
        },
        '2013': {
            'display_name': '2013 Regular Session',
            '_scraped_name': '2013',
        },
        '2014': {
            'display_name': '2014 Regular Session',
            '_scraped_name': '2014',
        },
        '2015': {
            'display_name': '2015 Regular Session',
            '_scraped_name': '2015',
        },
        '2016': {
            'display_name': '2016 Regular Session',
            'start_date': datetime.date(2016, 2, 3),
            'end_date': datetime.date(2016, 5, 4),
            '_scraped_name': '2016',
        },
    },
    'feature_flags': ['subjects', 'events', 'influenceexplorer'],
    '_ignored_scraped_sessions': [
        '2010',
        '2009',
        '2008',
        '2007',
        '2006',
        '2005',
    ],
}
def session_list():
    """Return the session directory names on the CGA FTP site.

    Each listing line's last whitespace-separated field is the entry
    name; known administrative (non-session) folders are filtered out.
    """
    import scrapelib
    # Folders on the FTP site that are not legislative sessions.
    not_sessions = {'incoming', 'pub', 'CGAAudio', 'rba', 'NCSL', 'apaac', 'FOI_1'}
    text = scrapelib.Scraper().get('ftp://ftp.cga.ct.gov').text
    sessions = []
    for line in text.splitlines():
        fields = line.split()
        # Skip blank lines (previously raised IndexError on [-1]).
        if not fields:
            continue
        name = fields[-1]
        # Filter instead of list.remove(): a missing administrative
        # folder no longer raises ValueError and kills the scrape.
        if name not in not_sessions:
            sessions.append(name)
    return sessions
def extract_text(doc, data):
    """Extract the plain text of a bill document.

    *doc* is ignored; the raw *data* is re-parsed with lxml and the text
    content of every <p> under <body> is joined with single spaces.
    """
    tree = lxml.html.fromstring(data)
    return ' '.join(p.text_content() for p in tree.xpath('//body/p'))
|
dream1986/you-get | refs/heads/develop | src/you_get/extractors/qq.py | 3 | #!/usr/bin/env python
__all__ = ['qq_download']
from ..common import *
import uuid
#QQMUSIC
#SINGLE
#1. http://y.qq.com/#type=song&mid=000A9lMb0iEqwN
#2. http://y.qq.com/#type=song&id=4754713
#3. http://s.plcloud.music.qq.com/fcgi-bin/fcg_yqq_song_detail_info.fcg?songmid=002NqCeX3owQIw
#4. http://s.plcloud.music.qq.com/fcgi-bin/fcg_yqq_song_detail_info.fcg?songid=4754713
#ALBUM
#1. http://y.qq.com/y/static/album/3/c/00385vBa0n3O3c.html?pgv_ref=qqmusic.y.index.music.pic1
#2. http://y.qq.com/#type=album&mid=004c62RC2uujor
#MV
#can download as video through qq_download_by_id
#1. http://y.qq.com/y/static/mv/mv_play.html?vid=i0014ufczcw
def qq_download_by_id(id, title=None, output_dir='.', merge=True, info_only=False):
    """Download a QQ video by its vid.

    Queries the getinfo XML endpoint for the file name, vkey and CDN
    url, builds one URL per clip index, and downloads/merges the clips.
    Helpers (get_html, print_info, download_urls) come from ..common.
    """
    xml = get_html('http://www.acfun.tv/getinfo?vids=%s' % id)
    from xml.dom.minidom import parseString
    doc = parseString(xml)
    doc_root = doc.getElementsByTagName('root')[0]
    doc_vl = doc_root.getElementsByTagName('vl')[0]
    doc_vi = doc_vl.getElementsByTagName('vi')[0]
    fn = doc_vi.getElementsByTagName('fn')[0].firstChild.data
    #    fclip = doc_vi.getElementsByTagName('fclip')[0].firstChild.data
    #    fc=doc_vi.getElementsByTagName('fc')[0].firstChild.data
    fvkey = doc_vi.getElementsByTagName('fvkey')[0].firstChild.data
    doc_ul = doc_vi.getElementsByTagName('ul')
    # Second <url> entry is the CDN base url.
    url = doc_ul[0].getElementsByTagName('url')[1].firstChild.data
    #    print(i.firstChild.data)
    urls=[]
    ext=fn[-3:]
    size=0
    # Total size is the sum of all clip sizes (<cs> elements).
    for i in doc.getElementsByTagName("cs"):
        size+=int(i.firstChild.data)
    #    size=sum(map(int,doc.getElementsByTagName("cs")))
    locid=str(uuid.uuid4())
    # One URL per clip index (<ci>/<idx>); the vkey authorizes the fetch.
    for i in doc.getElementsByTagName("ci"):
        urls.append(url+fn[:-4] + "." + i.getElementsByTagName("idx")[0].firstChild.data + fn[-4:] + '?vkey=' + fvkey+ '&sdtfrom=v1000&type='+ fn[-3:0] +'&locid=' + locid + "&&level=1&platform=11&br=133&fmt=hd&sp=0")
    #    if int(fclip) > 0:
    #        fn = fn[:-4] + "." + fclip + fn[-4:]
    #    url = url + fn + '?vkey=' + fvkey
    #    _, ext, size = url_info(url)
    print_info(site_info, title, ext, size)
    if not info_only:
        download_urls(urls, title, ext, size, output_dir=output_dir, merge=merge)
def qq_download(url, output_dir = '.', merge = True, info_only = False):
    """Resolve the several QQ video URL shapes (v.qq.com, y.qq.com,
    static.video.qq.com, cover pages) down to a vid and page title, then
    delegate to qq_download_by_id."""
    if re.match(r'http://v.qq.com/([^\?]+)\?vid', url):
        aid = r1(r'(.*)\.html', url)
        vid = r1(r'http://v.qq.com/[^\?]+\?vid=(\w+)', url)
        url = 'http://sns.video.qq.com/tvideo/fcgi-bin/video?vid=%s' % vid
    if re.match(r'http://y.qq.com/([^\?]+)\?vid', url):
        vid = r1(r'http://y.qq.com/[^\?]+\?vid=(\w+)', url)
        url = "http://v.qq.com/page/%s.html" % vid
        # Follow a meta-refresh redirect if the page issues one.
        r_url = r1(r'<meta http-equiv="refresh" content="0;url=([^"]*)', get_html(url))
        if r_url:
            aid = r1(r'(.*)\.html', r_url)
            url = "%s/%s.html" % (aid, vid)
    if re.match(r'http://static.video.qq.com/.*vid=', url):
        vid = r1(r'http://static.video.qq.com/.*vid=(\w+)', url)
        url = "http://v.qq.com/page/%s.html" % vid
    if re.match(r'http://v.qq.com/cover/.*\.html', url):
        html = get_html(url)
        vid = r1(r'vid:"([^"]+)"', html)
        url = 'http://sns.video.qq.com/tvideo/fcgi-bin/video?vid=%s' % vid
    html = get_html(url)
    # Title from <title> or the embedded player config.
    title = match1(html, r'<title>(.+?)</title>', r'title:"([^"]+)"')[0].strip()
    assert title
    title = unescape_html(title)
    title = escape_file_path(title)
    try:
        # vid is bound only if one of the URL patterns above matched.
        id = vid
    except:
        id = r1(r'vid:"([^"]+)"', html)
    qq_download_by_id(id, title, output_dir = output_dir, merge = merge, info_only = info_only)
# Extractor registration: QQ has no playlist support.
site_info = "QQ.com"
download = qq_download
download_playlist = playlist_not_supported('qq')
|
fredrhae/rotas | refs/heads/master | Rotas/GoogleTest/gtest-1.7.0/test/gtest_test_utils.py | 408 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
# Environment variable used by Google Test to request XML output.
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase  # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
             'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
  """Parses and strips Google Test flags from argv.  This is idempotent.

  For each flag in _flag_map, an environment variable (upper-cased flag
  name) overrides the default, and a --flag=value argument overrides the
  environment variable.  Matched arguments are removed from argv.
  """
  # Suppresses the lint complaint about a global variable since we need it
  # here to maintain module-wide state.
  global _gtest_flags_are_parsed  # pylint: disable-msg=W0603
  if _gtest_flags_are_parsed:
    return
  _gtest_flags_are_parsed = True
  for flag in _flag_map:
    # The environment variable overrides the default value.
    if flag.upper() in os.environ:
      _flag_map[flag] = os.environ[flag.upper()]
    # The command line flag overrides the environment variable.
    i = 1  # Skips the program name.
    while i < len(argv):
      prefix = '--' + flag + '='
      if argv[i].startswith(prefix):
        _flag_map[flag] = argv[i][len(prefix):]
        del argv[i]
        break
      else:
        # We don't increment i in case we just found a --gtest_* flag
        # and removed it from argv.
        i += 1
def GetFlag(flag):
  """Returns the value of the given flag (see _flag_map for known flags)."""
  # In case GetFlag() is called before Main(), we always call
  # _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
  # are parsed.
  _ParseAndStripGTestFlags(sys.argv)
  return _flag_map[flag]
def GetSourceDir():
  """Returns the absolute path of the directory where the .py files are,
  honoring the --source_dir flag / SOURCE_DIR environment variable."""
  return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
  """Returns the absolute path of the directory where the test binaries are,
  honoring the --build_dir flag / BUILD_DIR environment variable."""
  return os.path.abspath(GetFlag('build_dir'))
# Lazily-created shared temp directory; removed at interpreter exit.
_temp_dir = None
def _RemoveTempDir():
  if _temp_dir:
    shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
  """Returns a directory for temporary files, created on first use and
  deleted automatically at interpreter exit."""
  global _temp_dir
  if not _temp_dir:
    _temp_dir = tempfile.mkdtemp()
  return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
  """Returns the absolute path of the test binary given its name.

  The function will print a message and abort the program if the resulting file
  doesn't exist.

  Args:
    executable_name: name of the test binary that the test script runs.
    build_dir:       directory where to look for executables, by default
                     the result of GetBuildDir().

  Returns:
    The absolute path of the test binary.
  """
  path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
                                      executable_name))
  # Windows and Cygwin binaries carry an .exe suffix.
  if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
    path += '.exe'
  if not os.path.exists(path):
    message = (
        'Unable to find the test binary. Please make sure to provide path\n'
        'to the binary via the --build_dir flag or the BUILD_DIR\n'
        'environment variable.')
    print >> sys.stderr, message
    sys.exit(1)
  return path
def GetExitStatus(exit_code):
  """Returns the argument to exit(), or -1 if exit() wasn't called.

  Args:
    exit_code: the result value of os.system(command).
  """
  if os.name == 'nt':
    # On Windows, os.system() returns the argument to exit() directly.
    return exit_code
  # On Unix, decode the 16-bit wait status produced by os.system().
  if os.WIFEXITED(exit_code):
    return os.WEXITSTATUS(exit_code)
  return -1
class Subprocess:
  """Runs a command in a subprocess and records its output and outcome.

  Uses the subprocess module when available, falling back to popen2 on
  very old Python versions (which lacked subprocess before 2.4).
  """
  def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
    """Changes into a specified directory, if provided, and executes a command.

    Restores the old directory afterwards.

    Args:
      command:        The command to run, in the form of sys.argv.
      working_dir:    The directory to change into.
      capture_stderr: Determines whether to capture stderr in the output member
                      or to discard it.
      env:            Dictionary with environment to pass to the subprocess.

    Returns:
      An object that represents outcome of the executed process. It has the
      following attributes:
        terminated_by_signal   True iff the child process has been terminated
                               by a signal.
        signal                 Sygnal that terminated the child process.
        exited                 True iff the child process exited normally.
        exit_code              The code with which the child process exited.
        output                 Child process's stdout and stderr output
                               combined in a string.
    """
    # The subprocess module is the preferrable way of running programs
    # since it is available and behaves consistently on all platforms,
    # including Windows. But it is only available starting in python 2.4.
    # In earlier python versions, we revert to the popen2 module, which is
    # available in python 2.0 and later but doesn't provide required
    # functionality (Popen4) under Windows. This allows us to support Mac
    # OS X 10.4 Tiger, which has python 2.3 installed.
    if _SUBPROCESS_MODULE_AVAILABLE:
      if capture_stderr:
        stderr = subprocess.STDOUT
      else:
        stderr = subprocess.PIPE
      p = subprocess.Popen(command,
                           stdout=subprocess.PIPE, stderr=stderr,
                           cwd=working_dir, universal_newlines=True, env=env)
      # communicate returns a tuple with the file obect for the child's
      # output.
      self.output = p.communicate()[0]
      self._return_code = p.returncode
    else:
      old_dir = os.getcwd()

      def _ReplaceEnvDict(dest, src):
        # Changes made by os.environ.clear are not inheritable by child
        # processes until Python 2.6. To produce inheritable changes we have
        # to delete environment items with the del statement.
        for key in dest.keys():
          del dest[key]
        dest.update(src)

      # When 'env' is not None, backup the environment variables and replace
      # them with the passed 'env'. When 'env' is None, we simply use the
      # current 'os.environ' for compatibility with the subprocess.Popen
      # semantics used above.
      if env is not None:
        old_environ = os.environ.copy()
        _ReplaceEnvDict(os.environ, env)
      try:
        if working_dir is not None:
          os.chdir(working_dir)
        if capture_stderr:
          p = popen2.Popen4(command)
        else:
          p = popen2.Popen3(command)
        p.tochild.close()
        self.output = p.fromchild.read()
        ret_code = p.wait()
      finally:
        os.chdir(old_dir)
        # Restore the old environment variables
        # if they were replaced.
        if env is not None:
          _ReplaceEnvDict(os.environ, old_environ)
      # Converts ret_code to match the semantics of
      # subprocess.Popen.returncode.
      if os.WIFSIGNALED(ret_code):
        self._return_code = -os.WTERMSIG(ret_code)
      else:  # os.WIFEXITED(ret_code) should return True here.
        self._return_code = os.WEXITSTATUS(ret_code)
    if self._return_code < 0:
      self.terminated_by_signal = True
      self.exited = False
      self.signal = -self._return_code
    else:
      self.terminated_by_signal = False
      self.exited = True
      self.exit_code = self._return_code
def Main():
  """Runs the unit test after stripping Google Test flags from sys.argv."""
  # We must call _ParseAndStripGTestFlags() before calling
  # unittest.main().  Otherwise the latter will be confused by the
  # --gtest_* flags.
  _ParseAndStripGTestFlags(sys.argv)
  # The tested binaries should not be writing XML output files unless the
  # script explicitly instructs them to.
  # TODO(vladl@google.com): Move this into Subprocess when we implement
  # passing environment into it as a parameter.
  if GTEST_OUTPUT_VAR_NAME in os.environ:
    del os.environ[GTEST_OUTPUT_VAR_NAME]
  _test_module.main()
|
stinos/micropython | refs/heads/master | tests/basics/array1.py | 9 | try:
import uarray as array
except ImportError:
try:
import array
except ImportError:
print("SKIP")
raise SystemExit
# Basic construction and indexing for several typecodes.
a = array.array('B', [1, 2, 3])
print(a, len(a))
i = array.array('I', [1, 2, 3])
print(i, len(i))
print(a[0])
print(i[-1])
a = array.array('l', [-1])
print(len(a), a[0])
# Cross-typecode equality (signed vs unsigned long).
a1 = array.array('l', [1, 2, 3])
a2 = array.array('L', [1, 2, 3])
print(a2[1])
print(a1 == a2)
# Empty arrays
print(len(array.array('h')))
print(array.array('i'))
# bool operator acting on arrays
print(bool(array.array('i')))
print(bool(array.array('i', [1])))
# containment, with incorrect type
print('12' in array.array('B', b'12'))
print([] in array.array('B', b'12'))
# bad typecode
try:
    array.array('X')
except ValueError:
    print("ValueError")
# equality (CPython requires both sides are array)
print(bytes(array.array('b', [0x61, 0x62, 0x63])) == b'abc')
print(array.array('b', [0x61, 0x62, 0x63]) == b'abc')
print(array.array('b', [0x61, 0x62, 0x63]) != b'abc')
print(array.array('b', [0x61, 0x62, 0x63]) == b'xyz')
print(array.array('b', [0x61, 0x62, 0x63]) != b'xyz')
print(b'abc' == array.array('b', [0x61, 0x62, 0x63]))
print(b'abc' != array.array('b', [0x61, 0x62, 0x63]))
print(b'xyz' == array.array('b', [0x61, 0x62, 0x63]))
print(b'xyz' != array.array('b', [0x61, 0x62, 0x63]))
# Subclassing array.array must preserve comparison behavior.
class X(array.array):
    pass
print(bytes(X('b', [0x61, 0x62, 0x63])) == b'abc')
print(X('b', [0x61, 0x62, 0x63]) == b'abc')
print(X('b', [0x61, 0x62, 0x63]) != b'abc')
print(X('b', [0x61, 0x62, 0x63]) == array.array('b', [0x61, 0x62, 0x63]))
print(X('b', [0x61, 0x62, 0x63]) != array.array('b', [0x61, 0x62, 0x63]))
|
notriddle/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/third_party/py/testing/root/__init__.py | 9480 | #
|
onceuponatimeforever/oh-mainline | refs/heads/master | mysite/customs/migrations/0009_add_cia_model.py | 17 | # This file is part of OpenHatch.
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.customs.models import *
class Migration:
    """South schema migration: create/drop the RecentMessageFromCIA table.

    The ``models`` dict below is South's frozen snapshot of the app's models
    at the time this migration was written; the orm[...] strings in
    forwards() must match its keys exactly.
    """

    def forwards(self, orm):
        """Apply the migration: create customs_recentmessagefromcia."""
        # Adding model 'RecentMessageFromCIA'
        db.create_table('customs_recentmessagefromcia', (
            ('id', orm['customs.recentmessagefromcia:id']),
            ('committer_identifier', orm['customs.recentmessagefromcia:committer_identifier']),
            ('project_name', orm['customs.recentmessagefromcia:project_name']),
            ('path', orm['customs.recentmessagefromcia:path']),
            ('version', orm['customs.recentmessagefromcia:version']),
            ('message', orm['customs.recentmessagefromcia:message']),
            ('module', orm['customs.recentmessagefromcia:module']),
            ('branch', orm['customs.recentmessagefromcia:branch']),
            ('time_received', orm['customs.recentmessagefromcia:time_received']),
        ))
        # let interested listeners know the model's table now exists
        db.send_create_signal('customs', ['RecentMessageFromCIA'])

    def backwards(self, orm):
        """Reverse the migration: drop customs_recentmessagefromcia."""
        # Deleting model 'RecentMessageFromCIA'
        db.delete_table('customs_recentmessagefromcia')

    # Frozen model definitions (field path, positional args, keyword args).
    models = {
        'customs.recentmessagefromcia': {
            'branch': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'committer_identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'module': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'project_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'time_received': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'customs.roundupbugtracker': {
            'components': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'null': 'True'}),
            'csv_keyword': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'include_these_roundup_bug_statuses': ('django.db.models.fields.CharField', [], {'default': "'-1,1,2,3,4,5,6'", 'max_length': '255'}),
            'my_bugs_are_always_good_for_newcomers': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'my_bugs_concern_just_documentation': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
            'roundup_root_url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'customs.webresponse': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'response_headers': ('django.db.models.fields.TextField', [], {}),
            'status': ('django.db.models.fields.IntegerField', [], {}),
            'text': ('django.db.models.fields.TextField', [], {}),
            'url': ('django.db.models.fields.TextField', [], {})
        },
        'search.project': {
            'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'icon_for_profile': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_for_search_result': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_raw': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True'})
        }
    }

    complete_apps = ['customs']
|
StefanRijnhart/odoo | refs/heads/master | openerp/addons/base/module/wizard/base_export_language.py | 178 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2004-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import cStringIO
from openerp import tools
from openerp.osv import fields,osv
from openerp.tools.translate import _
from openerp.tools.misc import get_iso_codes
NEW_LANG_KEY = '__new__'
class base_language_export(osv.osv_memory):
    """Transient wizard exporting translation terms to a CSV/PO/TGZ file.

    Two-step flow: in state 'choose' the user picks language, format and
    modules; act_getfile() builds the export and moves to state 'get',
    where the base64-encoded file is offered for download.
    """
    _name = "base.language.export"

    def _get_languages(self, cr, uid, context):
        """Selection helper: all translatable languages plus the empty-template entry."""
        lang_obj = self.pool.get('res.lang')
        ids = lang_obj.search(cr, uid, [('translatable', '=', True)])
        langs = lang_obj.browse(cr, uid, ids)
        return [(NEW_LANG_KEY, _('New Language (Empty translation template)'))] + [(lang.code, lang.name) for lang in langs]

    _columns = {
        'name': fields.char('File Name', readonly=True),
        'lang': fields.selection(_get_languages, 'Language', required=True),
        'format': fields.selection([('csv','CSV File'),
                                    ('po','PO File'),
                                    ('tgz', 'TGZ Archive')], 'File Format', required=True),
        'modules': fields.many2many('ir.module.module', 'rel_modules_langexport', 'wiz_id', 'module_id', 'Modules To Export', domain=[('state','=','installed')]),
        'data': fields.binary('File', readonly=True),
        'state': fields.selection([('choose', 'choose'), # choose language
                                   ('get', 'get')]) # get the file
    }

    _defaults = {
        'state': 'choose',
        # NOTE(review): default name assumes the 'tgz' format but the default
        # format is 'csv'; act_getfile() overwrites it anyway.
        'name': 'lang.tar.gz',
        'lang': NEW_LANG_KEY,
        'format': 'csv',
    }

    def act_getfile(self, cr, uid, ids, context=None):
        """Build the export file for this wizard record and re-open the form in state 'get'."""
        this = self.browse(cr, uid, ids)[0]
        # NEW_LANG_KEY means "empty template": trans_export expects False then
        lang = this.lang if this.lang != NEW_LANG_KEY else False
        mods = map(lambda m: m.name, this.modules) or ['all']
        mods.sort()
        buf = cStringIO.StringIO()
        tools.trans_export(lang, mods, buf, this.format, cr)
        # pick a file name: ISO code, single module name, or 'new'
        filename = 'new'
        if lang:
            filename = get_iso_codes(lang)
        elif len(mods) == 1:
            filename = mods[0]
        this.name = "%s.%s" % (filename, this.format)
        out = base64.encodestring(buf.getvalue())
        buf.close()
        self.write(cr, uid, ids, {'state': 'get',
                                  'data': out,
                                  'name':this.name}, context=context)
        return {
            'type': 'ir.actions.act_window',
            'res_model': 'base.language.export',
            'view_mode': 'form',
            'view_type': 'form',
            'res_id': this.id,
            'views': [(False, 'form')],
            'target': 'new',
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
zarlant/ansible_core | refs/heads/vmware_add_disk | packaging/os/yum.py | 3 | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2012, Red Hat, Inc
# Written by Seth Vidal <skvidal at fedoraproject.org>
# (c) 2014, Epic Games, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import traceback
import os
import yum
import rpm
try:
from yum.misc import find_unfinished_transactions, find_ts_remaining
from rpmUtils.miscutils import splitFilename
transaction_helpers = True
except:
transaction_helpers = False
DOCUMENTATION = '''
---
module: yum
version_added: historical
short_description: Manages packages with the I(yum) package manager
description:
- Installs, upgrade, removes, and lists packages and groups with the I(yum) package manager.
options:
name:
description:
- "Package name, or package specifier with version, like C(name-1.0). When using state=latest, this can be '*' which means run: yum -y update. You can also pass a url or a local path to a rpm file. To operate on several packages this can accept a comma separated list of packages or (as of 2.0) a list of packages."
required: true
default: null
aliases: []
exclude:
description:
- "Package name(s) to exclude when state=present, or latest"
required: false
version_added: "2.0"
default: null
list:
description:
- Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks. See examples.
required: false
default: null
state:
description:
- Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
required: false
choices: [ "present", "latest", "absent" ]
default: "present"
enablerepo:
description:
- I(Repoid) of repositories to enable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a ",".
required: false
version_added: "0.9"
default: null
aliases: []
disablerepo:
description:
- I(Repoid) of repositories to disable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a ",".
required: false
version_added: "0.9"
default: null
aliases: []
conf_file:
description:
- The remote yum configuration file to use for the transaction.
required: false
version_added: "0.6"
default: null
aliases: []
disable_gpg_check:
description:
- Whether to disable the GPG checking of signatures of packages being
installed. Has an effect only if state is I(present) or I(latest).
required: false
version_added: "1.2"
default: "no"
choices: ["yes", "no"]
aliases: []
update_cache:
description:
- Force updating the cache. Has an effect only if state is I(present)
or I(latest).
required: false
version_added: "1.9"
default: "no"
choices: ["yes", "no"]
aliases: []
notes: []
# informational: requirements for nodes
requirements: [ yum ]
author: Seth Vidal
'''
EXAMPLES = '''
- name: install the latest version of Apache
yum: name=httpd state=latest
- name: remove the Apache package
yum: name=httpd state=absent
- name: install the latest version of Apache from the testing repo
yum: name=httpd enablerepo=testing state=present
- name: install one specific version of Apache
yum: name=httpd-2.2.29-1.4.amzn1 state=present
- name: upgrade all packages
yum: name=* state=latest
- name: install the nginx rpm from a remote repo
yum: name=http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present
- name: install nginx rpm from a local file
yum: name=/usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present
- name: install the 'Development tools' package group
yum: name="@Development tools" state=present
'''
# default NEVRA query format used for all repoquery/yum lookups
def_qf = "%{name}-%{version}-%{release}.%{arch}"
repoquery='/usr/bin/repoquery'
if not os.path.exists(repoquery):
    # repoquery binary missing: fall back to the yum python API
    # (install_yum_utils() may set this later)
    repoquery = None
yumbin='/usr/bin/yum'
import syslog
def log(msg):
    """Record *msg* in syslog (facility LOG_USER, tag 'ansible-yum')."""
    syslog.openlog('ansible-yum', 0, syslog.LOG_USER)
    syslog.syslog(syslog.LOG_NOTICE, msg)
def yum_base(conf_file=None):
    """Build a quiet YumBase instance, optionally pointed at *conf_file*.

    Debug and error output are silenced so the module's JSON stream stays
    clean.  The config path is honoured only when it exists on disk.
    """
    base = yum.YumBase()
    base.preconf.debuglevel = 0
    base.preconf.errorlevel = 0
    if conf_file and os.path.exists(conf_file):
        base.preconf.fn = conf_file
    return base
def install_yum_utils(module):
    """Best-effort install of yum-utils so the repoquery binary becomes usable.

    Does nothing in check mode or when yum itself cannot be located.  On a
    successful install, rebinds the module-level ``repoquery`` path.
    """
    if module.check_mode:
        return
    yum_path = module.get_bin_path('yum')
    if not yum_path:
        return
    rc, so, se = module.run_command('%s -y install yum-utils' % yum_path)
    if rc == 0:
        global repoquery
        repoquery = module.get_bin_path('repoquery')
def po_to_nevra(po):
    """Render a package object as ``name-version-release.arch``.

    Prefers the package object's pre-formatted ``ui_nevra`` attribute when
    it exists; otherwise assembles the string from the individual fields.
    """
    try:
        return po.ui_nevra
    except AttributeError:
        return '%s-%s-%s.%s' % (po.name, po.version, po.release, po.arch)
def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[], is_pkg=False):
    """Return the list of installed NEVRA strings matching *pkgspec*.

    Uses the yum python API when repoquery is unavailable, otherwise shells
    out to repoquery.  When *is_pkg* is False a second, provides-based
    lookup is attempted too.  Fails the module on query errors.
    NOTE(review): the mutable default args en_repos/dis_repos are shared
    across calls; they are never mutated here, so safe but fragile.
    """
    if not repoq:
        pkgs = []
        try:
            my = yum_base(conf_file)
            # apply repo enable/disable requests before querying
            for rid in dis_repos:
                my.repos.disableRepo(rid)
            for rid in en_repos:
                my.repos.enableRepo(rid)
            e,m,u = my.rpmdb.matchPackageNames([pkgspec])
            pkgs = e + m
            if not pkgs:
                # fall back to provides (virtual/file provides)
                pkgs.extend(my.returnInstalledPackagesByDep(pkgspec))
        except Exception, e:
            module.fail_json(msg="Failure talking to yum: %s" % e)
        return [ po_to_nevra(p) for p in pkgs ]
    else:
        # query only the rpmdb: disable every repo and narrow to installed
        cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, pkgspec]
        rc,out,err = module.run_command(cmd)
        if not is_pkg:
            # also resolve provides unless the caller knows this is a plain name
            cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, "--whatprovides", pkgspec]
            rc2,out2,err2 = module.run_command(cmd)
        else:
            rc2,out2,err2 = (0, '', '')
        if rc == 0 and rc2 == 0:
            out += out2
            return [ p for p in out.split('\n') if p.strip() ]
        else:
            module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2))
        return []
def is_available(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]):
    """Return NEVRA strings for packages matching *pkgspec* in the enabled repos.

    Mirrors is_installed() but searches repo metadata (pkgSack) instead of
    the rpmdb.  Fails the module on query errors.
    """
    if not repoq:
        pkgs = []
        try:
            my = yum_base(conf_file)
            # apply repo enable/disable requests before querying
            for rid in dis_repos:
                my.repos.disableRepo(rid)
            for rid in en_repos:
                my.repos.enableRepo(rid)
            e,m,u = my.pkgSack.matchPackageNames([pkgspec])
            pkgs = e + m
            if not pkgs:
                # fall back to a provides-based lookup
                pkgs.extend(my.returnPackagesByDep(pkgspec))
        except Exception, e:
            module.fail_json(msg="Failure talking to yum: %s" % e)
        return [ po_to_nevra(p) for p in pkgs ]
    else:
        myrepoq = list(repoq)
        r_cmd = ['--disablerepo', ','.join(dis_repos)]
        myrepoq.extend(r_cmd)
        r_cmd = ['--enablerepo', ','.join(en_repos)]
        myrepoq.extend(r_cmd)
        cmd = myrepoq + ["--qf", qf, pkgspec]
        rc,out,err = module.run_command(cmd)
        if rc == 0:
            return [ p for p in out.split('\n') if p.strip() ]
        else:
            module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
        return []
def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]):
    """Return the set of NEVRA strings matching *pkgspec* that have updates pending.

    API path: intersect the packages matching the spec with yum's 'updates'
    list.  Repoquery path: use --pkgnarrow=updates.  Fails the module on
    query errors.
    """
    if not repoq:
        retpkgs = []
        pkgs = []
        updates = []
        try:
            my = yum_base(conf_file)
            # apply repo enable/disable requests before querying
            for rid in dis_repos:
                my.repos.disableRepo(rid)
            for rid in en_repos:
                my.repos.enableRepo(rid)
            pkgs = my.returnPackagesByDep(pkgspec) + my.returnInstalledPackagesByDep(pkgspec)
            if not pkgs:
                # no provides match: try plain name matching
                e,m,u = my.pkgSack.matchPackageNames([pkgspec])
                pkgs = e + m
            updates = my.doPackageLists(pkgnarrow='updates').updates
        except Exception, e:
            module.fail_json(msg="Failure talking to yum: %s" % e)
        # keep only the matched packages that actually appear in the updates list
        for pkg in pkgs:
            if pkg in updates:
                retpkgs.append(pkg)
        return set([ po_to_nevra(p) for p in retpkgs ])
    else:
        myrepoq = list(repoq)
        r_cmd = ['--disablerepo', ','.join(dis_repos)]
        myrepoq.extend(r_cmd)
        r_cmd = ['--enablerepo', ','.join(en_repos)]
        myrepoq.extend(r_cmd)
        cmd = myrepoq + ["--pkgnarrow=updates", "--qf", qf, pkgspec]
        rc,out,err = module.run_command(cmd)
        if rc == 0:
            return set([ p for p in out.split('\n') if p.strip() ])
        else:
            module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
        return []
def what_provides(module, repoq, req_spec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]):
    """Return the set of NEVRA strings for packages that provide *req_spec*.

    Checks both repo metadata and the rpmdb so virtual provides, file
    provides and plain names are all resolved.  Fails the module on query
    errors.
    """
    if not repoq:
        pkgs = []
        try:
            my = yum_base(conf_file)
            # apply repo enable/disable requests before querying
            for rid in dis_repos:
                my.repos.disableRepo(rid)
            for rid in en_repos:
                my.repos.enableRepo(rid)
            pkgs = my.returnPackagesByDep(req_spec) + my.returnInstalledPackagesByDep(req_spec)
            if not pkgs:
                # nothing provides it: fall back to name matching in both
                # the repo metadata and the rpmdb
                e,m,u = my.pkgSack.matchPackageNames([req_spec])
                pkgs.extend(e)
                pkgs.extend(m)
                e,m,u = my.rpmdb.matchPackageNames([req_spec])
                pkgs.extend(e)
                pkgs.extend(m)
        except Exception, e:
            module.fail_json(msg="Failure talking to yum: %s" % e)
        return set([ po_to_nevra(p) for p in pkgs ])
    else:
        myrepoq = list(repoq)
        r_cmd = ['--disablerepo', ','.join(dis_repos)]
        myrepoq.extend(r_cmd)
        r_cmd = ['--enablerepo', ','.join(en_repos)]
        myrepoq.extend(r_cmd)
        # two queries: provides-based and plain-spec; merge their output
        cmd = myrepoq + ["--qf", qf, "--whatprovides", req_spec]
        rc,out,err = module.run_command(cmd)
        cmd = myrepoq + ["--qf", qf, req_spec]
        rc2,out2,err2 = module.run_command(cmd)
        if rc == 0 and rc2 == 0:
            out += out2
            pkgs = set([ p for p in out.split('\n') if p.strip() ])
            if not pkgs:
                # last resort: maybe it is only in the rpmdb
                pkgs = is_installed(module, repoq, req_spec, conf_file, qf=qf)
            return pkgs
        else:
            module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2))
        return []
def transaction_exists(pkglist):
    """
    checks the package list to see if any packages are
    involved in an incomplete transaction

    Returns a list of display labels ("name-arch") for the conflicting
    packages; empty when nothing conflicts or the yum helper functions
    could not be imported.
    """
    conflicts = []
    if not transaction_helpers:
        # yum/rpmUtils helpers unavailable at import time; cannot detect
        return conflicts
    # first, we create a list of the package 'nvreas'
    # so we can compare the pieces later more easily
    pkglist_nvreas = []
    for pkg in pkglist:
        pkglist_nvreas.append(splitFilename(pkg))
    # next, we build the list of packages that are
    # contained within an unfinished transaction
    unfinished_transactions = find_unfinished_transactions()
    for trans in unfinished_transactions:
        steps = find_ts_remaining(trans)
        for step in steps:
            # the action is install/erase/etc., but we only
            # care about the package spec contained in the step
            (action, step_spec) = step
            (n,v,r,e,a) = splitFilename(step_spec)
            # and see if that spec is in the list of packages
            # requested for installation/updating
            for pkg in pkglist_nvreas:
                # if the name and arch match, we're going to assume
                # this package is part of a pending transaction
                # the label is just for display purposes
                label = "%s-%s" % (n,a)
                if n == pkg[0] and a == pkg[4]:
                    if label not in conflicts:
                        conflicts.append("%s-%s" % (n,a))
                    break
    return conflicts
def local_nvra(module, path):
    """return nvra of a local rpm passed in

    Reads the rpm header directly; signature verification is skipped since
    only the name/version/release/arch fields are needed.
    """
    ts = rpm.TransactionSet()
    ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
    fd = os.open(path, os.O_RDONLY)
    try:
        header = ts.hdrFromFdno(fd)
    finally:
        # always release the descriptor, even if header parsing raises
        os.close(fd)
    return '%s-%s-%s.%s' % (header[rpm.RPMTAG_NAME],
                            header[rpm.RPMTAG_VERSION],
                            header[rpm.RPMTAG_RELEASE],
                            header[rpm.RPMTAG_ARCH])
def pkg_to_dict(pkgstr):
    """Parse one '|'-separated repoquery output line into a package dict.

    A blank/whitespace-only line yields ``{'error_parsing': pkgstr}``.
    The 'yumstate' key is 'installed' for rpmdb entries (pseudo-repo
    'installed') and 'available' otherwise.
    """
    if not pkgstr.strip():
        return {'error_parsing': pkgstr}
    name, epoch, version, release, arch, repo = pkgstr.split('|')
    info = {
        'name': name,
        'arch': arch,
        'epoch': epoch,
        'release': release,
        'version': version,
        'repo': repo,
        'nevra': '%s:%s-%s-%s.%s' % (epoch, name, version, release, arch),
    }
    info['yumstate'] = 'installed' if repo == 'installed' else 'available'
    return info
def repolist(module, repoq, qf="%{repoid}"):
    """Return the set of repo ids repoquery reports (empty list on failure).

    Historical quirk kept intact: a failed repoquery run yields an empty
    *list*, a successful one a *set* of non-blank output lines.
    """
    rc, out, _err = module.run_command(repoq + ["--qf", qf, "-a"])
    if rc != 0:
        return []
    return set(line for line in out.split('\n') if line.strip())
def list_stuff(module, conf_file, stuff):
    """Implement the module's list= option.

    *stuff* is one of 'installed', 'updates', 'available', 'repos', or an
    arbitrary package spec; returns a list of dicts built by pkg_to_dict()
    (or repoid dicts for 'repos').  Requires the repoquery binary.
    """
    # pipe-separated fields in the exact order pkg_to_dict() expects
    qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}"
    repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet']
    if conf_file and os.path.exists(conf_file):
        repoq += ['-c', conf_file]
    if stuff == 'installed':
        return [ pkg_to_dict(p) for p in is_installed(module, repoq, '-a', conf_file, qf=qf) if p.strip() ]
    elif stuff == 'updates':
        return [ pkg_to_dict(p) for p in is_update(module, repoq, '-a', conf_file, qf=qf) if p.strip() ]
    elif stuff == 'available':
        return [ pkg_to_dict(p) for p in is_available(module, repoq, '-a', conf_file, qf=qf) if p.strip() ]
    elif stuff == 'repos':
        return [ dict(repoid=name, state='enabled') for name in repolist(module, repoq) if name.strip() ]
    else:
        # arbitrary spec: report both installed and available matches
        return [ pkg_to_dict(p) for p in is_installed(module, repoq, stuff, conf_file, qf=qf) + is_available(module, repoq, stuff, conf_file, qf=qf) if p.strip() ]
def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
    """Install every spec in *items* (local rpm, URL, @group or pkgspec).

    Skips specs that are already satisfied (idempotence), fails early on
    missing files/packages or pending yum transactions, and exits the
    module via exit_json/fail_json - this function never returns normally.
    """
    res = {}
    res['results'] = []
    res['msg'] = ''
    res['rc'] = 0
    res['changed'] = False
    for spec in items:
        pkg = None
        # check if pkgspec is installed (if possible for idempotence)
        # localpkg
        if spec.endswith('.rpm') and '://' not in spec:
            # get the pkg name-v-r.arch
            if not os.path.exists(spec):
                res['msg'] += "No Package file matching '%s' found on system" % spec
                module.fail_json(**res)
            nvra = local_nvra(module, spec)
            # look for them in the rpmdb
            if is_installed(module, repoq, nvra, conf_file, en_repos=en_repos, dis_repos=dis_repos):
                # if they are there, skip it
                continue
            pkg = spec
        # URL
        elif '://' in spec:
            pkg = spec
        #groups :(
        elif spec.startswith('@'):
            # complete wild ass guess b/c it's a group
            pkg = spec
        # range requires or file-requires or pkgname :(
        else:
            # most common case is the pkg is already installed and done
            # short circuit all the bs - and search for it as a pkg in is_installed
            # if you find it then we're done
            if not set(['*','?']).intersection(set(spec)):
                pkgs = is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True)
                if pkgs:
                    res['results'].append('%s providing %s is already installed' % (pkgs[0], spec))
                    continue
            # look up what pkgs provide this
            pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos)
            if not pkglist:
                res['msg'] += "No Package matching '%s' found available, installed or updated" % spec
                module.fail_json(**res)
            # if any of the packages are involved in a transaction, fail now
            # so that we don't hang on the yum operation later
            conflicts = transaction_exists(pkglist)
            if len(conflicts) > 0:
                res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
                module.fail_json(**res)
            # if any of them are installed
            # then nothing to do
            found = False
            for this in pkglist:
                if is_installed(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True):
                    found = True
                    res['results'].append('%s providing %s is already installed' % (this, spec))
                    break
            # if the version of the pkg you have installed is not in ANY repo, but there are
            # other versions in the repos (both higher and lower) then the previous checks won't work.
            # so we check one more time. This really only works for pkgname - not for file provides or virt provides
            # but virt provides should be all caught in what_provides on its own.
            # highly irritating
            if not found:
                if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos):
                    found = True
                    res['results'].append('package providing %s is already installed' % (spec))
            if found:
                continue
            # if not - then pass in the spec as what to install
            # we could get here if nothing provides it but that's not
            # the error we're catching here
            pkg = spec
        cmd = yum_basecmd + ['install', pkg]
        if module.check_mode:
            module.exit_json(changed=True)
        changed = True
        rc, out, err = module.run_command(cmd)
        # Fail on invalid urls:
        if (rc == 1 and '://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)):
            err = 'Package at %s could not be installed' % spec
            module.fail_json(changed=False,msg=err,rc=1)
        elif (rc != 0 and 'Nothing to do' in err) or 'Nothing to do' in out:
            # avoid failing in the 'Nothing To Do' case
            # this may happen with an URL spec.
            # for an already installed group,
            # we get rc = 0 and 'Nothing to do' in out, not in err.
            rc = 0
            err = ''
            out = '%s: Nothing to do' % spec
            changed = False
        res['rc'] += rc
        res['results'].append(out)
        res['msg'] += err
        # FIXME - if we did an install - go and check the rpmdb to see if it actually installed
        # look for the pkg in rpmdb
        # look for the pkg via obsoletes
        # accumulate any changes
        res['changed'] |= changed
    module.exit_json(**res)
def remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
    """Remove every package/@group in *items*.

    Non-installed packages are skipped; after each removal the rpmdb is
    re-checked to confirm the package is really gone (not possible for
    groups).  Exits the module via exit_json/fail_json.
    """
    res = {}
    res['results'] = []
    res['msg'] = ''
    res['changed'] = False
    res['rc'] = 0
    for pkg in items:
        is_group = False
        # group remove - this is doom on a stick
        if pkg.startswith('@'):
            is_group = True
        else:
            if not is_installed(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos):
                res['results'].append('%s is not installed' % pkg)
                continue
        # run an actual yum transaction
        cmd = yum_basecmd + ["remove", pkg]
        if module.check_mode:
            module.exit_json(changed=True)
        rc, out, err = module.run_command(cmd)
        res['rc'] += rc
        res['results'].append(out)
        res['msg'] += err
        # compile the results into one batch. If anything is changed
        # then mark changed
        # at the end - if we've end up failed then fail out of the rest
        # of the process
        # at this point we should check to see if the pkg is no longer present
        if not is_group: # we can't sensibly check for a group being uninstalled reliably
            # look to see if the pkg shows up from is_installed. If it doesn't
            if not is_installed(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos):
                res['changed'] = True
            else:
                module.fail_json(**res)
        if rc != 0:
            module.fail_json(**res)
    module.exit_json(**res)
def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
    """Ensure every spec in *items* is present at its latest version.

    '*' upgrades everything (via 'check-update' first), '@group' specs are
    handed straight to yum, plain specs are installed when absent and
    updated when present.  Exits the module via exit_json/fail_json; never
    returns normally.
    """
    res = {}
    res['results'] = []
    res['msg'] = ''
    res['changed'] = False
    res['rc'] = 0
    for spec in items:
        pkg = None
        basecmd = 'update'
        cmd = ''
        # groups, again
        if spec.startswith('@'):
            pkg = spec
        elif spec == '*': #update all
            # use check-update to see if there is any need
            rc,out,err = module.run_command(yum_basecmd + ['check-update'])
            if rc == 100:
                # exit status 100 from check-update means updates exist
                cmd = yum_basecmd + [basecmd]
            else:
                res['results'].append('All packages up to date')
                continue
        # dep/pkgname - find it
        else:
            # installed -> 'update' it; absent -> 'install' it
            if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos):
                basecmd = 'update'
            else:
                basecmd = 'install'
            pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos)
            if not pkglist:
                res['msg'] += "No Package matching '%s' found available, installed or updated" % spec
                module.fail_json(**res)
            nothing_to_do = True
            for this in pkglist:
                if basecmd == 'install' and is_available(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos):
                    nothing_to_do = False
                    break
                # BUGFIX: en_repos was previously passed as dis_repos here,
                # so user-disabled repos were still consulted when checking
                # for available updates.
                if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos):
                    nothing_to_do = False
                    break
            if nothing_to_do:
                res['results'].append("All packages providing %s are up to date" % spec)
                continue
            # if any of the packages are involved in a transaction, fail now
            # so that we don't hang on the yum operation later
            conflicts = transaction_exists(pkglist)
            if len(conflicts) > 0:
                res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
                module.fail_json(**res)
            pkg = spec
        if not cmd:
            cmd = yum_basecmd + [basecmd, pkg]
        if module.check_mode:
            return module.exit_json(changed=True)
        rc, out, err = module.run_command(cmd)
        res['rc'] += rc
        res['results'].append(out)
        res['msg'] += err
        # FIXME if it is - update it and check to see if it applied
        # check to see if there is no longer an update available for the pkgspec
        if rc:
            res['failed'] = True
        else:
            res['changed'] = True
    module.exit_json(**res)
def ensure(module, state, pkgs, conf_file, enablerepo, disablerepo,
           disable_gpg_check, exclude):
    """Build the yum command line for *state* and dispatch to install/remove/latest.

    Also validates enabled repos up front (for install/latest states) so a
    broken repo fails with a clear message instead of mid-transaction.
    Normally exits inside the dispatched function; the trailing return is a
    failsafe for an unexpected state.
    """
    # need debug level 2 to get 'Nothing to do' for groupinstall.
    yum_basecmd = [yumbin, '-d', '2', '-y']
    if not repoquery:
        repoq = None
    else:
        repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet']
    if conf_file and os.path.exists(conf_file):
        yum_basecmd += ['-c', conf_file]
        if repoq:
            repoq += ['-c', conf_file]
    dis_repos =[]
    en_repos = []
    if disablerepo:
        dis_repos = disablerepo.split(',')
        r_cmd = ['--disablerepo=%s' % disablerepo]
        yum_basecmd.extend(r_cmd)
    if enablerepo:
        en_repos = enablerepo.split(',')
        r_cmd = ['--enablerepo=%s' % enablerepo]
        yum_basecmd.extend(r_cmd)
    if exclude:
        e_cmd = ['--exclude=%s' % exclude]
        yum_basecmd.extend(e_cmd)
    if state in ['installed', 'present', 'latest']:
        if module.params.get('update_cache'):
            module.run_command(yum_basecmd + ['makecache'])
        my = yum_base(conf_file)
        try:
            if disablerepo:
                my.repos.disableRepo(disablerepo)
            current_repos = my.repos.repos.keys()
            if enablerepo:
                try:
                    my.repos.enableRepo(enablerepo)
                    new_repos = my.repos.repos.keys()
                    for i in new_repos:
                        if not i in current_repos:
                            rid = my.repos.getRepo(i)
                            # NOTE(review): touching repoXML.repoid appears to
                            # force the repo metadata to load so a bad repo
                            # fails here rather than later - confirm
                            a = rid.repoXML.repoid
                    current_repos = new_repos
                except yum.Errors.YumBaseError, e:
                    module.fail_json(msg="Error setting/accessing repos: %s" % (e))
        except yum.Errors.YumBaseError, e:
            module.fail_json(msg="Error accessing repos: %s" % e)
    if state in ['installed', 'present']:
        if disable_gpg_check:
            yum_basecmd.append('--nogpgcheck')
        install(module, pkgs, repoq, yum_basecmd, conf_file, en_repos, dis_repos)
    elif state in ['removed', 'absent']:
        remove(module, pkgs, repoq, yum_basecmd, conf_file, en_repos, dis_repos)
    elif state == 'latest':
        if disable_gpg_check:
            yum_basecmd.append('--nogpgcheck')
        latest(module, pkgs, repoq, yum_basecmd, conf_file, en_repos, dis_repos)
    # should be caught by AnsibleModule argument_spec
    return dict(changed=False, failed=True, results='', errors='unexpected state')
def main():
    """Module entry point: parse arguments, then dispatch to list_stuff() or ensure()."""
    # state=installed name=pkgspec
    # state=removed name=pkgspec
    # state=latest name=pkgspec
    #
    # informational commands:
    #   list=installed
    #   list=updates
    #   list=available
    #   list=repos
    #   list=pkgspec
    module = AnsibleModule(
        argument_spec = dict(
            name=dict(aliases=['pkg'], type="list"),
            exclude=dict(required=False, default=None),
            # removed==absent, installed==present, these are accepted as aliases
            state=dict(default='installed', choices=['absent','present','installed','removed','latest']),
            enablerepo=dict(),
            disablerepo=dict(),
            list=dict(),
            conf_file=dict(default=None),
            disable_gpg_check=dict(required=False, default="no", type='bool'),
            update_cache=dict(required=False, default="no", type='bool'),
            # this should not be needed, but exists as a failsafe
            install_repoquery=dict(required=False, default="yes", type='bool'),
        ),
        required_one_of = [['name','list']],
        mutually_exclusive = [['name','list']],
        supports_check_mode = True
    )
    # this should not be needed, but exists as a failsafe
    params = module.params
    if params['install_repoquery'] and not repoquery and not module.check_mode:
        install_yum_utils(module)
    if params['list']:
        if not repoquery:
            module.fail_json(msg="repoquery is required to use list= with this module. Please install the yum-utils package.")
        results = dict(results=list_stuff(module, params['conf_file'], params['list']))
        module.exit_json(**results)
    else:
        pkg = [ p.strip() for p in params['name']]
        exclude = params['exclude']
        state = params['state']
        enablerepo = params.get('enablerepo', '')
        disablerepo = params.get('disablerepo', '')
        disable_gpg_check = params['disable_gpg_check']
        # ensure() exits the module itself on success; reaching the
        # fail_json below means the dispatch fell through unexpectedly
        res = ensure(module, state, pkg, params['conf_file'], enablerepo,
                     disablerepo, disable_gpg_check, exclude)
        module.fail_json(msg="we should never get here unless this all failed", **res)
# import module snippets
from ansible.module_utils.basic import *
main()
|
operepo/ope | refs/heads/master | libs/paramiko/kex_gex.py | 36 | # Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Variant on `KexGroup1 <paramiko.kex_group1.KexGroup1>` where the prime "p" and
generator "g" are provided by the server. A bit more work is required on the
client side, and a **lot** more on the server side.
"""
import os
from hashlib import sha1
from paramiko import util
from paramiko.common import DEBUG
from paramiko.message import Message
from paramiko.py3compat import byte_chr, byte_ord, byte_mask
from paramiko.ssh_exception import SSHException
_MSG_KEXDH_GEX_REQUEST_OLD, _MSG_KEXDH_GEX_GROUP, _MSG_KEXDH_GEX_INIT, \
_MSG_KEXDH_GEX_REPLY, _MSG_KEXDH_GEX_REQUEST = range(30, 35)
c_MSG_KEXDH_GEX_REQUEST_OLD, c_MSG_KEXDH_GEX_GROUP, c_MSG_KEXDH_GEX_INIT, \
c_MSG_KEXDH_GEX_REPLY, c_MSG_KEXDH_GEX_REQUEST = [byte_chr(c) for c in range(30, 35)]
class KexGex (object):
    """Diffie-Hellman group-exchange key exchange (SHA-1 flavor, RFC 4419).

    Unlike the fixed-group kex, the server picks the prime ``p`` and
    generator ``g`` at run time, within bit-length bounds requested by the
    client.  An instance drives one exchange for a transport, on either the
    client or the server side.
    """

    name = 'diffie-hellman-group-exchange-sha1'
    # Bounds (in bits) for the modulus size we will request or serve.
    min_bits = 1024
    max_bits = 8192
    preferred_bits = 2048

    def __init__(self, transport):
        self.transport = transport
        # DH group parameters and ephemeral values, filled in as the
        # exchange progresses: q = (p - 1) / 2, x is our secret exponent,
        # e / f are the client / server public values.
        self.p = None
        self.q = None
        self.g = None
        self.x = None
        self.e = None
        self.f = None
        # True when the peer used the old-style request that carries only a
        # preferred size; this changes what goes into the exchange hash.
        self.old_style = False

    def start_kex(self, _test_old_style=False):
        """Begin the exchange: server waits for a request, client sends one."""
        if self.transport.server_mode:
            self.transport._expect_packet(_MSG_KEXDH_GEX_REQUEST, _MSG_KEXDH_GEX_REQUEST_OLD)
            return
        # request a bit range: we accept (min_bits) to (max_bits), but prefer
        # (preferred_bits). according to the spec, we shouldn't pull the
        # minimum up above 1024.
        m = Message()
        if _test_old_style:
            # only used for unit tests: we shouldn't ever send this
            m.add_byte(c_MSG_KEXDH_GEX_REQUEST_OLD)
            m.add_int(self.preferred_bits)
            self.old_style = True
        else:
            m.add_byte(c_MSG_KEXDH_GEX_REQUEST)
            m.add_int(self.min_bits)
            m.add_int(self.preferred_bits)
            m.add_int(self.max_bits)
        self.transport._send_message(m)
        self.transport._expect_packet(_MSG_KEXDH_GEX_GROUP)

    def parse_next(self, ptype, m):
        """Dispatch an incoming kex packet to the matching handler."""
        if ptype == _MSG_KEXDH_GEX_REQUEST:
            return self._parse_kexdh_gex_request(m)
        elif ptype == _MSG_KEXDH_GEX_GROUP:
            return self._parse_kexdh_gex_group(m)
        elif ptype == _MSG_KEXDH_GEX_INIT:
            return self._parse_kexdh_gex_init(m)
        elif ptype == _MSG_KEXDH_GEX_REPLY:
            return self._parse_kexdh_gex_reply(m)
        elif ptype == _MSG_KEXDH_GEX_REQUEST_OLD:
            return self._parse_kexdh_gex_request_old(m)
        raise SSHException('KexGex asked to handle packet type %d' % ptype)

    ### internals...

    def _generate_x(self):
        # generate an "x" (1 < x < (p-1)/2).
        q = (self.p - 1) // 2
        qnorm = util.deflate_long(q, 0)
        qhbyte = byte_ord(qnorm[0])
        byte_count = len(qnorm)
        qmask = 0xff
        # Build a mask for the high byte so random candidates stay within
        # q's bit length, keeping the rejection-sampling loop short.
        while not (qhbyte & 0x80):
            qhbyte <<= 1
            qmask >>= 1
        while True:
            x_bytes = os.urandom(byte_count)
            x_bytes = byte_mask(x_bytes[0], qmask) + x_bytes[1:]
            x = util.inflate_long(x_bytes, 1)
            if (x > 1) and (x < q):
                break
        self.x = x

    def _parse_kexdh_gex_request(self, m):
        """Server side: pick a modulus matching the client's size request."""
        minbits = m.get_int()
        preferredbits = m.get_int()
        maxbits = m.get_int()
        # smoosh the user's preferred size into our own limits
        if preferredbits > self.max_bits:
            preferredbits = self.max_bits
        if preferredbits < self.min_bits:
            preferredbits = self.min_bits
        # fix min/max if they're inconsistent. technically, we could just pout
        # and hang up, but there's no harm in giving them the benefit of the
        # doubt and just picking a bitsize for them.
        if minbits > preferredbits:
            minbits = preferredbits
        if maxbits < preferredbits:
            maxbits = preferredbits
        # now save a copy
        self.min_bits = minbits
        self.preferred_bits = preferredbits
        self.max_bits = maxbits
        # generate prime
        pack = self.transport._get_modulus_pack()
        if pack is None:
            raise SSHException('Can\'t do server-side gex with no modulus pack')
        self.transport._log(DEBUG, 'Picking p (%d <= %d <= %d bits)' % (minbits, preferredbits, maxbits))
        self.g, self.p = pack.get_modulus(minbits, preferredbits, maxbits)
        m = Message()
        m.add_byte(c_MSG_KEXDH_GEX_GROUP)
        m.add_mpint(self.p)
        m.add_mpint(self.g)
        self.transport._send_message(m)
        self.transport._expect_packet(_MSG_KEXDH_GEX_INIT)

    def _parse_kexdh_gex_request_old(self, m):
        # same as above, but without min_bits or max_bits (used by older clients like putty)
        self.preferred_bits = m.get_int()
        # smoosh the user's preferred size into our own limits
        if self.preferred_bits > self.max_bits:
            self.preferred_bits = self.max_bits
        if self.preferred_bits < self.min_bits:
            self.preferred_bits = self.min_bits
        # generate prime
        pack = self.transport._get_modulus_pack()
        if pack is None:
            raise SSHException('Can\'t do server-side gex with no modulus pack')
        self.transport._log(DEBUG, 'Picking p (~ %d bits)' % (self.preferred_bits,))
        self.g, self.p = pack.get_modulus(self.min_bits, self.preferred_bits, self.max_bits)
        m = Message()
        m.add_byte(c_MSG_KEXDH_GEX_GROUP)
        m.add_mpint(self.p)
        m.add_mpint(self.g)
        self.transport._send_message(m)
        self.transport._expect_packet(_MSG_KEXDH_GEX_INIT)
        # Remember the old-style request: min/max must be left out of the
        # exchange hash later.
        self.old_style = True

    def _parse_kexdh_gex_group(self, m):
        """Client side: got the server's p and g; send e = g^x mod p."""
        self.p = m.get_mpint()
        self.g = m.get_mpint()
        # reject if p's bit length < 1024 or > 8192
        bitlen = util.bit_length(self.p)
        if (bitlen < 1024) or (bitlen > 8192):
            raise SSHException('Server-generated gex p (don\'t ask) is out of range (%d bits)' % bitlen)
        self.transport._log(DEBUG, 'Got server p (%d bits)' % bitlen)
        self._generate_x()
        # now compute e = g^x mod p
        self.e = pow(self.g, self.x, self.p)
        m = Message()
        m.add_byte(c_MSG_KEXDH_GEX_INIT)
        m.add_mpint(self.e)
        self.transport._send_message(m)
        self.transport._expect_packet(_MSG_KEXDH_GEX_REPLY)

    def _parse_kexdh_gex_init(self, m):
        """Server side: got client's e; compute K, sign the exchange hash, reply."""
        self.e = m.get_mpint()
        if (self.e < 1) or (self.e > self.p - 1):
            raise SSHException('Client kex "e" is out of range')
        self._generate_x()
        self.f = pow(self.g, self.x, self.p)
        K = pow(self.e, self.x, self.p)
        key = self.transport.get_server_key().asbytes()
        # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K)
        hm = Message()
        hm.add(self.transport.remote_version, self.transport.local_version,
               self.transport.remote_kex_init, self.transport.local_kex_init,
               key)
        # Old-style requests carried only the preferred size, so only that
        # value is hashed in that case.
        if not self.old_style:
            hm.add_int(self.min_bits)
        hm.add_int(self.preferred_bits)
        if not self.old_style:
            hm.add_int(self.max_bits)
        hm.add_mpint(self.p)
        hm.add_mpint(self.g)
        hm.add_mpint(self.e)
        hm.add_mpint(self.f)
        hm.add_mpint(K)
        H = sha1(hm.asbytes()).digest()
        self.transport._set_K_H(K, H)
        # sign it
        sig = self.transport.get_server_key().sign_ssh_data(H)
        # send reply
        m = Message()
        m.add_byte(c_MSG_KEXDH_GEX_REPLY)
        m.add_string(key)
        m.add_mpint(self.f)
        m.add_string(sig)
        self.transport._send_message(m)
        self.transport._activate_outbound()

    def _parse_kexdh_gex_reply(self, m):
        """Client side: verify the host key and signature, then finish."""
        host_key = m.get_string()
        self.f = m.get_mpint()
        sig = m.get_string()
        if (self.f < 1) or (self.f > self.p - 1):
            raise SSHException('Server kex "f" is out of range')
        K = pow(self.f, self.x, self.p)
        # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K)
        hm = Message()
        hm.add(self.transport.local_version, self.transport.remote_version,
               self.transport.local_kex_init, self.transport.remote_kex_init,
               host_key)
        # Mirror of the server-side hash: old-style omits min/max.
        if not self.old_style:
            hm.add_int(self.min_bits)
        hm.add_int(self.preferred_bits)
        if not self.old_style:
            hm.add_int(self.max_bits)
        hm.add_mpint(self.p)
        hm.add_mpint(self.g)
        hm.add_mpint(self.e)
        hm.add_mpint(self.f)
        hm.add_mpint(K)
        self.transport._set_K_H(K, sha1(hm.asbytes()).digest())
        self.transport._verify_key(host_key, sig)
        self.transport._activate_outbound()
|
nzavagli/UnrealPy | refs/heads/master | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Twisted-15.2.1/twisted/conch/client/agent.py | 69 | # -*- test-case-name: twisted.conch.test.test_default -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Accesses the key agent for user authentication.
Maintainer: Paul Swartz
"""
import os
from twisted.conch.ssh import agent, channel, keys
from twisted.internet import protocol, reactor
from twisted.python import log
class SSHAgentClient(agent.SSHAgentClient):
    """Agent client that caches the public-key blobs it fetches."""

    def __init__(self):
        agent.SSHAgentClient.__init__(self)
        # Key blobs fetched from the agent, consumed front-to-back by
        # getPublicKey().
        self.blobs = []

    def getPublicKeys(self):
        """Ask the agent for its identities and cache their key blobs."""
        d = self.requestIdentities()
        return d.addCallback(self._cbPublicKeys)

    def _cbPublicKeys(self, blobcomm):
        log.msg('got %i public keys' % len(blobcomm))
        # Each entry is a (blob, comment) pair; keep only the blob.
        self.blobs = [pair[0] for pair in blobcomm]

    def getPublicKey(self):
        """
        Return a L{Key} from the first blob in C{self.blobs}, if any, or
        return C{None}.
        """
        if not self.blobs:
            return None
        return keys.Key.fromString(self.blobs.pop(0))
class SSHAgentForwardingChannel(channel.SSHChannel):
    """
    Channel that forwards agent requests from the remote side to the local
    SSH agent listening on C{SSH_AUTH_SOCK}.

    Data that arrives before the local agent connection is established is
    buffered in C{self.buf} and flushed once the connection completes.
    """

    def channelOpen(self, specificData):
        # Initialize state first: closed() reads self.local, and it must
        # exist even if the connection attempt below fails immediately
        # (previously an AttributeError in that case).
        self.local = None
        self.buf = ''
        cc = protocol.ClientCreator(reactor, SSHAgentForwardingLocal)
        d = cc.connectUNIX(os.environ['SSH_AUTH_SOCK'])
        d.addCallback(self._cbGotLocal)
        d.addErrback(lambda x: self.loseConnection())

    def _cbGotLocal(self, local):
        self.local = local
        self.local.dataReceived = self.write
        # Flush anything buffered while the local connection was being set
        # up (previously this data was silently dropped), then switch to
        # direct pass-through.
        if self.buf:
            self.local.transport.write(self.buf)
        self.buf = ''
        self.dataReceived = self.local.transport.write

    def dataReceived(self, data):
        # Only used until _cbGotLocal rebinds dataReceived above.
        self.buf += data

    def closed(self):
        if self.local:
            self.local.loseConnection()
            self.local = None
class SSHAgentForwardingLocal(protocol.Protocol):
    """Protocol for the local unix-domain connection to the real SSH agent.

    C{SSHAgentForwardingChannel} rebinds C{dataReceived} on instances of
    this class after connecting, so no behaviour is defined here.
    """
    pass
|
dhirajt/dtc_bus_routes | refs/heads/master | dtc_bus_routes/__init__.py | 12133432 | |
ase1/421_521_final_project | refs/heads/master | GUI/text_to_image_test/text_to_image.py | 12133432 | |
jank3/django | refs/heads/master | tests/view_tests/app4/__init__.py | 12133432 | |
shiney-wh/phpsploit | refs/heads/master | src/utils/__init__.py | 1 | """Miscellaneous utils for phpsploit internals
"""
# path related utils
from . import path
from . import ascii
from . import time
|
tudennis/LeetCode---kamyu104-11-24-2015 | refs/heads/master | Python/search-in-rotated-sorted-array-ii.py | 2 | from __future__ import print_function
# Time:  O(logn) on average; duplicates can degrade the worst case to O(n)
# Space: O(1)
#
# Follow up for "Search in Rotated Sorted Array":
# What if duplicates are allowed?
#
# Would this affect the run-time complexity? How and why?
#
# Write a function to determine if a given target is in the array.
#
class Solution(object):
    def search(self, nums, target):
        """
        Determine whether target occurs in a rotated sorted array that may
        contain duplicates.

        :type nums: List[int]
        :type target: int
        :rtype: bool
        """
        left, right = 0, len(nums) - 1
        while left <= right:
            # Floor division keeps mid an int on both Python 2 and 3; the
            # original "/" produced a float on Python 3 and broke indexing.
            mid = left + (right - left) // 2
            if nums[mid] == target:
                return True
            elif nums[mid] == nums[left]:
                # Duplicate of the left edge: cannot tell which half is
                # sorted, so shrink the window by one (worst case O(n)).
                left += 1
            elif (nums[mid] > nums[left] and nums[left] <= target < nums[mid]) or \
                 (nums[mid] < nums[left] and not (nums[mid] < target <= nums[right])):
                right = mid - 1
            else:
                left = mid + 1
        return False
if __name__ == "__main__":
    # Ad-hoc smoke tests; each lookup targets a present element, so every
    # line should print True.
    print(Solution().search([3, 5, 1], 3))
    print(Solution().search([2, 2, 3, 3, 4, 1], 1))
    print(Solution().search([4, 4, 5, 6, 7, 0, 1, 2], 5))
|
elijah513/django | refs/heads/master | django/contrib/admin/decorators.py | 558 | def register(*models, **kwargs):
"""
Registers the given model(s) classes and wrapped ModelAdmin class with
admin site:
@register(Author)
class AuthorAdmin(admin.ModelAdmin):
pass
A kwarg of `site` can be passed as the admin site, otherwise the default
admin site will be used.
"""
from django.contrib.admin import ModelAdmin
from django.contrib.admin.sites import site, AdminSite
def _model_admin_wrapper(admin_class):
admin_site = kwargs.pop('site', site)
if not isinstance(admin_site, AdminSite):
raise ValueError('site must subclass AdminSite')
if not issubclass(admin_class, ModelAdmin):
raise ValueError('Wrapped class must subclass ModelAdmin.')
admin_site.register(models, admin_class=admin_class)
return admin_class
return _model_admin_wrapper
|
victordion/YouCompleteMe | refs/heads/master | python/ycm/youcompleteme.py | 15 | #!/usr/bin/env python
#
# Copyright (C) 2011, 2012 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
import os
import vim
import tempfile
import json
import signal
import base64
from subprocess import PIPE
from ycm import vimsupport
from ycmd import utils
from ycmd.request_wrap import RequestWrap
from ycm.diagnostic_interface import DiagnosticInterface
from ycm.omni_completer import OmniCompleter
from ycm import syntax_parse
from ycm.client.ycmd_keepalive import YcmdKeepalive
from ycm.client.base_request import BaseRequest, BuildRequestData
from ycm.client.completer_available_request import SendCompleterAvailableRequest
from ycm.client.command_request import SendCommandRequest
from ycm.client.completion_request import CompletionRequest
from ycm.client.omni_completion_request import OmniCompletionRequest
from ycm.client.event_notification import ( SendEventNotificationAsync,
EventNotification )
from ycmd.responses import ServerError
try:
from UltiSnips import UltiSnips_Manager
USE_ULTISNIPS_DATA = True
except ImportError:
USE_ULTISNIPS_DATA = False
def PatchNoProxy():
  """Append localhost entries to the no_proxy environment variable.

  Keeps HTTP libraries from routing localhost ycmd traffic through any
  proxy the user has configured; existing no_proxy entries are preserved.
  """
  additions = '127.0.0.1,localhost'
  existing = os.environ.get( 'no_proxy', '' )
  if existing:
    os.environ[ 'no_proxy' ] = existing + ',' + additions
  else:
    os.environ[ 'no_proxy' ] = additions
# We need this so that Requests doesn't end up using the local HTTP proxy when
# talking to ycmd. Users should actually be setting this themselves when
# configuring a proxy server on their machine, but most don't know they need to
# or how to do it, so we do it for them.
# Relevant issues:
# https://github.com/Valloric/YouCompleteMe/issues/641
# https://github.com/kennethreitz/requests/issues/879
PatchNoProxy()
# Force the Python interpreter embedded in Vim (in which we are running) to
# ignore the SIGINT signal. This helps reduce the fallout of a user pressing
# Ctrl-C in Vim.
signal.signal( signal.SIGINT, signal.SIG_IGN )
HMAC_SECRET_LENGTH = 16
NUM_YCMD_STDERR_LINES_ON_CRASH = 30
SERVER_CRASH_MESSAGE_STDERR_FILE_DELETED = (
'The ycmd server SHUT DOWN (restart with :YcmRestartServer). '
'Logfile was deleted; set g:ycm_server_keep_logfiles to see errors '
'in the future.' )
SERVER_CRASH_MESSAGE_STDERR_FILE = (
'The ycmd server SHUT DOWN (restart with :YcmRestartServer). ' +
'Stderr (last {0} lines):\n\n'.format( NUM_YCMD_STDERR_LINES_ON_CRASH ) )
SERVER_CRASH_MESSAGE_SAME_STDERR = (
'The ycmd server SHUT DOWN (restart with :YcmRestartServer). '
' check console output for logs!' )
SERVER_IDLE_SUICIDE_SECONDS = 10800 # 3 hours
class YouCompleteMe( object ):
  """Vim-side facade for the ycmd completion server.

  Owns the ycmd subprocess lifecycle (spawn, crash detection, restart,
  cleanup), builds completion/event requests for the current buffer, and
  relays diagnostics back into Vim.  One instance lives for the whole Vim
  session.
  """

  def __init__( self, user_options ):
    self._user_options = user_options
    self._user_notified_about_crash = False
    self._diag_interface = DiagnosticInterface( user_options )
    self._omnicomp = OmniCompleter( user_options )
    self._latest_file_parse_request = None
    self._latest_completion_request = None
    self._server_stdout = None
    self._server_stderr = None
    self._server_popen = None
    self._filetypes_with_keywords_loaded = set()
    # Periodic pings keep ycmd from reaching its idle-suicide timeout.
    self._ycmd_keepalive = YcmdKeepalive()
    self._SetupServer()
    self._ycmd_keepalive.Start()

  def _SetupServer( self ):
    """Spawn a fresh ycmd process and point BaseRequest at it."""
    self._available_completers = {}
    server_port = utils.GetUnusedLocalhostPort()
    # The temp options file is deleted by ycmd during startup
    with tempfile.NamedTemporaryFile( delete = False ) as options_file:
      # Shared HMAC secret authenticates the HTTP traffic between this
      # plugin and ycmd in both directions.
      hmac_secret = os.urandom( HMAC_SECRET_LENGTH )
      options_dict = dict( self._user_options )
      options_dict[ 'hmac_secret' ] = base64.b64encode( hmac_secret )
      json.dump( options_dict, options_file )
      options_file.flush()
      args = [ utils.PathToPythonInterpreter(),
               _PathToServerScript(),
               '--port={0}'.format( server_port ),
               '--options_file={0}'.format( options_file.name ),
               '--log={0}'.format( self._user_options[ 'server_log_level' ] ),
               '--idle_suicide_seconds={0}'.format(
                   SERVER_IDLE_SUICIDE_SECONDS )]
      if not self._user_options[ 'server_use_vim_stdout' ]:
        filename_format = os.path.join( utils.PathToTempDir(),
                                        'server_{port}_{std}.log' )
        self._server_stdout = filename_format.format( port = server_port,
                                                      std = 'stdout' )
        self._server_stderr = filename_format.format( port = server_port,
                                                      std = 'stderr' )
        args.append('--stdout={0}'.format( self._server_stdout ))
        args.append('--stderr={0}'.format( self._server_stderr ))
        if self._user_options[ 'server_keep_logfiles' ]:
          args.append('--keep_logfiles')
      self._server_popen = utils.SafePopen( args, stdin_windows = PIPE,
                                            stdout = PIPE, stderr = PIPE)
    BaseRequest.server_location = 'http://127.0.0.1:' + str( server_port )
    BaseRequest.hmac_secret = hmac_secret
    self._NotifyUserIfServerCrashed()

  def IsServerAlive( self ):
    returncode = self._server_popen.poll()
    # When the process hasn't finished yet, poll() returns None.
    return returncode is None

  def _NotifyUserIfServerCrashed( self ):
    """Show a one-time crash message, with the stderr log tail if present."""
    if self._user_notified_about_crash or self.IsServerAlive():
      return
    self._user_notified_about_crash = True
    if self._server_stderr:
      try:
        with open( self._server_stderr, 'r' ) as server_stderr_file:
          # BUGFIX: show the LAST N lines, as SERVER_CRASH_MESSAGE_STDERR_FILE
          # promises. The previous slice [ : -N ] kept the beginning of the
          # file and dropped the actual crash output.
          error_output = ''.join( server_stderr_file.readlines()[
              - NUM_YCMD_STDERR_LINES_ON_CRASH : ] )
          vimsupport.PostMultiLineNotice( SERVER_CRASH_MESSAGE_STDERR_FILE +
                                          error_output )
      except IOError:
        vimsupport.PostVimMessage( SERVER_CRASH_MESSAGE_STDERR_FILE_DELETED )
    else:
      vimsupport.PostVimMessage( SERVER_CRASH_MESSAGE_SAME_STDERR )

  def ServerPid( self ):
    if not self._server_popen:
      return -1
    return self._server_popen.pid

  def _ServerCleanup( self ):
    if self.IsServerAlive():
      self._server_popen.terminate()

  def RestartServer( self ):
    """Tear down the current ycmd process and start a new one."""
    vimsupport.PostVimMessage( 'Restarting ycmd server...' )
    self._user_notified_about_crash = False
    self._ServerCleanup()
    self._SetupServer()

  def CreateCompletionRequest( self, force_semantic = False ):
    """Build (and remember) a completion request for the current buffer.

    Falls back to the Vim omnifunc-based completer when no native
    completer is available for the filetype and the omnicompleter wants to
    fire.
    """
    request_data = BuildRequestData()
    if ( not self.NativeFiletypeCompletionAvailable() and
         self.CurrentFiletypeCompletionEnabled() ):
      wrapped_request_data = RequestWrap( request_data )
      if self._omnicomp.ShouldUseNow( wrapped_request_data ):
        self._latest_completion_request = OmniCompletionRequest(
            self._omnicomp, wrapped_request_data )
        return self._latest_completion_request
    self._AddExtraConfDataIfNeeded( request_data )
    if force_semantic:
      request_data[ 'force_semantic' ] = True
    self._latest_completion_request = CompletionRequest( request_data )
    return self._latest_completion_request

  def SendCommandRequest( self, arguments, completer ):
    if self.IsServerAlive():
      return SendCommandRequest( arguments, completer )

  def GetDefinedSubcommands( self ):
    """Return the subcommands the active completer supports, or []."""
    if self.IsServerAlive():
      try:
        return BaseRequest.PostDataToHandler( BuildRequestData(),
                                              'defined_subcommands' )
      except ServerError:
        return []
    else:
      return []

  def GetCurrentCompletionRequest( self ):
    return self._latest_completion_request

  def GetOmniCompleter( self ):
    return self._omnicomp

  def FiletypeCompleterExistsForFiletype( self, filetype ):
    """True if ycmd has a native completer for filetype (cached per type)."""
    try:
      return self._available_completers[ filetype ]
    except KeyError:
      pass
    exists_completer = ( self.IsServerAlive() and
                         bool( SendCompleterAvailableRequest( filetype ) ) )
    self._available_completers[ filetype ] = exists_completer
    return exists_completer

  def NativeFiletypeCompletionAvailable( self ):
    return any( [ self.FiletypeCompleterExistsForFiletype( x ) for x in
                  vimsupport.CurrentFiletypes() ] )

  def NativeFiletypeCompletionUsable( self ):
    return ( self.CurrentFiletypeCompletionEnabled() and
             self.NativeFiletypeCompletionAvailable() )

  def OnFileReadyToParse( self ):
    """Kick off an async FileReadyToParse event with buffer extra data."""
    self._omnicomp.OnFileReadyToParse( None )
    if not self.IsServerAlive():
      self._NotifyUserIfServerCrashed()
    extra_data = {}
    self._AddTagsFilesIfNeeded( extra_data )
    self._AddSyntaxDataIfNeeded( extra_data )
    self._AddExtraConfDataIfNeeded( extra_data )
    self._latest_file_parse_request = EventNotification( 'FileReadyToParse',
                                                         extra_data )
    self._latest_file_parse_request.Start()

  def OnBufferUnload( self, deleted_buffer_file ):
    if not self.IsServerAlive():
      return
    SendEventNotificationAsync( 'BufferUnload',
                                { 'unloaded_buffer': deleted_buffer_file } )

  def OnBufferVisit( self ):
    if not self.IsServerAlive():
      return
    extra_data = {}
    _AddUltiSnipsDataIfNeeded( extra_data )
    SendEventNotificationAsync( 'BufferVisit', extra_data )

  def OnInsertLeave( self ):
    if not self.IsServerAlive():
      return
    SendEventNotificationAsync( 'InsertLeave' )

  def OnCursorMoved( self ):
    self._diag_interface.OnCursorMoved()

  def OnVimLeave( self ):
    self._ServerCleanup()

  def OnCurrentIdentifierFinished( self ):
    if not self.IsServerAlive():
      return
    SendEventNotificationAsync( 'CurrentIdentifierFinished' )

  def DiagnosticsForCurrentFileReady( self ):
    return bool( self._latest_file_parse_request and
                 self._latest_file_parse_request.Done() )

  def GetDiagnosticsFromStoredRequest( self, qflist_format = False ):
    if self.DiagnosticsForCurrentFileReady():
      diagnostics = self._latest_file_parse_request.Response()
      # We set the diagnostics request to None because we want to prevent
      # repeated refreshing of the buffer with the same diags. Setting this to
      # None makes DiagnosticsForCurrentFileReady return False until the next
      # request is created.
      self._latest_file_parse_request = None
      if qflist_format:
        return vimsupport.ConvertDiagnosticsToQfList( diagnostics )
      else:
        return diagnostics
    return []

  def UpdateDiagnosticInterface( self ):
    if ( self.DiagnosticsForCurrentFileReady() and
         self.NativeFiletypeCompletionUsable() ):
      self._diag_interface.UpdateWithNewDiagnostics(
          self.GetDiagnosticsFromStoredRequest() )

  def ShowDetailedDiagnostic( self ):
    if not self.IsServerAlive():
      return
    try:
      debug_info = BaseRequest.PostDataToHandler( BuildRequestData(),
                                                  'detailed_diagnostic' )
      if 'message' in debug_info:
        vimsupport.EchoText( debug_info[ 'message' ] )
    except ServerError as e:
      vimsupport.PostVimMessage( str( e ) )

  def DebugInfo( self ):
    """Return a human-readable status string for :YcmDebugInfo."""
    if self.IsServerAlive():
      debug_info = BaseRequest.PostDataToHandler( BuildRequestData(),
                                                  'debug_info' )
    else:
      debug_info = 'Server crashed, no debug info from server'
    debug_info += '\nServer running at: {0}'.format(
        BaseRequest.server_location )
    debug_info += '\nServer process ID: {0}'.format( self._server_popen.pid )
    if self._server_stderr or self._server_stdout:
      debug_info += '\nServer logfiles:\n {0}\n {1}'.format(
          self._server_stdout,
          self._server_stderr )
    return debug_info

  def CurrentFiletypeCompletionEnabled( self ):
    filetypes = vimsupport.CurrentFiletypes()
    filetype_to_disable = self._user_options[
        'filetype_specific_completion_to_disable' ]
    if '*' in filetype_to_disable:
      return False
    else:
      return not any([ x in filetype_to_disable for x in filetypes ])

  def _AddSyntaxDataIfNeeded( self, extra_data ):
    """Seed the identifier completer with syntax keywords, once per filetype."""
    if not self._user_options[ 'seed_identifiers_with_syntax' ]:
      return
    filetype = vimsupport.CurrentFiletypes()[ 0 ]
    if filetype in self._filetypes_with_keywords_loaded:
      return
    self._filetypes_with_keywords_loaded.add( filetype )
    extra_data[ 'syntax_keywords' ] = list(
        syntax_parse.SyntaxKeywordsForCurrentBuffer() )

  def _AddTagsFilesIfNeeded( self, extra_data ):
    def GetTagFiles():
      tag_files = vim.eval( 'tagfiles()' )
      # getcwd() throws an exception when the CWD has been deleted.
      try:
        current_working_directory = os.getcwd()
      except OSError:
        return []
      return [ os.path.join( current_working_directory, x ) for x in tag_files ]
    if not self._user_options[ 'collect_identifiers_from_tags_files' ]:
      return
    extra_data[ 'tag_files' ] = GetTagFiles()

  def _AddExtraConfDataIfNeeded( self, extra_data ):
    def BuildExtraConfData( extra_conf_vim_data ):
      return dict( ( expr, vimsupport.VimExpressionToPythonType( expr ) )
                   for expr in extra_conf_vim_data )
    extra_conf_vim_data = self._user_options[ 'extra_conf_vim_data' ]
    if extra_conf_vim_data:
      extra_data[ 'extra_conf_data' ] = BuildExtraConfData(
          extra_conf_vim_data )
def _PathToServerScript():
dir_of_current_script = os.path.dirname( os.path.abspath( __file__ ) )
return os.path.join( dir_of_current_script, '../../third_party/ycmd/ycmd' )
def _AddUltiSnipsDataIfNeeded( extra_data ):
  """Add the available UltiSnips snippet triggers to extra_data.

  Mutates extra_data in place; does nothing when UltiSnips is not
  installed or its private API fails.
  """
  if not USE_ULTISNIPS_DATA:
    return
  try:
    rawsnips = UltiSnips_Manager._snips( '', 1 )
  except Exception:
    # Private UltiSnips API: any failure simply means we skip snippet data.
    # (The previous bare "except:" would also have swallowed
    # KeyboardInterrupt and SystemExit.)
    return
  # UltiSnips_Manager._snips() returns a class instance where:
  # class.trigger - name of snippet trigger word ( e.g. defn or testcase )
  # class.description - description of the snippet
  extra_data[ 'ultisnips_snippets' ] = [ { 'trigger': x.trigger,
                                           'description': x.description
                                         } for x in rawsnips ]
|
tlevine/be | refs/heads/master | libbe/command/diff.py | 5 | # Copyright (C) 2005-2012 Aaron Bentley <abentley@panoramicfeedback.com>
# Chris Ball <cjb@laptop.org>
# Gianluca Montecchi <gian@grys.it>
# W. Trevor King <wking@tremily.us>
#
# This file is part of Bugs Everywhere.
#
# Bugs Everywhere is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option) any
# later version.
#
# Bugs Everywhere is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# Bugs Everywhere. If not, see <http://www.gnu.org/licenses/>.
import libbe
import libbe.bugdir
import libbe.bug
import libbe.command
import libbe.command.util
import libbe.storage
import libbe.diff
class Diff (libbe.command.Command):
__doc__ = """Compare bug reports with older tree
>>> import sys
>>> import libbe.bugdir
>>> bd = libbe.bugdir.SimpleBugDir(memory=False, versioned=True)
>>> io = libbe.command.StringInputOutput()
>>> io.stdout = sys.stdout
>>> ui = libbe.command.UserInterface(io=io)
>>> ui.storage_callbacks.set_storage(bd.storage)
>>> cmd = Diff()
>>> original = bd.storage.commit('Original status')
>>> bug = bd.bug_from_uuid('a')
>>> bug.status = 'closed'
>>> changed = bd.storage.commit('Closed bug a')
>>> ret = ui.run(cmd, args=[original])
Modified bugs:
abc/a:cm: Bug A
Changed bug settings:
status: open -> closed
>>> ret = ui.run(cmd, {'subscribe':'%(bugdir_id)s:mod', 'uuids':True}, [original])
a
>>> bd.storage.versioned = False
>>> ret = ui.run(cmd, args=[original])
Traceback (most recent call last):
...
UserError: This repository is not revision-controlled.
>>> ui.cleanup()
>>> bd.cleanup()
""" % {'bugdir_id':libbe.diff.BUGDIR_ID}
name = 'diff'
def __init__(self, *args, **kwargs):
libbe.command.Command.__init__(self, *args, **kwargs)
self.options.extend([
libbe.command.Option(name='repo', short_name='r',
help='Compare with repository in REPO instead'
' of the current repository.',
arg=libbe.command.Argument(
name='repo', metavar='REPO',
completion_callback=libbe.command.util.complete_path)),
libbe.command.Option(name='subscribe', short_name='s',
help='Only print changes matching SUBSCRIPTION, '
'subscription is a comma-separated list of ID:TYPE '
'tuples. See `be subscribe --help` for descriptions '
'of ID and TYPE.',
arg=libbe.command.Argument(
name='subscribe', metavar='SUBSCRIPTION')),
libbe.command.Option(name='uuids', short_name='u',
help='Only print the changed bug UUIDS.'),
])
self.args.extend([
libbe.command.Argument(
name='revision', metavar='REVISION', default=None,
optional=True)
])
def _run(self, **params):
try:
subscriptions = libbe.diff.subscriptions_from_string(
params['subscribe'])
except ValueError, e:
raise libbe.command.UserError(e.msg)
bugdirs = self._get_bugdirs()
for uuid,bugdir in sorted(bugdirs.items()):
self.diff(bugdir, subscriptions, params=params)
def diff(self, bugdir, subscriptions, params):
if params['repo'] == None:
if bugdir.storage.versioned == False:
raise libbe.command.UserError(
'This repository is not revision-controlled.')
if params['revision'] == None: # get the most recent revision
params['revision'] = bugdir.storage.revision_id(-1)
old_bd = libbe.bugdir.RevisionedBugDir(bugdir, params['revision'])
else:
old_storage = libbe.storage.get_storage(params['repo'])
old_storage.connect()
old_bd_current = libbe.bugdir.BugDir(old_storage, from_disk=True)
if params['revision'] == None: # use the current working state
old_bd = old_bd_current
else:
if old_bd_current.storage.versioned == False:
raise libbe.command.UserError(
'{} is not revision-controlled.'.format(
bugdir.storage.repo))
old_bd = libbe.bugdir.RevisionedBugDir(old_bd_current,revision)
d = libbe.diff.Diff(old_bd, bugdir)
tree = d.report_tree(subscriptions)
if params['uuids'] == True:
uuids = []
bugs = tree.child_by_path('/bugs')
for bug_type in bugs:
uuids.extend([bug.name for bug in bug_type])
print >> self.stdout, '\n'.join(uuids)
else :
rep = tree.report_string()
if rep != None:
print >> self.stdout, rep
return 0
def _long_help(self):
return """
Uses the storage backend to compare the current tree with a previous
tree, and prints a pretty report. If REVISION is given, it is a
specifier for the particular previous tree to use. Specifiers are
specific to their storage backend.
For Arch your specifier must be a fully-qualified revision name.
Besides the standard summary output, you can use the options to output
UUIDS for the different categories. This output can be used as the
input to 'be show' to get an understanding of the current status.
"""
|
poo12138/gem5-stable | refs/heads/master | src/arch/x86/X86TLB.py | 61 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.params import *
from m5.proxy import *
from BaseTLB import BaseTLB
from MemObject import MemObject
class X86PagetableWalker(MemObject):
    """SimObject wrapper for the x86 hardware page-table walker model."""
    type = 'X86PagetableWalker'
    cxx_class = 'X86ISA::Walker'
    cxx_header = 'arch/x86/pagetable_walker.hh'
    # Memory-side port the walker uses to issue page-table accesses.
    port = MasterPort("Port for the hardware table walker")
    system = Param.System(Parent.any, "system object")
    num_squash_per_cycle = Param.Unsigned(4,
            "Number of outstanding walks that can be squashed per cycle")
class X86TLB(BaseTLB):
    """SimObject wrapper for the x86 TLB model."""
    type = 'X86TLB'
    cxx_class = 'X86ISA::TLB'
    cxx_header = 'arch/x86/tlb.hh'
    # Number of TLB entries.
    size = Param.Unsigned(64, "TLB size")
    # Each TLB owns its own page-table walker instance by default.
    walker = Param.X86PagetableWalker(\
        X86PagetableWalker(), "page table walker")
|
yuewko/neutron | refs/heads/master | neutron/agent/l3/keepalived_state_change.py | 46 | # Copyright (c) 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import httplib2
from oslo_config import cfg
from oslo_log import log as logging
import requests
from neutron.agent.l3 import ha
from neutron.agent.linux import daemon
from neutron.agent.linux import ip_monitor
from neutron.agent.linux import utils as agent_utils
from neutron.common import config
from neutron.i18n import _LE
LOG = logging.getLogger(__name__)
class KeepalivedUnixDomainConnection(agent_utils.UnixDomainHTTPConnection):
    """HTTP connection bound to the keepalived state-change Unix socket.

    Overrides the socket path so that notifications go to the L3 agent's
    keepalived state change server rather than the base class's default
    socket.
    """
    def __init__(self, *args, **kwargs):
        # Old style super initialization is required!
        agent_utils.UnixDomainHTTPConnection.__init__(
            self, *args, **kwargs)
        # Resolve the agent's state-change socket path from the config.
        self.socket_path = (
            ha.L3AgentKeepalivedStateChangeServer.
            get_keepalived_state_change_socket_path(cfg.CONF))
class MonitorDaemon(daemon.Daemon):
    """Per-router daemon that detects keepalived VRRP state transitions.

    It watches IP address add/delete events inside the router namespace:
    gaining the monitored CIDR on the monitored interface means this node
    became keepalived master, losing it means it dropped to backup.  Each
    transition is persisted to a state file and reported to the L3 agent
    over a Unix domain socket.
    """
    def __init__(self, pidfile, router_id, user, group, namespace, conf_dir,
                 interface, cidr):
        # Router identity plus the interface/CIDR pair whose presence
        # signals mastership.
        self.router_id = router_id
        self.namespace = namespace
        self.conf_dir = conf_dir
        self.interface = interface
        self.cidr = cidr
        super(MonitorDaemon, self).__init__(pidfile, uuid=router_id,
                                            user=user, group=group)
    def run(self, run_as_root=False):
        """Start the ip monitor, then loop over its events forever."""
        monitor = ip_monitor.IPMonitor(namespace=self.namespace,
                                       run_as_root=run_as_root)
        monitor.start()
        # Only drop privileges if the process is currently running as root
        # (The run_as_root variable name here is unfortunate - It means to
        # use a root helper when the running process is NOT already running
        # as root.)
        if not run_as_root:
            super(MonitorDaemon, self).run()
        for iterable in monitor:
            self.parse_and_handle_event(iterable)
    def parse_and_handle_event(self, iterable):
        """Parse one monitor output line and act on master/backup flips."""
        try:
            event = ip_monitor.IPMonitorEvent.from_text(iterable)
            if event.interface == self.interface and event.cidr == self.cidr:
                # Address added -> this node holds the VIP -> master;
                # address removed -> backup.
                new_state = 'master' if event.added else 'backup'
                self.write_state_change(new_state)
                self.notify_agent(new_state)
        except Exception:
            # Never let one malformed line kill the monitoring loop.
            LOG.exception(_LE(
                'Failed to process or handle event for line %s'), iterable)
    def write_state_change(self, state):
        # Persist the new state in <conf_dir>/state for later inspection.
        with open(os.path.join(
                self.conf_dir, 'state'), 'w') as state_file:
            state_file.write(state)
        LOG.debug('Wrote router %s state %s', self.router_id, state)
    def notify_agent(self, state):
        """POST the state change to the agent over the Unix socket."""
        resp, content = httplib2.Http().request(
            # Note that the message is sent via a Unix domain socket so that
            # the URL doesn't matter.
            'http://127.0.0.1/',
            headers={'X-Neutron-Router-Id': self.router_id,
                     'X-Neutron-State': state},
            connection_type=KeepalivedUnixDomainConnection)
        if resp.status != requests.codes.ok:
            raise Exception(_('Unexpected response: %s') % resp)
        LOG.debug('Notified agent router %s, state %s', self.router_id, state)
def register_opts(conf):
    """Register the CLI options and config options this daemon reads."""
    # (option name, help text) pairs for all the simple string CLI opts.
    cli_opt_specs = [
        ('router_id', _('ID of the router')),
        ('namespace', _('Namespace of the router')),
        ('conf_dir', _('Path to the router directory')),
        ('monitor_interface', _('Interface to monitor')),
        ('monitor_cidr', _('CIDR to monitor')),
        ('pid_file', _('Path to PID file for this process')),
        ('user', _('User (uid or name) running this process '
                   'after its initialization')),
        ('group', _('Group (gid or name) running this process '
                    'after its initialization')),
    ]
    for opt_name, opt_help in cli_opt_specs:
        conf.register_cli_opt(cfg.StrOpt(opt_name, help=opt_help))
    # Non-CLI option shared with the metadata proxy machinery.
    conf.register_opt(
        cfg.StrOpt('metadata_proxy_socket',
                   default='$state_path/metadata_proxy',
                   help=_('Location of Metadata Proxy UNIX domain '
                          'socket')))
def configure(conf):
    """Initialize oslo.config and logging for this helper process."""
    config.init(sys.argv[1:])
    # Log into the per-router conf dir; this is a dedicated helper
    # process, so verbose/debug logging is acceptable.
    conf.set_override('log_dir', cfg.CONF.conf_dir)
    conf.set_override('debug', True)
    conf.set_override('verbose', True)
    config.setup_logging()
def main():
    """Entry point: register options, parse config, start the daemon."""
    conf = cfg.CONF
    register_opts(conf)
    configure(conf)
    monitor = MonitorDaemon(conf.pid_file,
                            conf.router_id,
                            conf.user,
                            conf.group,
                            conf.namespace,
                            conf.conf_dir,
                            conf.monitor_interface,
                            conf.monitor_cidr)
    monitor.start()
|
Bysmyyr/chromium-crosswalk | refs/heads/master | tools/cr/cr/auto/client/__init__.py | 138 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A package to hold the modules auto loaded from the client directory.""" |
Ialong/shogun | refs/heads/develop | applications/tapkee/samples/dm.py | 26 | import modshogun as sg
import data
import numpy as np
# Demo: embed the swiss-roll dataset into 2-D with Diffusion Maps,
# first from raw features and then from a custom precomputed distance.
# load data
feature_matrix = data.swissroll()
# create features instance
features = sg.RealFeatures(feature_matrix)
# create Diffusion Maps converter instance
converter = sg.DiffusionMaps()
# set target dimensionality
converter.set_target_dim(2)
# set number of time-steps
converter.set_t(2)
# set width of gaussian kernel
converter.set_width(10.0)
# create euclidean distance instance
distance = sg.EuclideanDistance()
# enable converter instance to use created distance instance
converter.set_distance(distance)
# compute embedding with Diffusion Maps method
embedding = converter.embed(features)
# compute custom distance matrix
distance_matrix = np.exp(-np.dot(feature_matrix.T,feature_matrix))
# create Custom Kernel instance
custom_distance = sg.CustomDistance(distance_matrix)
# construct embedding based on created distance
distance_embedding = converter.embed_distance(custom_distance)
|
shivam1111/odoo | refs/heads/8.0 | addons/website_gengo/controllers/__init__.py | 7372 | import main
|
syphar/django | refs/heads/master | tests/gis_tests/geogapp/models.py | 336 | from django.utils.encoding import python_2_unicode_compatible
from ..models import models
@python_2_unicode_compatible
class NamedModel(models.Model):
    # Abstract base: gives every geography model a name field, a
    # geo-aware manager, and a name-based string representation.
    name = models.CharField(max_length=30)
    objects = models.GeoManager()
    class Meta:
        abstract = True
        required_db_features = ['gis_enabled']
    def __str__(self):
        return self.name
class City(NamedModel):
    # Geography (geodetic) point for the city's location.
    point = models.PointField(geography=True)
    class Meta:
        app_label = 'geogapp'
        required_db_features = ['gis_enabled']
class Zipcode(NamedModel):
    # NOTE(review): unlike City/County this model declares no Meta with
    # app_label/required_db_features - presumably intentional, but verify.
    code = models.CharField(max_length=10)
    # Geography polygon covering the zip code's area.
    poly = models.PolygonField(geography=True)
class County(NamedModel):
    state = models.CharField(max_length=20)
    # Geography multipolygon; counties can be non-contiguous.
    mpoly = models.MultiPolygonField(geography=True)
    class Meta:
        app_label = 'geogapp'
        required_db_features = ['gis_enabled']
    def __str__(self):
        # Renders as "<name> County, <state>".
        return ' County, '.join([self.name, self.state])
|
AnasGhrab/scikit-learn | refs/heads/master | examples/cluster/plot_agglomerative_clustering.py | 343 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
# Parametric 2-D spiral: the angle t also scales the radius; Gaussian
# noise is added afterwards.  X ends up with shape (n_samples, 2).
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters to the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
# One figure per (connectivity, n_clusters) pair; one subplot per linkage.
for connectivity in (None, knn_graph):
    for n_clusters in (30, 3):
        plt.figure(figsize=(10, 4))
        for index, linkage in enumerate(('average', 'complete', 'ward')):
            plt.subplot(1, 3, index + 1)
            model = AgglomerativeClustering(linkage=linkage,
                                            connectivity=connectivity,
                                            n_clusters=n_clusters)
            t0 = time.time()
            model.fit(X)
            # Wall-clock fit time, displayed in the subplot title.
            elapsed_time = time.time() - t0
            plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
                        cmap=plt.cm.spectral)
            plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
                      fontdict=dict(verticalalignment='top'))
            plt.axis('equal')
            plt.axis('off')
            plt.subplots_adjust(bottom=0, top=.89, wspace=0,
                                left=0, right=1)
            plt.suptitle('n_cluster=%i, connectivity=%r' %
                         (n_clusters, connectivity is not None), size=17)
plt.show()
|
rdblue/Impala | refs/heads/cdh5-trunk | thirdparty/hive-1.1.0-cdh5.5.0-SNAPSHOT/lib/py/thrift/protocol/__init__.py | 83 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Public submodules re-exported by the thrift.protocol package.
__all__ = ['TProtocol', 'TBinaryProtocol', 'fastbinary']
|
molebot/vnpy | refs/heads/master | vn.archive/vn.lts_old/pyscript/l2/generate_data_type.py | 70 | # encoding: UTF-8
__author__ = 'CHENXY'
# Mapping from C++ scalar type names to python type-name strings.
type_dict = {
    'int': 'int',
    'char': 'string',
    'double': 'float',
    'short': 'int'
}
def process_line(line):
    """Dispatch one header line to the converter matching its kind."""
    if '///' in line:
        # Doxygen-style comment line.
        return process_comment(line)
    if 'typedef' in line:
        # Type declaration.
        return process_typedef(line)
    if '#define' in line:
        # Constant definition.
        return process_define(line)
    if line == '\n':
        # Preserve blank lines verbatim.
        return line
    # Anything else produces no output.
    return ''
def process_comment(line):
    """Convert a C++ '///' comment line into a Python '#' comment.

    The three leading slashes are replaced by a single '#'; the rest of
    the line (including its trailing newline) is kept verbatim.  The
    commented-out alternative implementation and the redundant local
    variable from the original have been removed.
    """
    return '#' + line[3:]
def process_typedef(line):
    """Translate a C++ 'typedef <type> <name>;' line into a
    typedefDict assignment string."""
    _unused, cpp_type, keyword = line.split(' ')[:3]
    py_type = type_dict[cpp_type]
    if '[' in keyword:
        # Array declaration: keep only the name before the bracket.
        keyword = keyword[:keyword.index('[')]
    else:
        # Drop the trailing semicolon and newline.
        keyword = keyword.replace(';\n', '')
    return 'typedefDict["%s"] = "%s"\n' % (keyword, py_type)
def process_define(line):
    """Translate a '#define NAME VALUE' line into a defineDict
    assignment string; lines without a value yield ''."""
    tokens = line.split(' ')
    if len(tokens) <= 2:
        # '#define NAME' with no value: nothing to emit.
        return ''
    # The last token keeps its trailing newline, so none is appended.
    return 'defineDict["%s"] = %s' % (tokens[1], tokens[-1])
def main():
    """Generate l2_data_type.py from the LTS level-2 C++ data-type header."""
    try:
        fcpp = open('SecurityFtdcL2MDUserApiDataType.h','r')
        fpy = open('l2_data_type.py', 'w')
        # Emit the generated module's header and the two dictionaries
        # that the per-line converters fill in.
        fpy.write('# encoding: UTF-8\n')
        fpy.write('\n')
        fpy.write('defineDict = {}\n')
        fpy.write('typedefDict = {}\n')
        fpy.write('\n')
        for line in fcpp:
            py_line = process_line(line)
            if py_line:
                # The vendor header is GBK encoded; re-encode as UTF-8
                # (Python 2 byte-string decode/encode).
                fpy.write(py_line.decode('gbk').encode('utf-8'))
        fcpp.close()
        fpy.close()
        print u'data_type.py生成过程完成'
    except:
        # NOTE(review): bare except hides the actual failure; catching
        # IOError/UnicodeError explicitly would be safer.
        print u'data_type.py生成过程出错'
|
nathantypanski/zombierl | refs/heads/master | status.py | 1 | from libtcod import libtcodpy as libtcod
import console as cons
#import player as P
import map_vars as M
# Create the game status console.
# 'status' buffers the text queued for the player; the console spans the
# rows below the map (it is blitted at row MAP_HEIGHT+1 in display_*).
status = ''
status_console = libtcod.console_new(M.SCREEN_WIDTH, (M.SCREEN_HEIGHT - M.MAP_HEIGHT))
libtcod.console_set_alignment(status_console, libtcod.LEFT)
def add_status (new_status):
    """Append *new_status* to the buffered status text and redraw it."""
    global status
    status = "%s %s" % (status, new_status)
    display_status()
def clear_status ():
    """Discard any buffered status text."""
    global status
    status = ''
# Displays the parsed string as a status message to the user.
# Doesn't display strings larger than SCREEN_WIDTH yet.
def display_status ():
    """Show the buffered status text, paging on SPACE when it is longer
    than the two available status rows (SCREEN_WIDTH*2 characters)."""
    global status
    if status:
        # Clear the whole status area before drawing.
        libtcod.console_rect(status_console, 0, 0, M.SCREEN_WIDTH,
                (M.SCREEN_HEIGHT - M.MAP_HEIGHT), True)
        libtcod.console_set_default_foreground (status_console, libtcod.white)
        # Page through overly long messages two rows at a time, waiting
        # for the player to press SPACE between pages.
        while len(status) > M.SCREEN_WIDTH*2:
            display_statusline(status[:M.SCREEN_WIDTH*2])
            key = libtcod.console_wait_for_keypress(True)
            while not key.vk == libtcod.KEY_SPACE:
                key = libtcod.console_wait_for_keypress(True)
            status = status[M.SCREEN_WIDTH*2:]
        display_statusline(status)
        libtcod.console_blit(status_console,0,0,M.SCREEN_WIDTH,
                (M.SCREEN_HEIGHT-M.MAP_HEIGHT-1),0,0,M.MAP_HEIGHT+1,1)
        libtcod.console_flush()
    else:
        # Nothing queued: just blank the status rows.
        display_statusline()
        libtcod.console_flush()
def display_statusline (message=""):
    """Blank the two status rows, print up to two rows of *message*,
    and blit the status console below the map."""
    global status
    # display_player_stats()
    # Clear both status rows character by character.
    for x in range (libtcod.console_get_width(status_console)):
        libtcod.console_put_char (status_console, x, 0, ' ', libtcod.BKGND_NONE)
        libtcod.console_put_char (status_console, x, 1, ' ', libtcod.BKGND_NONE)
    libtcod.console_print_rect_ex(status_console, 1, 0,
        M.SCREEN_WIDTH, 2, libtcod.BKGND_NONE, libtcod.LEFT,
        message[:M.SCREEN_WIDTH*2].strip())
    libtcod.console_blit(status_console,0,0,M.SCREEN_WIDTH,
        (M.SCREEN_HEIGHT-M.MAP_HEIGHT-1),0,0,M.MAP_HEIGHT+1,1)
    libtcod.console_flush()
# Removed because of mutual imports
#def display_player_stats():
# global status
# libtcod.console_print_ex(status_console, 1, 2, libtcod.BKGND_NONE,
# libtcod.LEFT, P.player.name)
# libtcod.console_print_ex(status_console, len(P.player.name)+2, 2,
# libtcod.BKGND_NONE, libtcod.LEFT, "HP: %s/%s" % (P.player.health,
# P.player.max_health))
|
hainn8x/gnuradio | refs/heads/master | gr-audio/examples/python/audio_fft.py | 68 | #!/usr/bin/env python
#
# Copyright 2004,2005,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gru, audio
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from gnuradio.wxgui import stdgui2, fftsink2, waterfallsink2, scopesink2, form, slider
from optparse import OptionParser
import wx
import sys
class app_top_block(stdgui2.std_top_block):
    """wxGUI flow graph: audio source feeding an FFT, waterfall, or
    oscilloscope display depending on command-line options."""
    def __init__(self, frame, panel, vbox, argv):
        stdgui2.std_top_block.__init__(self, frame, panel, vbox, argv)
        self.frame = frame
        self.panel = panel
        # Command-line options select the display type and audio device.
        parser = OptionParser(option_class=eng_option)
        parser.add_option("-W", "--waterfall", action="store_true", default=False,
                          help="Enable waterfall display")
        parser.add_option("-S", "--oscilloscope", action="store_true", default=False,
                          help="Enable oscilloscope display")
        parser.add_option("-I", "--audio-input", type="string", default="",
                          help="pcm input device name.  E.g., hw:0,0 or /dev/dsp")
        parser.add_option("-r", "--sample-rate", type="eng_float", default=48000,
                          help="set sample rate to RATE (48000)")
        (options, args) = parser.parse_args()
        sample_rate = int(options.sample_rate)
        # No positional arguments are accepted.
        if len(args) != 0:
            parser.print_help()
            sys.exit(1)
        self.show_debug_info = True
        # build the graph
        if options.waterfall:
            self.scope = \
              waterfallsink2.waterfall_sink_f (panel, fft_size=1024, sample_rate=sample_rate)
        elif options.oscilloscope:
            self.scope = scopesink2.scope_sink_f(panel, sample_rate=sample_rate)
        else:
            self.scope = fftsink2.fft_sink_f (panel, fft_size=1024, sample_rate=sample_rate, fft_rate=30,
                                              ref_scale=1.0, ref_level=0, y_divs=12)
        # Connect the sound-card source straight into the chosen sink.
        self.src = audio.source (sample_rate, options.audio_input)
        self.connect(self.src, self.scope)
        self._build_gui(vbox)
        # set initial values
    def _set_status_msg(self, msg):
        # Show *msg* in the frame's status bar.
        self.frame.GetStatusBar().SetStatusText(msg, 0)
    def _build_gui(self, vbox):
        # Minimal GUI: just the scope window; the subpanel is disabled.
        def _form_set_freq(kv):
            return self.set_freq(kv['freq'])
        vbox.Add(self.scope.win, 10, wx.EXPAND)
        #self._build_subpanel(vbox)
    def _build_subpanel(self, vbox_arg):
        # build a secondary information panel (sometimes hidden)
        # FIXME figure out how to have this be a subpanel that is always
        # created, but has its visibility controlled by foo.Show(True/False)
        def _form_set_decim(kv):
            return self.set_decim(kv['decim'])
        if not(self.show_debug_info):
            return
        panel = self.panel
        vbox = vbox_arg
        myform = self.myform
        #panel = wx.Panel(self.panel, -1)
        #vbox = wx.BoxSizer(wx.VERTICAL)
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        hbox.Add((5,0), 0)
        myform['decim'] = form.int_field(
            parent=panel, sizer=hbox, label="Decim",
            callback=myform.check_input_and_call(_form_set_decim, self._set_status_msg))
        hbox.Add((5,0), 1)
        myform['fs@usb'] = form.static_float_field(
            parent=panel, sizer=hbox, label="Fs@USB")
        hbox.Add((5,0), 1)
        myform['dbname'] = form.static_text_field(
            parent=panel, sizer=hbox)
        hbox.Add((5,0), 1)
        myform['baseband'] = form.static_float_field(
            parent=panel, sizer=hbox, label="Analog BB")
        hbox.Add((5,0), 1)
        myform['ddc'] = form.static_float_field(
            parent=panel, sizer=hbox, label="DDC")
        hbox.Add((5,0), 0)
        vbox.Add(hbox, 0, wx.EXPAND)
def main ():
    """Create the wx application hosting app_top_block and run it."""
    app = stdgui2.stdapp(app_top_block, "Audio FFT", nstatus=1)
    app.MainLoop()
if __name__ == '__main__':
    main ()
|
blockstack/secret-sharing | refs/heads/master | unit_tests.py | 3 | # -*- coding: utf-8 -*-
"""
Secret Sharing
~~~~~
:copyright: (c) 2014 by Halfmoon Labs
:license: MIT, see LICENSE for more details.
"""
import random
import unittest
from test import test_support
from utilitybelt import base64_chars
from secretsharing import secret_int_to_points, points_to_secret_int, \
point_to_share_string, share_string_to_point, SecretSharer, \
HexToHexSecretSharer, PlaintextToHexSecretSharer, \
BitcoinToB58SecretSharer, BitcoinToB32SecretSharer, \
BitcoinToZB32SecretSharer
class ShamirSharingTest(unittest.TestCase):
    """Round-trip tests: split a secret into shares and recover it.

    Fixes over the original: the helper's None return value is no longer
    assigned to a dead local in every test; the helper uses assertEqual
    (which survives ``python -O``) instead of a bare assert; and the
    base64 test restores SecretSharer.share_charset so the class-level
    mutation cannot leak into other tests.
    """
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def split_and_recover_secret(self, sharer_class, m, n, secret):
        """Split *secret* m-of-n, shuffle the shares, recover, compare."""
        shares = sharer_class.split_secret(secret, m, n)
        random.shuffle(shares)
        recovered_secret = sharer_class.recover_secret(shares[0:m])
        self.assertEqual(recovered_secret, secret)
    def test_hex_to_hex_sharing(self):
        self.split_and_recover_secret(
            SecretSharer, 3, 5,
            "c4bbcb1fbec99d65bf59d85c8cb62ee2db963f0fe106f483d9afa73bd4e39a8a")
    def test_printable_ascii_to_hex_sharing(self):
        self.split_and_recover_secret(
            PlaintextToHexSecretSharer, 3, 5,
            "correct horse battery staple")
    def test_b58_to_b32_sharing(self):
        self.split_and_recover_secret(
            BitcoinToB32SecretSharer, 3, 5,
            "5KJvsngHeMpm884wtkJNzQGaCErckhHJBGFsvd3VyK5qMZXj3hS")
    def test_b58_to_zb32_sharing(self):
        self.split_and_recover_secret(
            BitcoinToZB32SecretSharer, 3, 5,
            "5KJvsngHeMpm884wtkJNzQGaCErckhHJBGFsvd3VyK5qMZXj3hS")
    def test_b58_to_b58_sharing(self):
        self.split_and_recover_secret(
            BitcoinToB58SecretSharer, 3, 5,
            "5KJvsngHeMpm884wtkJNzQGaCErckhHJBGFsvd3VyK5qMZXj3hS")
    def test_hex_to_base64_sharing(self):
        # Temporarily switch the share alphabet to base64 and always
        # restore it, so other tests keep seeing hex shares.
        original_charset = SecretSharer.share_charset
        SecretSharer.share_charset = base64_chars
        try:
            self.split_and_recover_secret(
                SecretSharer, 3, 5,
                "c4bbcb1fbec99d65bf59d85c8cb62ee2db963f0fe106f483d9afa73bd4e39a8a")
        finally:
            SecretSharer.share_charset = original_charset
    def test_2_of_3_sharing(self):
        self.split_and_recover_secret(
            SecretSharer, 2, 3,
            "c4bbcb1fbec99d65bf59d85c8cb62ee2db963f0fe106f483d9afa73bd4e39a8a")
    def test_4_of_7_sharing(self):
        self.split_and_recover_secret(
            SecretSharer, 4, 7,
            "c4bbcb1fbec99d65bf59d85c8cb62ee2db963f0fe106f483d9afa73bd4e39a8a")
    def test_5_of_9_sharing(self):
        self.split_and_recover_secret(
            SecretSharer, 5, 9,
            "c4bbcb1fbec99d65bf59d85c8cb62ee2db963f0fe106f483d9afa73bd4e39a8a")
    def test_2_of_2_sharing(self):
        self.split_and_recover_secret(
            SecretSharer, 2, 2,
            "c4bbcb1fbec99d65bf59d85c8cb62ee2db963f0fe106f483d9afa73bd4e39a8a")
def test_main():
    """Run the suite through the stdlib test_support helper."""
    test_support.run_unittest(
        ShamirSharingTest
    )
if __name__ == '__main__':
    test_main()
markliederbach/blog-backend | refs/heads/master | blog_backend/users/tests/test_admin.py | 117 | from test_plus.test import TestCase
from ..admin import MyUserCreationForm
class TestMyUserCreationForm(TestCase):
    """Validation tests for MyUserCreationForm's username handling."""
    def setUp(self):
        # Existing user whose name must be rejected as a duplicate.
        self.user = self.make_user('notalamode', 'notalamodespassword')
    def test_clean_username_success(self):
        # Instantiate the form with a new username
        form = MyUserCreationForm({
            'username': 'alamode',
            'password1': '7jefB#f@Cc7YJB]2v',
            'password2': '7jefB#f@Cc7YJB]2v',
        })
        # Run is_valid() to trigger the validation
        valid = form.is_valid()
        self.assertTrue(valid)
        # Run the actual clean_username method
        username = form.clean_username()
        self.assertEqual('alamode', username)
    def test_clean_username_false(self):
        # Instantiate the form with the same username as self.user
        form = MyUserCreationForm({
            'username': self.user.username,
            'password1': 'notalamodespassword',
            'password2': 'notalamodespassword',
        })
        # Run is_valid() to trigger the validation, which is going to fail
        # because the username is already taken
        valid = form.is_valid()
        self.assertFalse(valid)
        # The form.errors dict should contain a single error called 'username'
        self.assertTrue(len(form.errors) == 1)
        self.assertTrue('username' in form.errors)
|
bitcraft/pyglet | refs/heads/master | tests/interactive/font/wrap_invariant.py | 1 | """Test that text will not wrap when its width is set to its calculated
width.
You should be able to clearly see "TEST TEST" on a single line (not two)
and "SPAM SPAM SPAM" over two lines, not three.
"""
import unittest
from . import base_text
from pyglet import font
class TEST_WRAP_INVARIANT(base_text.TextTestBase):
    """Text set to (calculated width + 1) must not wrap onto extra lines."""
    font_name = ''
    text = 'TEST TEST'
    def render(self):
        fnt = font.load('', 24)
        # Setting width slightly above the computed width should leave
        # the line count unchanged for both labels.
        self.label1 = font.Text(fnt, 'TEST TEST', 10, 150)
        self.label1.width = self.label1.width + 1
        self.label2 = font.Text(fnt, 'SPAM SPAM\nSPAM', 10, 50)
        self.label2.width = self.label2.width + 1
    def draw(self):
        self.label1.draw()
        self.label2.draw()
|
SUSE/azure-sdk-for-python | refs/heads/master | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/iaas_vmilr_registration_request.py | 4 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .ilr_request import ILRRequest
class IaasVMILRRegistrationRequest(ILRRequest):
    """Restore files/folders from a backup copy of IaaS VM.
    :param object_type: Polymorphic Discriminator
    :type object_type: str
    :param recovery_point_id: ID of the IaaS VM backup copy from where the
     files/folders have to be restored.
    :type recovery_point_id: str
    :param virtual_machine_id: Fully qualified ARM ID of the virtual machine
     whose the files / folders have to be restored.
    :type virtual_machine_id: str
    :param initiator_name: iSCSI initiator name.
    :type initiator_name: str
    :param renew_existing_registration: Whether to renew existing registration
     with the iSCSI server.
    :type renew_existing_registration: bool
    """
    # NOTE: AutoRest-generated model; manual edits will be lost when the
    # SDK is regenerated.
    _validation = {
        'object_type': {'required': True},
    }
    # Maps python attribute names to wire (JSON) keys and types.
    _attribute_map = {
        'object_type': {'key': 'objectType', 'type': 'str'},
        'recovery_point_id': {'key': 'recoveryPointId', 'type': 'str'},
        'virtual_machine_id': {'key': 'virtualMachineId', 'type': 'str'},
        'initiator_name': {'key': 'initiatorName', 'type': 'str'},
        'renew_existing_registration': {'key': 'renewExistingRegistration', 'type': 'bool'},
    }
    def __init__(self, recovery_point_id=None, virtual_machine_id=None, initiator_name=None, renew_existing_registration=None):
        super(IaasVMILRRegistrationRequest, self).__init__()
        self.recovery_point_id = recovery_point_id
        self.virtual_machine_id = virtual_machine_id
        self.initiator_name = initiator_name
        self.renew_existing_registration = renew_existing_registration
        # Fixed polymorphic discriminator for this subtype.
        self.object_type = 'IaasVMILRRegistrationRequest'
|
nosun/pyspider | refs/heads/master | pyspider/libs/sample_handler.py | 67 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on __DATE__
# Project: __PROJECT_NAME__
from pyspider.libs.base_handler import *
class Handler(BaseHandler):
    """pyspider project template; __START_URL__ etc. are placeholders
    substituted when a new project is created."""
    crawl_config = {
    }
    @every(minutes=24 * 60)
    def on_start(self):
        # Re-seed the crawl once a day from the start URL.
        self.crawl('__START_URL__', callback=self.index_page)
    @config(age=10 * 24 * 60 * 60)
    def index_page(self, response):
        # Follow every absolute http(s) link found on the index page.
        for each in response.doc('a[href^="http"]').items():
            self.crawl(each.attr.href, callback=self.detail_page)
    @config(priority=2)
    def detail_page(self, response):
        # Emit the scraped result for this page.
        return {
            "url": response.url,
            "title": response.doc('title').text(),
        }
|
tiagormk/gem5-hmp | refs/heads/master | tests/configs/tgen-simple-dram.py | 4 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
import m5
from m5.objects import *
# both traffic generator and communication monitor are only available
# if we have protobuf support, so potentially skip this test
require_sim_object("TrafficGen")
require_sim_object("CommMonitor")
# even if this is only a traffic generator, call it cpu to make sure
# the scripts are happy
cpu = TrafficGen(config_file = "tests/quick/se/70.tgen/tgen-simple-dram.cfg")
# system simulated: one DDR3 channel behind a narrow noncoherent bus
# system simulated
system = System(cpu = cpu, physmem = DDR3_1600_x64(),
                membus = NoncoherentBus(width = 16),
                clk_domain = SrcClockDomain(clock = '1GHz'))
# add a communication monitor
system.monitor = CommMonitor()
# connect the traffic generator to the bus via a communication monitor
system.cpu.port = system.monitor.slave
system.monitor.master = system.membus.slave
# connect the system port even if it is not used in this example
system.system_port = system.membus.slave
# connect memory to the membus
system.physmem.port = system.membus.master
# -----------------------
# run simulation
# -----------------------
root = Root(full_system = False, system = system)
root.system.mem_mode = 'timing'
|
depboy/myriad | refs/heads/master | contrib/spendfrom/setup.py | 2104 | from distutils.core import setup
# Packaging metadata for the spendfrom "coin control" helper script.
setup(name='btcspendfrom',
      version='1.0',
      description='Command-line utility for bitcoin "coin control"',
      author='Gavin Andresen',
      author_email='gavin@bitcoinfoundation.org',
      requires=['jsonrpc'],
      scripts=['spendfrom.py'],
      )
|
onurmatik/django-timeline | refs/heads/master | timeline/__init__.py | 12133432 | |
torquemad/pixelated-user-agent | refs/heads/master | service/test/unit/fixtures/__init__.py | 12133432 | |
ryangallen/django | refs/heads/master | django/db/backends/base/__init__.py | 12133432 | |
boa19861105/android_LP5.0.2_kernel_htc_dlxub1 | refs/heads/master | tools/perf/scripts/python/sched-migration.py | 11215 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
# pid -> command name, seeded with the idle task (pid 0).
threads = { 0 : "idle"}
def thread_name(pid):
    # Human-readable "comm:pid" label used by the event reprs.
    return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
    """Placeholder when no specific runqueue event occurred."""

    @staticmethod
    def color():
        # No dedicated display color for unknown events.
        return None

    def __repr__(self):
        return "unknown"
class RunqueueEventSleep:
    """A task left the runqueue because it went to sleep."""

    @staticmethod
    def color():
        # Blue marks sleep events.
        return (0, 0, 0xff)

    def __init__(self, sleeper):
        # pid of the task that went to sleep
        self.sleeper = sleeper

    def __repr__(self):
        return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
    """A task was woken up and joined the runqueue."""

    @staticmethod
    def color():
        # Yellow marks wakeup events.
        return (0xff, 0xff, 0)

    def __init__(self, wakee):
        # pid of the task that woke up
        self.wakee = wakee

    def __repr__(self):
        return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
    """A newly forked task appeared on the runqueue."""

    @staticmethod
    def color():
        # Green marks fork events.
        return (0, 0xff, 0)

    def __init__(self, child):
        # pid of the newly forked child task
        self.child = child

    def __repr__(self):
        return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
    """A task migrated onto this cpu's runqueue."""

    @staticmethod
    def color():
        # Cyan-ish marks inbound migrations.
        return (0, 0xf0, 0xff)

    def __init__(self, new):
        # pid of the task that arrived
        self.new = new

    def __repr__(self):
        return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
    """A task migrated away from this cpu's runqueue."""

    @staticmethod
    def color():
        # Magenta marks outbound migrations.
        return (0xff, 0, 0xff)

    def __init__(self, old):
        # pid of the task that left
        self.old = old

    def __repr__(self):
        return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
    """Snapshot of one cpu's runqueue plus the event that produced it.

    Mutating operations return a new snapshot (or self if nothing
    changed), so slices can share unchanged snapshots.
    """
    def __init__(self, tasks=(0,), event=None):
        # The original defaults were a shared mutable list ([0]) and a
        # single RunqueueEventUnknown instance shared by every caller;
        # use an immutable tuple and a per-instance event instead
        # (callers passing their own arguments are unaffected).
        self.tasks = tuple(tasks)
        self.event = RunqueueEventUnknown() if event is None else event
    def sched_switch(self, prev, prev_state, next):
        """Return the snapshot after a context switch prev -> next."""
        event = RunqueueEventUnknown()
        # A switch between two tasks already runnable on this rq does
        # not change its contents.
        if taskState(prev_state) == "R" and next in self.tasks \
            and prev in self.tasks:
            return self
        if taskState(prev_state) != "R":
            event = RunqueueEventSleep(prev)
        next_tasks = list(self.tasks[:])
        if prev in self.tasks:
            if taskState(prev_state) != "R":
                next_tasks.remove(prev)
        elif taskState(prev_state) == "R":
            next_tasks.append(prev)
        if next not in next_tasks:
            next_tasks.append(next)
        return RunqueueSnapshot(next_tasks, event)
    def migrate_out(self, old):
        """Return the snapshot after task 'old' migrated away."""
        if old not in self.tasks:
            return self
        next_tasks = [task for task in self.tasks if task != old]
        return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
    def __migrate_in(self, new, event):
        # Arrival of 'new'; if it is already here just record the event.
        if new in self.tasks:
            self.event = event
            return self
        next_tasks = self.tasks[:] + tuple([new])
        return RunqueueSnapshot(next_tasks, event)
    def migrate_in(self, new):
        """Return the snapshot after task 'new' migrated here."""
        return self.__migrate_in(new, RunqueueMigrateIn(new))
    def wake_up(self, new):
        """Return the snapshot after task 'new' woke up here."""
        return self.__migrate_in(new, RunqueueEventWakeup(new))
    def wake_up_new(self, new):
        """Return the snapshot after freshly forked task 'new' arrived."""
        return self.__migrate_in(new, RunqueueEventFork(new))
    def load(self):
        """ Provide the number of tasks on the runqueue.
            Don't count idle"""
        return len(self.tasks) - 1
    def __repr__(self):
        # The original appended self.origin_tostring(), a method that is
        # not defined anywhere in the file, so repr() always raised
        # AttributeError; show the triggering event instead.
        return "%r %r" % (self.tasks, self.event)
class TimeSlice:
	"""One interval [start, end] during which no runqueue changed.

	Each slice holds a per-cpu map of RunqueueSnapshots (shared with the
	previous slice until a cpu's queue actually changes) plus the total
	load across all cpus. A mutating event closes the current slice and
	appends it to the global list."""
	def __init__(self, start, prev):
		self.start = start
		self.prev = prev
		self.end = start
		# cpus that triggered the event
		self.event_cpus = []
		if prev is not None:
			# Inherit state from the previous slice; snapshots are shared
			# until a cpu's runqueue is rewritten below.
			self.total_load = prev.total_load
			self.rqs = prev.rqs.copy()
		else:
			self.rqs = defaultdict(RunqueueSnapshot)
			self.total_load = 0
	def __update_total_load(self, old_rq, new_rq):
		# Keep the cross-cpu load sum consistent after replacing one queue.
		diff = new_rq.load() - old_rq.load()
		self.total_load += diff
	def sched_switch(self, ts_list, prev, prev_state, next, cpu):
		old_rq = self.prev.rqs[cpu]
		new_rq = old_rq.sched_switch(prev, prev_state, next)
		if old_rq is new_rq:
			# Snapshot unchanged: do not emit a slice for this event.
			return
		self.rqs[cpu] = new_rq
		self.__update_total_load(old_rq, new_rq)
		ts_list.append(self)
		self.event_cpus = [cpu]
	def migrate(self, ts_list, new, old_cpu, new_cpu):
		if old_cpu == new_cpu:
			return
		# Remove the task from the source cpu...
		old_rq = self.prev.rqs[old_cpu]
		out_rq = old_rq.migrate_out(new)
		self.rqs[old_cpu] = out_rq
		self.__update_total_load(old_rq, out_rq)
		# ...and add it to the destination cpu.
		new_rq = self.prev.rqs[new_cpu]
		in_rq = new_rq.migrate_in(new)
		self.rqs[new_cpu] = in_rq
		self.__update_total_load(new_rq, in_rq)
		ts_list.append(self)
		if old_rq is not out_rq:
			self.event_cpus.append(old_cpu)
		self.event_cpus.append(new_cpu)
	def wake_up(self, ts_list, pid, cpu, fork):
		old_rq = self.prev.rqs[cpu]
		if fork:
			new_rq = old_rq.wake_up_new(pid)
		else:
			new_rq = old_rq.wake_up(pid)
		if new_rq is old_rq:
			return
		self.rqs[cpu] = new_rq
		self.__update_total_load(old_rq, new_rq)
		ts_list.append(self)
		self.event_cpus = [cpu]
	def next(self, t):
		# Close this slice at time t and open its successor.
		self.end = t
		return TimeSlice(t, self)
class TimeSliceList(UserList):
	"""Time-ordered list of TimeSlice objects with search and GUI-painting
	helpers. Note: this file is Python 2 (`/` below relies on integer
	floor division)."""
	def __init__(self, arg = []):
		# NOTE(review): the mutable default list would be shared by every
		# instance created without `arg`; harmless while a single instance
		# is used per run, but worth confirming.
		self.data = arg
	def get_time_slice(self, ts):
		# Return the open slice covering timestamp ts, creating the initial
		# slice (with a sentinel predecessor) on first use.
		if len(self.data) == 0:
			slice = TimeSlice(ts, TimeSlice(-1, None))
		else:
			slice = self.data[-1].next(ts)
		return slice
	def find_time_slice(self, ts):
		# Binary search for the index of the slice containing ts; -1 if none.
		start = 0
		end = len(self.data)
		found = -1
		searching = True
		while searching:
			if start == end or start == end - 1:
				searching = False
			i = (end + start) / 2
			if self.data[i].start <= ts and self.data[i].end >= ts:
				found = i
				end = i
				continue
			if self.data[i].end < ts:
				start = i
			elif self.data[i].start > ts:
				end = i
		return found
	def set_root_win(self, win):
		self.root_win = win
	def mouse_down(self, cpu, t):
		# Build a textual summary of the runqueue under the click and push
		# it to the GUI's summary pane.
		idx = self.find_time_slice(t)
		if idx == -1:
			return
		ts = self[idx]
		rq = ts.rqs[cpu]
		raw = "CPU: %d\n" % cpu
		raw += "Last event : %s\n" % rq.event.__repr__()
		raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
		raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
		raw += "Load = %d\n" % rq.load()
		for t in rq.tasks:
			raw += "%s \n" % thread_name(t)
		self.root_win.update_summary(raw)
	def update_rectangle_cpu(self, slice, cpu):
		# Paint one cpu row of one slice; redder means higher relative load,
		# with an extra top stripe when this cpu triggered the event.
		rq = slice.rqs[cpu]
		if slice.total_load != 0:
			load_rate = rq.load() / float(slice.total_load)
		else:
			load_rate = 0
		red_power = int(0xff - (0xff * load_rate))
		color = (0xff, red_power, red_power)
		top_color = None
		if cpu in slice.event_cpus:
			top_color = rq.event.color()
		self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
	def fill_zone(self, start, end):
		# Repaint every slice intersecting [start, end].
		i = self.find_time_slice(start)
		if i == -1:
			return
		for i in xrange(i, len(self.data)):
			timeslice = self.data[i]
			if timeslice.start > end:
				return
			for cpu in timeslice.rqs:
				self.update_rectangle_cpu(timeslice, cpu)
	def interval(self):
		# Overall (first start, last end) time span covered by the trace.
		if len(self.data) == 0:
			return (0, 0)
		return (self.data[0].start, self.data[-1].end)
	def nr_rectangles(self):
		# Highest cpu number seen; the GUI sizes its rows from this.
		last_ts = self.data[-1]
		max_cpu = 0
		for cpu in last_ts.rqs:
			if cpu > max_cpu:
				max_cpu = cpu
		return max_cpu
class SchedEventProxy:
	"""Translates raw perf sched events into TimeSlice updates.

	Keeps a cpu -> currently-running-pid map to detect gaps in the trace.
	(This script is Python 2: note the print statement below.)"""
	def __init__(self):
		# -1 means "no task observed on this cpu yet".
		self.current_tsk = defaultdict(lambda : -1)
		self.timeslices = TimeSliceList()
	def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
		next_comm, next_pid, next_prio):
		""" Ensure the task we sched out this cpu is really the one
		we logged. Otherwise we may have missed traces """
		on_cpu_task = self.current_tsk[headers.cpu]
		if on_cpu_task != -1 and on_cpu_task != prev_pid:
			print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
				(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
		# `threads` is a module-level pid -> comm map defined elsewhere in
		# this file; keep it current so thread_name() can resolve pids.
		threads[prev_pid] = prev_comm
		threads[next_pid] = next_comm
		self.current_tsk[headers.cpu] = next_pid
		ts = self.timeslices.get_time_slice(headers.ts())
		ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
	def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
		# Forward a migration event to the current time slice.
		ts = self.timeslices.get_time_slice(headers.ts())
		ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
	def wake_up(self, headers, comm, pid, success, target_cpu, fork):
		# Ignore failed wakeups; fork distinguishes wakeup-new from wakeup.
		if success == 0:
			return
		ts = self.timeslices.get_time_slice(headers.ts())
		ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
	# perf-script entry hook: called once before any event is delivered.
	global parser
	parser = SchedEventProxy()
def trace_end():
	# perf-script exit hook: hand the collected timeslices to the wx GUI.
	app = wx.App(False)
	timeslices = parser.timeslices
	# `frame` is not referenced again; presumably RootFrame shows itself
	# and wx keeps it alive -- TODO confirm against RootFrame's definition.
	frame = RootFrame(timeslices, "Migration")
	app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, runtime, vruntime):
	# Unused trace event: runtime statistics are not needed for this view.
	pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, delay):
	# Unused trace event.
	pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, delay):
	# Unused trace event.
	pass
def sched__sched_stat_wait(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, delay):
	# Unused trace event.
	pass
def sched__sched_process_fork(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	parent_comm, parent_pid, child_comm, child_pid):
	# Unused: forks are tracked via sched_wakeup_new instead.
	pass
def sched__sched_process_wait(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, prio):
	# Unused trace event.
	pass
def sched__sched_process_exit(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, prio):
	# Unused trace event.
	pass
def sched__sched_process_free(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, prio):
	# Unused trace event.
	pass
def sched__sched_migrate_task(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, prio, orig_cpu,
	dest_cpu):
	# Forward task migrations to the global SchedEventProxy.
	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
		common_pid, common_comm)
	parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	prev_comm, prev_pid, prev_prio, prev_state,
	next_comm, next_pid, next_prio):
	# Forward context switches to the global SchedEventProxy.
	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
		common_pid, common_comm)
	parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
		next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, prio, success,
	target_cpu):
	# First wakeup of a freshly forked task (fork flag = 1).
	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
		common_pid, common_comm)
	parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, prio, success,
	target_cpu):
	# Ordinary wakeup of an existing task (fork flag = 0).
	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
		common_pid, common_comm)
	parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, prio):
	# Unused trace event.
	pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	ret):
	# Unused trace event.
	pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid):
	# Unused trace event.
	pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
	common_pid, common_comm):
	# Silently ignore every event type this script does not handle.
	pass
|
ekonstantinidis/django-rest-framework-docs | refs/heads/master | rest_framework_docs/api_endpoint.py | 2 | import json
import inspect
from django.contrib.admindocs.views import simplify_regex
from django.utils.encoding import force_str
from rest_framework.viewsets import ModelViewSet
from rest_framework.serializers import BaseSerializer
# HTTP methods DRF binds on a ModelViewSet, keyed by the view's suffix:
# 'List' is the collection endpoint, 'Instance' the detail endpoint.
VIEWSET_METHODS = {
    'List': ['get', 'post'],
    'Instance': ['get', 'put', 'patch', 'delete'],
}
class ApiEndpoint(object):
    """Introspected description of one Django URL pattern backed by a DRF view.

    Collects the path, allowed HTTP methods, docstring, serializer field
    layout and permission class so the documentation UI can render the
    endpoint. Any serializer-instantiation error is stored in ``errors``.
    """

    def __init__(self, pattern, parent_regex=None, drf_router=None):
        self.drf_router = drf_router
        self.pattern = pattern
        self.callback = pattern.callback
        # self.name = pattern.name
        self.docstring = self.__get_docstring__()
        self.name_parent = simplify_regex(parent_regex).strip('/') if parent_regex else None
        self.path = self.__get_path__(parent_regex)
        self.allowed_methods = self.__get_allowed_methods__()
        # self.view_name = pattern.callback.__name__
        self.errors = None
        self.serializer_class = self.__get_serializer_class__()
        if self.serializer_class:
            self.serializer = self.__get_serializer__()
            self.fields = self.__get_serializer_fields__(self.serializer)
            self.fields_json = self.__get_serializer_fields_json__()
        self.permissions = self.__get_permissions_class__()

    def __get_path__(self, parent_regex):
        """Return the human-readable path, prefixed by the parent include()'s
        simplified path when this pattern was nested."""
        if parent_regex:
            return "/{0}{1}".format(self.name_parent, simplify_regex(self.pattern.regex.pattern))
        return simplify_regex(self.pattern.regex.pattern)

    def is_method_allowed(self, callback_cls, method_name):
        """True when the view handles ``method_name`` directly, or via the
        ModelViewSet method set matching the view's suffix ('List'/'Instance')."""
        has_attr = hasattr(callback_cls, method_name)
        viewset_method = (issubclass(callback_cls, ModelViewSet) and
                          method_name in VIEWSET_METHODS.get(self.callback.suffix, []))
        return has_attr or viewset_method

    def __get_allowed_methods__(self):
        """Collect the sorted, uppercase HTTP methods of this endpoint:
        router-bound viewset actions plus the view class's own handlers."""
        viewset_methods = []
        if self.drf_router:
            for prefix, viewset, basename in self.drf_router.registry:
                if self.callback.cls != viewset:
                    continue
                lookup = self.drf_router.get_lookup_regex(viewset)
                routes = self.drf_router.get_routes(viewset)
                for route in routes:
                    # Only actions which actually exist on the viewset will be bound
                    mapping = self.drf_router.get_method_map(viewset, route.mapping)
                    if not mapping:
                        continue
                    # Build the url pattern
                    regex = route.url.format(
                        prefix=prefix,
                        lookup=lookup,
                        trailing_slash=self.drf_router.trailing_slash
                    )
                    if self.pattern.regex.pattern == regex:
                        funcs, viewset_methods = zip(
                            *[(mapping[m], m.upper())
                              for m in self.callback.cls.http_method_names
                              if m in mapping]
                        )
                        viewset_methods = list(viewset_methods)
                        # When every bound method maps to the same action,
                        # prefer that action's docstring for the endpoint.
                        if len(set(funcs)) == 1:
                            self.docstring = inspect.getdoc(getattr(self.callback.cls, funcs[0]))
        view_methods = [force_str(m).upper()
                        for m in self.callback.cls.http_method_names
                        if self.is_method_allowed(self.callback.cls, m)]
        return sorted(viewset_methods + view_methods)

    def __get_docstring__(self):
        return inspect.getdoc(self.callback)

    def __get_permissions_class__(self):
        # Only the first configured permission class is reported.
        for perm_class in self.pattern.callback.cls.permission_classes:
            return perm_class.__name__

    def __get_serializer__(self):
        try:
            return self.serializer_class()
        except KeyError as e:
            # get_serializer_class() implementations may raise KeyError when
            # they depend on request kwargs that are not available here.
            self.errors = e

    def __get_serializer_class__(self):
        if hasattr(self.callback.cls, 'serializer_class'):
            return self.callback.cls.serializer_class
        if hasattr(self.callback.cls, 'get_serializer_class'):
            return self.callback.cls.get_serializer_class(self.pattern.callback.cls())

    def __get_serializer_fields__(self, serializer):
        """Recursively describe the serializer's declared fields."""
        fields = []
        if hasattr(serializer, 'get_fields'):
            for key, field in serializer.get_fields().items():
                # many=True relations expose a `many` attribute and hold their
                # item serializer in `.child`; descend into the right object.
                to_many_relation = hasattr(field, 'many')
                nested = field.child if to_many_relation else field
                sub_fields = (self.__get_serializer_fields__(nested)
                              if isinstance(field, BaseSerializer) else None)
                fields.append({
                    "name": key,
                    "type": str(field.__class__.__name__),
                    "sub_fields": sub_fields,
                    "required": field.required,
                    "to_many_relation": to_many_relation
                })
            # FIXME:
            # Show more attributes of `field`?
        return fields

    def __get_serializer_fields_json__(self):
        # FIXME:
        # Return JSON or not?
        return json.dumps(self.fields)
|
DamnWidget/txorm | refs/heads/master | txorm/_compat/txorm_python3.py | 1 | # -*- test-case-name: txorm.test.test_property -*-
# Copyright (c) 2014 Oscar Campos <oscar.campos@member.fsf.org>
# See LICENSE for details
from ..property import PropertyRegisterMeta
class TxORM(object, metaclass=PropertyRegisterMeta):
    """Causes subclasses to be associated with a TxORM PropertyRegister

    This is necessary to be able to specify References with strings
    """
helfertool/helfertool | refs/heads/dev | src/registration/urls.py | 1 | from django.conf.urls import url
from django.views.generic import TemplateView
from . import views
from .feeds import HelperFeed
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^all/$', views.index_all_events, name='index_all_events'),

    # about
    url(r'^about/$',
        TemplateView.as_view(template_name='registration/about.html'),
        name='about'),

    # admin interface
    url(r'^manage/$',
        views.admin,
        name='admin'),
    url(r'^manage/new/$',
        views.edit_event,
        name='new_event'),
    url(r'^manage/past/$',
        views.past_events,
        name='past_events'),

    # registration
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/$',
        views.form,
        name='form'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/registered/'
        r'(?P<helper_id>[a-z0-9\-]+)/$',
        views.registered,
        name='registered'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/validate/'
        r'(?P<helper_id>[a-z0-9\-]+)/$',
        views.validate,
        name='validate'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/ical/'
        r'(?P<helper_id>[a-z0-9\-]+)/$',
        HelperFeed(),
        name='ical'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/update/'
        r'(?P<helper_id>[a-z0-9\-]+)/$',
        views.update_personal,
        name='update_personal'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/deregister/'
        r'(?P<helper_id>[a-z0-9\-]+)/(?P<shift_pk>[0-9]+)/$',
        views.deregister,
        name='deregister'),
    # FIX: this pattern previously lacked the trailing '$' anchor (unlike
    # every other route here), so it also matched arbitrary suffixes
    # below /deleted/.
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/deleted/$',
        views.deleted,
        name='deleted'),

    # manage event
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/edit/$',
        views.edit_event,
        name='edit_event'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/edit/admins/$',
        views.edit_event_admins,
        name='edit_event_admins'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/delete/$',
        views.delete_event,
        name='delete_event'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/archive/$',
        views.archive_event,
        name='archive_event'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/duplicate/$',
        views.duplicate_event,
        name='duplicate_event'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/move/$',
        views.move_event,
        name='move_event'),

    # jobs
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/jobs/$',
        views.jobs_and_shifts,
        name='jobs_and_shifts'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/jobs/new/$',
        views.edit_job,
        name='new_job'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/jobs/sort/$',
        views.sort_job,
        name='sort_job'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/jobs/(?P<job_pk>[0-9]+)/edit/$',
        views.edit_job,
        name='edit_job'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/jobs/(?P<job_pk>[0-9]+)/delete/$',
        views.delete_job,
        name='delete_job'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/jobs/(?P<job_pk>[0-9]+)/'
        r'duplicate/$',
        views.duplicate_job,
        name='duplicate_job'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/jobs/(?P<job_pk>[0-9]+)/'
        r'duplicate/day/$',
        views.duplicate_job_day,
        name='duplicate_job_day'),

    # shifts
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/jobs/(?P<job_pk>[0-9]+)/shift/'
        r'(?P<shift_pk>[0-9]+)/$',
        views.edit_shift,
        name='edit_shift'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/jobs/(?P<job_pk>[0-9]+)/shift/'
        r'(?P<shift_pk>[0-9]+)/delete/$',
        views.delete_shift,
        name='delete_shift'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/jobs/(?P<job_pk>[0-9]+)/shift/'
        r'new/$',
        views.edit_shift,
        name='new_shift'),

    # helpers
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/helpers/$',
        views.helpers,
        name='helpers'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/helpers/job/(?P<job_pk>[0-9]+)/$',
        views.helpers_for_job,
        name='helpers_for_job'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/helpers/search/$',
        views.search_helper,
        name='search_helper'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/helpers/'
        r'(?P<helper_pk>[0-9a-f\-]+)/$',
        views.view_helper,
        name='view_helper'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/helpers/'
        r'(?P<helper_pk>[0-9a-f\-]+)/edit/$',
        views.edit_helper,
        name='edit_helper'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/helpers/'
        r'(?P<helper_pk>[0-9a-f\-]+)/delete/(?P<shift_pk>[0-9]+)/$',
        views.delete_helper,
        name='delete_helper'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/helpers/'
        r'(?P<helper_pk>[0-9a-f\-]+)/delete/(?P<shift_pk>[0-9]+)/all/$',
        views.delete_helper,
        {'show_all_shifts': True},
        name='delete_helper_all'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/helpers/'
        r'(?P<helper_pk>[0-9a-f\-]+)/delete/coordinator/(?P<job_pk>[0-9]+)/$',
        views.delete_coordinator,
        name='delete_coordinator'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/helpers/add/'
        r'(?P<shift_pk>[0-9]+)/$',
        views.add_helper,
        name='add_helper'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/helpers/add/coordinator/'
        r'(?P<job_pk>[0-9]+)/$',
        views.add_coordinator,
        name='add_coordinator'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/helpers/'
        r'(?P<helper_pk>[0-9a-f\-]+)/add/shift/$',
        views.add_helper_to_shift,
        name='add_helper_to_shift'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/helpers/'
        r'(?P<helper_pk>[0-9a-f\-]+)/add/coordinator/$',
        views.add_helper_as_coordinator,
        name='add_helper_as_coordinator'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/helpers/'
        r'(?P<helper_pk>[0-9a-f\-]+)/resend/$',
        views.resend_mail,
        name='resend_mail'),

    # export
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/export/(?P<filetype>[a-z]+)/all/$',
        views.export,
        name='export'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/export/(?P<filetype>[a-z]+)/all/'
        r'(?P<date_str>\d{4}-\d{2}-\d{2})/$',
        views.export,
        name='export_date'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/export/(?P<filetype>[a-z]+)/'
        r'(?P<job_pk>[0-9]+)/$',
        views.export,
        name='export_job'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/export/(?P<filetype>[a-z]+)/'
        r'(?P<job_pk>[0-9]+)/(?P<date_str>\d{4}-\d{2}-\d{2})/$',
        views.export,
        name='export_job_date'),

    # vacant shifts
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/vacant/$',
        views.vacant_shifts,
        name='vacant_shifts'),

    # summaries
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/coordinators/$',
        views.coordinators,
        name='coordinators'),

    # manage links
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/links/$',
        views.links,
        name='links'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/links/add/$',
        views.edit_link,
        name='add_link'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/links/(?P<link_pk>[0-9a-f\-]+)/$',
        views.edit_link,
        name='edit_link'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/links/(?P<link_pk>[0-9a-f\-]+)/'
        r'delete/$',
        views.delete_link,
        name='delete_link'),

    # duplicates
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/duplicates/$',
        views.duplicates,
        name='duplicates'),
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/duplicates/merge/(?P<email>.+)/$',
        views.merge,
        name='merge'),

    # use links
    url(r'^(?P<event_url_name>[a-zA-Z0-9]+)/l/(?P<link_pk>[0-9a-f\-]+)/$',
        views.form,
        name='form_for_link'),
]
|
timsnyder/bokeh | refs/heads/master | bokeh/command/subcommands/tests/test_json.py | 2 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import argparse
import os
import sys
# External imports
# Bokeh imports
from bokeh.command.bootstrap import main
from bokeh._testing.util.filesystem import TmpDir, WorkingDir, with_directory_contents
from . import basic_scatter_script
# Module under test
import bokeh.command.subcommands.json as scjson
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
is_python2 = sys.version_info[0] == 2
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def test_create():
    # The JSON subcommand must instantiate as a Subcommand. The redundant
    # local `import argparse` was removed: the module already imports it
    # at the top of the file.
    from bokeh.command.subcommand import Subcommand
    obj = scjson.JSON(parser=argparse.ArgumentParser())
    assert isinstance(obj, Subcommand)
def test_name():
    # The subcommand registers under the CLI name "json".
    assert scjson.JSON.name == "json"
def test_help():
    # Pin the CLI help text shown by `bokeh --help`.
    assert scjson.JSON.help == "Create JSON files for one or more applications"
def test_args():
    # Pin the exact argparse specification so accidental CLI changes fail.
    assert scjson.JSON.args == (
        ('files', dict(
            metavar='DIRECTORY-OR-SCRIPT',
            nargs='+',
            help="The app directories or scripts to generate JSON for",
            default=None
        )),
        ('--indent', dict(
            metavar='LEVEL',
            type=int,
            help="indentation to use when printing",
            default=None
        )),
        (('-o', '--output'), dict(
            metavar='FILENAME',
            action='append',
            type=str,
            help="Name of the output file or - for standard output."
        )),
        ('--args', dict(
            metavar='COMMAND-LINE-ARGS',
            nargs=argparse.REMAINDER,
            help="Any command line arguments remaining are passed on to the application handler",
        )),
    )
def test_no_script(capsys):
    # Invoking `bokeh json` without a target must exit with the argparse
    # usage error on stderr; the wording differs between Python 2 and 3.
    with (TmpDir(prefix="bokeh-json-no-script")) as dirname:
        with WorkingDir(dirname):
            with pytest.raises(SystemExit):
                main(["bokeh", "json"])
    out, err = capsys.readouterr()
    if is_python2:
        too_few = "too few arguments"
    else:
        too_few = "the following arguments are required: DIRECTORY-OR-SCRIPT"
    assert err == """usage: bokeh json [-h] [--indent LEVEL] [-o FILENAME] [--args ...]
                  DIRECTORY-OR-SCRIPT [DIRECTORY-OR-SCRIPT ...]
bokeh json: error: %s
""" % (too_few)
    assert out == ""
def test_basic_script(capsys):
    # Default output name: <script stem>.json next to the script, no stdout.
    def run(dirname):
        with WorkingDir(dirname):
            main(["bokeh", "json", "scatter.py"])
        out, err = capsys.readouterr()
        assert err == ""
        assert out == ""
        assert set(["scatter.json", "scatter.py"]) == set(os.listdir(dirname))
    with_directory_contents({ 'scatter.py' : basic_scatter_script },
                            run)
def test_basic_script_with_output_after(capsys):
    # --output given after the script controls the output file name.
    def run(dirname):
        with WorkingDir(dirname):
            main(["bokeh", "json", "scatter.py", "--output", "foo.json"])
        out, err = capsys.readouterr()
        assert err == ""
        assert out == ""
        assert set(["foo.json", "scatter.py"]) == set(os.listdir(dirname))
    with_directory_contents({ 'scatter.py' : basic_scatter_script },
                            run)
def test_basic_script_with_output_before(capsys):
    # --output given before the script must behave identically.
    def run(dirname):
        with WorkingDir(dirname):
            main(["bokeh", "json", "--output", "foo.json", "scatter.py"])
        out, err = capsys.readouterr()
        assert err == ""
        assert out == ""
        assert set(["foo.json", "scatter.py"]) == set(os.listdir(dirname))
    with_directory_contents({ 'scatter.py' : basic_scatter_script },
                            run)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
gavioto/tapiriik | refs/heads/master | tapiriik/web/views/account.py | 16 | from tapiriik.sync import Sync
from django.http import HttpResponse
from django.views.decorators.http import require_POST
from django.shortcuts import redirect
from tapiriik.auth import User
import json
import dateutil.parser
@require_POST
def account_setemail(req):
    """Update the logged-in user's email address, then return to dashboard."""
    if not req.user:
        return HttpResponse(status=403)
    User.SetEmail(req.user, req.POST["email"])
    return redirect("dashboard")
@require_POST
def account_settimezone(req):
    """Update the logged-in user's timezone preference (AJAX endpoint)."""
    if not req.user:
        return HttpResponse(status=403)
    User.SetTimezone(req.user, req.POST["timezone"])
    return HttpResponse()
@require_POST
def account_setconfig(req):
    """Store a JSON configuration blob for the logged-in user and schedule
    an exhaustive sync so the new settings take effect.

    The request body is a JSON object; ``sync_skip_before``, when present
    and non-empty, is parsed into a datetime before being stored.
    """
    if not req.user:
        return HttpResponse(status=403)
    data = json.loads(req.body.decode("utf-8"))
    # Fix: use .get() so a payload without the key no longer raises
    # KeyError; the redundant `and len(...)` truthiness check is gone too.
    sync_skip_before = data.get("sync_skip_before")
    if sync_skip_before:
        data["sync_skip_before"] = dateutil.parser.parse(sync_skip_before)
    User.SetConfiguration(req.user, data)
    Sync.SetNextSyncIsExhaustive(req.user, True)
    return HttpResponse()
syaiful6/django | refs/heads/master | tests/many_to_one/__init__.py | 12133432 | |
zedr/django | refs/heads/master | tests/null_fk/__init__.py | 12133432 | |
jamielennox/tempest | refs/heads/master | tempest/thirdparty/__init__.py | 12133432 | |
weolar/miniblink49 | refs/heads/master | third_party/skia/tools/__init__.py | 12133432 | |
luotao1/Paddle | refs/heads/develop | python/paddle/fluid/tests/unittests/test_fleet_lars_meta_optimizer.py | 2 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle
from paddle import fluid
import os
import paddle.distributed.fleet as fleet
import paddle.distributed.fleet.base.role_maker as role_maker
# These tests build fluid Programs, which presumably requires static-graph
# mode rather than the default dynamic mode -- TODO confirm.
paddle.enable_static()
class TestFleetLarsMetaOptimizer(unittest.TestCase):
    """Graph-construction tests for fleet's LARS meta optimizer rewriting."""
    def setUp(self):
        # Pretend to be trainer 1 of a two-trainer collective job; only
        # program graphs are built, no communication actually happens.
        os.environ["PADDLE_TRAINER_ID"] = "1"
        os.environ[
            "PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001,127.0.0.1:36002"
    def net(self, main_prog, startup_prog):
        # Build a small MLP classifier inside the given programs; return its
        # mean cross-entropy loss plus a strategy with LARS enabled.
        with fluid.program_guard(main_prog, startup_prog):
            with fluid.unique_name.guard():
                input_x = paddle.fluid.layers.data(
                    name="x", shape=[32], dtype='float32')
                input_y = paddle.fluid.layers.data(
                    name="y", shape=[1], dtype='int64')
                fc_1 = paddle.fluid.layers.fc(input=input_x,
                                              size=64,
                                              act='tanh')
                fc_2 = paddle.fluid.layers.fc(input=fc_1, size=256, act='tanh')
                prediction = paddle.fluid.layers.fc(input=[fc_2],
                                                    size=2,
                                                    act='softmax')
                cost = paddle.fluid.layers.cross_entropy(
                    input=prediction, label=input_y)
                avg_cost = paddle.fluid.layers.mean(x=cost)
                strategy = paddle.distributed.fleet.DistributedStrategy()
                strategy.lars = True
                strategy.lars_configs = {
                    "lars_coeff": 0.001,
                    "lars_weight_decay": 0.0005,
                    "epsilon": 0,
                    "exclude_from_weight_decay": ["batch_norm", ".b"],
                }
        return avg_cost, strategy
    def test_lars_optimizer(self):
        # Momentum + LARS strategy: the graph must contain lars_momentum ops.
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        startup_prog = fluid.Program()
        train_prog = fluid.Program()
        avg_cost, strategy = self.net(train_prog, startup_prog)
        optimizer = paddle.fluid.optimizer.Momentum(
            learning_rate=0.01, momentum=0.9)
        optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
        optimizer.minimize(avg_cost)
        ops = [op.type for op in avg_cost.block.ops]
        self.assertIn('lars_momentum', ops)
    def test_lars_not_apply_with_adam(self):
        # LARS only rewrites Momentum; with Adam no lars_momentum may appear.
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        startup_prog = fluid.Program()
        train_prog = fluid.Program()
        avg_cost, strategy = self.net(train_prog, startup_prog)
        optimizer = paddle.fluid.optimizer.Adam(learning_rate=0.01)
        optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
        optimizer.minimize(avg_cost)
        ops = [op.type for op in avg_cost.block.ops]
        self.assertNotIn('lars_momentum', ops)
    def test_lars_exclude_fn(self):
        # Parameters matching exclude_from_weight_decay must get a
        # lars_weight_decay of 0 on their lars_momentum op.
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        startup_prog = fluid.Program()
        train_prog = fluid.Program()
        avg_cost, strategy = self.net(train_prog, startup_prog)
        optimizer = paddle.fluid.optimizer.Momentum(
            learning_rate=0.01, momentum=0.9)
        optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
        optimizer.minimize(avg_cost)
        ops_without_wd = [
            op for op in avg_cost.block.ops
            if op.type == 'lars_momentum' and ("batch_norm" in op.attr(
                'op_role_var')[0] or ".b" in op.attr('op_role_var')[0])
        ]
        for op in ops_without_wd:
            self.assertEqual(op.attr('lars_weight_decay'), 0)
    def test_lars_apply_with_amp(self):
        # LARS combined with AMP: both rewrites must show up in the graph
        # (lars_momentum plus the AMP cast/loss-scaling ops).
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        input_x = paddle.fluid.layers.data(
            name="x", shape=[32], dtype='float32')
        input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64')
        fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh')
        fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh')
        prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax')
        cost = paddle.fluid.layers.cross_entropy(
            input=prediction, label=input_y)
        avg_cost = paddle.fluid.layers.mean(x=cost)
        strategy = paddle.distributed.fleet.DistributedStrategy()
        strategy.amp = True
        strategy.amp_configs = {
            "init_loss_scaling": 32768,
            "decr_every_n_nan_or_inf": 2,
            "incr_every_n_steps": 1000,
            "incr_ratio": 2.0,
            "use_dynamic_loss_scaling": True,
            "decr_ratio": 0.5,
            "custom_white_list": ['softmax'],
            "custom_black_list": ['tanh'],
        }
        strategy.lars = True
        strategy.lars_configs = {
            "lars_coeff": 0.001,
            "lars_weight_decay": 0.0005,
            "epsilon": 0,
            "exclude_from_weight_decay": ["batch_norm", ".b"],
        }
        optimizer = paddle.fluid.optimizer.Momentum(
            learning_rate=0.01, momentum=0.9)
        optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
        optimizer.minimize(avg_cost)
        ops = [op.type for op in avg_cost.block.ops]
        self.assertIn('lars_momentum', ops)
        self.assertIn('cast', ops)
        self.assertIn('check_finite_and_unscale', ops)
|
di0spyr0s/pants | refs/heads/master | src/python/pants/backend/codegen/targets/jaxb_library.py | 15 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
class JaxbLibrary(JvmTarget):
  """Generates a stub Java library from jaxb xsd files."""

  def __init__(self, payload=None, package=None, language='java', **kwargs):
    """
    :param package: java package (com.company.package) in which to generate the output java files.
      If unspecified, Pants guesses it from the file path leading to the schema
      (xsd) file. This guess is accurate only if the .xsd file is in a path like
      ``.../com/company/package/schema.xsd``. Pants looks for packages that start with 'com', 'org',
      or 'net'.
    :param string language: only 'java' is supported. Default: 'java'
    """
    # Fix: validate `language` before mutating the payload or running the
    # superclass constructor, so an invalid target fails fast without
    # side effects (previously the check ran last).
    if language != 'java':
      raise ValueError('Language "{lang}" not supported for {class_type}'
                       .format(lang=language, class_type=type(self).__name__))

    payload = payload or Payload()
    payload.add_fields({
      'package': PrimitiveField(package),
      'jaxb_language': PrimitiveField(language),
    })
    super(JaxbLibrary, self).__init__(payload=payload, **kwargs)

    self.add_labels('codegen')
    self.add_labels('jaxb')

  @property
  def package(self):
    """The java package the generated sources are placed in (may be None)."""
    return self.payload.package
|
nicky-ji/edx-nicky | refs/heads/master | lms/djangoapps/course_wiki/plugins/__init__.py | 12133432 | |
VigTech/Vigtech-Services | refs/heads/master | env/lib/python2.7/site-packages/django/conf/locale/fi/__init__.py | 12133432 | |
acourtney2015/boto | refs/heads/develop | tests/integration/elastictranscoder/test_layer1.py | 114 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import time
from boto.elastictranscoder.layer1 import ElasticTranscoderConnection
from boto.elastictranscoder.exceptions import ValidationException
from tests.compat import unittest
import boto.s3
import boto.sns
import boto.iam
import boto.sns
class TestETSLayer1PipelineManagement(unittest.TestCase):
    """Integration tests for the Elastic Transcoder pipeline API.

    These tests talk to live AWS services: they create (and clean up) real
    S3 buckets, an IAM role, SNS topics and transcoder pipelines.
    """

    def setUp(self):
        # One connection per AWS service used by the tests.
        # (The original code connected to SNS twice; once is enough.)
        self.api = ElasticTranscoderConnection()
        self.s3 = boto.connect_s3()
        self.sns = boto.connect_sns()
        self.iam = boto.connect_iam()
        # Timestamp suffix keeps resource names unique across test runs.
        self.timestamp = str(int(time.time()))
        self.input_bucket = 'boto-pipeline-%s' % self.timestamp
        self.output_bucket = 'boto-pipeline-out-%s' % self.timestamp
        self.role_name = 'boto-ets-role-%s' % self.timestamp
        self.pipeline_name = 'boto-pipeline-%s' % self.timestamp
        self.s3.create_bucket(self.input_bucket)
        self.s3.create_bucket(self.output_bucket)
        self.addCleanup(self.s3.delete_bucket, self.input_bucket)
        self.addCleanup(self.s3.delete_bucket, self.output_bucket)
        self.role = self.iam.create_role(self.role_name)
        self.role_arn = self.role['create_role_response']['create_role_result']\
            ['role']['arn']
        self.addCleanup(self.iam.delete_role, self.role_name)

    def create_pipeline(self):
        """Create a pipeline (deleted on cleanup) and return its id."""
        pipeline = self.api.create_pipeline(
            self.pipeline_name, self.input_bucket,
            self.output_bucket, self.role_arn,
            {'Progressing': '', 'Completed': '', 'Warning': '', 'Error': ''})
        pipeline_id = pipeline['Pipeline']['Id']
        self.addCleanup(self.api.delete_pipeline, pipeline_id)
        return pipeline_id

    def test_create_delete_pipeline(self):
        """A pipeline can be created and then deleted explicitly."""
        pipeline = self.api.create_pipeline(
            self.pipeline_name, self.input_bucket,
            self.output_bucket, self.role_arn,
            {'Progressing': '', 'Completed': '', 'Warning': '', 'Error': ''})
        pipeline_id = pipeline['Pipeline']['Id']
        self.api.delete_pipeline(pipeline_id)

    def test_can_retrieve_pipeline_information(self):
        pipeline_id = self.create_pipeline()
        # The pipeline shows up in list_pipelines
        pipelines = self.api.list_pipelines()['Pipelines']
        pipeline_names = [p['Name'] for p in pipelines]
        self.assertIn(self.pipeline_name, pipeline_names)
        # The pipeline shows up in read_pipeline
        response = self.api.read_pipeline(pipeline_id)
        self.assertEqual(response['Pipeline']['Id'], pipeline_id)

    def test_update_pipeline(self):
        """Pausing a pipeline is reflected in its status."""
        pipeline_id = self.create_pipeline()
        self.api.update_pipeline_status(pipeline_id, 'Paused')
        response = self.api.read_pipeline(pipeline_id)
        self.assertEqual(response['Pipeline']['Status'], 'Paused')

    def test_update_pipeline_notification(self):
        """An SNS topic can be attached as the Error notification target."""
        pipeline_id = self.create_pipeline()
        response = self.sns.create_topic('pipeline-errors')
        topic_arn = response['CreateTopicResponse']['CreateTopicResult']\
            ['TopicArn']
        self.addCleanup(self.sns.delete_topic, topic_arn)
        self.api.update_pipeline_notifications(
            pipeline_id,
            {'Progressing': '', 'Completed': '',
             'Warning': '', 'Error': topic_arn})
        response = self.api.read_pipeline(pipeline_id)
        self.assertEqual(response['Pipeline']['Notifications']['Error'],
                         topic_arn)

    def test_list_jobs_by_pipeline(self):
        """A freshly created pipeline has no jobs."""
        pipeline_id = self.create_pipeline()
        response = self.api.list_jobs_by_pipeline(pipeline_id)
        self.assertEqual(response['Jobs'], [])

    def test_proper_error_when_pipeline_does_not_exist(self):
        pipeline_id_does_not_exist = 'badpipelineid'
        with self.assertRaises(ValidationException):
            self.api.read_pipeline(pipeline_id_does_not_exist)
|
Argon-Zhou/django | refs/heads/master | django/db/backends/mysql/validation.py | 368 | from django.core import checks
from django.db.backends.base.validation import BaseDatabaseValidation
class DatabaseValidation(BaseDatabaseValidation):
    def check_field(self, field, **kwargs):
        """
        MySQL has the following field length restriction:
        No character (varchar) fields can have a length exceeding 255
        characters if they have a unique index on them.
        """
        from django.db import connection

        errors = super(DatabaseValidation, self).check_field(field, **kwargs)

        # Related fields are validated on their target model, not here.
        if getattr(field, 'remote_field', None) is not None:
            return errors

        field_type = field.db_type(connection)
        # Non-concrete fields have no column and therefore no length limit.
        if field_type is None:
            return errors

        # Unique varchar columns longer than 255 characters are rejected.
        is_long_unique_varchar = (
            field_type.startswith('varchar') and
            field.unique and
            (field.max_length is None or int(field.max_length) > 255)
        )
        if is_long_unique_varchar:
            errors.append(checks.Error(
                'MySQL does not allow unique CharFields to have a max_length > 255.',
                hint=None,
                obj=field,
                id='mysql.E001',
            ))
        return errors
|
kiszk/spark | refs/heads/master | examples/src/main/python/ml/decision_tree_regression_example.py | 128 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Decision Tree Regression Example.
"""
from __future__ import print_function
# $example on$
from pyspark.ml import Pipeline
from pyspark.ml.regression import DecisionTreeRegressor
from pyspark.ml.feature import VectorIndexer
from pyspark.ml.evaluation import RegressionEvaluator
# $example off$
from pyspark.sql import SparkSession
# The $example on$/$example off$ markers below delimit the snippet that the
# Spark documentation build extracts; keep them in place.
if __name__ == "__main__":
    # Create (or reuse) a SparkSession; appName identifies this job in the UI.
    spark = SparkSession\
        .builder\
        .appName("DecisionTreeRegressionExample")\
        .getOrCreate()

    # $example on$
    # Load the data stored in LIBSVM format as a DataFrame.
    data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")

    # Automatically identify categorical features, and index them.
    # We specify maxCategories so features with > 4 distinct values are treated as continuous.
    featureIndexer =\
        VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=4).fit(data)

    # Split the data into training and test sets (30% held out for testing)
    (trainingData, testData) = data.randomSplit([0.7, 0.3])

    # Train a DecisionTree model.
    dt = DecisionTreeRegressor(featuresCol="indexedFeatures")

    # Chain indexer and tree in a Pipeline
    pipeline = Pipeline(stages=[featureIndexer, dt])

    # Train model. This also runs the indexer.
    model = pipeline.fit(trainingData)

    # Make predictions.
    predictions = model.transform(testData)

    # Select example rows to display.
    predictions.select("prediction", "label", "features").show(5)

    # Select (prediction, true label) and compute test error
    evaluator = RegressionEvaluator(
        labelCol="label", predictionCol="prediction", metricName="rmse")
    rmse = evaluator.evaluate(predictions)
    print("Root Mean Squared Error (RMSE) on test data = %g" % rmse)

    # stages[1] is the fitted DecisionTreeRegressionModel (stage 0 is the indexer).
    treeModel = model.stages[1]
    # summary only
    print(treeModel)
    # $example off$

    spark.stop()
|
EdisonAlgorithms/LeetCode | refs/heads/master | vol6/expression-add-operators/expression-add-operators.py | 2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Zeyuan Shang
# @Date: 2015-11-24 02:03:32
# @Last Modified by: Zeyuan Shang
# @Last Modified time: 2015-11-24 02:03:44
class Solution(object):
    def addOperators(self, num, target):
        """
        :type num: str
        :type target: int
        :rtype: List[str]

        Return every expression obtained by inserting '+', '-' or '*'
        between the digits of ``num`` that evaluates to ``target``.
        Operands may not have leading zeros (a lone '0' is allowed).
        """
        if not num:
            return []

        def search(digits, goal, suffix='', factor=1):
            # All expressions over ``digits`` (to be followed by ``suffix``)
            # whose value equals ``goal`` when the leading operand is
            # multiplied by ``factor`` (pending '*' chain to the right).
            found = []
            leading_zero_ok = digits == '0' or digits[0] != '0'
            if leading_zero_ok and int(digits) * factor == goal:
                found.append(digits + suffix)
            for split in range(len(digits) - 1):
                head, tail = digits[:split + 1], digits[split + 1:]
                if tail != '0' and tail[0] == '0':
                    continue  # right operand would have a leading zero
                rhs_expr = tail + suffix
                rhs_val = int(tail) * factor
                for lhs in search(head, goal - rhs_val):
                    found.append(lhs + '+' + rhs_expr)
                for lhs in search(head, goal + rhs_val):
                    found.append(lhs + '-' + rhs_expr)
                # Multiplication keeps the goal and folds tail into the factor.
                for lhs in search(head, goal, '*' + rhs_expr, rhs_val):
                    found.append(lhs)
            return found

        return search(num, target)
boltnev/iktomi | refs/heads/master | iktomi/forms/form.py | 3 | # -*- coding: utf-8 -*-
from webob.multidict import MultiDict
import six
from . import convs
from .perms import DEFAULT_PERMISSIONS
from .fields import FieldBlock
class FormValidationMetaClass(type):
    '''
    Metaclass to assert that some obsolete methods are not used.
    can be removed from iktomi after all existing code is cleaned up.
    '''

    def __new__(mcs, name, bases, dict_):
        # Reject class bodies that still define the old clean__* hooks.
        for attr_name in dict_:
            if attr_name.startswith('clean__'):
                raise TypeError('Form clean__ methods are obsolete')
        return type.__new__(mcs, name, bases, dict_)
class Form(six.with_metaclass(FormValidationMetaClass, object)):
    '''
    Base form class: holds a set of fields, accepts raw (MultiDict-like)
    input, converts it to python values and collects conversion errors.
    Subclasses are expected to define a ``fields`` attribute (a list of
    unbound field instances).
    '''

    # Template name passed to the environment's template engine by render().
    template = 'forms/default'
    # Default permission set; may be overridden per instance via __init__.
    permissions = DEFAULT_PERMISSIONS
    id = ''

    def __init__(self, env=None, initial=None, name=None, permissions=None):
        '''
        :param env: application environment; must expose ``template.render``
                    for :meth:`render` to work.
        :param initial: dict of initial python values for fields.
        :param name: optional form name; used as a prefix for field input names.
        :param permissions: iterable of permission flags; defaults to the
                            class-level ``permissions``.
        '''
        initial = initial or {}
        self.env = env
        self.name = name
        self.raw_data = MultiDict()
        # NOTE: `initial` is used to set initial display values for fields.
        #       If you provide initial value for some aggregated field
        #       you need to provide values for all fields that are in that
        #       aggregated field, including `None` as empty values.
        self.initial = initial
        self.python_data = initial.copy()
        # Clone all fields, binding each clone to this form instance.
        self.fields = [field(parent=self) for field in self.fields]

        if permissions is None:
            permissions = self.permissions
        self.permissions = set(permissions)

        for field in self.fields:
            # NOTE: we do not put `get_initial()` call result in `self.initial`
            #       because it may differ for each call
            self.python_data.update(field.load_initial(initial, self.raw_data))
        self.errors = {}

    @property
    def form(self):
        # Fields delegate `.form` up through their parents; the form is the root.
        return self

    @property
    def prefix(self):
        '''A prefix for names of field inputs'''
        if self.name:
            return self.name+':'
        else:
            return ''

    def render(self):
        '''Proxy method to form's environment render method'''
        return self.env.template.render(self.template, form=self)

    @property
    def is_valid(self):
        '''Is true if validated form has no errors'''
        return not self.errors

    def accept(self, data):
        '''
        Try to accept a MultiDict-like object and return whether it is valid.

        Side effects: replaces ``raw_data``, resets ``errors`` and updates
        ``python_data`` with accepted values. For readonly fields the
        current python value is written back into ``raw_data`` so it is
        redisplayed unchanged.
        '''
        self.raw_data = MultiDict(data)
        self.errors = {}
        for field in self.fields:
            if field.writable:
                self.python_data.update(field.accept())
            else:
                for name in field.field_names:
                    # readonly field: re-render its current value
                    subfield = self.get_field(name)
                    value = self.python_data[subfield.name]
                    subfield.set_raw_value(self.raw_data, subfield.from_python(value))
        return self.is_valid

    def get_field(self, name):
        '''
        Gets field by input name. Dotted names ('a.b') descend into
        aggregate fields; FieldBlock children are searched transparently.
        Returns None if no field matches.
        '''
        names = name.split('.', 1)
        for field in self.fields:
            if isinstance(field, FieldBlock):
                result = field.get_field(name)
                if result is not None:
                    return result
            if field.name == names[0]:
                if len(names) > 1:
                    return field.get_field(names[1])
                return field
        return None

    def get_data(self, compact=True):
        '''
        Returns data representing current state of the form. While
        Form.raw_data may contain alien fields and invalid data, this method
        returns only valid fields that belong to this form only. It's designed
        to pass somewhere current state of the form (as query string or by
        other means).

        :param compact: when True, entries with falsy values are dropped.
        '''
        data = MultiDict()
        for field in self.fields:
            raw_value = field.from_python(self.python_data[field.name])
            field.set_raw_value(data, raw_value)
        if compact:
            data = MultiDict([(k, v) for k, v in data.items() if v])
        return data

    def get_help(self):
        # Hook for subclasses; the base form has no help text.
        return None
|
areitz/pants | refs/heads/master | tests/python/pants_test/tasks/false.py | 26 | #!/usr/bin/python
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import sys

# This works just like /bin/false, but Windows users might not have that.
# Exiting with status 1 signals failure to whatever invoked this script.
sys.exit(1)
|
notapresent/rbm2m | refs/heads/master | rbm2m/action/scan_manager.py | 1 | # -*- coding: utf-8 -*-
import datetime
import logging
from sqlalchemy import and_, func
from base_manager import BaseManager
from ..models import Scan, scan_records, Genre
# All scans with no activity for this long are considered stalled
INACTIVITY_PERIOD = datetime.timedelta(seconds=600)
# Update interval
UPDATE_INTERVAL = datetime.timedelta(days=1)
logger = logging.getLogger(__name__)
class ScanManager(BaseManager):
    """
    Handles all DB interactions regarding scans
    """
    # Model class this manager operates on (consumed by BaseManager helpers).
    __model__ = Scan

    def get_current_scans(self, genre_id=None):
        """
        Returns currently running scans for genre (or all genres if genre_id is None)

        A scan counts as "current" while its status is 'queued' or 'running'.

        :return: list of Scans
        """
        query = (
            self.session.query(Scan)
            .filter(Scan.status.in_(['queued', 'running']))
        )
        if genre_id:
            query = query.filter(Scan.genre_id == genre_id)
        return query.all()

    def last_scans(self):
        """Return the 50 most recently started scans, newest first."""
        return (
            self.session.query(Scan)
            .order_by(Scan.started_at.desc())
            .limit(50)
            .all()
        )

    def records_not_in_scan(self, scan_id, rec_ids):
        """Return the subset of ``rec_ids`` not yet linked to scan ``scan_id``."""
        result = (
            self.session.query(scan_records.c.record_id)
            .filter(scan_records.c.scan_id == scan_id)
            .filter(scan_records.c.record_id.in_(rec_ids))
            .all()
        )
        # Rows come back as 1-tuples; unpack them to plain record ids.
        in_scan = [rec_id for rec_id, in result]
        return list(set(rec_ids) - set(in_scan))

    def get_stalled_scans(self):
        """
        Return active scans with no activity during the last INACTIVITY_PERIOD.

        Note: despite the old wording, this only *finds* stalled scans;
        marking them failed is up to the caller.

        :return: List of stalled scans
        """
        threshold = datetime.datetime.utcnow() - INACTIVITY_PERIOD
        active_scans = (
            self.session.query(Scan)
            .filter(Scan.status.in_(['queued', 'running']))
            .all()
        )
        # Filtered in Python rather than SQL; presumably last_action is
        # computed on the model — TODO confirm against the Scan model.
        rv = [s for s in active_scans if s.last_action < threshold]
        return rv

    def get_genre_with_no_scans_in_24h(self):
        """
        Find one imported genre for which there were no successful scans
        within the last UPDATE_INTERVAL (1 day).

        :return: Genre
        """
        threshold = datetime.datetime.utcnow() - UPDATE_INTERVAL
        q = (
            self.session.query(Genre)
            .select_from(Scan)
            .join(Genre)
            .filter(Scan.status == 'success')
            .filter(Genre.import_enabled.is_(True))
            .group_by(Scan.genre_id)
            .having(func.max(Scan.started_at) < threshold)
        )
        return q.first()

    def get_genre_with_no_scans(self):
        """
        Find one imported genre for which there were no successful scans at all

        :return: Genre
        """
        q = (
            self.session.query(Genre)
            .outerjoin(Scan,
                       and_(
                           Scan.genre_id == Genre.id,
                           Scan.status == 'success')
                       )
            .filter(Genre.import_enabled.is_(True))
            # LEFT OUTER JOIN with no matching scan leaves Scan.id NULL.
            .filter(Scan.id.is_(None))
        )
        return q.first()

    def clean_up_old_scans(self):
        """
        Delete all scans older than 7 days from now
        """
        threshold = datetime.datetime.utcnow() - datetime.timedelta(days=7)
        self.session.query(Scan).filter(Scan.started_at < threshold).delete()
|
devs1991/test_edx_docmode | refs/heads/master | venv/lib/python2.7/site-packages/shapely/speedups/__init__.py | 4 | import warnings
from shapely.geometry import linestring, polygon
from shapely import coords
# Import the optional C speedups; fall back gracefully when the extension
# was not built for this installation.
try:
    from shapely.speedups import _speedups
    available = True
    import_error_msg = None
except ImportError:
    import sys
    available = False
    # Preserve the ImportError's args so callers can report why the
    # C speedups are unavailable.
    import_error_msg = tuple(sys.exc_info()[1])

__all__ = ['available', 'enable', 'disable']

# Attribute name -> original pure-Python implementation, populated by
# enable() and consumed by disable(); empty while speedups are inactive.
_orig = {}
def enable():
    """Monkey-patch shapely to use the C speedups.

    No-op (with a RuntimeWarning) when the extension is unavailable, and a
    silent no-op when the speedups are already enabled.
    """
    if not available:
        warnings.warn("shapely.speedups not available", RuntimeWarning)
        return
    if _orig:
        # Already enabled; don't overwrite the saved originals.
        return
    # Save each original under the exact key disable() uses to restore it.
    _orig['CoordinateSequence.ctypes'] = coords.CoordinateSequence.ctypes
    coords.CoordinateSequence.ctypes = property(_speedups.coordseq_ctypes)
    _orig['geos_linestring_from_py'] = linestring.geos_linestring_from_py
    linestring.geos_linestring_from_py = _speedups.geos_linestring_from_py
    _orig['geos_linearring_from_py'] = polygon.geos_linearring_from_py
    polygon.geos_linearring_from_py = _speedups.geos_linearring_from_py
def disable():
    """Restore the pure-Python implementations saved by enable().

    Silent no-op when the speedups are not currently enabled.
    """
    if not _orig:
        return
    coords.CoordinateSequence.ctypes = _orig['CoordinateSequence.ctypes']
    linestring.geos_linestring_from_py = _orig['geos_linestring_from_py']
    polygon.geos_linearring_from_py = _orig['geos_linearring_from_py']
    _orig.clear()
huangbop/today | refs/heads/master | products/migrations/0003_auto_20150819_1654.py | 1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: gives Product.is_show a default of False and
    # converts Product.price and Product.rate to plain FloatFields.

    dependencies = [
        ('products', '0002_auto_20150819_0258'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='is_show',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='product',
            name='price',
            field=models.FloatField(),
        ),
        migrations.AlterField(
            model_name='product',
            name='rate',
            field=models.FloatField(),
        ),
    ]
|
ketjow4/NOV | refs/heads/master | Lib/encodings/cp1006.py | 593 | """ Python Character Mapping Codec cp1006 generated from 'MAPPINGS/VENDORS/MISC/CP1006.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless codec mapping bytes <-> text through the module-level
    # cp1006 charmap tables (gencodec.py boilerplate).

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is context-free, so no state is kept between calls.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Charmap decoding is context-free, so no state is kept between calls.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream support comes entirely from the mixin bases; nothing to add.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream support comes entirely from the mixin bases; nothing to add.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record the codecs machinery registers for 'cp1006'."""
    return codecs.CodecInfo(
        name='cp1006',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u06f0' # 0xA1 -> EXTENDED ARABIC-INDIC DIGIT ZERO
u'\u06f1' # 0xA2 -> EXTENDED ARABIC-INDIC DIGIT ONE
u'\u06f2' # 0xA3 -> EXTENDED ARABIC-INDIC DIGIT TWO
u'\u06f3' # 0xA4 -> EXTENDED ARABIC-INDIC DIGIT THREE
u'\u06f4' # 0xA5 -> EXTENDED ARABIC-INDIC DIGIT FOUR
u'\u06f5' # 0xA6 -> EXTENDED ARABIC-INDIC DIGIT FIVE
u'\u06f6' # 0xA7 -> EXTENDED ARABIC-INDIC DIGIT SIX
u'\u06f7' # 0xA8 -> EXTENDED ARABIC-INDIC DIGIT SEVEN
u'\u06f8' # 0xA9 -> EXTENDED ARABIC-INDIC DIGIT EIGHT
u'\u06f9' # 0xAA -> EXTENDED ARABIC-INDIC DIGIT NINE
u'\u060c' # 0xAB -> ARABIC COMMA
u'\u061b' # 0xAC -> ARABIC SEMICOLON
u'\xad' # 0xAD -> SOFT HYPHEN
u'\u061f' # 0xAE -> ARABIC QUESTION MARK
u'\ufe81' # 0xAF -> ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
u'\ufe8d' # 0xB0 -> ARABIC LETTER ALEF ISOLATED FORM
u'\ufe8e' # 0xB1 -> ARABIC LETTER ALEF FINAL FORM
u'\ufe8e' # 0xB2 -> ARABIC LETTER ALEF FINAL FORM
u'\ufe8f' # 0xB3 -> ARABIC LETTER BEH ISOLATED FORM
u'\ufe91' # 0xB4 -> ARABIC LETTER BEH INITIAL FORM
u'\ufb56' # 0xB5 -> ARABIC LETTER PEH ISOLATED FORM
u'\ufb58' # 0xB6 -> ARABIC LETTER PEH INITIAL FORM
u'\ufe93' # 0xB7 -> ARABIC LETTER TEH MARBUTA ISOLATED FORM
u'\ufe95' # 0xB8 -> ARABIC LETTER TEH ISOLATED FORM
u'\ufe97' # 0xB9 -> ARABIC LETTER TEH INITIAL FORM
u'\ufb66' # 0xBA -> ARABIC LETTER TTEH ISOLATED FORM
u'\ufb68' # 0xBB -> ARABIC LETTER TTEH INITIAL FORM
u'\ufe99' # 0xBC -> ARABIC LETTER THEH ISOLATED FORM
u'\ufe9b' # 0xBD -> ARABIC LETTER THEH INITIAL FORM
u'\ufe9d' # 0xBE -> ARABIC LETTER JEEM ISOLATED FORM
u'\ufe9f' # 0xBF -> ARABIC LETTER JEEM INITIAL FORM
u'\ufb7a' # 0xC0 -> ARABIC LETTER TCHEH ISOLATED FORM
u'\ufb7c' # 0xC1 -> ARABIC LETTER TCHEH INITIAL FORM
u'\ufea1' # 0xC2 -> ARABIC LETTER HAH ISOLATED FORM
u'\ufea3' # 0xC3 -> ARABIC LETTER HAH INITIAL FORM
u'\ufea5' # 0xC4 -> ARABIC LETTER KHAH ISOLATED FORM
u'\ufea7' # 0xC5 -> ARABIC LETTER KHAH INITIAL FORM
u'\ufea9' # 0xC6 -> ARABIC LETTER DAL ISOLATED FORM
u'\ufb84' # 0xC7 -> ARABIC LETTER DAHAL ISOLATED FORMN
u'\ufeab' # 0xC8 -> ARABIC LETTER THAL ISOLATED FORM
u'\ufead' # 0xC9 -> ARABIC LETTER REH ISOLATED FORM
u'\ufb8c' # 0xCA -> ARABIC LETTER RREH ISOLATED FORM
u'\ufeaf' # 0xCB -> ARABIC LETTER ZAIN ISOLATED FORM
u'\ufb8a' # 0xCC -> ARABIC LETTER JEH ISOLATED FORM
u'\ufeb1' # 0xCD -> ARABIC LETTER SEEN ISOLATED FORM
u'\ufeb3' # 0xCE -> ARABIC LETTER SEEN INITIAL FORM
u'\ufeb5' # 0xCF -> ARABIC LETTER SHEEN ISOLATED FORM
u'\ufeb7' # 0xD0 -> ARABIC LETTER SHEEN INITIAL FORM
u'\ufeb9' # 0xD1 -> ARABIC LETTER SAD ISOLATED FORM
u'\ufebb' # 0xD2 -> ARABIC LETTER SAD INITIAL FORM
u'\ufebd' # 0xD3 -> ARABIC LETTER DAD ISOLATED FORM
u'\ufebf' # 0xD4 -> ARABIC LETTER DAD INITIAL FORM
u'\ufec1' # 0xD5 -> ARABIC LETTER TAH ISOLATED FORM
u'\ufec5' # 0xD6 -> ARABIC LETTER ZAH ISOLATED FORM
u'\ufec9' # 0xD7 -> ARABIC LETTER AIN ISOLATED FORM
u'\ufeca' # 0xD8 -> ARABIC LETTER AIN FINAL FORM
u'\ufecb' # 0xD9 -> ARABIC LETTER AIN INITIAL FORM
u'\ufecc' # 0xDA -> ARABIC LETTER AIN MEDIAL FORM
u'\ufecd' # 0xDB -> ARABIC LETTER GHAIN ISOLATED FORM
u'\ufece' # 0xDC -> ARABIC LETTER GHAIN FINAL FORM
u'\ufecf' # 0xDD -> ARABIC LETTER GHAIN INITIAL FORM
u'\ufed0' # 0xDE -> ARABIC LETTER GHAIN MEDIAL FORM
u'\ufed1' # 0xDF -> ARABIC LETTER FEH ISOLATED FORM
u'\ufed3' # 0xE0 -> ARABIC LETTER FEH INITIAL FORM
u'\ufed5' # 0xE1 -> ARABIC LETTER QAF ISOLATED FORM
u'\ufed7' # 0xE2 -> ARABIC LETTER QAF INITIAL FORM
u'\ufed9' # 0xE3 -> ARABIC LETTER KAF ISOLATED FORM
u'\ufedb' # 0xE4 -> ARABIC LETTER KAF INITIAL FORM
u'\ufb92' # 0xE5 -> ARABIC LETTER GAF ISOLATED FORM
u'\ufb94' # 0xE6 -> ARABIC LETTER GAF INITIAL FORM
u'\ufedd' # 0xE7 -> ARABIC LETTER LAM ISOLATED FORM
u'\ufedf' # 0xE8 -> ARABIC LETTER LAM INITIAL FORM
u'\ufee0' # 0xE9 -> ARABIC LETTER LAM MEDIAL FORM
u'\ufee1' # 0xEA -> ARABIC LETTER MEEM ISOLATED FORM
u'\ufee3' # 0xEB -> ARABIC LETTER MEEM INITIAL FORM
u'\ufb9e' # 0xEC -> ARABIC LETTER NOON GHUNNA ISOLATED FORM
u'\ufee5' # 0xED -> ARABIC LETTER NOON ISOLATED FORM
u'\ufee7' # 0xEE -> ARABIC LETTER NOON INITIAL FORM
u'\ufe85' # 0xEF -> ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
u'\ufeed' # 0xF0 -> ARABIC LETTER WAW ISOLATED FORM
u'\ufba6' # 0xF1 -> ARABIC LETTER HEH GOAL ISOLATED FORM
u'\ufba8' # 0xF2 -> ARABIC LETTER HEH GOAL INITIAL FORM
u'\ufba9' # 0xF3 -> ARABIC LETTER HEH GOAL MEDIAL FORM
u'\ufbaa' # 0xF4 -> ARABIC LETTER HEH DOACHASHMEE ISOLATED FORM
u'\ufe80' # 0xF5 -> ARABIC LETTER HAMZA ISOLATED FORM
u'\ufe89' # 0xF6 -> ARABIC LETTER YEH WITH HAMZA ABOVE ISOLATED FORM
u'\ufe8a' # 0xF7 -> ARABIC LETTER YEH WITH HAMZA ABOVE FINAL FORM
u'\ufe8b' # 0xF8 -> ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
u'\ufef1' # 0xF9 -> ARABIC LETTER YEH ISOLATED FORM
u'\ufef2' # 0xFA -> ARABIC LETTER YEH FINAL FORM
u'\ufef3' # 0xFB -> ARABIC LETTER YEH INITIAL FORM
u'\ufbb0' # 0xFC -> ARABIC LETTER YEH BARREE WITH HAMZA ABOVE ISOLATED FORM
u'\ufbae' # 0xFD -> ARABIC LETTER YEH BARREE ISOLATED FORM
u'\ufe7c' # 0xFE -> ARABIC SHADDA ISOLATED FORM
u'\ufe7d' # 0xFF -> ARABIC SHADDA MEDIAL FORM
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
bradh/samba | refs/heads/master | python/samba/netcmd/vampire.py | 40 | # Vampire
#
# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import samba.getopt as options
from samba.net import Net
from samba.netcmd import (
Command,
Option,
SuperCommand,
CommandError
)
class cmd_vampire(Command):
    """Join and synchronise a remote AD domain to the local server."""
    # Deprecated shim: refuses to run without --force and points users at
    # `samba-tool domain join` instead.

    synopsis = "%prog [options] <domain>"

    # Standard samba-tool option groups (loadparm, credentials, version).
    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    takes_options = [
        Option("--target-dir", help="Target directory.", type=str),
        Option("--force", help="force run", action='store_true', default=False),
    ]

    takes_args = ["domain"]

    def run(self, domain, target_dir=None, credopts=None, sambaopts=None, versionopts=None, force=False):
        # Hard deprecation gate: abort unless the user explicitly forces it.
        if not force:
            raise CommandError("samba-tool vampire is deprecated, please use samba-tool domain join. Use --force to override")
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)
        net = Net(creds, lp, server=credopts.ipaddress)
        # vampire() replicates the remote domain and returns its name and SID.
        (domain_name, domain_sid) = net.vampire(domain=domain, target_dir=target_dir)
        self.outf.write("Vampired domain %s (%s)\n" % (domain_name, domain_sid))
|
pranavtbhat/EE219 | refs/heads/master | project3/part5.py | 1 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_validation import KFold
import part1
def load_dataset(path='ml-100k/u.data'):
    """Load a MovieLens-style ratings file into (raw data, rating matrix, mask).

    :param path: tab-separated ratings file with columns
        user_id, item_id, rating, timestamp and NO header row.
    :returns: tuple ``(data, R, W)`` where ``data`` is the raw ratings as an
        ndarray of shape (n_ratings, 4), ``R`` is the user x item rating
        matrix with missing entries filled with 0, and ``W`` is the binary
        mask (1 where a rating exists, else 0).
    """
    df = pd.read_csv(
        path,
        delimiter='\t',
        names=['user_id', 'item_id', 'rating', 'timestamp'],
        # u.data has no header row; the previous header=0 silently discarded
        # the first rating record by treating it as a header.
        header=None
    )

    R = df.pivot_table(
        index=['user_id'],
        columns=['item_id'],
        values='rating',
        fill_value=0
    ).values

    # W marks observed entries: 1 where a rating exists, 0 elsewhere.
    W = R.copy()
    W[W > 0] = 1

    # .values replaces the long-deprecated DataFrame.as_matrix().
    return df.values, R, W
def squared_error(R, W, U, V):
    """Return the weighted squared reconstruction error ||W * (R - U.V)||_F^2."""
    residual = R - np.dot(U, V)
    masked = np.multiply(W, residual)
    return np.square(masked).sum()
if __name__ == "__main__":
    # Python 2 script: evaluates top-L recommendation hit/miss/precision with
    # 10-fold cross-validation over weighted NMF predictions.
    data, R_mat, W_mat = load_dataset()
    L = 5
    n_folds = 10
    # NOTE(review): integer division on Python 2; would need // on Python 3.
    test_length = len(data) / n_folds
    top_movies_order = []
    kf = KFold(n=len(data), n_folds=10, shuffle=True)
    # Per-fold accumulators: hits, misses, totals, precisions.
    hcv = []
    mcv = []
    tcv = []
    pcv = []
    for train_index, test_index in kf:
        print "Fold: ", 10 - n_folds + 1
        test_data = data[test_index]
        # NOTE(review): these two assignments look swapped -- R_train gets the
        # binary weight matrix W_mat and W_train gets the rating matrix R_mat.
        # Confirm against part1.matrix_factorize's expected argument order.
        R_train = W_mat
        W_train = R_mat
        # NOTE(review): this zeroes entries of the shared matrix in place (no
        # copy), so held-out entries accumulate across folds.
        for j in range(test_length):
            W_train[test_data[j][0] - 1, test_data[j][1] - 1] = 0
        U,V = part1.matrix_factorize(R_train, W_train, 100, reg_param=0.01)
        R_predicted = 5 * np.dot(U,V)
        # Mark unknown (never-rated) entries so they can be skipped below.
        R_predicted[R_train == 0] = -1
        # Record each user's full preference ordering of movies.
        for i in range(max(data[:,0])):
            user_ratings = R_predicted[i]
            top_movies = user_ratings.argsort()[-max(data[:,1]):][::-1]
            top_movies_order.append(top_movies)
        threshold = 3
        hv=[]
        mv=[]
        tv=[]
        pv=[]
        for l in range(1,(L+1)):
            hit = 0
            miss = 0
            total = 0
            precision = 0
            for i in range(max(data[:,0])):
                # NOTE(review): these are predicted *rating values*, yet they
                # are used as column indices two lines below -- verify whether
                # top_movies_order was meant to be indexed here instead.
                rec_indices = R_predicted[i,0:l]
                for j in range(len(rec_indices)):
                    rating = R_predicted[i][rec_indices[j]]
                    if (rating < 0):
                        continue
                    if (rating > threshold):
                        hit = hit + 1
                        total = total + 1
                        precision += 1
                    else:
                        miss = miss + 1
                        total = total + 1
            pv.append(precision/float(total))
            hv.append(hit)
            tv.append(total)
            mv.append(miss)
        hcv.append(hv)
        mcv.append(mv)
        tcv.append(tv)
        pcv.append(pv)
        n_folds -= 1
    # Aggregate the per-fold counters into overall rates.
    precision = np.sum(pcv,axis=0)
    hits = np.sum(hcv,axis=0)
    miss = np.sum(mcv,axis=0)
    total = np.sum(tcv,axis=0)
    hits = hits / (total.astype(float))
    miss = miss / (total.astype(float))
    precision = precision / 10.0
    print "Hits ", hits
    print "Misses", miss
    print "Precision : ", precision
    # Plot hit rate, false-alarm rate and their ROC-style trade-off vs L.
    plt.figure(1)
    plt.ylabel('Hit rate')
    plt.xlabel('L')
    plt.title('Hit rate vs L')
    plt.scatter(range(1,(L+1)), hits, s=60, marker='o')
    plt.plot(range(1,(L+1)),hits)
    plt.savefig("plots/Hit vs L.png",format='png')
    plt.clf()
    plt.figure(1)
    plt.ylabel('False alarm')
    plt.xlabel('L')
    plt.title('False Alarm vs L')
    plt.scatter(range(1,(L+1)), miss, s=60, marker='o')
    plt.plot(range(1,(L+1)),miss)
    plt.savefig("plots/False Alarm vs L.png",format='png')
    plt.clf()
    plt.figure(1)
    plt.ylabel('Hit rate')
    plt.xlabel('False Alarm')
    plt.title('Hit rate vs False Alarm')
    plt.scatter(miss, hits, s=60, marker='o')
    plt.plot(miss,hits)
    plt.savefig("plots/Hit rate vs False Alarm.png",format='png')
    plt.clf()
|
ravibhure/ansible | refs/heads/devel | lib/ansible/modules/system/pam_limits.py | 30 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Sebastien Rohaut <sebastien.rohaut@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata: community-supported module in preview status.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pam_limits
version_added: "2.0"
author:
- "Sebastien Rohaut (@usawa)"
short_description: Modify Linux PAM limits
description:
- The C(pam_limits) module modify PAM limits, default in /etc/security/limits.conf.
For the full documentation, see man limits.conf(5).
options:
domain:
description:
- A username, @groupname, wildcard, uid/gid range.
required: true
limit_type:
description:
- Limit type, see C(man limits) for an explanation
required: true
choices: [ "hard", "soft", "-" ]
limit_item:
description:
- The limit to be set
required: true
choices:
- "core"
- "data"
- "fsize"
- "memlock"
- "nofile"
- "rss"
- "stack"
- "cpu"
- "nproc"
- "as"
- "maxlogins"
- "maxsyslogins"
- "priority"
- "locks"
- "sigpending"
- "msgqueue"
- "nice"
- "rtprio"
- "chroot"
value:
description:
- The value of the limit.
required: true
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
required: false
choices: [ "yes", "no" ]
default: "no"
use_min:
description:
- If set to C(yes), the minimal value will be used or conserved.
If the specified value is inferior to the value in the file, file content is replaced with the new value,
else content is not modified.
required: false
choices: [ "yes", "no" ]
default: "no"
use_max:
description:
- If set to C(yes), the maximal value will be used or conserved.
If the specified value is superior to the value in the file, file content is replaced with the new value,
else content is not modified.
required: false
choices: [ "yes", "no" ]
default: "no"
dest:
description:
- Modify the limits.conf path.
required: false
default: "/etc/security/limits.conf"
comment:
description:
- Comment associated with the limit.
required: false
default: ''
notes:
- If dest file doesn't exists, it is created.
'''
EXAMPLES = '''
# Add or modify nofile soft limit for the user joe
- pam_limits:
domain: joe
limit_type: soft
limit_item: nofile
value: 64000
# Add or modify fsize hard limit for the user smith. Keep or set the maximal value.
- pam_limits:
domain: smith
limit_type: hard
limit_item: fsize
value: 1000000
use_max: yes
# Add or modify memlock, both soft and hard, limit for the user james with a comment.
- pam_limits:
domain: james
limit_type: '-'
limit_item: memlock
value: unlimited
comment: unlimited memory lock for james
'''
import os
import os.path
import tempfile
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def main():
    """Ansible entry point: idempotently set one PAM limit in limits.conf."""
    # All limit items and limit types accepted by pam_limits(8).
    pam_items = ['core', 'data', 'fsize', 'memlock', 'nofile', 'rss', 'stack', 'cpu', 'nproc', 'as', 'maxlogins', 'maxsyslogins', 'priority', 'locks',
                 'sigpending', 'msgqueue', 'nice', 'rtprio', 'chroot']
    pam_types = ['soft', 'hard', '-']
    limits_conf = '/etc/security/limits.conf'
    module = AnsibleModule(
        # not checking because of daisy chain to file module
        argument_spec=dict(
            domain=dict(required=True, type='str'),
            limit_type=dict(required=True, type='str', choices=pam_types),
            limit_item=dict(required=True, type='str', choices=pam_items),
            value=dict(required=True, type='str'),
            use_max=dict(default=False, type='bool'),
            use_min=dict(default=False, type='bool'),
            backup=dict(default=False, type='bool'),
            dest=dict(default=limits_conf, type='str'),
            comment=dict(required=False, default='', type='str')
        )
    )
    domain = module.params['domain']
    limit_type = module.params['limit_type']
    limit_item = module.params['limit_item']
    value = module.params['value']
    use_max = module.params['use_max']
    use_min = module.params['use_min']
    backup = module.params['backup']
    limits_conf = module.params['dest']
    new_comment = module.params['comment']
    changed = False
    # Ensure the destination file exists and is writable; create it when only
    # the containing directory exists.
    if os.path.isfile(limits_conf):
        if not os.access(limits_conf, os.W_OK):
            module.fail_json(msg="%s is not writable. Use sudo" % limits_conf)
    else:
        limits_conf_dir = os.path.dirname(limits_conf)
        if os.path.isdir(limits_conf_dir) and os.access(limits_conf_dir, os.W_OK):
            open(limits_conf, 'a').close()
            changed = True
        else:
            module.fail_json(msg="directory %s is not writable (check presence, access rights, use sudo)" % limits_conf_dir)
    if use_max and use_min:
        module.fail_json(msg="Cannot use use_min and use_max at the same time.")
    if not (value in ['unlimited', 'infinity', '-1'] or value.isdigit()):
        module.fail_json(msg="Argument 'value' can be one of 'unlimited', 'infinity', '-1' or positive number. Refer to manual pages for more details.")
    # Backup
    if backup:
        backup_file = module.backup_local(limits_conf)
    space_pattern = re.compile(r'\s+')
    message = ''
    f = open(limits_conf, 'rb')
    # Tempfile
    nf = tempfile.NamedTemporaryFile(mode='w+')
    found = False
    new_value = value
    # Copy the file line by line into the tempfile, patching the matching
    # entry in place if one exists.
    for line in f:
        line = to_native(line, errors='surrogate_or_strict')
        # Full-line comments and blank lines pass through unchanged.
        if line.startswith('#'):
            nf.write(line)
            continue
        newline = re.sub(space_pattern, ' ', line).strip()
        if not newline:
            nf.write(line)
            continue
        # Remove comment in line
        newline = newline.split('#', 1)[0]
        try:
            old_comment = line.split('#', 1)[1]
        except:
            # NOTE(review): bare except; only IndexError is expected here.
            old_comment = ''
        newline = newline.rstrip()
        # NOTE(review): once new_comment is taken from one line's trailing
        # comment it persists for every later line and for the final append.
        if not new_comment:
            new_comment = old_comment
        # A valid entry has exactly: domain type item value.
        line_fields = newline.split(' ')
        if len(line_fields) != 4:
            nf.write(line)
            continue
        line_domain = line_fields[0]
        line_type = line_fields[1]
        line_item = line_fields[2]
        actual_value = line_fields[3]
        if not (actual_value in ['unlimited', 'infinity', '-1'] or actual_value.isdigit()):
            module.fail_json(msg="Invalid configuration of '%s'. Current value of %s is unsupported." % (limits_conf, line_item))
        # Found the line
        if line_domain == domain and line_type == limit_type and line_item == limit_item:
            found = True
            if value == actual_value:
                message = line
                nf.write(line)
                continue
            actual_value_unlimited = actual_value in ['unlimited', 'infinity', '-1']
            value_unlimited = value in ['unlimited', 'infinity', '-1']
            # use_max: conserve the larger of the two values ('unlimited' wins).
            if use_max:
                if value.isdigit() and actual_value.isdigit():
                    new_value = str(max(int(value), int(actual_value)))
                elif actual_value_unlimited:
                    new_value = actual_value
                else:
                    new_value = value
            # use_min: conserve the smaller of the two values.
            if use_min:
                if value.isdigit() and actual_value.isdigit():
                    new_value = str(min(int(value), int(actual_value)))
                elif value_unlimited:
                    new_value = actual_value
                else:
                    new_value = value
            # Change line only if value has changed
            if new_value != actual_value:
                changed = True
                if new_comment:
                    new_comment = "\t#" + new_comment
                new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n"
                message = new_limit
                nf.write(new_limit)
            else:
                message = line
                nf.write(line)
        else:
            nf.write(line)
    # No matching entry anywhere in the file: append a brand-new one.
    if not found:
        changed = True
        if new_comment:
            new_comment = "\t#" + new_comment
        new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n"
        message = new_limit
        nf.write(new_limit)
    f.close()
    nf.flush()
    # Copy tempfile to newfile
    module.atomic_move(nf.name, f.name)
    # The tempfile was moved away; closing may fail, which is harmless.
    try:
        nf.close()
    except:
        pass
    res_args = dict(
        changed=changed, msg=message
    )
    if backup:
        res_args['backup_file'] = backup_file
    module.exit_json(**res_args)
if __name__ == '__main__':
    main()
|
fire-rs-laas/fire-rs-saop | refs/heads/master | python/fire_rs/monitoring/__init__.py | 1 | # Copyright (c) 2018, CNRS-LAAS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Public API of this package: only the supersaop submodule is exported.
__all__ = ['supersaop']
|
hpsilva/profitpy | refs/heads/master | profit/models/executions.py | 18 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2007 Troy Melhase <troy@gci.net>
# Distributed under the terms of the GNU General Public License v2
from time import strftime, strptime
from PyQt4.QtCore import Qt, QModelIndex, QVariant, QString
from profit.lib import valueAlign
from profit.models import BasicItem, BasicItemModel
class ExecutionsModel(BasicItemModel):
    """ Item model that lists IB execution report messages for the view layer.
    """
    def __init__(self, session=None, parent=None):
        # The invisible root item supplies the horizontal header labels.
        BasicItemModel.__init__(self, RootExecutionsItem(), parent)
        self.session = session
        if session is not None:
            session.registerMeta(self)
    def data(self, index, role):
        """ Qt hook: return display/decoration/alignment data for a cell.
        """
        if not index.isValid():
            return QVariant()
        item = index.internalPointer()
        data = QVariant()
        column = index.column()
        if role == Qt.DecorationRole and column==2:
            # Column 2 is the underlying symbol; decorate it with its icon.
            sym = item.symbol()
            ico = self.symbolIcon(sym)
            data = QVariant(ico)
        elif role in (Qt.DisplayRole, Qt.ToolTipRole):
            data = QVariant(item[column])
        elif role in (Qt.TextAlignmentRole, ):
            # Right-align cells whose text parses as a number.
            try:
                float(item[column])
                data = QVariant(valueAlign)
            except (ValueError, ):
                pass
        return data
    def findItem(self, orderId):
        """ Returns the item for the given contract, or None.
        """
        items = self.invisibleRootItem.children
        try:
            return [i for i in items if i.message.execution.m_orderId==orderId][0]
        except (IndexError, ):
            pass
    def on_session_ExecDetails(self, message):
        """ Adds a status row if the contract is known to the model.
        """
        item = self.findItem(message.execution.m_orderId)
        if not item:
            # First execution for this order id: create a new top-level row.
            root = self.invisibleRootItem
            item = ExecutionsItem.fromMessage(message, root)
            root.append(item)
        # Append the execution as a child row and refresh the summary row.
        item.append(ExecutionsItem.fromMessage(message, item))
        item.update(message)
        self.reset()
# strptime/strftime patterns for IB's "YYYYMMDD hh:mm:ss" execution stamps.
dayFormatOut = '%a %d %b %Y'
dayFormatIn = '%Y%m%d'
def messageDate(message):
    """ Extracts and formats the date from an execution details message.
    @param message message instance
    @return formatted date as string
    """
    stamp = message.execution.m_time
    parsed = strptime(stamp.split()[0], dayFormatIn)
    return strftime(dayFormatOut, parsed)
def messageTime(message):
    """ Extracts the time from an execution details message.
    @param message message instance
    @return time as string
    """
    # The time of day is the second whitespace-separated field of m_time.
    return message.execution.m_time.split()[1]
class ExecutionsItem(BasicItem):
    """ Base class for items in the executions model.
    """
    # (header label, extractor) pairs; list order defines the column order.
    columnLookups = [
        ('Action', lambda m:m.execution.m_side),
        ('Quantity', lambda m:m.execution.m_shares),
        ('Underlying', lambda m:m.contract.m_symbol),
        ('Price', lambda m:m.execution.m_price),
        ('Currency', lambda m:m.contract.m_currency),
        ('Exchange', lambda m:m.execution.m_exchange),
        ('Date', messageDate),
        ('Time', messageTime),
        ('Id', lambda m:m.execution.m_permId),
        ('Order Reference', lambda m:m.execution.m_orderId),
    ]
    def __init__(self, data, parent=None, message=None):
        BasicItem.__init__(self, data, parent)
        # Original ExecDetails message this row was built from (may be None).
        self.message = message
    @classmethod
    def fromMessage(cls, message, parent):
        """ New instance from message values

        @param cls class object
        @param message ib.opt.message object
        @param parent parent of this item
        @return new instance of cls
        """
        values = []
        # Missing attributes render as empty cells instead of raising.
        for label, lookup in cls.columnLookups:
            try:
                value = lookup(message)
            except (AttributeError, ):
                value = ''
            values.append(value)
        return cls(values, parent, message)
    def symbol(self):
        """ Returns the symbol for this item or ''
        """
        try:
            return self.message.contract.m_symbol
        except (AttributeError, ):
            return ''
    def update(self, message):
        """ Update the item with values from a message.

        @param message ib.opt.message object
        @return None
        """
        for column, (label, lookup) in enumerate(self.columnLookups):
            try:
                self[column] = lookup(message)
            except (AttributeError, ):
                pass
class RootExecutionsItem(ExecutionsItem):
    """ Executions model item with automatic values (for horizontal headers).
    """
    def __init__(self):
        ExecutionsItem.__init__(self, self.horizontalLabels())
    def horizontalLabels(self):
        """ Generates list of horizontal header values.
        """
        # Python 2 map returns a list of QVariant-wrapped column labels.
        return map(QVariant, [label for label, lookup in self.columnLookups])
|
biddyweb/androguard | refs/heads/master | androxgmml.py | 38 | #!/usr/bin/env python
# This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
from xml.sax.saxutils import escape
import sys, os
from optparse import OptionParser
from androguard.core.androgen import Androguard
from androguard.core.analysis import analysis
# Command-line option table consumed by OptionParser in the __main__ block:
# 'name' holds the flag strings, the remaining keys go to add_option().
option_0 = { 'name' : ('-i', '--input'), 'help' : 'filename input', 'nargs' : 1 }
option_1 = { 'name' : ('-o', '--output'), 'help' : 'filename output of the xgmml', 'nargs' : 1 }
option_2 = { 'name' : ('-f', '--functions'), 'help' : 'include function calls', 'action' : 'count' }
option_3 = { 'name' : ('-e', '--externals'), 'help' : 'include extern function calls', 'action' : 'count' }
option_4 = { 'name' : ('-v', '--version'), 'help' : 'version of the API', 'action' : 'count' }
options = [option_0, option_1, option_2, option_3, option_4]
# Global export state, shared by the export_xgmml_* functions below.
# METHODS_ID: "class+name+descriptor" of a method entry block -> node id.
METHODS_ID = {}
# Same mapping for methods external to the analysed application.
EXTERNAL_METHODS_ID = {}
# "class+basic-block-name+descriptor" -> XGMML node id.
NODES_ID = {}
# Edge label -> XGMML edge id; also used to de-duplicate edges.
EDGES_ID = {}
# Node styling: 'classic' for internal basic blocks, 'extern' for methods
# outside the application.
NODE_GRAPHIC = {
    "classic" : {
        "h" : 20.0,
        "w" : 20.0,
        "type" : "ELLIPSE",
        "width" : 1,
        "fill" : "#e1e1e1",
        "outline" : "#000000",
    },
    "extern" : {
        "h" : 20.0,
        "w" : 20.0,
        "type" : "ELLIPSE",
        "width" : 1,
        "fill" : "#ff8c00",
        "outline" : "#000000",
    }
}
# Edge styling per graph kind: control flow (cfg), function calls (fcg),
# external function calls (efcg).
EDGE_GRAPHIC = {
    "cfg" : {
        "width" : 2,
        "fill" : "#0000e1",
    },
    "fcg" : {
        "width" : 3,
        "fill" : "#9acd32",
    },
    "efcg" : {
        "width" : 3,
        "fill" : "#808000",
    }
}
def get_node_name(method, bb) :
    """Build the XGMML node label: class name, escaped basic-block name and
    escaped method descriptor, joined with dashes."""
    class_name = method.get_class_name()
    block_name = escape(bb.name)
    descriptor = escape(method.get_descriptor())
    return "-".join([class_name, block_name, descriptor])
def export_xgmml_cfg(g, fd) :
    """Write one method's control-flow graph (nodes + cfg edges) to fd and
    register its basic blocks in the global NODES_ID/METHODS_ID maps."""
    method = g.get_method()
    name = method.get_name()
    class_name = method.get_class_name()
    descriptor = method.get_descriptor()
    # NOTE(review): size_ins is only bound when the method has code, yet it is
    # used unconditionally below -- a code-less method with basic blocks would
    # raise NameError here; confirm that cannot happen upstream.
    if method.get_code() != None :
        size_ins = method.get_code().get_length()
    # One XGMML node per basic block.
    for i in g.basic_blocks.get() :
        fd.write("<node id=\"%d\" label=\"%s\">\n" % (len(NODES_ID), get_node_name(method, i)))
        fd.write("<att type=\"string\" name=\"classname\" value=\"%s\"/>\n" % (escape(class_name)))
        fd.write("<att type=\"string\" name=\"name\" value=\"%s\"/>\n" % (escape(name)))
        fd.write("<att type=\"string\" name=\"descriptor\" value=\"%s\"/>\n" % (escape(descriptor)))
        fd.write("<att type=\"integer\" name=\"offset\" value=\"%d\"/>\n" % (i.start))
        cl = NODE_GRAPHIC["classic"]
        width = cl["width"]
        fill = cl["fill"]
        # No child ...
        if i.childs == [] :
            fill = "#87ceeb"
        # The entry block (offset 0) is highlighted and doubles as the
        # method's anchor node for the function-call graph.
        if i.start == 0 :
            fd.write("<att type=\"string\" name=\"node.label\" value=\"%s\\n%s\"/>\n" % (escape(name), i.get_instructions()[-1].get_name()))
            width = 3
            fill = "#ff0000"
            METHODS_ID[ class_name + name + descriptor ] = len(NODES_ID)
        else :
            fd.write("<att type=\"string\" name=\"node.label\" value=\"0x%x\\n%s\"/>\n" % (i.start, i.get_instructions()[-1].get_name()))
        # Node size grows with the block's share of the method's code size.
        size = 0
        for tmp_ins in i.get_instructions() :
            size += (tmp_ins.get_length() / 2)
        h = ((size / float(size_ins)) * 20) + cl["h"]
        fd.write("<graphics type=\"%s\" h=\"%.1f\" w=\"%.1f\" width=\"%d\" fill=\"%s\" outline=\"%s\">\n" % ( cl["type"], h, h, width, fill, cl["outline"]))
        fd.write("</graphics>\n")
        fd.write("</node>\n")
        NODES_ID[ class_name + i.name + descriptor ] = len(NODES_ID)
    # Second pass: one cfg edge per (block, successor) pair.
    for i in g.basic_blocks.get() :
        for j in i.childs :
            if j[-1] != None :
                label = "%s (cfg) %s" % (get_node_name(method, i), get_node_name(method, j[-1]))
                id = len(NODES_ID) + len(EDGES_ID)
                fd.write( "<edge id=\"%d\" label=\"%s\" source=\"%d\" target=\"%d\">\n" % (id, label, NODES_ID[ class_name + i.name + descriptor ], NODES_ID[ class_name + j[-1].name + descriptor ]) )
                cl = EDGE_GRAPHIC["cfg"]
                fd.write("<graphics width=\"%d\" fill=\"%s\">\n" % (cl["width"], cl["fill"]) )
                fd.write("</graphics>\n")
                fd.write("</edge>\n")
                EDGES_ID[ label ] = id
def export_xgmml_fcg(a, x, fd) :
    """Write function-call edges between methods that are both internal to the
    analysed application (both sides present in METHODS_ID/NODES_ID)."""
    classes = a.get_classes_names()
    # Methods flow graph
    for m, _ in x.get_tainted_packages().get_packages() :
        paths = m.get_methods()
        for j in paths :
            # Only caller AND callee classes inside the application.
            if j.get_method().get_class_name() in classes and m.get_info() in classes :
                if j.get_access_flag() == analysis.TAINTED_PACKAGE_CALL :
                    t = m.get_info() + j.get_name() + j.get_descriptor()
                    # Skip callees whose entry node was never emitted.
                    if t not in METHODS_ID :
                        continue
                    # Source: the basic block containing the call site.
                    bb1 = x.get_method( j.get_method() ).basic_blocks.get_basic_block( j.get_idx() )
                    node1 = get_node_name(j.get_method(), bb1) + "@0x%x" % j.get_idx()
                    node2 = "%s-%s-%s" % (m.get_info(), escape(j.get_name()), escape(j.get_descriptor()))
                    label = "%s (fcg) %s" % (node1, node2)
                    # De-duplicate edges by label.
                    if label in EDGES_ID :
                        continue
                    id = len(NODES_ID) + len(EDGES_ID)
                    fd.write( "<edge id=\"%d\" label=\"%s\" source=\"%d\" target=\"%d\">\n" % (id,
                                                                                              label,
                                                                                              NODES_ID[ j.get_method().get_class_name() + bb1.name + j.get_method().get_descriptor() ],
                                                                                              METHODS_ID[ m.get_info() + j.get_name() + j.get_descriptor() ]) )
                    cl = EDGE_GRAPHIC["fcg"]
                    fd.write("<graphics width=\"%d\" fill=\"%s\">\n" % (cl["width"], cl["fill"]) )
                    fd.write("</graphics>\n")
                    fd.write("</edge>\n")
                    EDGES_ID[ label ] = id
def export_xgmml_efcg(a, x, fd) :
    """Write call edges to methods *outside* the analysed application,
    creating an 'extern' node on first sight of each external method."""
    classes = a.get_classes_names()
    # Methods flow graph
    for m, _ in x.get_tainted_packages().get_packages() :
        paths = m.get_methods()
        for j in paths :
            # Caller inside the application, callee class outside it.
            if j.get_method().get_class_name() in classes and m.get_info() not in classes :
                if j.get_access_flag() == analysis.TAINTED_PACKAGE_CALL :
                    t = m.get_info() + j.get_name() + j.get_descriptor()
                    # Emit the external method's node exactly once.
                    if t not in EXTERNAL_METHODS_ID :
                        fd.write("<node id=\"%d\" label=\"%s\">\n" % (len(NODES_ID), escape(t)))
                        fd.write("<att type=\"string\" name=\"classname\" value=\"%s\"/>\n" % (escape(m.get_info())))
                        fd.write("<att type=\"string\" name=\"name\" value=\"%s\"/>\n" % (escape(j.get_name())))
                        fd.write("<att type=\"string\" name=\"descriptor\" value=\"%s\"/>\n" % (escape(j.get_descriptor())))
                        cl = NODE_GRAPHIC["extern"]
                        fd.write("<att type=\"string\" name=\"node.label\" value=\"%s\\n%s\\n%s\"/>\n" % (escape(m.get_info()), escape(j.get_name()), escape(j.get_descriptor())))
                        fd.write("<graphics type=\"%s\" h=\"%.1f\" w=\"%.1f\" width=\"%d\" fill=\"%s\" outline=\"%s\">\n" % ( cl["type"], cl["h"], cl["h"], cl["width"], cl["fill"], cl["outline"]))
                        fd.write("</graphics>\n")
                        fd.write("</node>\n")
                        NODES_ID[ t ] = len(NODES_ID)
                        EXTERNAL_METHODS_ID[ t ] = NODES_ID[ t ]
                    # Source: the basic block containing the call site.
                    bb1 = x.get_method( j.get_method() ).basic_blocks.get_basic_block( j.get_idx() )
                    node1 = get_node_name(j.get_method(), bb1) + "@0x%x" % j.get_idx()
                    node2 = "%s-%s-%s" % (m.get_info(), escape(j.get_name()), escape(j.get_descriptor()))
                    label = "%s (efcg) %s" % (node1, node2)
                    # De-duplicate edges by label.
                    if label in EDGES_ID :
                        continue
                    id = len(NODES_ID) + len(EDGES_ID)
                    fd.write( "<edge id=\"%d\" label=\"%s\" source=\"%d\" target=\"%d\">\n" % (id,
                                                                                              label,
                                                                                              NODES_ID[ j.get_method().get_class_name() + bb1.name + j.get_method().get_descriptor() ],
                                                                                              EXTERNAL_METHODS_ID[ m.get_info() + j.get_name() + j.get_descriptor() ]) )
                    cl = EDGE_GRAPHIC["efcg"]
                    fd.write("<graphics width=\"%d\" fill=\"%s\">\n" % (cl["width"], cl["fill"]) )
                    fd.write("</graphics>\n")
                    fd.write("</edge>\n")
                    EDGES_ID[ label ] = id
def export_apps_to_xgmml( input, output, fcg, efcg ) :
    """Analyse the file at *input* and write a single XGMML document to
    *output*; fcg/efcg toggle function-call and external-call edges."""
    a = Androguard( [ input ] )
    fd = open(output, "w")
    fd.write("<?xml version='1.0'?>\n")
    fd.write("<graph label=\"Androguard XGMML %s\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:ns1=\"http://www.w3.org/1999/xlink\" xmlns:dc=\"http://purl.org/dc/elements/1.1/\" xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\" xmlns=\"http://www.cs.rpi.edu/XGMML\" directed=\"1\">\n" % (os.path.basename(input)))
    for vm in a.get_vms() :
        x = analysis.VMAnalysis( vm )
        # CFG
        for method in vm.get_methods() :
            g = x.get_method( method )
            export_xgmml_cfg(g, fd)
        if fcg :
            export_xgmml_fcg(vm, x, fd)
        if efcg :
            export_xgmml_efcg(vm, x, fd)
    fd.write("</graph>")
    fd.close()
def main(options, arguments) :
    """Entry point: export the input app to XGMML when both -i and -o were given."""
    # 'is not None' is the idiomatic identity test for None; '!=' would invoke
    # any custom __eq__ on the operand.
    if options.input is not None and options.output is not None :
        export_apps_to_xgmml( options.input, options.output, options.functions, options.externals )
if __name__ == "__main__" :
    parser = OptionParser()
    # Register each entry of the option table: 'name' holds the flag strings,
    # the remaining keys are passed straight through to add_option().
    for option in options :
        param = option['name']
        del option['name']
        parser.add_option(*param, **option)
    options, arguments = parser.parse_args()
    # Leave only positional arguments in sys.argv for downstream code.
    sys.argv[:] = arguments
    main(options, arguments)
|
Zhaominxin/MyProject | refs/heads/master | 鱼CPython精英挑战赛S2E1.py | 1 | #Python精英挑战赛第二季第1期:本期题目:字符串格式化输出“两端对齐”
#给定一组字符串,字符串均由标准英语单词、空格及常用英语标点符号组成,
#要求编写函数,根据给定宽度,按照“两端对齐”的格式化输出。
#“两端对齐”格式要求:
#1. 每行必须以字母或标点符号开头,但表示结束的标点符号,如逗号、句号、问好、感叹号等不允许出现在每行开头。每行必须以字母或标点符号结尾,空格不允许出现在每行的开头和结尾。
#2. 若某一行只容得下一个单词,则该行按照左对齐格式输出,行尾同样不需要用空格填充。
#3. 参数width肯定会大于字符串中最长单词的长度,不需要考虑单词长度超过width的情况。
#4. 用空格填充时,应当遵循“空格尽可能均匀分布到单词与单词之间”的原则。
txt = "Hot work is one of the typical high risk work in work shop, if out of control, it will cause tragedy. We manage our hot work basing on the FM hot work permit system. Then, to make sure the fire risk are eliminated before we start hot work, what should we do? Please refer to this week's topic, hot work permit."
def adjust_txt(txt, width):
    """Re-flow *txt* into lines of at most *width* columns and pad each line's
    inter-word gaps with spaces so the text is fully justified."""
    words_list = txt.split()
    # Interleave every word with a single separating space token.
    word_space = []
    for i in words_list:
        word_space.append(i)
        word_space.append(' ')
    word_space.pop()
    # Insert newline markers so no line exceeds width, then drop the single
    # space token left adjacent to each newline.
    lines_list = []
    count = 0
    for i in range(len(word_space)):
        if (count + len(word_space[i])) <= width:
            lines_list.append(word_space[i])
            count += len(word_space[i])
        else:
            lines_list.append('\n')
            count = 0
            lines_list.append(word_space[i])
            count+=len(word_space[i])
    # NOTE(review): popping from lines_list while indexing over its original
    # length shifts later indices; the IndexError guard masks that rather than
    # fixing it -- verify the output near the end of long inputs.
    try:
        for i in range(len(lines_list)):
            if lines_list[i] == '\n':
                if lines_list[i-1] == ' ':
                    lines_list.pop(i-1) # a newline has exactly one adjacent space, on its left or right
                else:
                    lines_list.pop(i+1)
    except IndexError:
        None
    output = '' # final output string
    # Pad the gaps of every line evenly with spaces.
    pre_output=''
    for i in lines_list:
        pre_output += i
    fin_lines = pre_output.split('\n')
    for each in fin_lines:
        #print('%d characters total' % len(each))
        #print('%d gaps' % (len(each.split())-1))
        #print('%d spaces missing' % (width-len(each)))
        split_string = each.split() # split the line into words
        space_dict = {i:0 for i in range(len(split_string)-1)} # gap index -> extra spaces
        # Work out how many extra spaces each gap should receive.
        new_string = ''
        count_space = width - len(each)
        if count_space >= len(split_string):# more missing spaces than gaps: spread in rounds
            while count_space >= len(split_string):
                for i in range(len(space_dict)):
                    space_dict[i] +=1
                count_space -= len(space_dict)
            for i in range(count_space):
                space_dict[i] += 1
        else: # fewer missing spaces than gaps: pad the leftmost gaps
            for i in range(count_space):
                space_dict[i] += 1
        # Rebuild the line, widening each gap according to space_dict.
        new_each =[]
        for i in range(len(space_dict)):
            new_each.append(split_string[i])
            new_each.append(' '*(space_dict[i]+1))
        new_each.append(split_string[-1])
        for each in new_each:
            new_string += each
        output += (new_string + '\n')
    return output
# Demo: justify the sample text to 30 columns and print the result.
output = adjust_txt(txt, 30)
print(output)
|
daq-tools/kotori | refs/heads/amo/packaging | kotori/vendor/hydro2motion/database/sql.py | 1 | # -*- coding: utf-8 -*-
# (c) 2015 Andreas Motl, Elmyra UG <andreas.motl@elmyra.de>
from sqlalchemy.engine import create_engine
from sqlalchemy.sql.ddl import CreateTable
from sqlalchemy.sql.schema import MetaData, Column, Table
from sqlalchemy.sql.sqltypes import Integer, String, Numeric
from alchimia.strategy import TWISTED_STRATEGY
from autobahn.twisted.wamp import ApplicationRunner, ApplicationSession
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
class SqlDatabaseService(ApplicationSession):
    """An application component for logging telemetry data to sql databases"""
    def __init__(self, config):
        ApplicationSession.__init__(self, config)
        self.count = 0
        # Set lazily by startDatabase(); receive() no-ops while it is None.
        self.engine = None
        # Schema of the telemetry table: one column per wire-format field.
        metadata = MetaData()
        self.telemetry = Table("telemetry", metadata,
            Column("id", Integer(), primary_key=True),
            Column("MSG_ID", Integer()),
            Column("V_FC", Integer()),
            Column("V_CAP", Integer()),
            Column("A_ENG", Integer()),
            Column("A_CAP", Integer()),
            Column("T_O2_In", Integer()),
            Column("T_O2_Out", Integer()),
            Column("T_FC_H2O_Out", Integer()),
            Column("Water_In", Integer()),
            Column("Water_Out", Integer()),
            Column("Master_SW", Integer()),
            Column("CAP_Down_SW", Integer()),
            Column("Drive_SW", Integer()),
            Column("FC_state", Integer()),
            Column("Mosfet_state", Integer()),
            Column("Safety_state", Integer()),
            Column("Air_Pump_load", Numeric()),
            Column("Mosfet_load", Integer()),
            Column("Water_Pump_load", Integer()),
            Column("Fan_load", Integer()),
            Column("Acc_X", Integer()),
            Column("Acc_Y", Integer()),
            Column("Acc_Z", Integer()),
            Column("AUX", Numeric()),
            Column("GPS_X", Integer()),
            Column("GPS_Y", Integer()),
            Column("GPS_Z", Integer()),
            Column("GPS_Speed", Integer()),
            Column("V_Safety", Integer()),
            Column("H2_Level", Integer()),
            Column("O2_calc", Numeric()),
            Column("lat", Numeric()),
            Column("lng", Numeric()),
        )
        # metadata = MetaData()
        # self.telemetry = Table("telemetry", metadata,
        #     Column("id", Integer(), primary_key=True),
        #     Column("mma_x", Integer()),
        #     Column("mma_y", Integer()),
        #     Column("temp", Numeric()),
        #     Column("lat", Numeric()),
        #     Column("lng", Numeric()),
        # )
    #@inlineCallbacks
    def onJoin(self, details):
        # Called by autobahn once the WAMP session is established.
        print("Realm joined (WAMP session started).")
        # subscribe to telemetry data channel
        self.subscribe(self.receive, u'de.elmyra.kotori.telemetry.data')
        self.startDatabase()
        #self.leave()
    #@inlineCallbacks
    def startDatabase(self):
        # NOTE(review): the 'yield' below makes this function a generator, and
        # with @inlineCallbacks commented out, calling it from onJoin only
        # creates a generator that is never iterated -- so neither
        # create_engine's table creation nor the engine assignment's effects
        # run to completion as written. Restoring the decorator looks intended.
        self.engine = create_engine(
            # sqlite in-memory
            #"sqlite://", reactor=reactor, strategy=TWISTED_STRATEGY
            # sqlite on filesystem
            "sqlite:////tmp/kotori.sqlite", reactor=reactor, strategy=TWISTED_STRATEGY
            # mysql... todo
        )
        # Create the table
        yield self.engine.execute(CreateTable(self.telemetry))
        #yield self.engine
    def onLeave(self, details):
        print("Realm left (WAMP session ended).")
        ApplicationSession.onLeave(self, details)
    def onDisconnect(self):
        print("Transport disconnected.")
        #reactor.stop()
    #@inlineCallbacks
    def receive(self, data):
        # Parse one semicolon-separated telemetry frame and insert it.
        # NOTE(review): like startDatabase, the 'yield' at the bottom makes
        # this a generator; without @inlineCallbacks the body never executes
        # when invoked as a subscription callback.
        #print "RECEIVE:", data
        # decode wire data
        payload = data.split(';')
        MSG_ID = int(payload[0])
        V_FC = int(payload[1])
        V_CAP = int(payload[2])
        A_ENG = int(payload[3])
        A_CAP = int(payload[4])
        T_O2_In = int(payload[5])
        T_O2_Out = int(payload[6])
        T_FC_H2O_Out = int(payload[7])
        Water_In = int(payload[8])
        Water_Out = int(payload[9])
        Master_SW = int(payload[10])
        CAP_Down_SW = int(payload[11])
        Drive_SW = int(payload[12])
        FC_state = int(payload[13])
        Mosfet_state = int(payload[14])
        Safety_state = int(payload[15])
        Air_Pump_load = float(payload[16])
        Mosfet_load = int(payload[17])
        Water_Pump_load = int(payload[18])
        Fan_load = int(payload[19])
        Acc_X = int(payload[20])
        Acc_Y = int(payload[21])
        Acc_Z = int(payload[22])
        AUX = float(payload[23])
        GPS_X = int(payload[24])
        GPS_Y = int(payload[25])
        GPS_Z = int(payload[26])
        GPS_Speed = int(payload[27])
        V_Safety = int(payload[28])
        H2_Level = int(payload[29])
        O2_calc = float(payload[30])
        lat = float(payload[31])
        lng = float(payload[32])
        # mma_x = int(payload[0])
        # mma_y = int(payload[1])
        # temp = float(payload[2])
        # try:
        #     lat = float(payload[3])
        #     lng = float(payload[4])
        # except:
        #     lat = 0
        #     lng = 0
        # store data to database
        # NOTE(review): O2_calc is parsed above and has a table column but is
        # missing from the insert below -- confirm whether that is deliberate.
        if self.engine:
            yield self.engine.execute(self.telemetry.insert().values(MSG_ID = MSG_ID, V_FC = V_FC, V_CAP = V_CAP, A_ENG = A_ENG, A_CAP = A_CAP, T_O2_In = T_O2_In, T_O2_Out = T_O2_Out, T_FC_H2O_Out = T_FC_H2O_Out, Water_In = Water_In, Water_Out = Water_Out, Master_SW = Master_SW, CAP_Down_SW = CAP_Down_SW, Drive_SW = Drive_SW, FC_state = FC_state, Mosfet_state = Mosfet_state, Safety_state = Safety_state, Air_Pump_load = Air_Pump_load, Mosfet_load = Mosfet_load, Water_Pump_load = Water_Pump_load, Fan_load = Fan_load, Acc_X = Acc_X, Acc_Y = Acc_Y, Acc_Z = Acc_Z, AUX = AUX, GPS_X = GPS_X, GPS_Y = GPS_Y, GPS_Z = GPS_Z, GPS_Speed = GPS_Speed, V_Safety = V_Safety, H2_Level = H2_Level, lat = lat, lng = lng))
def boot_sql_database(websocket_uri, debug=False, trace=False):
    """Attach the SqlDatabaseService to the WAMP broker at *websocket_uri*.

    ``start_reactor=False`` because the caller owns the reactor lifecycle;
    *trace* toggles transport-level debugging, *debug* the WAMP/app layers.
    """
    print('INFO: Starting sql database service, connecting to broker', websocket_uri)
    app_runner = ApplicationRunner(
        websocket_uri, u'kotori-realm',
        debug=trace, debug_wamp=debug, debug_app=debug)
    app_runner.run(SqlDatabaseService, start_reactor=False)
|
esthermm/odoo-addons | refs/heads/8.0 | project_task_ending/__openerp__.py | 3 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
    # Odoo/OpenERP module manifest: a single dict literal read by the
    # server at module-discovery time (this file is __openerp__.py).
    "name": "Project Task Ending",
    "summary": "",  # NOTE(review): summary left empty — consider filling it in
    "version": "1.0",
    "category": "Project Management",
    "license": "AGPL-3",
    # Adjacent string literals concatenate into one author field.
    "author": "OdooMRP team, "
    "AvanzOSC, "
    "Serv. Tecnol. Avanzados - Pedro M. Baeza",
    "website": "http://www.odoomrp.com",
    "contributors": [
        "Oihane Crucelaegui <oihanecrucelaegi@avanzosc.es>",
        "Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>",
        "Ana Juaristi <anajuaristi@avanzosc.es>",
    ],
    # Modules that must be installed before this one.
    "depends": [
        "project",
    ],
    # Data/view files loaded on install or update.
    "data": [
        "views/project_task_view.xml",
    ],
    "installable": True,
}
|
agogear/python-1 | refs/heads/master | Matafight/0006/importantdiary.py | 76 | #_*_ encoding: utf-8 _*_
import re
class countWord:
    """Count word frequencies in a text file and expose the most common word.

    Words are runs of ``\\w+`` characters. Usage: call :meth:`count` with a
    filename, then :meth:`getWord` for the most frequent word of that file.
    Kept Python 2/3 compatible to match the surrounding script.
    """

    def __init__(self):
        # word -> occurrence count for the most recently processed file
        self.dic = {}
        # most frequent word from the last count() call ("" if none yet)
        self.word = ""

    def count(self, filename):
        """Tally the words of *filename* and record the most frequent one.

        An empty (or word-free) file sets the result to "" instead of
        crashing with IndexError as the original sort-based code did.
        """
        self.dic = {}
        # 'with' guarantees the handle is closed (the original opened via
        # file() and never closed it); iterating the handle streams lines
        # instead of materializing readlines().
        with open(filename, 'r') as handle:
            for line in handle:
                for token in re.findall(r"\w+", line):
                    # dict.get is an O(1) lookup; the original scanned
                    # self.dic.keys() per token (O(n) each, O(n^2) total).
                    self.dic[token] = self.dic.get(token, 0) + 1
        if self.dic:
            # max() by count replaces sorting the whole table just to take
            # the first entry; ties break by iteration order, as before.
            self.word = max(self.dic.items(), key=lambda kv: kv[1])[0]
        else:
            self.word = ""

    def getWord(self):
        """Return the most frequent word found by the last count() call."""
        return self.word
if __name__=="__main__":
diarycount=countWord();
order=1;
importantlist=[];
for order in range(1,4):
fname="diary"+str(order)+".txt";
diarycount.count(fname);
importantlist.append(diarycount.getWord());
order+=1;
for item in importantlist:
print str(item)+"\t";
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.