| repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, ⌀) |
|---|---|---|---|---|
lsinfo/odoo
|
refs/heads/8.0
|
addons/multi_company/__init__.py
|
886
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
smarani/InfraShareMobile
|
refs/heads/master
|
InfraShare Online/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/adapter.py
|
469
|
import functools
from pip._vendor.requests.adapters import HTTPAdapter
from .controller import CacheController
from .cache import DictCache
from .filewrapper import CallbackFileWrapper
class CacheControlAdapter(HTTPAdapter):
invalidating_methods = set(['PUT', 'DELETE'])
def __init__(self, cache=None,
cache_etags=True,
controller_class=None,
serializer=None,
heuristic=None,
*args, **kw):
super(CacheControlAdapter, self).__init__(*args, **kw)
self.cache = cache or DictCache()
self.heuristic = heuristic
controller_factory = controller_class or CacheController
self.controller = controller_factory(
self.cache,
cache_etags=cache_etags,
serializer=serializer,
)
def send(self, request, **kw):
"""
Send a request. Use the request information to see if it
exists in the cache and cache the response if we need to and can.
"""
if request.method == 'GET':
cached_response = self.controller.cached_request(request)
if cached_response:
return self.build_response(request, cached_response,
from_cache=True)
# check for etags and add headers if appropriate
request.headers.update(
self.controller.conditional_headers(request)
)
resp = super(CacheControlAdapter, self).send(request, **kw)
return resp
def build_response(self, request, response, from_cache=False):
"""
Build a response by making a request or using the cache.
This will end up calling send and returning a potentially
cached response
"""
if not from_cache and request.method == 'GET':
# apply any expiration heuristics
if response.status == 304:
# We must have sent an ETag request. This could mean
# that we've been expired already or that we simply
# have an etag. In either case, we want to try and
# update the cache.
cached_response = self.controller.update_cached_response(
request, response
)
if cached_response is not response:
from_cache = True
# We are done with the server response, read a
# possible response body (compliant servers will
# not return one, but we cannot be 100% sure) and
# release the connection back to the pool.
response.read(decode_content=False)
response.release_conn()
response = cached_response
# We always cache the 301 responses
elif response.status == 301:
self.controller.cache_response(request, response)
else:
# Check for any heuristics that might update headers
# before trying to cache.
if self.heuristic:
response = self.heuristic.apply(response)
# Wrap the response file with a wrapper that will cache the
# response when the stream has been consumed.
response._fp = CallbackFileWrapper(
response._fp,
functools.partial(
self.controller.cache_response,
request,
response,
)
)
resp = super(CacheControlAdapter, self).build_response(
request, response
)
# See if we should invalidate the cache.
if request.method in self.invalidating_methods and resp.ok:
cache_url = self.controller.cache_url(request.url)
self.cache.delete(cache_url)
# Give the response a from_cache attr to let people use it
resp.from_cache = from_cache
return resp
def close(self):
self.cache.close()
super(CacheControlAdapter, self).close()
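# Hypothetical usage sketch (not part of the original vendored module): the
# adapter is meant to be mounted on a requests Session so that GET requests
# flow through the caching logic in send()/build_response() above. `requests`
# below is assumed to be the same copy this adapter subclasses
# (pip._vendor.requests).
def _example_usage():
    from pip._vendor import requests
    sess = requests.Session()
    adapter = CacheControlAdapter()  # defaults to an in-memory DictCache
    sess.mount('http://', adapter)
    sess.mount('https://', adapter)
    resp = sess.get('http://example.com/')
    return resp.from_cache  # attribute set by build_response()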
|
a-veitch/grpc
|
refs/heads/master
|
tools/run_tests/sanity/check_sources_and_headers.py
|
3
|
#!/usr/bin/env python2.7
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import os
import re
import sys
root = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../../..'))
with open(os.path.join(root, 'tools', 'run_tests', 'sources_and_headers.json')) as f:
js = json.loads(f.read())
re_inc1 = re.compile(r'^#\s*include\s*"([^"]*)"')
assert re_inc1.match('#include "foo"').group(1) == 'foo'
re_inc2 = re.compile(r'^#\s*include\s*<((grpc|grpc\+\+)/[^"]*)>')
assert re_inc2.match('#include <grpc++/foo>').group(1) == 'grpc++/foo'
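# Added illustration (not in the original script): plain grpc headers are
# captured the same way and are later looked up with an 'include/' prefix
# (see the re_inc2 handling further below).
assert re_inc2.match('#include <grpc/grpc.h>').group(1) == 'grpc/grpc.h'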
def get_target(name):
for target in js:
if target['name'] == name:
return target
assert False, 'no target %s' % name
def target_has_header(target, name):
# print target['name'], name
if name in target['headers']:
return True
for dep in target['deps']:
if target_has_header(get_target(dep), name):
return True
if name in ['src/core/lib/profiling/stap_probes.h',
'src/proto/grpc/reflection/v1alpha/reflection.grpc.pb.h']:
return True
return False
def produces_object(name):
return os.path.splitext(name)[1] in ['.c', '.cc']
obj_producer_to_source = {'c': {}, 'c++': {}, 'csharp': {}}
errors = 0
for target in js:
if not target['third_party']:
for fn in target['src']:
with open(os.path.join(root, fn)) as f:
src = f.read().splitlines()
for line in src:
m = re_inc1.match(line)
if m:
if not target_has_header(target, m.group(1)):
print (
'target %s (%s) does not name header %s as a dependency' % (
target['name'], fn, m.group(1)))
errors += 1
m = re_inc2.match(line)
if m:
if not target_has_header(target, 'include/' + m.group(1)):
print (
'target %s (%s) does not name header %s as a dependency' % (
target['name'], fn, m.group(1)))
errors += 1
if target['type'] == 'lib':
for fn in target['src']:
language = target['language']
if produces_object(fn):
obj_base = os.path.splitext(os.path.basename(fn))[0]
if obj_base in obj_producer_to_source[language]:
if obj_producer_to_source[language][obj_base] != fn:
print (
'target %s (%s) produces an aliased object file with %s' % (
target['name'], fn, obj_producer_to_source[language][obj_base]))
else:
obj_producer_to_source[language][obj_base] = fn
assert errors == 0
|
SivagnanamCiena/robotframework
|
refs/heads/master
|
src/robot/libraries/DateTime.py
|
11
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A test library for handling date and time values.
``DateTime`` is a Robot Framework standard library that supports creating and
converting date and time values (e.g. `Get Current Date`, `Convert Time`),
as well as doing simple calculations with them (e.g. `Subtract Time From Date`,
`Add Time To Time`). It supports dates and times in various formats, and can
also be used by other libraries programmatically.
This library is new in Robot Framework 2.8.5.
= Table of Contents =
- `Terminology`
- `Date formats`
- `Time formats`
- `Millisecond handling`
- `Programmatic usage`
- `Shortcuts`
- `Keywords`
= Terminology =
In the context of this library, ``date`` and ``time`` generally have the following
meanings:
- ``date``: An entity with both date and time components but without any
timezone information. For example, ``2014-06-11 10:07:42``.
- ``time``: A time interval. For example, ``1 hour 20 minutes`` or ``01:20:00``.
This terminology differs from what Python's standard
[https://docs.python.org/2/library/datetime.html|datetime] module uses.
Basically its
[https://docs.python.org/2/library/datetime.html#datetime-objects|datetime] and
[https://docs.python.org/2/library/datetime.html#timedelta-objects|timedelta]
objects match ``date`` and ``time`` as defined by this library.
= Date formats =
Dates can be given to and received from keywords in `timestamp`, `custom
timestamp`, `Python datetime` and `epoch time` formats. These formats are
discussed thoroughly in subsequent sections.
Input format is determined automatically based on the given date except when
using custom timestamps, in which case it needs to be given using
``date_format`` argument. Default result format is timestamp, but it can
be overridden using ``result_format`` argument.
The earliest date this library supports depends on the platform. Dates before
the year 1900 are not supported at all, but the limit can also be much stricter.
For example, on Windows only dates after 1970 are supported. These limitations
are due to Python's [https://docs.python.org/2/library/time.html#time.mktime|
time.mktime] function that this library uses internally.
== Timestamp ==
If a date is given as a string, it is always considered to be a timestamp.
If no custom formatting is given using ``date_format`` argument, the timestamp
is expected to be in [http://en.wikipedia.org/wiki/ISO_8601|ISO 8601] like
format ``YYYY-MM-DD hh:mm:ss.mil``, where any non-digit character can be used
as a separator or separators can be omitted altogether. Additionally,
only the date part is mandatory, all possibly missing time components are
considered to be zeros.
Dates can also be returned in the same ``YYYY-MM-DD hh:mm:ss.mil`` format by
using ``timestamp`` value with ``result_format`` argument. This is also the
default format that keywords returning dates use. Milliseconds can be excluded
using ``exclude_millis`` as explained in `Millisecond handling` section.
Examples:
| ${date1} = | Convert Date | 2014-06-11 10:07:42.000 |
| ${date2} = | Convert Date | 20140611 100742 | result_format=timestamp |
| Should Be Equal | ${date1} | ${date2} |
| ${date} = | Convert Date | 20140612 12:57 | exclude_millis=yes |
| Should Be Equal | ${date} | 2014-06-12 12:57:00 |
== Custom timestamp ==
It is possible to use custom timestamps in both input and output.
The custom format is same as accepted by Python's
[https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior|
datetime.strptime] function. For example, the default timestamp discussed
in the previous section would match ``%Y-%m-%d %H:%M:%S.%f``.
When using a custom timestamp in input, it must be specified using
``date_format`` argument. The actual input value must be a string that matches
the specified format exactly. When using a custom timestamp in output, it must
be given using ``result_format`` argument.
Examples:
| ${date} = | Convert Date | 28.05.2014 12:05 | date_format=%d.%m.%Y %H:%M |
| Should Be Equal | ${date} | 2014-05-28 12:05:00.000 |
| ${date} = | Convert Date | ${date} | result_format=%d.%m.%Y |
| Should Be Equal | ${date} | 28.05.2014 |
== Python datetime ==
Python's standard
[https://docs.python.org/2/library/datetime.html#datetime.datetime|datetime]
objects can be used both in input and output. In input they are recognized
automatically, and in output it is possible to get them by giving ``datetime``
value to ``result_format`` argument.
One nice benefit with datetime objects is that they have different time
components available as attributes that can be easily accessed using the
extended variable syntax.
Examples:
| ${datetime} = | Convert Date | 2014-06-11 10:07:42.123 | datetime |
| Should Be Equal As Integers | ${datetime.year} | 2014 |
| Should Be Equal As Integers | ${datetime.month} | 6 |
| Should Be Equal As Integers | ${datetime.day} | 11 |
| Should Be Equal As Integers | ${datetime.hour} | 10 |
| Should Be Equal As Integers | ${datetime.minute} | 7 |
| Should Be Equal As Integers | ${datetime.second} | 42 |
| Should Be Equal As Integers | ${datetime.microsecond} | 123000 |
== Epoch time ==
Epoch time is the time in seconds since the
[http://en.wikipedia.org/wiki/Unix_time|UNIX epoch] i.e. 00:00:00.000 (UTC)
1 January 1970. To give a date in epoch time, it must be given as a number
(integer or float), not as a string. To return a date in epoch time,
it is possible to use ``epoch`` value with ``result_format`` argument.
Epoch time is returned as a floating point number.
Notice that epoch time itself is independent of time zones and thus the same
around the world at any given moment. What local time a certain epoch time
corresponds to depends on the time zone. For example, the examples below
were tested in Finland, and the verifications would fail in other time zones.
Examples:
| ${date} = | Convert Date | ${1000000000} |
| Should Be Equal | ${date} | 2001-09-09 04:46:40.000 |
| ${date} = | Convert Date | 2014-06-12 13:27:59.279 | epoch |
| Should Be Equal | ${date} | ${1402568879.279} |
= Time formats =
Similarly to dates, times can be given to and received from keywords in
various different formats. Supported formats are `number`, `time string`
(verbose and compact), `timer string` and `Python timedelta`.
Input format for time is always determined automatically based on the input.
Result format is number by default, but it can be customised using
``result_format`` argument.
== Number ==
Time given as a number is interpreted to be seconds. It can be given
either as an integer or a float, or it can be a string that can be converted
to a number.
To return a time as a number, ``result_format`` argument must have value
``number``, which is also the default. Returned number is always a float.
Examples:
| ${time} = | Convert Time | 3.14 |
| Should Be Equal | ${time} | ${3.14} |
| ${time} = | Convert Time | ${time} | result_format=number |
| Should Be Equal | ${time} | ${3.14} |
== Time string ==
Time strings are strings in format like ``1 minute 42 seconds`` or ``1min 42s``.
The basic idea of this format is having first a number and then a text
specifying what time that number represents. Numbers can be either
integers or floating point numbers, the whole format is case and space
insensitive, and it is possible to add a minus prefix to specify negative
times. The available time specifiers are:
- ``days``, ``day``, ``d``
- ``hours``, ``hour``, ``h``
- ``minutes``, ``minute``, ``mins``, ``min``, ``m``
- ``seconds``, ``second``, ``secs``, ``sec``, ``s``
- ``milliseconds``, ``millisecond``, ``millis``, ``ms``
When returning a time string, it is possible to select between ``verbose``
and ``compact`` representations using ``result_format`` argument. The verbose
format uses long specifiers ``day``, ``hour``, ``minute``, ``second`` and
``millisecond``, and adds ``s`` at the end when needed. The compact format uses
shorter specifiers ``d``, ``h``, ``min``, ``s`` and ``ms``, and even drops
the space between the number and the specifier.
Examples:
| ${time} = | Convert Time | 1 minute 42 seconds |
| Should Be Equal | ${time} | ${102} |
| ${time} = | Convert Time | 4200 | verbose |
| Should Be Equal | ${time} | 1 hour 10 minutes |
| ${time} = | Convert Time | - 1.5 hours | compact |
| Should Be Equal | ${time} | - 1h 30min |
== Timer string ==
Timer string is a string given in timer like format ``hh:mm:ss.mil``. In this
format both hour and millisecond parts are optional, leading and trailing
zeros can be left out when they are not meaningful, and negative times can
be represented by adding a minus prefix.
To return a time as timer string, ``result_format`` argument must be given
value ``timer``. Timer strings are by default returned in full ``hh:mm:ss.mil``
format, but milliseconds can be excluded using ``exclude_millis`` as explained
in `Millisecond handling` section.
Examples:
| ${time} = | Convert Time | 01:42 |
| Should Be Equal | ${time} | ${102} |
| ${time} = | Convert Time | 01:10:00.123 |
| Should Be Equal | ${time} | ${4200.123} |
| ${time} = | Convert Time | 102 | timer |
| Should Be Equal | ${time} | 00:01:42.000 |
| ${time} = | Convert Time | -101.567 | timer | exclude_millis=yes |
| Should Be Equal | ${time} | -00:01:42 |
== Python timedelta ==
Python's standard
[https://docs.python.org/2/library/datetime.html#datetime.timedelta|timedelta]
objects are also supported both in input and in output. In input they are
recognized automatically, and in output it is possible to receive them by
giving ``timedelta`` value to ``result_format`` argument.
Examples:
| ${timedelta} = | Convert Time | 01:10:02.123 | timedelta |
| Should Be Equal | ${timedelta.total_seconds()} | ${4202.123} |
= Millisecond handling =
This library handles dates and times internally using the precision of the
given input. With `timestamp`, `time string`, and `timer string` result
formats, seconds are, however, rounded to millisecond accuracy. Milliseconds
are also included even if there are none.
All keywords returning dates or times have an option to leave milliseconds out
by giving a true value to ``exclude_millis`` argument. If the argument is given
as a string, it is considered true unless it is empty or case-insensitively
equal to ``false`` or ``no``. Other argument types are tested using same
[http://docs.python.org/2/library/stdtypes.html#truth-value-testing|rules as in
Python]. Notice that prior to Robot Framework 2.9, all strings except the empty
string were considered true.
When milliseconds are excluded, seconds in returned dates and times are
rounded to the nearest full second. With `timestamp` and `timer string`
result formats, milliseconds will also be removed from the returned string
altogether.
Examples:
| ${date} = | Convert Date | 2014-06-11 10:07:42 |
| Should Be Equal | ${date} | 2014-06-11 10:07:42.000 |
| ${date} = | Convert Date | 2014-06-11 10:07:42.500 | exclude_millis=yes |
| Should Be Equal | ${date} | 2014-06-11 10:07:43 |
| ${dt} = | Convert Date | 2014-06-11 10:07:42.500 | datetime | exclude_millis=yes |
| Should Be Equal | ${dt.second} | ${43} |
| Should Be Equal | ${dt.microsecond} | ${0} |
| ${time} = | Convert Time | 102 | timer | exclude_millis=false |
| Should Be Equal | ${time} | 00:01:42.000 | |
| ${time} = | Convert Time | 102.567 | timer | exclude_millis=true |
| Should Be Equal | ${time} | 00:01:43 | |
= Programmatic usage =
In addition to being used as a normal library, this library is intended to
provide a stable API for other libraries to use if they want to support the
same date and time formats as this library. All the provided keywords
are available as functions that can be easily imported:
| from robot.libraries.DateTime import convert_time
|
| def example_keyword(timeout):
| seconds = convert_time(timeout)
| # ...
Additionally, the helper classes ``Date`` and ``Time`` can be used directly:
| from robot.libraries.DateTime import Date, Time
|
| def example_keyword(date, interval):
| date = Date(date).convert('datetime')
| interval = Time(interval).convert('number')
| # ...
"""
from datetime import datetime, timedelta
import time
import re
from robot.version import get_version
from robot.utils import (elapsed_time_to_string, is_falsy, is_number,
is_string, secs_to_timestr, timestr_to_secs,
type_name, IRONPYTHON)
__version__ = get_version()
__all__ = ['convert_time', 'convert_date', 'subtract_date_from_date',
'subtract_time_from_date', 'subtract_time_from_time',
'add_time_to_time', 'add_time_to_date', 'get_current_date']
def get_current_date(time_zone='local', increment=0,
result_format='timestamp', exclude_millis=False):
"""Returns current local or UTC time with an optional increment.
Arguments:
- ``time_zone:`` Get the current time in this time zone. Currently only
``local`` (default) and ``UTC`` are supported.
- ``increment:`` Optional time increment to add to the returned date in
one of the supported `time formats`. Can be negative.
- ``result_format:`` Format of the returned date (see `date formats`).
- ``exclude_millis:`` When set to any true value, rounds and drops
milliseconds as explained in `millisecond handling`.
Examples:
| ${date} = | Get Current Date |
| Should Be Equal | ${date} | 2014-06-12 20:00:58.946 |
| ${date} = | Get Current Date | UTC |
| Should Be Equal | ${date} | 2014-06-12 17:00:58.946 |
| ${date} = | Get Current Date | increment=02:30:00 |
| Should Be Equal | ${date} | 2014-06-12 22:30:58.946 |
| ${date} = | Get Current Date | UTC | - 5 hours |
| Should Be Equal | ${date} | 2014-06-12 12:00:58.946 |
| ${date} = | Get Current Date | result_format=datetime |
| Should Be Equal | ${date.year} | ${2014} |
| Should Be Equal | ${date.month} | ${6} |
"""
if time_zone.upper() == 'LOCAL':
dt = datetime.now()
elif time_zone.upper() == 'UTC':
dt = datetime.utcnow()
else:
raise ValueError("Unsupported timezone '%s'." % time_zone)
date = Date(dt) + Time(increment)
return date.convert(result_format, millis=is_falsy(exclude_millis))
def convert_date(date, result_format='timestamp', exclude_millis=False,
date_format=None):
"""Converts between supported `date formats`.
Arguments:
- ``date:`` Date in one of the supported `date formats`.
- ``result_format:`` Format of the returned date.
- ``exclude_millis:`` When set to any true value, rounds and drops
milliseconds as explained in `millisecond handling`.
- ``date_format:`` Specifies possible `custom timestamp` format.
Examples:
| ${date} = | Convert Date | 20140528 12:05:03.111 |
| Should Be Equal | ${date} | 2014-05-28 12:05:03.111 |
| ${date} = | Convert Date | ${date} | epoch |
| Should Be Equal | ${date} | ${1401267903.111} |
| ${date} = | Convert Date | 5.28.2014 12:05 | exclude_millis=yes | date_format=%m.%d.%Y %H:%M |
| Should Be Equal | ${date} | 2014-05-28 12:05:00 |
"""
return Date(date, date_format).convert(result_format,
millis=is_falsy(exclude_millis))
def convert_time(time, result_format='number', exclude_millis=False):
"""Converts between supported `time formats`.
Arguments:
- ``time:`` Time in one of the supported `time formats`.
- ``result_format:`` Format of the returned time.
- ``exclude_millis:`` When set to any true value, rounds and drops
milliseconds as explained in `millisecond handling`.
Examples:
| ${time} = | Convert Time | 10 seconds |
| Should Be Equal | ${time} | ${10} |
| ${time} = | Convert Time | 1:00:01 | verbose |
| Should Be Equal | ${time} | 1 hour 1 second |
| ${time} = | Convert Time | ${3661.5} | timer | exclude_millis=yes |
| Should Be Equal | ${time} | 01:01:02 |
"""
return Time(time).convert(result_format, millis=is_falsy(exclude_millis))
def subtract_date_from_date(date1, date2, result_format='number',
exclude_millis=False, date1_format=None,
date2_format=None):
"""Subtracts date from another date and returns time between.
Arguments:
- ``date1:`` Date to subtract another date from in one of the
supported `date formats`.
- ``date2:`` Date that is subtracted in one of the supported
`date formats`.
- ``result_format:`` Format of the returned time (see `time formats`).
- ``exclude_millis:`` When set to any true value, rounds and drops
milliseconds as explained in `millisecond handling`.
- ``date1_format:`` Possible `custom timestamp` format of ``date1``.
- ``date2_format:`` Possible `custom timestamp` format of ``date2``.
Examples:
| ${time} = | Subtract Date From Date | 2014-05-28 12:05:52 | 2014-05-28 12:05:10 |
| Should Be Equal | ${time} | ${42} |
| ${time} = | Subtract Date From Date | 2014-05-28 12:05:52 | 2014-05-27 12:05:10 | verbose |
| Should Be Equal | ${time} | 1 day 42 seconds |
"""
time = Date(date1, date1_format) - Date(date2, date2_format)
return time.convert(result_format, millis=is_falsy(exclude_millis))
def add_time_to_date(date, time, result_format='timestamp',
exclude_millis=False, date_format=None):
"""Adds time to date and returns the resulting date.
Arguments:
- ``date:`` Date to add time to in one of the supported
`date formats`.
- ``time:`` Time that is added in one of the supported
`time formats`.
- ``result_format:`` Format of the returned date.
- ``exclude_millis:`` When set to any true value, rounds and drops
milliseconds as explained in `millisecond handling`.
- ``date_format:`` Possible `custom timestamp` format of ``date``.
Examples:
| ${date} = | Add Time To Date | 2014-05-28 12:05:03.111 | 7 days |
| Should Be Equal | ${date} | 2014-06-04 12:05:03.111 | |
| ${date} = | Add Time To Date | 2014-05-28 12:05:03.111 | 01:02:03:004 |
| Should Be Equal | ${date} | 2014-05-28 13:07:06.115 |
"""
date = Date(date, date_format) + Time(time)
return date.convert(result_format, millis=is_falsy(exclude_millis))
def subtract_time_from_date(date, time, result_format='timestamp',
exclude_millis=False, date_format=None):
"""Subtracts time from date and returns the resulting date.
Arguments:
- ``date:`` Date to subtract time from in one of the supported
`date formats`.
- ``time:`` Time that is subtracted in one of the supported
`time formats`.
- ``result_format:`` Format of the returned date.
- ``exclude_millis:`` When set to any true value, rounds and drops
milliseconds as explained in `millisecond handling`.
- ``date_format:`` Possible `custom timestamp` format of ``date``.
Examples:
| ${date} = | Subtract Time From Date | 2014-06-04 12:05:03.111 | 7 days |
| Should Be Equal | ${date} | 2014-05-28 12:05:03.111 |
| ${date} = | Subtract Time From Date | 2014-05-28 13:07:06.115 | 01:02:03:004 |
| Should Be Equal | ${date} | 2014-05-28 12:05:03.111 |
"""
date = Date(date, date_format) - Time(time)
return date.convert(result_format, millis=is_falsy(exclude_millis))
def add_time_to_time(time1, time2, result_format='number',
exclude_millis=False):
"""Adds time to another time and returns the resulting time.
Arguments:
- ``time1:`` First time in one of the supported `time formats`.
- ``time2:`` Second time in one of the supported `time formats`.
- ``result_format:`` Format of the returned time.
- ``exclude_millis:`` When set to any true value, rounds and drops
milliseconds as explained in `millisecond handling`.
Examples:
| ${time} = | Add Time To Time | 1 minute | 42 |
| Should Be Equal | ${time} | ${102} |
| ${time} = | Add Time To Time | 3 hours 5 minutes | 01:02:03 | timer | exclude_millis=yes |
| Should Be Equal | ${time} | 04:07:03 |
"""
time = Time(time1) + Time(time2)
return time.convert(result_format, millis=is_falsy(exclude_millis))
def subtract_time_from_time(time1, time2, result_format='number',
exclude_millis=False):
"""Subtracts time from another time and returns the resulting time.
Arguments:
- ``time1:`` Time to subtract another time from in one of
the supported `time formats`.
- ``time2:`` Time to subtract in one of the supported `time formats`.
- ``result_format:`` Format of the returned time.
- ``exclude_millis:`` When set to any true value, rounds and drops
milliseconds as explained in `millisecond handling`.
Examples:
| ${time} = | Subtract Time From Time | 00:02:30 | 100 |
| Should Be Equal | ${time} | ${50} |
| ${time} = | Subtract Time From Time | ${time} | 1 minute | compact |
| Should Be Equal | ${time} | - 10s |
"""
time = Time(time1) - Time(time2)
return time.convert(result_format, millis=is_falsy(exclude_millis))
class Date(object):
def __init__(self, date, input_format=None):
self.seconds = self._convert_date_to_seconds(date, input_format)
def _convert_date_to_seconds(self, date, input_format):
if is_string(date):
return self._string_to_epoch(date, input_format)
elif isinstance(date, datetime):
return self._mktime_with_millis(date)
elif is_number(date):
return float(date)
raise ValueError("Unsupported input '%s'." % date)
@property
def datetime(self):
return self._datetime_from_seconds(self.seconds)
def _string_to_epoch(self, ts, input_format):
if not input_format:
ts = self._normalize_timestamp(ts)
input_format = '%Y-%m-%d %H:%M:%S.%f'
if self._need_to_handle_f_directive(input_format):
return self._handle_un_supported_f_directive(ts, input_format)
return self._mktime_with_millis(datetime.strptime(ts, input_format))
def _normalize_timestamp(self, date):
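# Added note: the digits are extracted and right-padded with zeros to 20
# characters, so e.g. '2014-06-11 10:07' -> '201406111007' ->
# '2014-06-11 10:07:00.000000', which then matches '%Y-%m-%d %H:%M:%S.%f'.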
ts = ''.join(d for d in date if d.isdigit())
if len(ts) < 8:
raise ValueError("Invalid timestamp '%s'." % date)
ts = ts.ljust(20, '0')
return '%s-%s-%s %s:%s:%s.%s' % (ts[:4], ts[4:6], ts[6:8], ts[8:10],
ts[10:12], ts[12:14], ts[14:])
def _need_to_handle_f_directive(self, format):
# https://github.com/IronLanguages/main/issues/1169
return IRONPYTHON and '%f' in format
def _handle_un_supported_f_directive(self, ts, input_format):
input_format = self._remove_f_from_format(input_format)
micro = re.search('\d+$', ts).group(0)
ts = ts[:-len(micro)]
epoch = time.mktime(time.strptime(ts, input_format))
epoch += float(micro) / 10**len(micro)
return epoch
def _remove_f_from_format(self, format):
if not format.endswith('%f'):
raise ValueError('%f directive is supported only at the end of '
'the format string on this Python interpreter.')
return format[:-2]
def _mktime_with_millis(self, dt):
return time.mktime(dt.timetuple()) + dt.microsecond / 1e6
def convert(self, format, millis=True):
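# Added note: `format` is either a custom strftime pattern (anything
# containing '%') or one of the names handled by the _convert_to_* methods
# below (timestamp, epoch, datetime).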
seconds = self.seconds if millis else round(self.seconds)
if '%' in format:
return self._convert_to_custom_timestamp(seconds, format)
try:
result_converter = getattr(self, '_convert_to_%s' % format.lower())
except AttributeError:
raise ValueError("Unknown format '%s'." % format)
return result_converter(seconds, millis)
def _convert_to_custom_timestamp(self, seconds, format):
dt = self._datetime_from_seconds(seconds)
if not self._need_to_handle_f_directive(format):
return dt.strftime(format)
format = self._remove_f_from_format(format)
micro = round(seconds % 1 * 1e6)
return '%s%06d' % (dt.strftime(format), micro)
def _convert_to_timestamp(self, seconds, millis=True):
milliseconds = int(round(seconds % 1 * 1000))
if milliseconds == 1000:
seconds = round(seconds)
milliseconds = 0
dt = self._datetime_from_seconds(seconds)
ts = dt.strftime('%Y-%m-%d %H:%M:%S')
if millis:
ts += '.%03d' % milliseconds
return ts
def _datetime_from_seconds(self, ts):
# Workaround microsecond rounding errors with IronPython:
# https://github.com/IronLanguages/main/issues/1170
# Also Jython had similar problems, but they seem to be fixed in 2.7.
dt = datetime.fromtimestamp(ts)
return dt.replace(microsecond=int(round(ts % 1 * 1e6)))
def _convert_to_epoch(self, seconds, millis=True):
return seconds
def _convert_to_datetime(self, seconds, millis=True):
return self._datetime_from_seconds(seconds)
def __add__(self, other):
if isinstance(other, Time):
return Date(self.datetime + other.timedelta)
raise TypeError('Can only add Time to Date, got %s.' % type_name(other))
def __sub__(self, other):
if isinstance(other, Date):
return Time(self.datetime - other.datetime)
if isinstance(other, Time):
return Date(self.datetime - other.timedelta)
raise TypeError('Can only subtract Date or Time from Date, got %s.'
% type_name(other))
class Time(object):
def __init__(self, time):
self.seconds = self._convert_time_to_seconds(time)
def _convert_time_to_seconds(self, time):
if isinstance(time, timedelta):
# timedelta.total_seconds() is new in Python 2.7
return (time.days * 24 * 60 * 60 +
time.seconds +
time.microseconds / 1e6)
return timestr_to_secs(time, round_to=None)
@property
def timedelta(self):
return timedelta(seconds=self.seconds)
def convert(self, format, millis=True):
try:
result_converter = getattr(self, '_convert_to_%s' % format.lower())
except AttributeError:
raise ValueError("Unknown format '%s'." % format)
seconds = self.seconds if millis else round(self.seconds)
return result_converter(seconds, millis)
def _convert_to_number(self, seconds, millis=True):
return seconds
def _convert_to_verbose(self, seconds, millis=True):
return secs_to_timestr(seconds)
def _convert_to_compact(self, seconds, millis=True):
return secs_to_timestr(seconds, compact=True)
def _convert_to_timer(self, seconds, millis=True):
return elapsed_time_to_string(seconds * 1000, include_millis=millis)
def _convert_to_timedelta(self, seconds, millis=True):
return timedelta(seconds=seconds)
def __add__(self, other):
if isinstance(other, Time):
return Time(self.seconds + other.seconds)
raise TypeError('Can only add Time to Time, got %s.' % type_name(other))
def __sub__(self, other):
if isinstance(other, Time):
return Time(self.seconds - other.seconds)
raise TypeError('Can only subtract Time from Time, got %s.'
% type_name(other))
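# Hypothetical usage sketch (not part of the library source): the module-level
# functions mirror the keywords, as described in the `Programmatic usage`
# section of the docstring. Expected values follow the docstring examples.
if __name__ == '__main__':
    print(convert_time('1 minute 42 seconds'))    # 102.0
    print(convert_date('20140528 12:05:03.111'))  # '2014-05-28 12:05:03.111'
    print(subtract_date_from_date('2014-05-28 12:05:52',
                                  '2014-05-28 12:05:10'))  # 42.0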
|
smkr/pyclipse
|
refs/heads/master
|
plugins/org.python.pydev.refactoring/tests/python/codegenerator/generatedocstring/testGenerateDocstringJump.py
|
8
|
def function():
'''
Return true.
'''
return True##|
##r
def function():
'''
Return true.##|
'''
return True
|
Azure/azure-sdk-for-python
|
refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_10_01/aio/operations/_network_interface_ip_configurations_operations.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfaceIPConfigurationsOperations:
"""NetworkInterfaceIPConfigurationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> AsyncIterable["_models.NetworkInterfaceIPConfigurationListResult"]:
"""Get all ip configurations in a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceIPConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_10_01.models.NetworkInterfaceIPConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceIPConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceIPConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/ipConfigurations'} # type: ignore
async def get(
self,
resource_group_name: str,
network_interface_name: str,
ip_configuration_name: str,
**kwargs
) -> "_models.NetworkInterfaceIPConfiguration":
"""Gets the specified network interface ip configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param ip_configuration_name: The name of the IP configuration.
:type ip_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterfaceIPConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_10_01.models.NetworkInterfaceIPConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceIPConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterfaceIPConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/ipConfigurations/{ipConfigurationName}'} # type: ignore
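# Hypothetical usage sketch (not part of the generated module): list() returns
# an AsyncItemPaged that is consumed with `async for`, while get() is awaited
# directly. `network_client` is assumed to be an already constructed
# NetworkManagementClient from the v2017_10_01 aio package; the IP
# configuration name used below is an example value.
async def _example_usage(network_client, resource_group_name, network_interface_name):
    async for ip_config in network_client.network_interface_ip_configurations.list(
            resource_group_name, network_interface_name):
        print(ip_config.name)
    return await network_client.network_interface_ip_configurations.get(
        resource_group_name, network_interface_name, 'ipconfig1')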
|
leafclick/intellij-community
|
refs/heads/master
|
python/testData/quickFixes/PyMakeFunctionFromMethodQuickFixTest/noSelf.py
|
83
|
__author__ = 'ktisha'
class Child(Base):
def <caret>f():
test = 1
|
lauria/Samba4
|
refs/heads/master
|
lib/dnspython/tests/set.py
|
59
|
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import unittest
import dns.set
# for convenience
S = dns.set.Set
class SimpleSetTestCase(unittest.TestCase):
def testLen1(self):
s1 = S()
self.failUnless(len(s1) == 0)
def testLen2(self):
s1 = S([1, 2, 3])
self.failUnless(len(s1) == 3)
def testLen3(self):
s1 = S([1, 2, 3, 3, 3])
self.failUnless(len(s1) == 3)
def testUnion1(self):
s1 = S([1, 2, 3])
s2 = S([1, 2, 3])
e = S([1, 2, 3])
self.failUnless(s1 | s2 == e)
def testUnion2(self):
s1 = S([1, 2, 3])
s2 = S([])
e = S([1, 2, 3])
self.failUnless(s1 | s2 == e)
def testUnion3(self):
s1 = S([1, 2, 3])
s2 = S([3, 4])
e = S([1, 2, 3, 4])
self.failUnless(s1 | s2 == e)
def testIntersection1(self):
s1 = S([1, 2, 3])
s2 = S([1, 2, 3])
e = S([1, 2, 3])
self.failUnless(s1 & s2 == e)
def testIntersection2(self):
s1 = S([0, 1, 2, 3])
s2 = S([1, 2, 3, 4])
e = S([1, 2, 3])
self.failUnless(s1 & s2 == e)
def testIntersection3(self):
s1 = S([1, 2, 3])
s2 = S([])
e = S([])
self.failUnless(s1 & s2 == e)
def testIntersection4(self):
s1 = S([1, 2, 3])
s2 = S([5, 4])
e = S([])
self.failUnless(s1 & s2 == e)
def testDifference1(self):
s1 = S([1, 2, 3])
s2 = S([5, 4])
e = S([1, 2, 3])
self.failUnless(s1 - s2 == e)
def testDifference2(self):
s1 = S([1, 2, 3])
s2 = S([])
e = S([1, 2, 3])
self.failUnless(s1 - s2 == e)
def testDifference3(self):
s1 = S([1, 2, 3])
s2 = S([3, 2])
e = S([1])
self.failUnless(s1 - s2 == e)
def testDifference4(self):
s1 = S([1, 2, 3])
s2 = S([3, 2, 1])
e = S([])
self.failUnless(s1 - s2 == e)
def testSubset1(self):
s1 = S([1, 2, 3])
s2 = S([3, 2, 1])
self.failUnless(s1.issubset(s2))
def testSubset2(self):
s1 = S([1, 2, 3])
self.failUnless(s1.issubset(s1))
def testSubset3(self):
s1 = S([])
s2 = S([1, 2, 3])
self.failUnless(s1.issubset(s2))
def testSubset4(self):
s1 = S([1])
s2 = S([1, 2, 3])
self.failUnless(s1.issubset(s2))
def testSubset5(self):
s1 = S([])
s2 = S([])
self.failUnless(s1.issubset(s2))
def testSubset6(self):
s1 = S([1, 4])
s2 = S([1, 2, 3])
self.failUnless(not s1.issubset(s2))
def testSuperset1(self):
s1 = S([1, 2, 3])
s2 = S([3, 2, 1])
self.failUnless(s1.issuperset(s2))
def testSuperset2(self):
s1 = S([1, 2, 3])
self.failUnless(s1.issuperset(s1))
def testSuperset3(self):
s1 = S([1, 2, 3])
s2 = S([])
self.failUnless(s1.issuperset(s2))
def testSuperset4(self):
s1 = S([1, 2, 3])
s2 = S([1])
self.failUnless(s1.issuperset(s2))
def testSuperset5(self):
s1 = S([])
s2 = S([])
self.failUnless(s1.issuperset(s2))
def testSuperset6(self):
s1 = S([1, 2, 3])
s2 = S([1, 4])
self.failUnless(not s1.issuperset(s2))
def testUpdate1(self):
s1 = S([1, 2, 3])
u = (4, 5, 6)
e = S([1, 2, 3, 4, 5, 6])
s1.update(u)
self.failUnless(s1 == e)
def testUpdate2(self):
s1 = S([1, 2, 3])
u = []
e = S([1, 2, 3])
s1.update(u)
self.failUnless(s1 == e)
def testGetitem(self):
s1 = S([1, 2, 3])
i0 = s1[0]
i1 = s1[1]
i2 = s1[2]
s2 = S([i0, i1, i2])
self.failUnless(s1 == s2)
def testGetslice(self):
s1 = S([1, 2, 3])
slice = s1[0:2]
self.failUnless(len(slice) == 2)
item = s1[2]
slice.append(item)
s2 = S(slice)
self.failUnless(s1 == s2)
def testDelitem(self):
s1 = S([1, 2, 3])
del s1[0]
i1 = s1[0]
i2 = s1[1]
self.failUnless(i1 != i2)
self.failUnless(i1 == 1 or i1 == 2 or i1 == 3)
self.failUnless(i2 == 1 or i2 == 2 or i2 == 3)
def testDelslice(self):
s1 = S([1, 2, 3])
del s1[0:2]
i1 = s1[0]
self.failUnless(i1 == 1 or i1 == 2 or i1 == 3)
if __name__ == '__main__':
unittest.main()
|
bakhtout/odoo-educ
|
refs/heads/8.0
|
openerp/addons/base/ir/ir_ui_menu.py
|
316
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import operator
import re
import threading
import openerp.modules
from openerp.osv import fields, osv
from openerp import api, tools
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
MENU_ITEM_SEPARATOR = "/"
class ir_ui_menu(osv.osv):
_name = 'ir.ui.menu'
def __init__(self, *args, **kwargs):
cls = type(self)
# by design, self._menu_cache is specific to the database
cls._menu_cache_lock = threading.RLock()
cls._menu_cache = {}
super(ir_ui_menu, self).__init__(*args, **kwargs)
self.pool.get('ir.model.access').register_cache_clearing_method(self._name, 'clear_cache')
def clear_cache(self):
with self._menu_cache_lock:
# radical but this doesn't frequently happen
if self._menu_cache:
# Normally this is done by openerp.tools.ormcache
# but since we do not use it, set it by ourself.
self.pool._any_cache_cleared = True
self._menu_cache.clear()
self.load_menus_root._orig.clear_cache(self)
self.load_menus._orig.clear_cache(self)
@api.multi
@api.returns('self')
def _filter_visible_menus(self):
""" Filter `self` to only keep the menu items that should be visible in
the menu hierarchy of the current user.
Uses a cache for speeding up the computation.
"""
with self._menu_cache_lock:
groups = self.env.user.groups_id
# visibility is entirely based on the user's groups;
# self._menu_cache[key] gives the ids of all visible menus
key = frozenset(groups._ids)
if key in self._menu_cache:
visible = self.browse(self._menu_cache[key])
else:
# retrieve all menus, and determine which ones are visible
context = {'ir.ui.menu.full_list': True}
menus = self.with_context(context).search([])
# first discard all menus with groups the user does not have
menus = menus.filtered(
lambda menu: not menu.groups_id or menu.groups_id & groups)
# take apart menus that have an action
action_menus = menus.filtered('action')
folder_menus = menus - action_menus
visible = self.browse()
# process action menus, check whether their action is allowed
access = self.env['ir.model.access']
model_fname = {
'ir.actions.act_window': 'res_model',
'ir.actions.report.xml': 'model',
'ir.actions.wizard': 'model',
'ir.actions.server': 'model_id',
}
for menu in action_menus:
fname = model_fname.get(menu.action._name)
if not fname or not menu.action[fname] or \
access.check(menu.action[fname], 'read', False):
# make menu visible, and its folder ancestors, too
visible += menu
menu = menu.parent_id
while menu and menu in folder_menus and menu not in visible:
visible += menu
menu = menu.parent_id
self._menu_cache[key] = visible._ids
return self.filtered(lambda menu: menu in visible)
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
ids = super(ir_ui_menu, self).search(cr, uid, args, offset=0,
limit=None, order=order, context=context, count=False)
if not ids:
if count:
return 0
return []
# menu filtering is done only on main menu tree, not other menu lists
if context.get('ir.ui.menu.full_list'):
result = ids
else:
result = self._filter_visible_menus(cr, uid, ids, context=context)
if offset:
result = result[long(offset):]
if limit:
result = result[:long(limit)]
if count:
return len(result)
return result
def name_get(self, cr, uid, ids, context=None):
res = []
for id in ids:
elmt = self.browse(cr, uid, id, context=context)
res.append((id, self._get_one_full_name(elmt)))
return res
def _get_full_name(self, cr, uid, ids, name=None, args=None, context=None):
if context is None:
context = {}
res = {}
for elmt in self.browse(cr, uid, ids, context=context):
res[elmt.id] = self._get_one_full_name(elmt)
return res
def _get_one_full_name(self, elmt, level=6):
if level<=0:
return '...'
if elmt.parent_id:
parent_path = self._get_one_full_name(elmt.parent_id, level-1) + MENU_ITEM_SEPARATOR
else:
parent_path = ''
return parent_path + elmt.name
def create(self, cr, uid, values, context=None):
self.clear_cache()
return super(ir_ui_menu, self).create(cr, uid, values, context=context)
def write(self, cr, uid, ids, values, context=None):
self.clear_cache()
return super(ir_ui_menu, self).write(cr, uid, ids, values, context=context)
def unlink(self, cr, uid, ids, context=None):
# Detach children and promote them to top-level, because it would be unwise to
# cascade-delete submenus blindly. We also can't use ondelete=set null because
# that is not supported when _parent_store is used (would silently corrupt it).
# TODO: ideally we should move them under a generic "Orphans" menu somewhere?
if isinstance(ids, (int, long)):
ids = [ids]
local_context = dict(context or {})
local_context['ir.ui.menu.full_list'] = True
direct_children_ids = self.search(cr, uid, [('parent_id', 'in', ids)], context=local_context)
if direct_children_ids:
self.write(cr, uid, direct_children_ids, {'parent_id': False})
result = super(ir_ui_menu, self).unlink(cr, uid, ids, context=context)
self.clear_cache()
return result
def copy(self, cr, uid, id, default=None, context=None):
ir_values_obj = self.pool.get('ir.values')
res = super(ir_ui_menu, self).copy(cr, uid, id, default=default, context=context)
datas=self.read(cr,uid,[res],['name'])[0]
rex=re.compile('\([0-9]+\)')
concat=rex.findall(datas['name'])
if concat:
next_num=int(concat[0])+1
datas['name']=rex.sub(('(%d)'%next_num),datas['name'])
else:
datas['name'] += '(1)'
self.write(cr,uid,[res],{'name':datas['name']})
ids = ir_values_obj.search(cr, uid, [
('model', '=', 'ir.ui.menu'),
('res_id', '=', id),
])
for iv in ir_values_obj.browse(cr, uid, ids):
ir_values_obj.copy(cr, uid, iv.id, default={'res_id': res},
context=context)
return res
def _action(self, cursor, user, ids, name, arg, context=None):
res = {}
ir_values_obj = self.pool.get('ir.values')
value_ids = ir_values_obj.search(cursor, user, [
('model', '=', self._name), ('key', '=', 'action'),
('key2', '=', 'tree_but_open'), ('res_id', 'in', ids)],
context=context)
values_action = {}
for value in ir_values_obj.browse(cursor, user, value_ids, context=context):
values_action[value.res_id] = value.value
for menu_id in ids:
res[menu_id] = values_action.get(menu_id, False)
return res
def _action_inv(self, cursor, user, menu_id, name, value, arg, context=None):
if context is None:
context = {}
ctx = context.copy()
if self.CONCURRENCY_CHECK_FIELD in ctx:
del ctx[self.CONCURRENCY_CHECK_FIELD]
ir_values_obj = self.pool.get('ir.values')
values_ids = ir_values_obj.search(cursor, user, [
('model', '=', self._name), ('key', '=', 'action'),
('key2', '=', 'tree_but_open'), ('res_id', '=', menu_id)],
context=context)
if value and values_ids:
ir_values_obj.write(cursor, user, values_ids, {'value': value}, context=ctx)
elif value:
# no values_ids, create binding
ir_values_obj.create(cursor, user, {
'name': 'Menuitem',
'model': self._name,
'value': value,
'key': 'action',
'key2': 'tree_but_open',
'res_id': menu_id,
}, context=ctx)
elif values_ids:
# value is False, remove existing binding
ir_values_obj.unlink(cursor, user, values_ids, context=ctx)
def _get_icon_pict(self, cr, uid, ids, name, args, context):
res = {}
for m in self.browse(cr, uid, ids, context=context):
res[m.id] = ('stock', (m.icon,'ICON_SIZE_MENU'))
return res
def onchange_icon(self, cr, uid, ids, icon):
if not icon:
return {}
return {'type': {'icon_pict': 'picture'}, 'value': {'icon_pict': ('stock', (icon,'ICON_SIZE_MENU'))}}
def read_image(self, path):
if not path:
return False
path_info = path.split(',')
icon_path = openerp.modules.get_module_resource(path_info[0],path_info[1])
icon_image = False
if icon_path:
try:
icon_file = tools.file_open(icon_path,'rb')
icon_image = base64.encodestring(icon_file.read())
finally:
icon_file.close()
return icon_image
def _get_image_icon(self, cr, uid, ids, names, args, context=None):
res = {}
for menu in self.browse(cr, uid, ids, context=context):
res[menu.id] = r = {}
for fn in names:
fn_src = fn[:-5] # remove _data
r[fn] = self.read_image(menu[fn_src])
return res
def _get_needaction_enabled(self, cr, uid, ids, field_names, args, context=None):
""" needaction_enabled: tell whether the menu has a related action
that uses the needaction mechanism. """
res = dict.fromkeys(ids, False)
for menu in self.browse(cr, uid, ids, context=context):
if menu.action and menu.action.type in ('ir.actions.act_window', 'ir.actions.client') and menu.action.res_model:
if menu.action.res_model in self.pool and self.pool[menu.action.res_model]._needaction:
res[menu.id] = True
return res
def get_needaction_data(self, cr, uid, ids, context=None):
""" Return for each menu entry of ids :
- if it uses the needaction mechanism (needaction_enabled)
- the needaction counter of the related action, taking into account
the action domain
"""
if context is None:
context = {}
res = {}
menu_ids = set()
for menu in self.browse(cr, uid, ids, context=context):
menu_ids.add(menu.id)
ctx = None
if menu.action and menu.action.type in ('ir.actions.act_window', 'ir.actions.client') and menu.action.context:
try:
# use magical UnquoteEvalContext to ignore undefined client-side variables such as `active_id`
eval_ctx = tools.UnquoteEvalContext(**context)
ctx = eval(menu.action.context, locals_dict=eval_ctx, nocopy=True) or None
except Exception:
# if the eval still fails for some reason, we'll simply skip this menu
pass
menu_ref = ctx and ctx.get('needaction_menu_ref')
if menu_ref:
if not isinstance(menu_ref, list):
menu_ref = [menu_ref]
model_data_obj = self.pool.get('ir.model.data')
for menu_data in menu_ref:
try:
model, id = model_data_obj.get_object_reference(cr, uid, menu_data.split('.')[0], menu_data.split('.')[1])
if (model == 'ir.ui.menu'):
menu_ids.add(id)
except Exception:
pass
menu_ids = list(menu_ids)
for menu in self.browse(cr, uid, menu_ids, context=context):
res[menu.id] = {
'needaction_enabled': False,
'needaction_counter': False,
}
if menu.action and menu.action.type in ('ir.actions.act_window', 'ir.actions.client') and menu.action.res_model:
if menu.action.res_model in self.pool:
obj = self.pool[menu.action.res_model]
if obj._needaction:
if menu.action.type == 'ir.actions.act_window':
dom = menu.action.domain and eval(menu.action.domain, {'uid': uid}) or []
else:
dom = eval(menu.action.params_store or '{}', {'uid': uid}).get('domain')
res[menu.id]['needaction_enabled'] = obj._needaction
res[menu.id]['needaction_counter'] = obj._needaction_count(cr, uid, dom, context=context)
return res
def get_user_roots(self, cr, uid, context=None):
""" Return all root menu ids visible for the user.
:return: the root menu ids
:rtype: list(int)
"""
menu_domain = [('parent_id', '=', False)]
return self.search(cr, uid, menu_domain, context=context)
@api.cr_uid_context
@tools.ormcache_context(accepted_keys=('lang',))
def load_menus_root(self, cr, uid, context=None):
fields = ['name', 'sequence', 'parent_id', 'action']
menu_root_ids = self.get_user_roots(cr, uid, context=context)
menu_roots = self.read(cr, uid, menu_root_ids, fields, context=context) if menu_root_ids else []
return {
'id': False,
'name': 'root',
'parent_id': [-1, ''],
'children': menu_roots,
'all_menu_ids': menu_root_ids,
}
@api.cr_uid_context
@tools.ormcache_context(accepted_keys=('lang',))
def load_menus(self, cr, uid, context=None):
""" Loads all menu items (all applications and their sub-menus).
:return: the menu root
:rtype: dict('children': menu_nodes)
"""
fields = ['name', 'sequence', 'parent_id', 'action']
menu_root_ids = self.get_user_roots(cr, uid, context=context)
menu_roots = self.read(cr, uid, menu_root_ids, fields, context=context) if menu_root_ids else []
menu_root = {
'id': False,
'name': 'root',
'parent_id': [-1, ''],
'children': menu_roots,
'all_menu_ids': menu_root_ids,
}
if not menu_roots:
return menu_root
        # menus are loaded fully, unlike a regular tree view, because there is a
        # limited number of items (752 when all 6.1 addons are installed)
menu_ids = self.search(cr, uid, [('id', 'child_of', menu_root_ids)], 0, False, False, context=context)
menu_items = self.read(cr, uid, menu_ids, fields, context=context)
# adds roots at the end of the sequence, so that they will overwrite
# equivalent menu items from full menu read when put into id:item
# mapping, resulting in children being correctly set on the roots.
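        # e.g. if a root menu also appears in menu_items, the dict() below keeps
        # the later menu_roots copy, so children appended to that entry land on
        # the same dict object already referenced by menu_root['children']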
menu_items.extend(menu_roots)
menu_root['all_menu_ids'] = menu_ids # includes menu_root_ids!
# make a tree using parent_id
menu_items_map = dict(
(menu_item["id"], menu_item) for menu_item in menu_items)
for menu_item in menu_items:
if menu_item['parent_id']:
parent = menu_item['parent_id'][0]
else:
parent = False
if parent in menu_items_map:
menu_items_map[parent].setdefault(
'children', []).append(menu_item)
# sort by sequence a tree using parent_id
for menu_item in menu_items:
menu_item.setdefault('children', []).sort(
key=operator.itemgetter('sequence'))
return menu_root
_columns = {
'name': fields.char('Menu', required=True, translate=True),
'sequence': fields.integer('Sequence'),
'child_id': fields.one2many('ir.ui.menu', 'parent_id', 'Child IDs'),
'parent_id': fields.many2one('ir.ui.menu', 'Parent Menu', select=True, ondelete="restrict"),
'parent_left': fields.integer('Parent Left', select=True),
'parent_right': fields.integer('Parent Right', select=True),
'groups_id': fields.many2many('res.groups', 'ir_ui_menu_group_rel',
'menu_id', 'gid', 'Groups', help="If you have groups, the visibility of this menu will be based on these groups. "\
"If this field is empty, Odoo will compute visibility based on the related object's read access."),
'complete_name': fields.function(_get_full_name,
string='Full Path', type='char', size=128),
'icon': fields.selection(tools.icons, 'Icon', size=64),
'icon_pict': fields.function(_get_icon_pict, type='char', size=32),
'web_icon': fields.char('Web Icon File'),
'web_icon_hover': fields.char('Web Icon File (hover)'),
'web_icon_data': fields.function(_get_image_icon, string='Web Icon Image', type='binary', readonly=True, store=True, multi='icon'),
'web_icon_hover_data': fields.function(_get_image_icon, string='Web Icon Image (hover)', type='binary', readonly=True, store=True, multi='icon'),
'needaction_enabled': fields.function(_get_needaction_enabled,
type='boolean',
store=True,
string='Target model uses the need action mechanism',
help='If the menu entry action is an act_window action, and if this action is related to a model that uses the need_action mechanism, this field is set to true. Otherwise, it is false.'),
'action': fields.function(_action, fnct_inv=_action_inv,
type='reference', string='Action', size=21,
selection=[
('ir.actions.report.xml', 'ir.actions.report.xml'),
('ir.actions.act_window', 'ir.actions.act_window'),
('ir.actions.wizard', 'ir.actions.wizard'),
('ir.actions.act_url', 'ir.actions.act_url'),
('ir.actions.server', 'ir.actions.server'),
('ir.actions.client', 'ir.actions.client'),
]),
}
def _rec_message(self, cr, uid, ids, context=None):
        return _('Error! You cannot create a recursive menu.')
_constraints = [
(osv.osv._check_recursion, _rec_message, ['parent_id'])
]
_defaults = {
'icon': 'STOCK_OPEN',
'icon_pict': ('stock', ('STOCK_OPEN', 'ICON_SIZE_MENU')),
'sequence': 10,
}
_order = "sequence,id"
_parent_store = True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
nDroidProject/nDroid-kernel
|
refs/heads/nDroid-1.6
|
tools/perf/util/setup.py
|
989
|
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
libapikfs = getenv('LIBAPIKFS')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
extra_objects = [libtraceevent, libapikfs],
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
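# Build-flow sketch (assumed, for illustration only): perf's Makefile is expected
# to export the environment variables read above and then run something like
#   python2 util/setup.py --quiet build_ext
# with CFLAGS, PYTHON_EXTBUILD_LIB and PYTHON_EXTBUILD_TMP set, so that the built
# perf extension module ends up under PYTHON_EXTBUILD_LIB.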
|
PiRSquared17/stoqs
|
refs/heads/master
|
loaders/CANON/toNetCDF/uctdToNetcdf.py
|
5
|
#!/usr/bin/env python
__author__ = "Mike McCann"
__copyright__ = "Copyright 2012, MBARI"
__license__ = "GPL"
__maintainer__ = "Mike McCann"
__email__ = "mccann at mbari.org"
__doc__ = '''
Script to read data from underway ctd files and write them to netCDF files.
Use the conventions for Trajectory feature type and write as much metadata as possible.
This script is meant to preserve the data exactly as it is reported in the original files.
Mike McCann
MBARI 11 January 2014
@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@author: __author__
@license: __license__
'''
import os
import sys
import csv
import time
import pytz
from glob import glob
import coards
import urllib2
from datetime import datetime, timedelta
import numpy as np
from pupynere import netcdf_file
# Add grandparent dir to pythonpath so that we can see the CANON and toNetCDF modules
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../") )
from CANON.toNetCDF import BaseWriter
class ParserWriter(BaseWriter):
'''
Handle all information needed to parse Underway CTD data
and write the data as a CF-compliant NetCDF Trajectory files.
'''
_FillValue = -888888
missing_value = -999999
def process_files(self):
if not self.args.depth:
raise Exception('Must specify --depth for UCTD data')
if self.args.format == 'Martin_UDAS':
print "Processing %s .txt files from directory '%s' with pattern '%s'" % (self.args.format, self.args.inDir, self.args.pattern)
self.process_martinudas_files()
else:
print "Processing Sea-Bird .asc and .hdr files from directory '%s' with pattern '%s'" % (self.args.inDir, self.args.pattern)
self.process_seabird_files()
def initialize_lists(self):
self.esec_list = []
self.lat_list = []
self.lon_list = []
self.dep_list = [] # Nominal depth, e.g. 2.0 for Western Flyer, 1.5 for Rachel Carson
self.t1_list = []
self.sal_list = []
self.xmiss_list = []
self.wetstar_list = []
self.fl_scufa_list = []
self.turb_scufa_list = []
def process_seabird_files(self):
'''
Loop through all SeaBird .asc files in inDir matching pattern and load data into lists and call the write_ctd() method.
Processed *.asc files look like:
TimeJ Latitude Longitude C0S/m T090C T190C Sal00 Xmiss Bat V1 WetStar V0 Upoly0 V2 Nbin Flag
259.284912 36.11671 -122.19104 4.150175 15.4155 15.2129 33.3560 81.7596 0.8056 4.2088 4.1208 0.3327 33.292633 2.3317 6 0.0000e+00
259.285583 36.11664 -122.19093 4.150087 15.4121 15.2068 33.3581 81.8932 0.7990 4.2155 4.0995 0.3313 33.521572 2.3409 6 0.0000e+00
259.286285 36.11653 -122.19081 4.148937 15.4002 15.2046 33.3579 81.8649 0.8004 4.2141 4.0903 0.3307 32.890720 2.3156 6 0.0000e+00
The realtime.asc file looks like:
(No header, but the same columns except that the Flag column is replaced by Unix epoch seconds)
261.7066551 36.114080 -122.200600 4.1550689 16.91964 16.69642 32.15976 76.97320 1.04685 3.96825 3.25088 0.27473 32.75335775 2.31013 1347926255
261.7067708 36.114440 -122.200360 4.1560253 16.91517 16.74474 32.17160 79.59720 0.91277 4.10012 3.28751 0.27717 32.50915751 2.30037 1347926265
261.7068866 36.114800 -122.200120 4.1555998 16.91589 16.72590 32.16735 79.86446 0.89936 4.11355 3.30582 0.27839 31.59340659 2.26374 1347926275
'''
# Fill up the object's member data item lists from all the files - read only the processed .asc files that match the specified pattern,
fileList = glob(os.path.join(self.args.inDir, self.args.pattern))
fileList.sort()
for file in fileList:
print "file = %s" % file
self.initialize_lists()
            # Open .hdr file to get the year, parsing it from a header line like this:
            # * NMEA UTC (Time) = Sep 15 2012 06:49:50
for line in open('.'.join(file.split('.')[:-1]) + '.hdr'):
if line.find('NMEA UTC (Time)') != -1:
year = int(line.split(' ')[7])
##print "year = %d" % year
break
for r in csv.DictReader(open(file), delimiter=' ', skipinitialspace=True):
# A TimeJ value of 1.0 is 0000 hours 1 January, so subtract 1 day
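                # e.g. TimeJ = 259.284912 in 2012 -> Jan 1 2012 + 258.284912 days
                # = Sep 15 2012 ~06:50 UTC, matching the NMEA UTC time in the .hdr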
dt = datetime(year, 1, 1, 0, 0, 0) + timedelta(days=float(r['TimeJ'])) - timedelta(days=1)
##print 'dt = ', dt
esDiff = dt - datetime(1970, 1, 1, 0, 0, 0)
es = 86400 * esDiff.days + esDiff.seconds
##print 'es = ', es, datetime.fromtimestamp(es)
##print 'r = ', r
self.esec_list.append(es)
self.lat_list.append(r['Latitude'])
self.lon_list.append(r['Longitude'])
self.dep_list.append(self.args.depth)
self.t1_list.append(r['T090C'])
self.sal_list.append(r['Sal00'])
self.xmiss_list.append(r['Xmiss'])
self.wetstar_list.append(r['WetStar'])
self.write_ctd(file)
def process_martinudas_files(self):
'''
Loop through all Martin_UDAS .txt files in inDir matching pattern and load data into lists and call the write_ctd() method.
Perform some range-checking quality control and describe the QC performed in the summary text added to the netCDF metadata.
Processed *.txt files look like:
R/V_John_H._Martin_Underway_Data_Acquisition_System
YYYYMMDD HHMMSS_Local GMT Decimal_Julian_Day Decimal_Hour Latitude Longitude Depth Salinity_SBE45 Temperature_degrees_C_SBE45 Conductivity_S/m_SBE45 Raw_Fluorescence_Volts_Scufa Turbidity_Scufa Temperature_degrees_C_Scufa Percent_Humidity Barometer_Inches_Hg Barometer_mBar Air_Temp_C Air_Temp_F Average_Relative_Wind_Direction Average_Relative_Wind_Speed Average_True_Wind_Direction Average_True_Wind_Speed Average_Course_Over_Ground Average_Speed_Over_Ground Vector_Average_Heading
20130923 084848 15:48:48 266.36722 8.81333 0.'000000 0.0'00000 0.000000 0.937900 17.355200 0.156940 0.000 0.000 0.000 87.000 30.357 1028.000 14.933 58.880 340.333 11.200 27.685 12.491 212.167 16.998 244.609
20130923 084859 15:48:59 266.36735 8.81639 36'48.172 121'47.832 0.000000 1.526000 17.353901 0.249250 0.289 0.280 17.800 87.000 30.357 1028.000 14.900 58.820 345.334 10.867 39.376 6.665 205.033 17.198 211.000
20130923 084906 15:49:06 266.36743 8.81833 36'48.148 121'47.852 0.000000 2.836700 17.313601 0.446990 0.291 0.277 17.800 87.000 30.357 1028.000 14.867 58.760 344.667 12.000 26.573 5.114 196.433 16.998 207.467
'''
# Allowed min & max values for range-check QC
ranges = { 'Salinity_SBE45': (30, 40),
'Temperature_degrees_C_Scufa': (8, 20),
}
# Fill up the object's member data item lists from all the files - read only the processed .asc files that match the specified pattern,
fileList = glob(os.path.join(self.args.inDir, self.args.pattern))
fileList.sort()
for file in fileList:
print "file = %s" % file
self.initialize_lists()
# Need to skip over first line in the data file, assume that the times are in Moss Landing Time zone
fh = open(file)
fh.seek(0)
next(fh)
localtz = pytz.timezone ("America/Los_Angeles")
utc = pytz.timezone ("UTC")
for r in csv.DictReader(fh, delimiter=' ', skipinitialspace=True):
if self.args.verbose:
print 'r = ', r
for k,v in r.iteritems():
print '%s: %s' % (k, v)
# Skip over clearly bad values
if r['Latitude'] == "0.'000000":
continue
if float(r['Salinity_SBE45']) < ranges['Salinity_SBE45'][0] or float(r['Salinity_SBE45']) > ranges['Salinity_SBE45'][1]:
continue
# Convert local time to GMT
dt_naive = datetime(int(r['YYYYMMDD'][0:4]), int(r['YYYYMMDD'][4:6]), int(r['YYYYMMDD'][6:8]),
int(r['HHMMSS_Local'][0:2]), int(r['HHMMSS_Local'][2:4]), int(r['HHMMSS_Local'][4:6]))
local_dt = localtz.localize(dt_naive, is_dst=None)
                esDiff = local_dt.astimezone(pytz.utc).replace(tzinfo=None) - datetime(1970, 1, 1)
                es = 86400 * esDiff.days + esDiff.seconds
if self.args.verbose:
print local_dt, local_dt.astimezone(pytz.utc), es
self.esec_list.append(es)
# Convert degrees ' decimal minutes to decimal degrees. Need to negate longitude
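                # e.g. "121'47.832" -> 121 + 47.832/60 = 121.7972, stored as -121.7972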
lat = float(r['Latitude'].split("'")[0]) + float(r['Latitude'].split("'")[1]) / 60.0
self.lat_list.append(lat)
lon = float(r['Longitude'].split("'")[0]) + float(r['Longitude'].split("'")[1]) / 60.0
self.lon_list.append(-lon)
if self.args.verbose:
print lon, lat
self.dep_list.append(self.args.depth)
# The data
self.t1_list.append(r['Temperature_degrees_C_Scufa'])
self.sal_list.append(r['Salinity_SBE45'])
turb_scufa_val = self._FillValue
if r['Turbidity_Scufa']:
if r['Turbidity_Scufa'] != 'None':
turb_scufa_val = r['Turbidity_Scufa']
self.turb_scufa_list.append(turb_scufa_val)
self.fl_scufa_val = self._FillValue
if r['Raw_Fluorescence_Volts_Scufa']:
if r['Raw_Fluorescence_Volts_Scufa'] != 'None':
self.fl_scufa_val = r['Raw_Fluorescence_Volts_Scufa']
self.fl_scufa_list.append(self.fl_scufa_val)
self.write_ctd(file, ranges)
def write_ctd(self, inFile, ranges={}):
'''
Write lists out as NetCDF.
'''
# Create the NetCDF file
outFile = '.'.join(inFile.split('.')[:-1]) + '.nc'
self.ncFile = netcdf_file(outFile, 'w')
# If specified on command line override the default generic title with what is specified
self.ncFile.title = 'Underway CTD data'
if self.args.title:
self.ncFile.title = self.args.title
        # Combine any summary text specified on the command line with the generic summary stating the original source file
self.ncFile.summary = 'Observational oceanographic data translated with no modification from original data file %s' % inFile
if self.args.summary:
self.ncFile.summary = self.args.summary
if not self.args.summary.endswith('.'):
self.ncFile.summary += '.'
self.ncFile.summary += ' Translated with no modification from original data file %s' % inFile
        # Add range-checking QC parameters to the summary
if ranges:
self.ncFile.summary += '. Range checking QC performed on the following variables with values outside of associated ranges discarded: %s' % ranges
# If specified on command line override the default generic license with what is specified
if self.args.license:
self.ncFile.license = self.args.license
# Trajectory dataset, time is the only netCDF dimension
self.ncFile.createDimension('time', len(self.esec_list))
self.time = self.ncFile.createVariable('time', 'float64', ('time',))
self.time.standard_name = 'time'
self.time.units = 'seconds since 1970-01-01'
self.time[:] = self.esec_list
# Record Variables - coordinates for trajectory - save in the instance and use for metadata generation
self.latitude = self.ncFile.createVariable('latitude', 'float64', ('time',))
self.latitude.long_name = 'LATITUDE'
self.latitude.standard_name = 'latitude'
self.latitude.units = 'degree_north'
self.latitude[:] = self.lat_list
self.longitude = self.ncFile.createVariable('longitude', 'float64', ('time',))
self.longitude.long_name = 'LONGITUDE'
self.longitude.standard_name = 'longitude'
self.longitude.units = 'degree_east'
self.longitude[:] = self.lon_list
self.depth = self.ncFile.createVariable('depth', 'float64', ('time',))
self.depth.long_name = 'DEPTH'
self.depth.standard_name = 'depth'
self.depth.units = 'm'
self.depth[:] = self.dep_list
# Record Variables - Underway CTD Data
temp = self.ncFile.createVariable('TEMP', 'float64', ('time',))
temp.long_name = 'Temperature, 2 [ITS-90, deg C]'
temp.standard_name = 'sea_water_temperature'
temp.coordinates = 'time depth latitude longitude'
temp.units = 'Celsius'
temp._FillValue = self._FillValue
temp.missing_value = self.missing_value
temp[:] = self.t1_list
sal = self.ncFile.createVariable('PSAL', 'float64', ('time',))
sal.long_name = 'Salinity, Practical [PSU]'
sal.standard_name = 'sea_water_salinity'
sal.coordinates = 'time depth latitude longitude'
sal._FillValue = self._FillValue
sal.missing_value = self.missing_value
sal[:] = self.sal_list
if self.xmiss_list:
xmiss = self.ncFile.createVariable('xmiss', 'float64', ('time',))
xmiss.long_name = 'Beam Transmission, Chelsea/Seatech'
xmiss.coordinates = 'time depth latitude longitude'
xmiss.units = '%'
xmiss._FillValue = self._FillValue
xmiss.missing_value = self.missing_value
xmiss[:] = self.xmiss_list
if self.wetstar_list:
wetstar = self.ncFile.createVariable('wetstar', 'float64', ('time',))
wetstar.long_name = 'Fluorescence, WET Labs WETstar'
wetstar.coordinates = 'time depth latitude longitude'
wetstar.units = 'mg/m^3'
wetstar._FillValue = self._FillValue
wetstar.missing_value = self.missing_value
wetstar[:] = self.wetstar_list
if self.turb_scufa_list:
turb_scufa = self.ncFile.createVariable('turb_scufa', 'float64', ('time',))
turb_scufa.long_name = 'Turbidity_Scufa'
turb_scufa.coordinates = 'time depth latitude longitude'
turb_scufa.units = 'NTU'
turb_scufa._FillValue = self._FillValue
turb_scufa.missing_value = self.missing_value
turb_scufa[:] = self.turb_scufa_list
if self.fl_scufa_list:
fl_scufa = self.ncFile.createVariable('fl_scufa', 'float64', ('time',))
fl_scufa.long_name = 'Raw_Fluorescence_Volts_Scufa'
fl_scufa.coordinates = 'time depth latitude longitude'
fl_scufa.units = 'volts'
fl_scufa._FillValue = self._FillValue
fl_scufa.missing_value = self.missing_value
fl_scufa[:] = self.fl_scufa_list
self.add_global_metadata()
self.ncFile.close()
print "Wrote %s" % outFile
# End write_ctd()
if __name__ == '__main__':
pw = ParserWriter()
pw.process_command_line()
pw.process_files()
print "Done."
|
SaschaMester/Zahlenraten
|
refs/heads/master
|
ZahlenratenSmall/zahlenraten_s.py
|
1
|
#! /usr/bin/env python3
# ~*~ encoding: utf-8 ~*~
from random import randint
eingegeben = False
geraten = False
while not eingegeben:
zahl = input("Bitte gebe eine Zahl > 50 ein: ")
try:
zahlInt = int(zahl)
except ValueError:
print("Nur Ganzzahlen erlaubt!")
continue
if zahlInt <= 50:
continue
eingegeben = True
try:
ausgedachteZahl = randint(1, zahlInt)
except ValueError:
print("Ungültige Eingabe")
quit()
print("Ich habe mir eine Zahl zwischen 1 und " + zahl + " ausgedacht.")
print("Es ist an dir, meine Zahl zu erraten.")
while not geraten:
userzahl = input("Bitte gebe deinen Rateversuch ein: ")
try:
userzahlInt = abs(int(userzahl))
except ValueError:
print("Es sind nur ganze Zahlen erlaubt!")
continue
if userzahlInt == 0:
print("Auf Wiedersehen!")
quit()
if abs(userzahlInt) < ausgedachteZahl:
print("Meine Zahl ist größer als " + userzahl + ".")
elif abs(userzahlInt) > ausgedachteZahl:
print("Meine Zahl ist kleiner als " + userzahl + ".")
else:
geraten = True
while geraten:
print("Herzlichen Glückwunsch!")
quit()
|
vladryk/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/project/loadbalancers/tabs.py
|
30
|
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.loadbalancers import tables
class PoolsTab(tabs.TableTab):
table_classes = (tables.PoolsTable,)
name = _("Pools")
slug = "pools"
template_name = "horizon/common/_detail_table.html"
def get_poolstable_data(self):
pools = []
try:
request = self.tab_group.request
tenant_id = self.request.user.tenant_id
pools = api.lbaas.pool_list(request,
tenant_id=tenant_id)
fips = None
for pool in pools:
if hasattr(pool, "vip") and pool.vip:
if not fips:
fips = api.network.tenant_floating_ip_list(request)
vip_fip = [fip for fip in fips
if fip.port_id == pool.vip.port_id]
if vip_fip:
pool.vip.fip = vip_fip[0]
except Exception:
exceptions.handle(self.tab_group.request,
_('Unable to retrieve pools list.'))
return pools
class MembersTab(tabs.TableTab):
table_classes = (tables.MembersTable,)
name = _("Members")
slug = "members"
template_name = "horizon/common/_detail_table.html"
def get_memberstable_data(self):
try:
tenant_id = self.request.user.tenant_id
members = api.lbaas.member_list(self.tab_group.request,
tenant_id=tenant_id)
except Exception:
members = []
exceptions.handle(self.tab_group.request,
_('Unable to retrieve member list.'))
return members
class MonitorsTab(tabs.TableTab):
table_classes = (tables.MonitorsTable,)
name = _("Monitors")
slug = "monitors"
template_name = "horizon/common/_detail_table.html"
def get_monitorstable_data(self):
try:
tenant_id = self.request.user.tenant_id
monitors = api.lbaas.pool_health_monitor_list(
self.tab_group.request, tenant_id=tenant_id)
except Exception:
monitors = []
exceptions.handle(self.tab_group.request,
_('Unable to retrieve monitor list.'))
return monitors
class LoadBalancerTabs(tabs.TabGroup):
slug = "lbtabs"
tabs = (PoolsTab, MembersTab, MonitorsTab)
sticky = True
class PoolDetailsTab(tabs.Tab):
name = _("Pool Details")
slug = "pooldetails"
template_name = "project/loadbalancers/_pool_details.html"
def get_context_data(self, request):
pool = self.tab_group.kwargs['pool']
return {'pool': pool}
class VipDetailsTab(tabs.Tab):
name = _("VIP Details")
slug = "vipdetails"
template_name = "project/loadbalancers/_vip_details.html"
def get_context_data(self, request):
vid = self.tab_group.kwargs['vip_id']
vip = []
try:
vip = api.lbaas.vip_get(request, vid)
fips = api.network.tenant_floating_ip_list(self.tab_group.request)
vip_fip = [fip for fip in fips
if fip.port_id == vip.port.id]
if vip_fip:
vip.fip = vip_fip[0]
except Exception:
exceptions.handle(self.tab_group.request,
_('Unable to retrieve VIP details.'))
return {'vip': vip}
class MemberDetailsTab(tabs.Tab):
name = _("Member Details")
slug = "memberdetails"
template_name = "project/loadbalancers/_member_details.html"
def get_context_data(self, request):
member = self.tab_group.kwargs['member']
return {'member': member}
class MonitorDetailsTab(tabs.Tab):
name = _("Monitor Details")
slug = "monitordetails"
template_name = "project/loadbalancers/_monitor_details.html"
def get_context_data(self, request):
monitor = self.tab_group.kwargs['monitor']
return {'monitor': monitor}
class PoolDetailsTabs(tabs.TabGroup):
slug = "pooltabs"
tabs = (PoolDetailsTab,)
class VipDetailsTabs(tabs.TabGroup):
slug = "viptabs"
tabs = (VipDetailsTab,)
class MemberDetailsTabs(tabs.TabGroup):
slug = "membertabs"
tabs = (MemberDetailsTab,)
class MonitorDetailsTabs(tabs.TabGroup):
slug = "monitortabs"
tabs = (MonitorDetailsTab,)
|
hivelocity/python-ubersmith
|
refs/heads/master
|
ubersmith/uber.py
|
2
|
"""Uber call functions.
These are lightweight call functions that basically just wrap call classes
under ubersmith.calls. If a call function doesn't exist, it is generated by
generate_generic_calls, which searches for a call class; if none is found,
one is created using ubersmith.calls.BaseCall.
"""
from ubersmith.calls import generate_generic_calls
__all__ = []
generate_generic_calls(__name__.split('.')[-1], globals())
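# Illustrative usage sketch (assumed host, credentials and call names; the calls
# actually available depend on the Ubersmith instance):
#
#   import ubersmith
#   import ubersmith.uber
#   ubersmith.init(base_url='http://billing.example.com/api/2.0/',
#                  username='api_user', password='secret')
#   methods = ubersmith.uber.method_list()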
|
IndonesiaX/edx-platform
|
refs/heads/master
|
common/djangoapps/student/management/commands/add_to_group.py
|
182
|
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User, Group
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--list',
action='store_true',
dest='list',
default=False,
help='List available groups'),
make_option('--create',
action='store_true',
dest='create',
default=False,
help='Create the group if it does not exist'),
make_option('--remove',
action='store_true',
dest='remove',
default=False,
help='Remove the user from the group instead of adding it'),
)
args = '<user|email> <group>'
help = 'Add a user to a group'
def print_groups(self):
print 'Groups available:'
for group in Group.objects.all().distinct():
print ' ', group.name
def handle(self, *args, **options):
if options['list']:
self.print_groups()
return
if len(args) != 2:
raise CommandError('Usage is add_to_group {0}'.format(self.args))
name_or_email, group_name = args
if '@' in name_or_email:
user = User.objects.get(email=name_or_email)
else:
user = User.objects.get(username=name_or_email)
try:
group = Group.objects.get(name=group_name)
except Group.DoesNotExist:
if options['create']:
group = Group(name=group_name)
group.save()
else:
raise CommandError('Group {} does not exist'.format(group_name))
if options['remove']:
user.groups.remove(group)
else:
user.groups.add(group)
print 'Success!'
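# Illustrative invocation (hypothetical user and group names), run through
# Django's manage.py from an edx-platform checkout:
#   python manage.py lms add_to_group staff@example.com beta_testers --create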
|
flotre/sickbeard-vfvo
|
refs/heads/master
|
lib/hachoir_parser/misc/bplist.py
|
90
|
"""
Apple/NeXT Binary Property List (BPLIST) parser.
Also includes a .createXML() function which produces an XML representation of the object.
Note that it will discard unknown objects, nulls and fill values, but should work for most files.
Documents:
- CFBinaryPList.c
http://src.gnu-darwin.org/DarwinSourceArchive/expanded/CF/CF-299/Parsing.subproj/CFBinaryPList.c
- ForFoundationOnly.h (for structure formats)
http://src.gnu-darwin.org/DarwinSourceArchive/expanded/CF/CF-299/Base.subproj/ForFoundationOnly.h
- XML <-> BPList converter
http://scw.us/iPhone/plutil/plutil.pl
Author: Robert Xiao
Created: 2008-09-21
"""
from lib.hachoir_parser import HachoirParser
from lib.hachoir_core.field import (RootSeekableFieldSet, FieldSet, Enum,
Bits, GenericInteger, Float32, Float64, UInt8, UInt64, Bytes, NullBytes, RawBytes, String)
from lib.hachoir_core.endian import BIG_ENDIAN
from lib.hachoir_core.text_handler import displayHandler
from lib.hachoir_core.tools import humanDatetime
from datetime import datetime, timedelta
class BPListTrailer(FieldSet):
def createFields(self):
yield NullBytes(self, "unused", 6)
yield UInt8(self, "offsetIntSize", "Size (in bytes) of offsets in the offset table")
yield UInt8(self, "objectRefSize", "Size (in bytes) of object numbers in object references")
yield UInt64(self, "numObjects", "Number of objects in this file")
yield UInt64(self, "topObject", "Top-level object reference")
yield UInt64(self, "offsetTableOffset", "File offset to the offset table")
def createDescription(self):
return "Binary PList trailer"
class BPListOffsetTable(FieldSet):
def createFields(self):
size = self["../trailer/offsetIntSize"].value*8
for i in range(self["../trailer/numObjects"].value):
yield Bits(self, "offset[]", size)
class BPListSize(FieldSet):
def createFields(self):
yield Bits(self, "size", 4)
if self['size'].value == 0xF:
yield BPListObject(self, "fullsize")
def createValue(self):
if 'fullsize' in self:
return self['fullsize'].value
else:
return self['size'].value
class BPListObjectRef(GenericInteger):
def __init__(self, parent, name, description=None):
size = parent['/trailer/objectRefSize'].value*8
GenericInteger.__init__(self, parent, name, False, size, description)
def getRef(self):
return self.parent['/object[' + str(self.value) + ']']
def createDisplay(self):
return self.getRef().display
def createXML(self, prefix=''):
return self.getRef().createXML(prefix)
class BPListArray(FieldSet):
def __init__(self, parent, name, size, description=None):
FieldSet.__init__(self, parent, name, description=description)
self.numels = size
def createFields(self):
for i in range(self.numels):
yield BPListObjectRef(self, "ref[]")
def createValue(self):
return self.array('ref')
def createDisplay(self):
return '[' + ', '.join([x.display for x in self.value]) + ']'
def createXML(self,prefix=''):
return prefix + '<array>\n' + ''.join([x.createXML(prefix + '\t' ) + '\n' for x in self.value]) + prefix + '</array>'
class BPListDict(FieldSet):
def __init__(self, parent, name, size, description=None):
FieldSet.__init__(self, parent, name, description=description)
self.numels = size
def createFields(self):
for i in range(self.numels):
yield BPListObjectRef(self, "keyref[]")
for i in range(self.numels):
yield BPListObjectRef(self, "valref[]")
def createValue(self):
return zip(self.array('keyref'),self.array('valref'))
def createDisplay(self):
return '{' + ', '.join(['%s: %s'%(k.display,v.display) for k,v in self.value]) + '}'
def createXML(self, prefix=''):
return prefix + '<dict>\n' + ''.join(['%s\t<key>%s</key>\n%s\n'%(prefix,k.getRef().value.encode('utf-8'),v.createXML(prefix + '\t')) for k,v in self.value]) + prefix + '</dict>'
class BPListObject(FieldSet):
def createFields(self):
yield Enum(Bits(self, "marker_type", 4),
{0: "Simple",
1: "Int",
2: "Real",
3: "Date",
4: "Data",
5: "ASCII String",
6: "UTF-16-BE String",
8: "UID",
10: "Array",
13: "Dict",})
markertype = self['marker_type'].value
if markertype == 0:
# Simple (Null)
yield Enum(Bits(self, "value", 4),
{0: "Null",
8: "False",
9: "True",
15: "Fill Byte",})
if self['value'].display == "False":
self.xml=lambda prefix:prefix + "<false/>"
elif self['value'].display == "True":
self.xml=lambda prefix:prefix + "<true/>"
else:
self.xml=lambda prefix:prefix + ""
elif markertype == 1:
# Int
yield Bits(self, "size", 4, "log2 of number of bytes")
size=self['size'].value
# 8-bit (size=0), 16-bit (size=1) and 32-bit (size=2) numbers are unsigned
# 64-bit (size=3) numbers are signed
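            # e.g. size=2 -> 2**2 = 4 bytes -> unsigned 32-bit value;
            #      size=3 -> 2**3 = 8 bytes -> signed 64-bit value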
yield GenericInteger(self, "value", (size>=3), (2**size)*8)
self.xml=lambda prefix:prefix + "<integer>%s</integer>"%self['value'].value
elif markertype == 2:
# Real
yield Bits(self, "size", 4, "log2 of number of bytes")
if self['size'].value == 2: # 2**2 = 4 byte float
yield Float32(self, "value")
elif self['size'].value == 3: # 2**3 = 8 byte float
yield Float64(self, "value")
else:
# FIXME: What is the format of the real?
yield Bits(self, "value", (2**self['size'].value)*8)
self.xml=lambda prefix:prefix + "<real>%s</real>"%self['value'].value
elif markertype == 3:
# Date
yield Bits(self, "extra", 4, "Extra value, should be 3")
cvt_time=lambda v:datetime(2001,1,1) + timedelta(seconds=v)
yield displayHandler(Float64(self, "value"),lambda x:humanDatetime(cvt_time(x)))
self.xml=lambda prefix:prefix + "<date>%s</date>"%(cvt_time(self['value'].value).isoformat())
elif markertype == 4:
# Data
yield BPListSize(self, "size")
if self['size'].value:
yield Bytes(self, "value", self['size'].value)
self.xml=lambda prefix:prefix + "<data>\n%s\n%s</data>"%(self['value'].value.encode('base64').strip(),prefix)
else:
self.xml=lambda prefix:prefix + '<data></data>'
elif markertype == 5:
# ASCII String
yield BPListSize(self, "size")
if self['size'].value:
yield String(self, "value", self['size'].value, charset="ASCII")
self.xml=lambda prefix:prefix + "<string>%s</string>"%(self['value'].value.encode('iso-8859-1'))
else:
self.xml=lambda prefix:prefix + '<string></string>'
elif markertype == 6:
# UTF-16-BE String
yield BPListSize(self, "size")
if self['size'].value:
yield String(self, "value", self['size'].value*2, charset="UTF-16-BE")
self.xml=lambda prefix:prefix + "<string>%s</string>"%(self['value'].value.encode('utf-8'))
else:
self.xml=lambda prefix:prefix + '<string></string>'
elif markertype == 8:
# UID
yield Bits(self, "size", 4, "Number of bytes minus 1")
yield GenericInteger(self, "value", False, (self['size'].value + 1)*8)
self.xml=lambda prefix:prefix + "" # no equivalent?
elif markertype == 10:
# Array
yield BPListSize(self, "size")
size = self['size'].value
if size:
yield BPListArray(self, "value", size)
self.xml=lambda prefix:self['value'].createXML(prefix)
elif markertype == 13:
# Dict
yield BPListSize(self, "size")
yield BPListDict(self, "value", self['size'].value)
self.xml=lambda prefix:self['value'].createXML(prefix)
else:
yield Bits(self, "value", 4)
self.xml=lambda prefix:''
def createValue(self):
if 'value' in self:
return self['value'].value
elif self['marker_type'].value in [4,5,6]:
return u''
else:
return None
def createDisplay(self):
if 'value' in self:
return unicode(self['value'].display)
elif self['marker_type'].value in [4,5,6]:
return u''
else:
return None
def createXML(self, prefix=''):
if 'value' in self:
try:
return self.xml(prefix)
except AttributeError:
return ''
return ''
def getFieldType(self):
return '%s<%s>'%(FieldSet.getFieldType(self), self['marker_type'].display)
class BPList(HachoirParser, RootSeekableFieldSet):
endian = BIG_ENDIAN
MAGIC = "bplist00"
PARSER_TAGS = {
"id": "bplist",
"category": "misc",
"file_ext": ("plist",),
"magic": ((MAGIC, 0),),
"min_size": 8 + 32, # bplist00 + 32-byte trailer
"description": "Apple/NeXT Binary Property List",
}
def __init__(self, stream, **args):
RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
HachoirParser.__init__(self, stream, **args)
def validate(self):
if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC:
return "Invalid magic"
return True
def createFields(self):
yield Bytes(self, "magic", 8, "File magic (bplist00)")
if self.size:
self.seekByte(self.size//8-32, True)
else:
# FIXME: UNTESTED
while True:
try:
self.seekByte(1024)
except:
break
self.seekByte(self.size//8-32)
yield BPListTrailer(self, "trailer")
self.seekByte(self['trailer/offsetTableOffset'].value)
yield BPListOffsetTable(self, "offset_table")
for i in self.array("offset_table/offset"):
if self.current_size > i.value*8:
self.seekByte(i.value)
elif self.current_size < i.value*8:
# try to detect files with gaps or unparsed content
yield RawBytes(self, "padding[]", i.value-self.current_size//8)
yield BPListObject(self, "object[]")
def createXML(self, prefix=''):
return '''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
''' + self['/object[' + str(self['/trailer/topObject'].value) + ']'].createXML(prefix) + '''
</plist>'''
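# Illustrative usage sketch (assumes the bundled hachoir stream helpers; the
# exact import path and parser construction may differ):
#
#   from lib.hachoir_core.stream import FileInputStream
#   parser = BPList(FileInputStream(u"Info.plist"))
#   print parser.createXML()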
|
lastcolour/GraphicsDemo
|
refs/heads/master
|
scripts/compile.py
|
1
|
__all__ = ["CompileRunner"]
import sys
import platform
from utils import runCMD
from runner import Runner
from logger import log
_COMPILE_LOG_FILE = "lastCompileRun.log"
class CompileRunner(Runner):
# TODO: Create base class for CmakeRunner and Compiler runner that encapsulate common behaviour
def __init__(self, cmakeRunInfo):
Runner.__init__(self)
self._config = cmakeRunInfo
def run(self, buildType):
tCompCmd = self._formatCompileCMD()
try:
tCompileLog = self._getLogFileName()
with open(tCompileLog, "w+") as tFile:
                compileOut = runCMD(tCompCmd, workDir=self._getRunDir(), pipe=tFile,
                                    isShell=platform.system() != "Windows")
except:
log.error("[Error] Can't compile project: {0}".format(sys.exc_info()[1]))
return False
else:
log.info("[Info] Compile log saved to: {0}".format(tCompileLog))
if compileOut["ret_code"] != 0:
log.error("[Error] {0} Compiler log {1}".format("=" * 20, "=" * 20))
self._printNiceFailLog(compileOut["out"])
log.error("[Error] {0}".format("-" * 51))
return False
else:
return True
def _printNiceFailLog(self, processOutStr):
tLineDecoration = "|"
for line in processOutStr.split("\n"):
log.warning("{0} {1}".format(tLineDecoration, line))
def _formatCompileCMD(self):
        if self._config["COMPILER"] == "MSVC":
            return self._formatStudioCMD()
        elif self._config["COMPILER"] == "MAKE":
return self._formatMakeCMD()
else:
raise Exception("Unsupported compiler: {0}".format(self._config["COMPILER"]))
def _getLogFileName(self):
return self._config["CMAKE_OUT_DIR"] + "/" + _COMPILE_LOG_FILE
def _getRunDir(self):
return self._config["CMAKE_OUT_DIR"]
def _formatStudioCMD(self):
return "devenv.com ALL_BUILD.vcxproj /Build {0}".format(self._config["BUILD_TYPE"])
def _formatMakeCMD(self):
return "make"
|
Symmetry-Innovations-Pty-Ltd/Python-2.7-for-QNX6.5.0-x86
|
refs/heads/master
|
usr/pkg/lib/python2.7/_pyio.py
|
76
|
"""
Python implementation of the io module.
"""
from __future__ import (print_function, unicode_literals)
import os
import abc
import codecs
import warnings
# Import thread instead of threading to reduce startup cost
try:
from thread import allocate_lock as Lock
except ImportError:
from dummy_thread import allocate_lock as Lock
import io
from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END)
from errno import EINTR
__metaclass__ = type
# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024 # bytes
# NOTE: Base classes defined here are registered with the "official" ABCs
# defined in io.py. We don't use real inheritance though, because we don't
# want to inherit the C implementations.
class BlockingIOError(IOError):
"""Exception raised when I/O would block on a non-blocking I/O stream."""
def __init__(self, errno, strerror, characters_written=0):
super(IOError, self).__init__(errno, strerror)
if not isinstance(characters_written, (int, long)):
            raise TypeError("characters_written must be an integer")
self.characters_written = characters_written
def open(file, mode="r", buffering=-1,
encoding=None, errors=None,
newline=None, closefd=True):
r"""Open file and return a stream. Raise IOError upon failure.
file is either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened or an integer file descriptor of the file to be
wrapped. (If a file descriptor is given, it is closed when the
returned I/O object is closed, unless closefd is set to False.)
mode is an optional string that specifies the mode in which the file
is opened. It defaults to 'r' which means open for reading in text
mode. Other common values are 'w' for writing (truncating the file if
it already exists), and 'a' for appending (which on some Unix systems,
means that all writes append to the end of the file regardless of the
current seek position). In text mode, if encoding is not specified the
encoding used is platform dependent. (For reading and writing raw
bytes use binary mode and leave encoding unspecified.) The available
modes are:
========= ===============================================================
Character Meaning
--------- ---------------------------------------------------------------
'r' open for reading (default)
'w' open for writing, truncating the file first
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newline mode (for backwards compatibility; unneeded
for new code)
========= ===============================================================
The default mode is 'rt' (open for reading text). For binary random
access, the mode 'w+b' opens and truncates the file to 0 bytes, while
'r+b' opens the file without truncation.
Python distinguishes between files opened in binary and text modes,
even when the underlying operating system doesn't. Files opened in
binary mode (appending 'b' to the mode argument) return contents as
bytes objects without any decoding. In text mode (the default, or when
't' is appended to the mode argument), the contents of the file are
returned as strings, the bytes having been first decoded using a
platform-dependent encoding or using the specified encoding if given.
buffering is an optional integer used to set the buffering policy.
Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
line buffering (only usable in text mode), and an integer > 1 to indicate
the size of a fixed-size chunk buffer. When no buffering argument is
given, the default buffering policy works as follows:
* Binary files are buffered in fixed-size chunks; the size of the buffer
is chosen using a heuristic trying to determine the underlying device's
"block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
On many systems, the buffer will typically be 4096 or 8192 bytes long.
* "Interactive" text files (files for which isatty() returns True)
use line buffering. Other text files use the policy described above
for binary files.
encoding is the name of the encoding used to decode or encode the
file. This should only be used in text mode. The default encoding is
platform dependent, but any encoding supported by Python can be
passed. See the codecs module for the list of supported encodings.
errors is an optional string that specifies how encoding errors are to
be handled---this argument should not be used in binary mode. Pass
'strict' to raise a ValueError exception if there is an encoding error
(the default of None has the same effect), or pass 'ignore' to ignore
errors. (Note that ignoring encoding errors can lead to data loss.)
See the documentation for codecs.register for a list of the permitted
encoding error strings.
newline controls how universal newlines works (it only applies to text
mode). It can be None, '', '\n', '\r', and '\r\n'. It works as
follows:
* On input, if newline is None, universal newlines mode is
enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
these are translated into '\n' before being returned to the
caller. If it is '', universal newline mode is enabled, but line
endings are returned to the caller untranslated. If it has any of
the other legal values, input lines are only terminated by the given
string, and the line ending is returned to the caller untranslated.
* On output, if newline is None, any '\n' characters written are
translated to the system default line separator, os.linesep. If
newline is '', no translation takes place. If newline is any of the
other legal values, any '\n' characters written are translated to
the given string.
If closefd is False, the underlying file descriptor will be kept open
when the file is closed. This does not work when a file name is given
and must be True in that case.
open() returns a file object whose type depends on the mode, and
through which the standard file operations such as reading and writing
are performed. When open() is used to open a file in a text mode ('w',
'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
a file in a binary mode, the returned class varies: in read binary
mode, it returns a BufferedReader; in write binary and append binary
modes, it returns a BufferedWriter, and in read/write mode, it returns
a BufferedRandom.
It is also possible to use a string or bytearray as a file for both
reading and writing. For strings StringIO can be used like a file
opened in a text mode, and for bytes a BytesIO can be used like a file
opened in a binary mode.
"""
if not isinstance(file, (basestring, int, long)):
raise TypeError("invalid file: %r" % file)
if not isinstance(mode, basestring):
raise TypeError("invalid mode: %r" % mode)
if not isinstance(buffering, (int, long)):
raise TypeError("invalid buffering: %r" % buffering)
if encoding is not None and not isinstance(encoding, basestring):
raise TypeError("invalid encoding: %r" % encoding)
if errors is not None and not isinstance(errors, basestring):
raise TypeError("invalid errors: %r" % errors)
modes = set(mode)
if modes - set("arwb+tU") or len(mode) > len(modes):
raise ValueError("invalid mode: %r" % mode)
reading = "r" in modes
writing = "w" in modes
appending = "a" in modes
updating = "+" in modes
text = "t" in modes
binary = "b" in modes
if "U" in modes:
if writing or appending:
raise ValueError("can't use U and writing mode at once")
reading = True
if text and binary:
raise ValueError("can't have text and binary mode at once")
if reading + writing + appending > 1:
raise ValueError("can't have read/write/append mode at once")
if not (reading or writing or appending):
raise ValueError("must have exactly one of read/write/append mode")
if binary and encoding is not None:
raise ValueError("binary mode doesn't take an encoding argument")
if binary and errors is not None:
raise ValueError("binary mode doesn't take an errors argument")
if binary and newline is not None:
raise ValueError("binary mode doesn't take a newline argument")
raw = FileIO(file,
(reading and "r" or "") +
(writing and "w" or "") +
(appending and "a" or "") +
(updating and "+" or ""),
closefd)
line_buffering = False
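    # note: `and` binds tighter than `or`, so line buffering is requested either
    # when buffering == 1, or when buffering is negative and raw is a tty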
if buffering == 1 or buffering < 0 and raw.isatty():
buffering = -1
line_buffering = True
if buffering < 0:
buffering = DEFAULT_BUFFER_SIZE
try:
bs = os.fstat(raw.fileno()).st_blksize
except (os.error, AttributeError):
pass
else:
if bs > 1:
buffering = bs
if buffering < 0:
raise ValueError("invalid buffering size")
if buffering == 0:
if binary:
return raw
raise ValueError("can't have unbuffered text I/O")
if updating:
buffer = BufferedRandom(raw, buffering)
elif writing or appending:
buffer = BufferedWriter(raw, buffering)
elif reading:
buffer = BufferedReader(raw, buffering)
else:
raise ValueError("unknown mode: %r" % mode)
if binary:
return buffer
text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
text.mode = mode
return text
class DocDescriptor:
"""Helper for builtins.open.__doc__
"""
def __get__(self, obj, typ):
return (
"open(file, mode='r', buffering=-1, encoding=None, "
"errors=None, newline=None, closefd=True)\n\n" +
open.__doc__)
class OpenWrapper:
"""Wrapper for builtins.open
Trick so that open won't become a bound method when stored
as a class variable (as dbm.dumb does).
See initstdio() in Python/pythonrun.c.
"""
__doc__ = DocDescriptor()
def __new__(cls, *args, **kwargs):
return open(*args, **kwargs)
class UnsupportedOperation(ValueError, IOError):
pass
class IOBase:
__metaclass__ = abc.ABCMeta
"""The abstract base class for all I/O classes, acting on streams of
bytes. There is no public constructor.
This class provides dummy implementations for many methods that
derived classes can override selectively; the default implementations
represent a file that cannot be read, written or seeked.
Even though IOBase does not declare read, readinto, or write because
their signatures will vary, implementations and clients should
consider those methods part of the interface. Also, implementations
may raise a IOError when operations they do not support are called.
The basic type used for binary data read from or written to a file is
bytes. bytearrays are accepted too, and in some cases (such as
readinto) needed. Text I/O classes work with str data.
Note that calling any method (even inquiries) on a closed stream is
undefined. Implementations may raise IOError in this case.
IOBase (and its subclasses) support the iterator protocol, meaning
that an IOBase object can be iterated over yielding the lines in a
stream.
IOBase also supports the :keyword:`with` statement. In this example,
fp is closed after the suite of the with statement is complete:
        with open('spam.txt', 'w') as fp:
            fp.write('Spam and eggs!')
"""
### Internal ###
def _unsupported(self, name):
"""Internal: raise an exception for unsupported operations."""
raise UnsupportedOperation("%s.%s() not supported" %
(self.__class__.__name__, name))
### Positioning ###
def seek(self, pos, whence=0):
"""Change stream position.
Change the stream position to byte offset offset. offset is
interpreted relative to the position indicated by whence. Values
for whence are:
* 0 -- start of stream (the default); offset should be zero or positive
* 1 -- current stream position; offset may be negative
* 2 -- end of stream; offset is usually negative
Return the new absolute position.
"""
self._unsupported("seek")
def tell(self):
"""Return current stream position."""
return self.seek(0, 1)
def truncate(self, pos=None):
"""Truncate file to size bytes.
Size defaults to the current IO position as reported by tell(). Return
the new size.
"""
self._unsupported("truncate")
### Flush and close ###
def flush(self):
"""Flush write buffers, if applicable.
This is not implemented for read-only and non-blocking streams.
"""
self._checkClosed()
# XXX Should this return the number of bytes written???
__closed = False
def close(self):
"""Flush and close the IO object.
This method has no effect if the file is already closed.
"""
if not self.__closed:
self.flush()
self.__closed = True
def __del__(self):
"""Destructor. Calls close()."""
# The try/except block is in case this is called at program
# exit time, when it's possible that globals have already been
# deleted, and then the close() call might fail. Since
# there's nothing we can do about such failures and they annoy
# the end users, we suppress the traceback.
try:
self.close()
except:
pass
### Inquiries ###
def seekable(self):
"""Return whether object supports random access.
If False, seek(), tell() and truncate() will raise IOError.
This method may need to do a test seek().
"""
return False
def _checkSeekable(self, msg=None):
"""Internal: raise an IOError if file is not seekable
"""
if not self.seekable():
raise IOError("File or stream is not seekable."
if msg is None else msg)
def readable(self):
"""Return whether object was opened for reading.
If False, read() will raise IOError.
"""
return False
def _checkReadable(self, msg=None):
"""Internal: raise an IOError if file is not readable
"""
if not self.readable():
raise IOError("File or stream is not readable."
if msg is None else msg)
def writable(self):
"""Return whether object was opened for writing.
If False, write() and truncate() will raise IOError.
"""
return False
def _checkWritable(self, msg=None):
"""Internal: raise an IOError if file is not writable
"""
if not self.writable():
raise IOError("File or stream is not writable."
if msg is None else msg)
@property
def closed(self):
"""closed: bool. True iff the file has been closed.
For backwards compatibility, this is a property, not a predicate.
"""
return self.__closed
    def _checkClosed(self, msg=None):
        """Internal: raise a ValueError if file is closed
"""
if self.closed:
raise ValueError("I/O operation on closed file."
if msg is None else msg)
### Context manager ###
def __enter__(self):
"""Context management protocol. Returns self."""
self._checkClosed()
return self
def __exit__(self, *args):
"""Context management protocol. Calls close()"""
self.close()
### Lower-level APIs ###
# XXX Should these be present even if unimplemented?
def fileno(self):
"""Returns underlying file descriptor if one exists.
An IOError is raised if the IO object does not use a file descriptor.
"""
self._unsupported("fileno")
def isatty(self):
"""Return whether this is an 'interactive' stream.
Return False if it can't be determined.
"""
self._checkClosed()
return False
### Readline[s] and writelines ###
def readline(self, limit=-1):
r"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
The line terminator is always b'\n' for binary files; for text
files, the newlines argument to open can be used to select the line
terminator(s) recognized.
"""
# For backwards compatibility, a (slowish) readline().
if hasattr(self, "peek"):
def nreadahead():
readahead = self.peek(1)
if not readahead:
return 1
n = (readahead.find(b"\n") + 1) or len(readahead)
if limit >= 0:
n = min(n, limit)
return n
else:
def nreadahead():
return 1
if limit is None:
limit = -1
elif not isinstance(limit, (int, long)):
raise TypeError("limit must be an integer")
res = bytearray()
while limit < 0 or len(res) < limit:
b = self.read(nreadahead())
if not b:
break
res += b
if res.endswith(b"\n"):
break
return bytes(res)
def __iter__(self):
self._checkClosed()
return self
def next(self):
line = self.readline()
if not line:
raise StopIteration
return line
def readlines(self, hint=None):
"""Return a list of lines from the stream.
hint can be specified to control the number of lines read: no more
lines will be read if the total size (in bytes/characters) of all
lines so far exceeds hint.
"""
if hint is not None and not isinstance(hint, (int, long)):
raise TypeError("integer or None expected")
if hint is None or hint <= 0:
return list(self)
n = 0
lines = []
for line in self:
lines.append(line)
n += len(line)
if n >= hint:
break
return lines
def writelines(self, lines):
self._checkClosed()
for line in lines:
self.write(line)
io.IOBase.register(IOBase)
class RawIOBase(IOBase):
"""Base class for raw binary I/O."""
# The read() method is implemented by calling readinto(); derived
# classes that want to support read() only need to implement
# readinto() as a primitive operation. In general, readinto() can be
# more efficient than read().
# (It would be tempting to also provide an implementation of
# readinto() in terms of read(), in case the latter is a more suitable
# primitive operation, but that would lead to nasty recursion in case
# a subclass doesn't implement either.)
def read(self, n=-1):
"""Read and return up to n bytes.
Returns an empty bytes object on EOF, or None if the object is
set not to block and has no data to read.
"""
if n is None:
n = -1
if n < 0:
return self.readall()
b = bytearray(n.__index__())
n = self.readinto(b)
if n is None:
return None
del b[n:]
return bytes(b)
    def readall(self):
        """Read until EOF, using multiple read() calls."""
res = bytearray()
while True:
data = self.read(DEFAULT_BUFFER_SIZE)
if not data:
break
res += data
if res:
return bytes(res)
else:
# b'' or None
return data
def readinto(self, b):
"""Read up to len(b) bytes into b.
Returns number of bytes read (0 for EOF), or None if the object
is set not to block and has no data to read.
"""
self._unsupported("readinto")
def write(self, b):
"""Write the given buffer to the IO stream.
Returns the number of bytes written, which may be less than len(b).
"""
self._unsupported("write")
io.RawIOBase.register(RawIOBase)
from _io import FileIO
RawIOBase.register(FileIO)
class BufferedIOBase(IOBase):
"""Base class for buffered IO objects.
The main difference with RawIOBase is that the read() method
supports omitting the size argument, and does not have a default
implementation that defers to readinto().
In addition, read(), readinto() and write() may raise
BlockingIOError if the underlying raw stream is in non-blocking
mode and not ready; unlike their raw counterparts, they will never
return None.
A typical implementation should not inherit from a RawIOBase
implementation, but wrap one.
"""
def read(self, n=None):
"""Read and return up to n bytes.
If the argument is omitted, None, or negative, reads and
returns all data until EOF.
If the argument is positive, and the underlying raw stream is
not 'interactive', multiple raw reads may be issued to satisfy
the byte count (unless EOF is reached first). But for
interactive raw streams (XXX and for pipes?), at most one raw
read will be issued, and a short result does not imply that
EOF is imminent.
Returns an empty bytes array on EOF.
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
self._unsupported("read")
def read1(self, n=None):
"""Read up to n bytes with at most one read() system call."""
self._unsupported("read1")
def readinto(self, b):
"""Read up to len(b) bytes into b.
Like read(), this may issue multiple reads to the underlying raw
stream, unless the latter is 'interactive'.
Returns the number of bytes read (0 for EOF).
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
# XXX This ought to work with anything that supports the buffer API
data = self.read(len(b))
n = len(data)
try:
b[:n] = data
except TypeError as err:
import array
if not isinstance(b, array.array):
raise err
b[:n] = array.array(b'b', data)
return n
def write(self, b):
"""Write the given buffer to the IO stream.
Return the number of bytes written, which is never less than
len(b).
Raises BlockingIOError if the buffer is full and the
underlying raw stream cannot accept more data at the moment.
"""
self._unsupported("write")
def detach(self):
"""
Separate the underlying raw stream from the buffer and return it.
After the raw stream has been detached, the buffer is in an unusable
state.
"""
self._unsupported("detach")
io.BufferedIOBase.register(BufferedIOBase)
class _BufferedIOMixin(BufferedIOBase):
"""A mixin implementation of BufferedIOBase with an underlying raw stream.
This passes most requests on to the underlying raw stream. It
does *not* provide implementations of read(), readinto() or
write().
"""
def __init__(self, raw):
self._raw = raw
### Positioning ###
def seek(self, pos, whence=0):
new_position = self.raw.seek(pos, whence)
if new_position < 0:
raise IOError("seek() returned an invalid position")
return new_position
def tell(self):
pos = self.raw.tell()
if pos < 0:
raise IOError("tell() returned an invalid position")
return pos
def truncate(self, pos=None):
# Flush the stream. We're mixing buffered I/O with lower-level I/O,
# and a flush may be necessary to synch both views of the current
# file state.
self.flush()
if pos is None:
pos = self.tell()
# XXX: Should seek() be used, instead of passing the position
# XXX directly to truncate?
return self.raw.truncate(pos)
### Flush and close ###
def flush(self):
if self.closed:
raise ValueError("flush of closed file")
self.raw.flush()
def close(self):
if self.raw is not None and not self.closed:
self.flush()
self.raw.close()
def detach(self):
if self.raw is None:
raise ValueError("raw stream already detached")
self.flush()
raw = self._raw
self._raw = None
return raw
### Inquiries ###
def seekable(self):
return self.raw.seekable()
def readable(self):
return self.raw.readable()
def writable(self):
return self.raw.writable()
@property
def raw(self):
return self._raw
@property
def closed(self):
return self.raw.closed
@property
def name(self):
return self.raw.name
@property
def mode(self):
return self.raw.mode
def __repr__(self):
clsname = self.__class__.__name__
try:
name = self.name
except AttributeError:
return "<_pyio.{0}>".format(clsname)
else:
return "<_pyio.{0} name={1!r}>".format(clsname, name)
### Lower-level APIs ###
def fileno(self):
return self.raw.fileno()
def isatty(self):
return self.raw.isatty()
class BytesIO(BufferedIOBase):
"""Buffered I/O implementation using an in-memory bytes buffer."""
def __init__(self, initial_bytes=None):
buf = bytearray()
if initial_bytes is not None:
buf.extend(initial_bytes)
self._buffer = buf
self._pos = 0
def __getstate__(self):
if self.closed:
raise ValueError("__getstate__ on closed file")
return self.__dict__.copy()
def getvalue(self):
"""Return the bytes value (contents) of the buffer
"""
if self.closed:
raise ValueError("getvalue on closed file")
return bytes(self._buffer)
def read(self, n=None):
if self.closed:
raise ValueError("read from closed file")
if n is None:
n = -1
if not isinstance(n, (int, long)):
raise TypeError("integer argument expected, got {0!r}".format(
type(n)))
if n < 0:
n = len(self._buffer)
if len(self._buffer) <= self._pos:
return b""
newpos = min(len(self._buffer), self._pos + n)
b = self._buffer[self._pos : newpos]
self._pos = newpos
return bytes(b)
def read1(self, n):
"""This is the same as read.
"""
return self.read(n)
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, unicode):
raise TypeError("can't write unicode to binary stream")
n = len(b)
if n == 0:
return 0
pos = self._pos
if pos > len(self._buffer):
# Inserts null bytes between the current end of the file
# and the new write position.
padding = b'\x00' * (pos - len(self._buffer))
self._buffer += padding
self._buffer[pos:pos + n] = b
self._pos += n
return n
def seek(self, pos, whence=0):
if self.closed:
raise ValueError("seek on closed file")
try:
pos.__index__
except AttributeError:
raise TypeError("an integer is required")
if whence == 0:
if pos < 0:
raise ValueError("negative seek position %r" % (pos,))
self._pos = pos
elif whence == 1:
self._pos = max(0, self._pos + pos)
elif whence == 2:
self._pos = max(0, len(self._buffer) + pos)
else:
raise ValueError("invalid whence value")
return self._pos
def tell(self):
if self.closed:
raise ValueError("tell on closed file")
return self._pos
def truncate(self, pos=None):
if self.closed:
raise ValueError("truncate on closed file")
if pos is None:
pos = self._pos
else:
try:
pos.__index__
except AttributeError:
raise TypeError("an integer is required")
if pos < 0:
raise ValueError("negative truncate position %r" % (pos,))
del self._buffer[pos:]
return pos
def readable(self):
return True
def writable(self):
return True
def seekable(self):
return True
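# Illustrative sketch of the null-padding behaviour implemented in write()
# above: seeking past the end and then writing fills the gap with b'\x00'.
#
# >>> b = BytesIO()
# >>> b.seek(3)
# 3
# >>> b.write(b"x")
# 1
# >>> b.getvalue()
# '\x00\x00\x00x'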
class BufferedReader(_BufferedIOMixin):
"""BufferedReader(raw[, buffer_size])
    A buffer for a readable, sequential RawIOBase object.
The constructor creates a BufferedReader for the given readable raw
stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
is used.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
"""Create a new buffered reader using the given readable raw IO object.
"""
if not raw.readable():
raise IOError('"raw" argument must be readable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._reset_read_buf()
self._read_lock = Lock()
def _reset_read_buf(self):
self._read_buf = b""
self._read_pos = 0
def read(self, n=None):
"""Read n bytes.
Returns exactly n bytes of data unless the underlying raw IO
stream reaches EOF or if the call would block in non-blocking
mode. If n is negative, read until EOF or until read() would
block.
"""
if n is not None and n < -1:
raise ValueError("invalid number of bytes to read")
with self._read_lock:
return self._read_unlocked(n)
def _read_unlocked(self, n=None):
nodata_val = b""
empty_values = (b"", None)
buf = self._read_buf
pos = self._read_pos
# Special case for when the number of bytes to read is unspecified.
if n is None or n == -1:
self._reset_read_buf()
chunks = [buf[pos:]] # Strip the consumed bytes.
current_size = 0
while True:
# Read until EOF or until read() would block.
try:
chunk = self.raw.read()
except IOError as e:
if e.errno != EINTR:
raise
continue
if chunk in empty_values:
nodata_val = chunk
break
current_size += len(chunk)
chunks.append(chunk)
return b"".join(chunks) or nodata_val
# The number of bytes to read is specified, return at most n bytes.
avail = len(buf) - pos # Length of the available buffered data.
if n <= avail:
# Fast path: the data to read is fully buffered.
self._read_pos += n
return buf[pos:pos+n]
# Slow path: read from the stream until enough bytes are read,
# or until an EOF occurs or until read() would block.
chunks = [buf[pos:]]
wanted = max(self.buffer_size, n)
while avail < n:
try:
chunk = self.raw.read(wanted)
except IOError as e:
if e.errno != EINTR:
raise
continue
if chunk in empty_values:
nodata_val = chunk
break
avail += len(chunk)
chunks.append(chunk)
        # n is more than avail only when an EOF occurred or when
# read() would have blocked.
n = min(n, avail)
out = b"".join(chunks)
self._read_buf = out[n:] # Save the extra data in the buffer.
self._read_pos = 0
return out[:n] if out else nodata_val
def peek(self, n=0):
"""Returns buffered bytes without advancing the position.
The argument indicates a desired minimal number of bytes; we
do at most one raw read to satisfy it. We never return more
than self.buffer_size.
"""
with self._read_lock:
return self._peek_unlocked(n)
def _peek_unlocked(self, n=0):
want = min(n, self.buffer_size)
have = len(self._read_buf) - self._read_pos
if have < want or have <= 0:
to_read = self.buffer_size - have
while True:
try:
current = self.raw.read(to_read)
except IOError as e:
if e.errno != EINTR:
raise
continue
break
if current:
self._read_buf = self._read_buf[self._read_pos:] + current
self._read_pos = 0
return self._read_buf[self._read_pos:]
def read1(self, n):
"""Reads up to n bytes, with at most one read() system call."""
# Returns up to n bytes. If at least one byte is buffered, we
# only return buffered bytes. Otherwise, we do one raw read.
if n < 0:
raise ValueError("number of bytes to read must be positive")
if n == 0:
return b""
with self._read_lock:
self._peek_unlocked(1)
return self._read_unlocked(
min(n, len(self._read_buf) - self._read_pos))
def tell(self):
return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos
def seek(self, pos, whence=0):
if not (0 <= whence <= 2):
raise ValueError("invalid whence value")
with self._read_lock:
if whence == 1:
pos -= len(self._read_buf) - self._read_pos
pos = _BufferedIOMixin.seek(self, pos, whence)
self._reset_read_buf()
return pos
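# A short sketch of the peek()/read1()/read() semantics above, using the
# in-memory BytesIO defined earlier to stand in for the raw stream:
#
# >>> r = BufferedReader(BytesIO(b"abcdef"), buffer_size=4)
# >>> r.peek(1)     # one raw read fills the buffer; position does not move
# 'abcd'
# >>> r.read1(2)    # served from the already-buffered bytes
# 'ab'
# >>> r.read()      # drains the buffer, then the raw stream, until EOF
# 'cdef'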
class BufferedWriter(_BufferedIOMixin):
"""A buffer for a writeable sequential RawIO object.
The constructor creates a BufferedWriter for the given writeable raw
stream. If the buffer_size is not given, it defaults to
DEFAULT_BUFFER_SIZE.
"""
_warning_stack_offset = 2
def __init__(self, raw,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
if not raw.writable():
raise IOError('"raw" argument must be writable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
if max_buffer_size is not None:
warnings.warn("max_buffer_size is deprecated", DeprecationWarning,
self._warning_stack_offset)
self.buffer_size = buffer_size
self._write_buf = bytearray()
self._write_lock = Lock()
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, unicode):
raise TypeError("can't write unicode to binary stream")
with self._write_lock:
# XXX we can implement some more tricks to try and avoid
# partial writes
if len(self._write_buf) > self.buffer_size:
# We're full, so let's pre-flush the buffer
try:
self._flush_unlocked()
except BlockingIOError as e:
# We can't accept anything else.
# XXX Why not just let the exception pass through?
raise BlockingIOError(e.errno, e.strerror, 0)
before = len(self._write_buf)
self._write_buf.extend(b)
written = len(self._write_buf) - before
if len(self._write_buf) > self.buffer_size:
try:
self._flush_unlocked()
except BlockingIOError as e:
if len(self._write_buf) > self.buffer_size:
# We've hit the buffer_size. We have to accept a partial
# write and cut back our buffer.
overage = len(self._write_buf) - self.buffer_size
written -= overage
self._write_buf = self._write_buf[:self.buffer_size]
raise BlockingIOError(e.errno, e.strerror, written)
return written
def truncate(self, pos=None):
with self._write_lock:
self._flush_unlocked()
if pos is None:
pos = self.raw.tell()
return self.raw.truncate(pos)
def flush(self):
with self._write_lock:
self._flush_unlocked()
def _flush_unlocked(self):
if self.closed:
raise ValueError("flush of closed file")
written = 0
try:
while self._write_buf:
try:
n = self.raw.write(self._write_buf)
except IOError as e:
if e.errno != EINTR:
raise
continue
if n > len(self._write_buf) or n < 0:
raise IOError("write() returned incorrect number of bytes")
del self._write_buf[:n]
written += n
except BlockingIOError as e:
n = e.characters_written
del self._write_buf[:n]
written += n
raise BlockingIOError(e.errno, e.strerror, written)
def tell(self):
return _BufferedIOMixin.tell(self) + len(self._write_buf)
def seek(self, pos, whence=0):
if not (0 <= whence <= 2):
raise ValueError("invalid whence")
with self._write_lock:
self._flush_unlocked()
return _BufferedIOMixin.seek(self, pos, whence)
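# A small sketch of the buffering behaviour above: writes are collected in
# the internal buffer and only reach the raw stream on overflow or flush().
#
# >>> raw = BytesIO()
# >>> w = BufferedWriter(raw, buffer_size=8)
# >>> w.write(b"abc")
# 3
# >>> raw.getvalue()     # still buffered, nothing written through yet
# ''
# >>> w.flush()
# >>> raw.getvalue()
# 'abc'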
class BufferedRWPair(BufferedIOBase):
"""A buffered reader and writer object together.
A buffered reader object and buffered writer object put together to
form a sequential IO object that can read and write. This is typically
used with a socket or two-way pipe.
reader and writer are RawIOBase objects that are readable and
writeable respectively. If the buffer_size is omitted it defaults to
DEFAULT_BUFFER_SIZE.
"""
# XXX The usefulness of this (compared to having two separate IO
# objects) is questionable.
def __init__(self, reader, writer,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
"""Constructor.
The arguments are two RawIO instances.
"""
if max_buffer_size is not None:
warnings.warn("max_buffer_size is deprecated", DeprecationWarning, 2)
if not reader.readable():
raise IOError('"reader" argument must be readable.')
if not writer.writable():
raise IOError('"writer" argument must be writable.')
self.reader = BufferedReader(reader, buffer_size)
self.writer = BufferedWriter(writer, buffer_size)
def read(self, n=None):
if n is None:
n = -1
return self.reader.read(n)
def readinto(self, b):
return self.reader.readinto(b)
def write(self, b):
return self.writer.write(b)
def peek(self, n=0):
return self.reader.peek(n)
def read1(self, n):
return self.reader.read1(n)
def readable(self):
return self.reader.readable()
def writable(self):
return self.writer.writable()
def flush(self):
return self.writer.flush()
def close(self):
self.writer.close()
self.reader.close()
def isatty(self):
return self.reader.isatty() or self.writer.isatty()
@property
def closed(self):
return self.writer.closed
class BufferedRandom(BufferedWriter, BufferedReader):
"""A buffered interface to random access streams.
The constructor creates a reader and writer for a seekable stream,
raw, given in the first argument. If the buffer_size is omitted it
defaults to DEFAULT_BUFFER_SIZE.
"""
_warning_stack_offset = 3
def __init__(self, raw,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
raw._checkSeekable()
BufferedReader.__init__(self, raw, buffer_size)
BufferedWriter.__init__(self, raw, buffer_size, max_buffer_size)
def seek(self, pos, whence=0):
if not (0 <= whence <= 2):
raise ValueError("invalid whence")
self.flush()
if self._read_buf:
# Undo read ahead.
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
# First do the raw seek, then empty the read buffer, so that
# if the raw seek fails, we don't lose buffered data forever.
pos = self.raw.seek(pos, whence)
with self._read_lock:
self._reset_read_buf()
if pos < 0:
raise IOError("seek() returned invalid position")
return pos
def tell(self):
if self._write_buf:
return BufferedWriter.tell(self)
else:
return BufferedReader.tell(self)
def truncate(self, pos=None):
if pos is None:
pos = self.tell()
# Use seek to flush the read buffer.
return BufferedWriter.truncate(self, pos)
def read(self, n=None):
if n is None:
n = -1
self.flush()
return BufferedReader.read(self, n)
def readinto(self, b):
self.flush()
return BufferedReader.readinto(self, b)
def peek(self, n=0):
self.flush()
return BufferedReader.peek(self, n)
def read1(self, n):
self.flush()
return BufferedReader.read1(self, n)
def write(self, b):
if self._read_buf:
# Undo readahead
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
self._reset_read_buf()
return BufferedWriter.write(self, b)
class TextIOBase(IOBase):
"""Base class for text I/O.
This class provides a character and line based interface to stream
I/O. There is no readinto method because Python's character strings
are immutable. There is no public constructor.
"""
def read(self, n=-1):
"""Read at most n characters from stream.
Read from underlying buffer until we have n characters or we hit EOF.
If n is negative or omitted, read until EOF.
"""
self._unsupported("read")
def write(self, s):
"""Write string s to stream."""
self._unsupported("write")
def truncate(self, pos=None):
"""Truncate size to pos."""
self._unsupported("truncate")
def readline(self):
"""Read until newline or EOF.
Returns an empty string if EOF is hit immediately.
"""
self._unsupported("readline")
def detach(self):
"""
Separate the underlying buffer from the TextIOBase and return it.
After the underlying buffer has been detached, the TextIO is in an
unusable state.
"""
self._unsupported("detach")
@property
def encoding(self):
"""Subclasses should override."""
return None
@property
def newlines(self):
"""Line endings translated so far.
Only line endings translated during reading are considered.
Subclasses should override.
"""
return None
@property
def errors(self):
"""Error setting of the decoder or encoder.
Subclasses should override."""
return None
io.TextIOBase.register(TextIOBase)
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
r"""Codec used when reading a file in universal newlines mode. It wraps
another incremental decoder, translating \r\n and \r into \n. It also
records the types of newlines encountered. When used with
translate=False, it ensures that the newline sequence is returned in
one piece.
"""
def __init__(self, decoder, translate, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors=errors)
self.translate = translate
self.decoder = decoder
self.seennl = 0
self.pendingcr = False
def decode(self, input, final=False):
# decode input (with the eventual \r from a previous pass)
if self.decoder is None:
output = input
else:
output = self.decoder.decode(input, final=final)
if self.pendingcr and (output or final):
output = "\r" + output
self.pendingcr = False
# retain last \r even when not translating data:
# then readline() is sure to get \r\n in one pass
if output.endswith("\r") and not final:
output = output[:-1]
self.pendingcr = True
# Record which newlines are read
crlf = output.count('\r\n')
cr = output.count('\r') - crlf
lf = output.count('\n') - crlf
self.seennl |= (lf and self._LF) | (cr and self._CR) \
| (crlf and self._CRLF)
if self.translate:
if crlf:
output = output.replace("\r\n", "\n")
if cr:
output = output.replace("\r", "\n")
return output
def getstate(self):
if self.decoder is None:
buf = b""
flag = 0
else:
buf, flag = self.decoder.getstate()
flag <<= 1
if self.pendingcr:
flag |= 1
return buf, flag
def setstate(self, state):
buf, flag = state
self.pendingcr = bool(flag & 1)
if self.decoder is not None:
self.decoder.setstate((buf, flag >> 1))
def reset(self):
self.seennl = 0
self.pendingcr = False
if self.decoder is not None:
self.decoder.reset()
_LF = 1
_CR = 2
_CRLF = 4
@property
def newlines(self):
return (None,
"\n",
"\r",
("\r", "\n"),
"\r\n",
("\n", "\r\n"),
("\r", "\r\n"),
("\r", "\n", "\r\n")
)[self.seennl]
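# A brief sketch of the translation performed above: \r\n and lone \r are
# folded into \n, and the endings seen so far are reported via `newlines`.
#
# >>> d = IncrementalNewlineDecoder(decoder=None, translate=True)
# >>> d.decode(u"a\r\nb\rc\n", final=True)
# u'a\nb\nc\n'
# >>> d.newlines
# ('\r', '\n', '\r\n')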
class TextIOWrapper(TextIOBase):
r"""Character and line based layer over a BufferedIOBase object, buffer.
encoding gives the name of the encoding that the stream will be
decoded or encoded with. It defaults to locale.getpreferredencoding.
errors determines the strictness of encoding and decoding (see the
codecs.register) and defaults to "strict".
newline can be None, '', '\n', '\r', or '\r\n'. It controls the
handling of line endings. If it is None, universal newlines is
    enabled. With this enabled, on input, the line endings '\n', '\r',
or '\r\n' are translated to '\n' before being returned to the
caller. Conversely, on output, '\n' is translated to the system
    default line separator, os.linesep. If newline is any other of its
legal values, that newline becomes the newline when the file is read
and it is returned untranslated. On output, '\n' is converted to the
newline.
If line_buffering is True, a call to flush is implied when a call to
write contains a newline character.
"""
_CHUNK_SIZE = 2048
def __init__(self, buffer, encoding=None, errors=None, newline=None,
line_buffering=False):
if newline is not None and not isinstance(newline, basestring):
raise TypeError("illegal newline type: %r" % (type(newline),))
if newline not in (None, "", "\n", "\r", "\r\n"):
raise ValueError("illegal newline value: %r" % (newline,))
if encoding is None:
try:
import locale
except ImportError:
# Importing locale may fail if Python is being built
encoding = "ascii"
else:
encoding = locale.getpreferredencoding()
if not isinstance(encoding, basestring):
raise ValueError("invalid encoding: %r" % encoding)
if errors is None:
errors = "strict"
else:
if not isinstance(errors, basestring):
raise ValueError("invalid errors: %r" % errors)
self._buffer = buffer
self._line_buffering = line_buffering
self._encoding = encoding
self._errors = errors
self._readuniversal = not newline
self._readtranslate = newline is None
self._readnl = newline
self._writetranslate = newline != ''
self._writenl = newline or os.linesep
self._encoder = None
self._decoder = None
self._decoded_chars = '' # buffer for text returned from decoder
self._decoded_chars_used = 0 # offset into _decoded_chars for read()
self._snapshot = None # info for reconstructing decoder state
self._seekable = self._telling = self.buffer.seekable()
if self._seekable and self.writable():
position = self.buffer.tell()
if position != 0:
try:
self._get_encoder().setstate(0)
except LookupError:
# Sometimes the encoder doesn't exist
pass
# self._snapshot is either None, or a tuple (dec_flags, next_input)
# where dec_flags is the second (integer) item of the decoder state
# and next_input is the chunk of input bytes that comes next after the
# snapshot point. We use this to reconstruct decoder states in tell().
# Naming convention:
# - "bytes_..." for integer variables that count input bytes
# - "chars_..." for integer variables that count decoded characters
def __repr__(self):
try:
name = self.name
except AttributeError:
return "<_pyio.TextIOWrapper encoding='{0}'>".format(self.encoding)
else:
return "<_pyio.TextIOWrapper name={0!r} encoding='{1}'>".format(
name, self.encoding)
@property
def encoding(self):
return self._encoding
@property
def errors(self):
return self._errors
@property
def line_buffering(self):
return self._line_buffering
@property
def buffer(self):
return self._buffer
def seekable(self):
return self._seekable
def readable(self):
return self.buffer.readable()
def writable(self):
return self.buffer.writable()
def flush(self):
self.buffer.flush()
self._telling = self._seekable
def close(self):
if self.buffer is not None and not self.closed:
self.flush()
self.buffer.close()
@property
def closed(self):
return self.buffer.closed
@property
def name(self):
return self.buffer.name
def fileno(self):
return self.buffer.fileno()
def isatty(self):
return self.buffer.isatty()
def write(self, s):
if self.closed:
raise ValueError("write to closed file")
if not isinstance(s, unicode):
raise TypeError("can't write %s to text stream" %
s.__class__.__name__)
length = len(s)
haslf = (self._writetranslate or self._line_buffering) and "\n" in s
if haslf and self._writetranslate and self._writenl != "\n":
s = s.replace("\n", self._writenl)
encoder = self._encoder or self._get_encoder()
# XXX What if we were just reading?
b = encoder.encode(s)
self.buffer.write(b)
if self._line_buffering and (haslf or "\r" in s):
self.flush()
self._snapshot = None
if self._decoder:
self._decoder.reset()
return length
def _get_encoder(self):
make_encoder = codecs.getincrementalencoder(self._encoding)
self._encoder = make_encoder(self._errors)
return self._encoder
def _get_decoder(self):
make_decoder = codecs.getincrementaldecoder(self._encoding)
decoder = make_decoder(self._errors)
if self._readuniversal:
decoder = IncrementalNewlineDecoder(decoder, self._readtranslate)
self._decoder = decoder
return decoder
# The following three methods implement an ADT for _decoded_chars.
# Text returned from the decoder is buffered here until the client
# requests it by calling our read() or readline() method.
def _set_decoded_chars(self, chars):
"""Set the _decoded_chars buffer."""
self._decoded_chars = chars
self._decoded_chars_used = 0
def _get_decoded_chars(self, n=None):
"""Advance into the _decoded_chars buffer."""
offset = self._decoded_chars_used
if n is None:
chars = self._decoded_chars[offset:]
else:
chars = self._decoded_chars[offset:offset + n]
self._decoded_chars_used += len(chars)
return chars
def _rewind_decoded_chars(self, n):
"""Rewind the _decoded_chars buffer."""
if self._decoded_chars_used < n:
raise AssertionError("rewind decoded_chars out of bounds")
self._decoded_chars_used -= n
def _read_chunk(self):
"""
Read and decode the next chunk of data from the BufferedReader.
"""
# The return value is True unless EOF was reached. The decoded
# string is placed in self._decoded_chars (replacing its previous
# value). The entire input chunk is sent to the decoder, though
# some of it may remain buffered in the decoder, yet to be
# converted.
if self._decoder is None:
raise ValueError("no decoder")
if self._telling:
# To prepare for tell(), we need to snapshot a point in the
# file where the decoder's input buffer is empty.
dec_buffer, dec_flags = self._decoder.getstate()
# Given this, we know there was a valid snapshot point
# len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
# Read a chunk, decode it, and put the result in self._decoded_chars.
input_chunk = self.buffer.read1(self._CHUNK_SIZE)
eof = not input_chunk
self._set_decoded_chars(self._decoder.decode(input_chunk, eof))
if self._telling:
# At the snapshot point, len(dec_buffer) bytes before the read,
# the next input to be decoded is dec_buffer + input_chunk.
self._snapshot = (dec_flags, dec_buffer + input_chunk)
return not eof
def _pack_cookie(self, position, dec_flags=0,
bytes_to_feed=0, need_eof=0, chars_to_skip=0):
# The meaning of a tell() cookie is: seek to position, set the
# decoder flags to dec_flags, read bytes_to_feed bytes, feed them
# into the decoder with need_eof as the EOF flag, then skip
# chars_to_skip characters of the decoded result. For most simple
# decoders, tell() will often just give a byte offset in the file.
return (position | (dec_flags<<64) | (bytes_to_feed<<128) |
(chars_to_skip<<192) | bool(need_eof)<<256)
def _unpack_cookie(self, bigint):
rest, position = divmod(bigint, 1<<64)
rest, dec_flags = divmod(rest, 1<<64)
rest, bytes_to_feed = divmod(rest, 1<<64)
need_eof, chars_to_skip = divmod(rest, 1<<64)
return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip
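    # Illustrative round-trip of the cookie layout described above (these
    # are internal helpers; the numbers are arbitrary sample values):
    #
    # >>> w = TextIOWrapper(BytesIO(b""), encoding="utf-8")
    # >>> cookie = w._pack_cookie(10, dec_flags=1, bytes_to_feed=2,
    # ...                         need_eof=1, chars_to_skip=3)
    # >>> w._unpack_cookie(cookie) == (10, 1, 2, 1, 3)
    # True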
def tell(self):
if not self._seekable:
raise IOError("underlying stream is not seekable")
if not self._telling:
raise IOError("telling position disabled by next() call")
self.flush()
position = self.buffer.tell()
decoder = self._decoder
if decoder is None or self._snapshot is None:
if self._decoded_chars:
# This should never happen.
raise AssertionError("pending decoded text")
return position
# Skip backward to the snapshot point (see _read_chunk).
dec_flags, next_input = self._snapshot
position -= len(next_input)
# How many decoded characters have been used up since the snapshot?
chars_to_skip = self._decoded_chars_used
if chars_to_skip == 0:
# We haven't moved from the snapshot point.
return self._pack_cookie(position, dec_flags)
# Starting from the snapshot position, we will walk the decoder
# forward until it gives us enough decoded characters.
saved_state = decoder.getstate()
try:
# Note our initial start point.
decoder.setstate((b'', dec_flags))
start_pos = position
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
need_eof = 0
# Feed the decoder one byte at a time. As we go, note the
# nearest "safe start point" before the current location
# (a point where the decoder has nothing buffered, so seek()
# can safely start from there and advance to this location).
for next_byte in next_input:
bytes_fed += 1
chars_decoded += len(decoder.decode(next_byte))
dec_buffer, dec_flags = decoder.getstate()
if not dec_buffer and chars_decoded <= chars_to_skip:
# Decoder buffer is empty, so this is a safe start point.
start_pos += bytes_fed
chars_to_skip -= chars_decoded
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
if chars_decoded >= chars_to_skip:
break
else:
# We didn't get enough decoded data; signal EOF to get more.
chars_decoded += len(decoder.decode(b'', final=True))
need_eof = 1
if chars_decoded < chars_to_skip:
raise IOError("can't reconstruct logical file position")
# The returned cookie corresponds to the last safe start point.
return self._pack_cookie(
start_pos, start_flags, bytes_fed, need_eof, chars_to_skip)
finally:
decoder.setstate(saved_state)
def truncate(self, pos=None):
self.flush()
if pos is None:
pos = self.tell()
return self.buffer.truncate(pos)
def detach(self):
if self.buffer is None:
raise ValueError("buffer is already detached")
self.flush()
buffer = self._buffer
self._buffer = None
return buffer
def seek(self, cookie, whence=0):
if self.closed:
raise ValueError("tell on closed file")
if not self._seekable:
raise IOError("underlying stream is not seekable")
if whence == 1: # seek relative to current position
if cookie != 0:
raise IOError("can't do nonzero cur-relative seeks")
# Seeking to the current position should attempt to
# sync the underlying buffer with the current position.
whence = 0
cookie = self.tell()
if whence == 2: # seek relative to end of file
if cookie != 0:
raise IOError("can't do nonzero end-relative seeks")
self.flush()
position = self.buffer.seek(0, 2)
self._set_decoded_chars('')
self._snapshot = None
if self._decoder:
self._decoder.reset()
return position
if whence != 0:
raise ValueError("invalid whence (%r, should be 0, 1 or 2)" %
(whence,))
if cookie < 0:
raise ValueError("negative seek position %r" % (cookie,))
self.flush()
# The strategy of seek() is to go back to the safe start point
# and replay the effect of read(chars_to_skip) from there.
start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \
self._unpack_cookie(cookie)
# Seek back to the safe start point.
self.buffer.seek(start_pos)
self._set_decoded_chars('')
self._snapshot = None
# Restore the decoder to its state from the safe start point.
if cookie == 0 and self._decoder:
self._decoder.reset()
elif self._decoder or dec_flags or chars_to_skip:
self._decoder = self._decoder or self._get_decoder()
self._decoder.setstate((b'', dec_flags))
self._snapshot = (dec_flags, b'')
if chars_to_skip:
# Just like _read_chunk, feed the decoder and save a snapshot.
input_chunk = self.buffer.read(bytes_to_feed)
self._set_decoded_chars(
self._decoder.decode(input_chunk, need_eof))
self._snapshot = (dec_flags, input_chunk)
# Skip chars_to_skip of the decoded characters.
if len(self._decoded_chars) < chars_to_skip:
raise IOError("can't restore logical file position")
self._decoded_chars_used = chars_to_skip
# Finally, reset the encoder (merely useful for proper BOM handling)
try:
encoder = self._encoder or self._get_encoder()
except LookupError:
# Sometimes the encoder doesn't exist
pass
else:
if cookie != 0:
encoder.setstate(0)
else:
encoder.reset()
return cookie
def read(self, n=None):
self._checkReadable()
if n is None:
n = -1
decoder = self._decoder or self._get_decoder()
try:
n.__index__
except AttributeError:
raise TypeError("an integer is required")
if n < 0:
# Read everything.
result = (self._get_decoded_chars() +
decoder.decode(self.buffer.read(), final=True))
self._set_decoded_chars('')
self._snapshot = None
return result
else:
# Keep reading chunks until we have n characters to return.
eof = False
result = self._get_decoded_chars(n)
while len(result) < n and not eof:
eof = not self._read_chunk()
result += self._get_decoded_chars(n - len(result))
return result
def next(self):
self._telling = False
line = self.readline()
if not line:
self._snapshot = None
self._telling = self._seekable
raise StopIteration
return line
def readline(self, limit=None):
if self.closed:
raise ValueError("read from closed file")
if limit is None:
limit = -1
elif not isinstance(limit, (int, long)):
raise TypeError("limit must be an integer")
# Grab all the decoded text (we will rewind any extra bits later).
line = self._get_decoded_chars()
start = 0
# Make the decoder if it doesn't already exist.
if not self._decoder:
self._get_decoder()
pos = endpos = None
while True:
if self._readtranslate:
# Newlines are already translated, only search for \n
pos = line.find('\n', start)
if pos >= 0:
endpos = pos + 1
break
else:
start = len(line)
elif self._readuniversal:
# Universal newline search. Find any of \r, \r\n, \n
# The decoder ensures that \r\n are not split in two pieces
# In C we'd look for these in parallel of course.
nlpos = line.find("\n", start)
crpos = line.find("\r", start)
if crpos == -1:
if nlpos == -1:
# Nothing found
start = len(line)
else:
# Found \n
endpos = nlpos + 1
break
elif nlpos == -1:
# Found lone \r
endpos = crpos + 1
break
elif nlpos < crpos:
# Found \n
endpos = nlpos + 1
break
elif nlpos == crpos + 1:
# Found \r\n
endpos = crpos + 2
break
else:
# Found \r
endpos = crpos + 1
break
else:
# non-universal
pos = line.find(self._readnl)
if pos >= 0:
endpos = pos + len(self._readnl)
break
if limit >= 0 and len(line) >= limit:
endpos = limit # reached length limit
break
            # No line ending seen yet - get more data
while self._read_chunk():
if self._decoded_chars:
break
if self._decoded_chars:
line += self._get_decoded_chars()
else:
# end of file
self._set_decoded_chars('')
self._snapshot = None
return line
if limit >= 0 and endpos > limit:
endpos = limit # don't exceed limit
# Rewind _decoded_chars to just after the line ending we found.
self._rewind_decoded_chars(len(line) - endpos)
return line[:endpos]
@property
def newlines(self):
return self._decoder.newlines if self._decoder else None
class StringIO(TextIOWrapper):
"""Text I/O implementation using an in-memory buffer.
    The initial_value argument sets the initial value of the object. The
    newline argument is like the one of TextIOWrapper's constructor.
"""
def __init__(self, initial_value="", newline="\n"):
super(StringIO, self).__init__(BytesIO(),
encoding="utf-8",
errors="strict",
newline=newline)
# Issue #5645: make universal newlines semantics the same as in the
# C version, even under Windows.
if newline is None:
self._writetranslate = False
if initial_value:
if not isinstance(initial_value, unicode):
initial_value = unicode(initial_value)
self.write(initial_value)
self.seek(0)
def getvalue(self):
self.flush()
return self.buffer.getvalue().decode(self._encoding, self._errors)
def __repr__(self):
# TextIOWrapper tells the encoding in its repr. In StringIO,
        # that's an implementation detail.
return object.__repr__(self)
@property
def errors(self):
return None
@property
def encoding(self):
return None
def detach(self):
# This doesn't make sense on StringIO.
self._unsupported("detach")
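# A short usage sketch of the in-memory text stream defined above:
#
# >>> s = StringIO(u"hello\nworld\n")
# >>> s.readline()
# u'hello\n'
# >>> s.getvalue()
# u'hello\nworld\n'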
|
MiLk/ansible
|
refs/heads/devel
|
test/units/parsing/vault/test_vault.py
|
13
|
# -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import binascii
import io
import os
from binascii import hexlify
import pytest
from ansible.compat.tests import unittest
from ansible import errors
from ansible.module_utils import six
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.vault import VaultLib
from ansible.parsing import vault
class TestVaultIsEncrypted(unittest.TestCase):
def test_bytes_not_encrypted(self):
b_data = b"foobar"
self.assertFalse(vault.is_encrypted(b_data))
def test_bytes_encrypted(self):
b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")
self.assertTrue(vault.is_encrypted(b_data))
def test_text_not_encrypted(self):
b_data = to_text(b"foobar")
self.assertFalse(vault.is_encrypted(b_data))
def test_text_encrypted(self):
b_data = to_text(b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible"))
self.assertTrue(vault.is_encrypted(b_data))
def test_invalid_text_not_ascii(self):
data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ "
self.assertFalse(vault.is_encrypted(data))
def test_invalid_bytes_not_ascii(self):
data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ "
b_data = to_bytes(data, encoding='utf-8')
self.assertFalse(vault.is_encrypted(b_data))
class TestVaultIsEncryptedFile(unittest.TestCase):
def test_binary_file_handle_not_encrypted(self):
b_data = b"foobar"
b_data_fo = io.BytesIO(b_data)
self.assertFalse(vault.is_encrypted_file(b_data_fo))
def test_text_file_handle_not_encrypted(self):
data = u"foobar"
data_fo = io.StringIO(data)
self.assertFalse(vault.is_encrypted_file(data_fo))
def test_binary_file_handle_encrypted(self):
b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")
b_data_fo = io.BytesIO(b_data)
self.assertTrue(vault.is_encrypted_file(b_data_fo))
def test_text_file_handle_encrypted(self):
data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % to_text(hexlify(b"ansible"))
data_fo = io.StringIO(data)
self.assertTrue(vault.is_encrypted_file(data_fo))
def test_binary_file_handle_invalid(self):
data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ "
b_data = to_bytes(data)
b_data_fo = io.BytesIO(b_data)
self.assertFalse(vault.is_encrypted_file(b_data_fo))
def test_text_file_handle_invalid(self):
data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ "
data_fo = io.StringIO(data)
self.assertFalse(vault.is_encrypted_file(data_fo))
def test_file_already_read_from_finds_header(self):
b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos")
b_data_fo = io.BytesIO(b_data)
b_data_fo.read(42) # Arbitrary number
self.assertTrue(vault.is_encrypted_file(b_data_fo))
def test_file_already_read_from_saves_file_pos(self):
b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos")
b_data_fo = io.BytesIO(b_data)
b_data_fo.read(69) # Arbitrary number
vault.is_encrypted_file(b_data_fo)
self.assertEqual(b_data_fo.tell(), 69)
def test_file_with_offset(self):
b_data = b"JUNK$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos")
b_data_fo = io.BytesIO(b_data)
self.assertTrue(vault.is_encrypted_file(b_data_fo, start_pos=4))
def test_file_with_count(self):
b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos")
vault_length = len(b_data)
b_data = b_data + u'ァ ア'.encode('utf-8')
b_data_fo = io.BytesIO(b_data)
self.assertTrue(vault.is_encrypted_file(b_data_fo, count=vault_length))
def test_file_with_offset_and_count(self):
b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos")
vault_length = len(b_data)
b_data = b'JUNK' + b_data + u'ァ ア'.encode('utf-8')
b_data_fo = io.BytesIO(b_data)
self.assertTrue(vault.is_encrypted_file(b_data_fo, start_pos=4, count=vault_length))
@pytest.mark.skipif(not vault.HAS_CRYPTOGRAPHY,
reason="Skipping cryptography tests because cryptography is not installed")
class TestVaultCipherAes256(unittest.TestCase):
def setUp(self):
self.vault_cipher = vault.VaultAES256()
def test(self):
self.assertIsInstance(self.vault_cipher, vault.VaultAES256)
# TODO: tag these as slow tests
def test_create_key_cryptography(self):
b_password = b'hunter42'
b_salt = os.urandom(32)
b_key_cryptography = self.vault_cipher._create_key_cryptography(b_password, b_salt, key_length=32, iv_length=16)
self.assertIsInstance(b_key_cryptography, six.binary_type)
@pytest.mark.skipif(not vault.HAS_PYCRYPTO, reason='Not testing pycrypto key as pycrypto is not installed')
def test_create_key_pycrypto(self):
b_password = b'hunter42'
b_salt = os.urandom(32)
b_key_pycrypto = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16)
self.assertIsInstance(b_key_pycrypto, six.binary_type)
@pytest.mark.skipif(not vault.HAS_PYCRYPTO,
reason='Not comparing cryptography key to pycrypto key as pycrypto is not installed')
def test_compare_new_keys(self):
b_password = b'hunter42'
b_salt = os.urandom(32)
b_key_cryptography = self.vault_cipher._create_key_cryptography(b_password, b_salt, key_length=32, iv_length=16)
b_key_pycrypto = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16)
self.assertEqual(b_key_cryptography, b_key_pycrypto)
def test_create_key_known_cryptography(self):
b_password = b'hunter42'
# A fixed salt
b_salt = b'q' * 32 # q is the most random letter.
b_key_1 = self.vault_cipher._create_key_cryptography(b_password, b_salt, key_length=32, iv_length=16)
self.assertIsInstance(b_key_1, six.binary_type)
# verify we get the same answer
# we could potentially run a few iterations of this and time it to see if it's roughly constant time
# and or that it exceeds some minimal time, but that would likely cause unreliable fails, esp in CI
b_key_2 = self.vault_cipher._create_key_cryptography(b_password, b_salt, key_length=32, iv_length=16)
self.assertIsInstance(b_key_2, six.binary_type)
self.assertEqual(b_key_1, b_key_2)
# And again with pycrypto
b_key_3 = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16)
self.assertIsInstance(b_key_3, six.binary_type)
# verify we get the same answer
# we could potentially run a few iterations of this and time it to see if it's roughly constant time
# and or that it exceeds some minimal time, but that would likely cause unreliable fails, esp in CI
b_key_4 = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16)
self.assertIsInstance(b_key_4, six.binary_type)
self.assertEqual(b_key_3, b_key_4)
self.assertEqual(b_key_1, b_key_4)
def test_create_key_known_pycrypto(self):
b_password = b'hunter42'
# A fixed salt
b_salt = b'q' * 32 # q is the most random letter.
b_key_3 = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16)
self.assertIsInstance(b_key_3, six.binary_type)
# verify we get the same answer
# we could potentially run a few iterations of this and time it to see if it's roughly constant time
# and or that it exceeds some minimal time, but that would likely cause unreliable fails, esp in CI
b_key_4 = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16)
self.assertIsInstance(b_key_4, six.binary_type)
self.assertEqual(b_key_3, b_key_4)
def test_is_equal_is_equal(self):
self.assertTrue(self.vault_cipher._is_equal(b'abcdefghijklmnopqrstuvwxyz', b'abcdefghijklmnopqrstuvwxyz'))
def test_is_equal_unequal_length(self):
self.assertFalse(self.vault_cipher._is_equal(b'abcdefghijklmnopqrstuvwxyz', b'abcdefghijklmnopqrstuvwx and sometimes y'))
def test_is_equal_not_equal(self):
self.assertFalse(self.vault_cipher._is_equal(b'abcdefghijklmnopqrstuvwxyz', b'AbcdefghijKlmnopQrstuvwxZ'))
def test_is_equal_empty(self):
self.assertTrue(self.vault_cipher._is_equal(b'', b''))
def test_is_equal_non_ascii_equal(self):
utf8_data = to_bytes(u'私はガラスを食べられます。それは私を傷つけません。')
self.assertTrue(self.vault_cipher._is_equal(utf8_data, utf8_data))
def test_is_equal_non_ascii_unequal(self):
utf8_data = to_bytes(u'私はガラスを食べられます。それは私を傷つけません。')
utf8_data2 = to_bytes(u'Pot să mănânc sticlă și ea nu mă rănește.')
# Test for the len optimization path
self.assertFalse(self.vault_cipher._is_equal(utf8_data, utf8_data2))
# Test for the slower, char by char comparison path
self.assertFalse(self.vault_cipher._is_equal(utf8_data, utf8_data[:-1] + b'P'))
def test_is_equal_non_bytes(self):
""" Anything not a byte string should raise a TypeError """
self.assertRaises(TypeError, self.vault_cipher._is_equal, u"One fish", b"two fish")
self.assertRaises(TypeError, self.vault_cipher._is_equal, b"One fish", u"two fish")
self.assertRaises(TypeError, self.vault_cipher._is_equal, 1, b"red fish")
self.assertRaises(TypeError, self.vault_cipher._is_equal, b"blue fish", 2)
@pytest.mark.skipif(not vault.HAS_PYCRYPTO,
reason="Skipping Pycrypto tests because pycrypto is not installed")
class TestVaultCipherAes256PyCrypto(TestVaultCipherAes256):
def setUp(self):
self.has_cryptography = vault.HAS_CRYPTOGRAPHY
vault.HAS_CRYPTOGRAPHY = False
super(TestVaultCipherAes256PyCrypto, self).setUp()
def tearDown(self):
vault.HAS_CRYPTOGRAPHY = self.has_cryptography
super(TestVaultCipherAes256PyCrypto, self).tearDown()
@pytest.mark.skipif(not vault.HAS_CRYPTOGRAPHY,
reason="Skipping cryptography tests because cryptography is not installed")
class TestVaultLib(unittest.TestCase):
def setUp(self):
self.v = VaultLib('test-vault-password')
def test_encrypt(self):
plaintext = u'Some text to encrypt in a café'
b_vaulttext = self.v.encrypt(plaintext)
self.assertIsInstance(b_vaulttext, six.binary_type)
b_header = b'$ANSIBLE_VAULT;1.1;AES256\n'
self.assertEqual(b_vaulttext[:len(b_header)], b_header)
def test_encrypt_bytes(self):
plaintext = to_bytes(u'Some text to encrypt in a café')
b_vaulttext = self.v.encrypt(plaintext)
self.assertIsInstance(b_vaulttext, six.binary_type)
b_header = b'$ANSIBLE_VAULT;1.1;AES256\n'
self.assertEqual(b_vaulttext[:len(b_header)], b_header)
def test_is_encrypted(self):
self.assertFalse(self.v.is_encrypted(b"foobar"), msg="encryption check on plaintext yielded false positive")
b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")
self.assertTrue(self.v.is_encrypted(b_data), msg="encryption check on headered text failed")
def test_format_output(self):
self.v.cipher_name = "TEST"
b_ciphertext = b"ansible"
b_vaulttext = self.v._format_output(b_ciphertext)
b_lines = b_vaulttext.split(b'\n')
self.assertGreater(len(b_lines), 1, msg="failed to properly add header")
b_header = b_lines[0]
self.assertTrue(b_header.endswith(b';TEST'), msg="header does not end with cipher name")
b_header_parts = b_header.split(b';')
self.assertEqual(len(b_header_parts), 3, msg="header has the wrong number of parts")
self.assertEqual(b_header_parts[0], b'$ANSIBLE_VAULT', msg="header does not start with $ANSIBLE_VAULT")
self.assertEqual(b_header_parts[1], self.v.b_version, msg="header version is incorrect")
self.assertEqual(b_header_parts[2], b'TEST', msg="header does not end with cipher name")
def test_split_header(self):
b_vaulttext = b"$ANSIBLE_VAULT;9.9;TEST\nansible"
b_ciphertext = self.v._split_header(b_vaulttext)
b_lines = b_ciphertext.split(b'\n')
self.assertEqual(b_lines[0], b"ansible", msg="Payload was not properly split from the header")
self.assertEqual(self.v.cipher_name, u'TEST', msg="cipher name was not properly set")
self.assertEqual(self.v.b_version, b"9.9", msg="version was not properly set")
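    # For reference, the envelope exercised by the two tests above is a
    # single header line followed by hex-encoded payload lines (the values
    # below are illustrative, not real vault output):
    #
    #   $ANSIBLE_VAULT;1.1;AES256
    #   6233623062366136310a633866...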
def test_encrypt_decrypt_aes(self):
self.v.cipher_name = u'AES'
self.v.b_password = b'ansible'
# AES encryption code has been removed, so this is old output for
# AES-encrypted 'foobar' with password 'ansible'.
b_vaulttext = b'''$ANSIBLE_VAULT;1.1;AES
53616c7465645f5fc107ce1ef4d7b455e038a13b053225776458052f8f8f332d554809d3f150bfa3
fe3db930508b65e0ff5947e4386b79af8ab094017629590ef6ba486814cf70f8e4ab0ed0c7d2587e
786a5a15efeb787e1958cbdd480d076c
'''
b_plaintext = self.v.decrypt(b_vaulttext)
self.assertEqual(b_plaintext, b"foobar", msg="decryption failed")
def test_encrypt_decrypt_aes256(self):
self.v.cipher_name = u'AES256'
plaintext = u"foobar"
b_vaulttext = self.v.encrypt(plaintext)
b_plaintext = self.v.decrypt(b_vaulttext)
self.assertNotEqual(b_vaulttext, b"foobar", msg="encryption failed")
self.assertEqual(b_plaintext, b"foobar", msg="decryption failed")
def test_encrypt_decrypt_aes256_existing_vault(self):
self.v.cipher_name = u'AES256'
b_orig_plaintext = b"Setec Astronomy"
vaulttext = u'''$ANSIBLE_VAULT;1.1;AES256
33363965326261303234626463623963633531343539616138316433353830356566396130353436
3562643163366231316662386565383735653432386435610a306664636137376132643732393835
63383038383730306639353234326630666539346233376330303938323639306661313032396437
6233623062366136310a633866373936313238333730653739323461656662303864663666653563
3138'''
b_plaintext = self.v.decrypt(vaulttext)
        self.assertEqual(b_plaintext, b_orig_plaintext, msg="decryption failed")
b_vaulttext = to_bytes(vaulttext, encoding='ascii', errors='strict')
b_plaintext = self.v.decrypt(b_vaulttext)
self.assertEqual(b_plaintext, b_orig_plaintext, msg="decryption failed")
# FIXME This test isn't working quite yet.
@pytest.mark.skip(reason='This test is not ready yet')
def test_encrypt_decrypt_aes256_bad_hmac(self):
self.v.cipher_name = 'AES256'
# plaintext = "Setec Astronomy"
enc_data = '''$ANSIBLE_VAULT;1.1;AES256
33363965326261303234626463623963633531343539616138316433353830356566396130353436
3562643163366231316662386565383735653432386435610a306664636137376132643732393835
63383038383730306639353234326630666539346233376330303938323639306661313032396437
6233623062366136310a633866373936313238333730653739323461656662303864663666653563
3138'''
b_data = to_bytes(enc_data, errors='strict', encoding='utf-8')
b_data = self.v._split_header(b_data)
foo = binascii.unhexlify(b_data)
lines = foo.splitlines()
# line 0 is salt, line 1 is hmac, line 2+ is ciphertext
b_salt = lines[0]
b_hmac = lines[1]
b_ciphertext_data = b'\n'.join(lines[2:])
b_ciphertext = binascii.unhexlify(b_ciphertext_data)
# b_orig_ciphertext = b_ciphertext[:]
# now muck with the text
# b_munged_ciphertext = b_ciphertext[:10] + b'\x00' + b_ciphertext[11:]
# b_munged_ciphertext = b_ciphertext
# assert b_orig_ciphertext != b_munged_ciphertext
b_ciphertext_data = binascii.hexlify(b_ciphertext)
b_payload = b'\n'.join([b_salt, b_hmac, b_ciphertext_data])
# reformat
b_invalid_ciphertext = self.v._format_output(b_payload)
# assert we throw an error
self.v.decrypt(b_invalid_ciphertext)
def test_encrypt_encrypted(self):
self.v.cipher_name = u'AES'
b_vaulttext = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")
vaulttext = to_text(b_vaulttext, errors='strict')
self.assertRaises(errors.AnsibleError, self.v.encrypt, b_vaulttext)
self.assertRaises(errors.AnsibleError, self.v.encrypt, vaulttext)
def test_decrypt_decrypted(self):
plaintext = u"ansible"
self.assertRaises(errors.AnsibleError, self.v.decrypt, plaintext)
b_plaintext = b"ansible"
self.assertRaises(errors.AnsibleError, self.v.decrypt, b_plaintext)
def test_cipher_not_set(self):
plaintext = u"ansible"
self.v.encrypt(plaintext)
        self.assertEqual(self.v.cipher_name, "AES256")
@pytest.mark.skipif(not vault.HAS_PYCRYPTO,
reason="Skipping Pycrypto tests because pycrypto is not installed")
class TestVaultLibPyCrypto(TestVaultLib):
def setUp(self):
self.has_cryptography = vault.HAS_CRYPTOGRAPHY
vault.HAS_CRYPTOGRAPHY = False
super(TestVaultLibPyCrypto, self).setUp()
def tearDown(self):
vault.HAS_CRYPTOGRAPHY = self.has_cryptography
super(TestVaultLibPyCrypto, self).tearDown()
|
modulexcite/wal-e
|
refs/heads/master
|
wal_e/worker/worker_util.py
|
2
|
import tempfile
import time
from wal_e import pipebuf
from wal_e import storage
from wal_e.blobstore import get_blobstore
from wal_e import pipeline
def uri_put_file(creds, uri, fp, content_encoding=None):
blobstore = get_blobstore(storage.StorageLayout(uri))
return blobstore.uri_put_file(creds, uri, fp,
content_encoding=content_encoding)
def do_lzop_put(creds, url, local_path, gpg_key):
"""
Compress and upload a given local path.
:type url: string
:param url: A (s3|wabs)://bucket/key style URL that is the destination
:type local_path: string
:param local_path: a path to a file to be compressed
"""
assert url.endswith('.lzo')
blobstore = get_blobstore(storage.StorageLayout(url))
with tempfile.NamedTemporaryFile(
mode='r+b', bufsize=pipebuf.PIPE_BUF_BYTES) as tf:
with pipeline.get_upload_pipeline(
open(local_path, 'r'), tf, gpg_key=gpg_key):
pass
tf.flush()
clock_start = time.time()
tf.seek(0)
k = blobstore.uri_put_file(creds, url, tf)
clock_finish = time.time()
kib_per_second = format_kib_per_second(
clock_start, clock_finish, k.size)
return kib_per_second
def do_lzop_get(creds, url, path, decrypt):
"""
Get and decompress an S3 or WABS URL
This streams the content directly to lzop; the compressed version
is never stored on disk.
"""
blobstore = get_blobstore(storage.StorageLayout(url))
return blobstore.do_lzop_get(creds, url, path, decrypt)
def format_kib_per_second(start, finish, amount_in_bytes):
try:
return '{0:02g}'.format((amount_in_bytes / 1024) / (finish - start))
except ZeroDivisionError:
return 'NaN'
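# A quick worked example of the formula above (assumed numbers): 5 MiB
# uploaded in 2 seconds is (5 * 1024 * 1024 / 1024) / 2.0 = 2560 KiB/s.
#
# >>> format_kib_per_second(0.0, 2.0, 5 * 1024 * 1024)
# '2560'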
|
fietew/opti_ssr
|
refs/heads/master
|
opti_ssr_demo_headtracker.py
|
1
|
"""
A python module for demonstrating head orientation tracking for binaural
synthesis.
Usage: python opti_ssr_demo_headtracker.py [SSR_IP] [SSR_port] [optitrack ip] [multicast address] [optitrack port] [end_message]
"""
import sys
import opti_ssr
from time import sleep
def demo(ssr_ip='localhost', ssr_port=4711, opti_unicast_ip=None, opti_multicast_ip='239.255.42.99', opti_port=1511, ssr_end_message='\0'):
""" #todo
Parameters
----------
ssr_ip : str, optional
        IP of the server running the SSR.
ssr_port : int, optional
Port of SSR Network Interface. By default, port 4711.
opti_unicast_ip : str, optional
IP of the Motive software to establish a unicast connection to.
By default, no unicast connection is established.
opti_multicast_ip : str, optional
Multicast address to connect to.
opti_port : int, optional
Port of the Motive network interface.
ssr_end_message : str, optional
        Symbol to terminate the XML messages sent to the SSR. By default, a binary zero.
"""
# setting arguments if executed in command line
if sys.argv[1:]:
ssr_ip = str(sys.argv[1])
if sys.argv[2:]:
ssr_port = int(sys.argv[2])
if sys.argv[3:]:
opti_unicast_ip = str(sys.argv[3])
if sys.argv[4:]:
opti_multicast_ip = str(sys.argv[4])
if sys.argv[5:]:
        opti_port = int(sys.argv[5])
if sys.argv[6:]:
ssr_end_message = str(sys.argv[6])
# instantiation of the necessary class objects
optitrack = opti_ssr.OptiTrackClient(opti_unicast_ip, opti_multicast_ip, opti_port)
ssr = opti_ssr.SSRClient(ssr_ip, ssr_port, ssr_end_message)
headtracker = opti_ssr.HeadTracker(optitrack, ssr)
# continuous tracking of head orientation
headtracker.start()
#
sleep(5)
#
headtracker.calibrate()
if __name__ == "__main__":
demo()
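# Programmatic use is also possible; the addresses below are hypothetical
# placeholders, shown only as a sketch:
#
#   demo(ssr_ip='192.168.0.10', ssr_port=4711,
#        opti_multicast_ip='239.255.42.99', opti_port=1511)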
|
eneldoserrata/marcos_openerp
|
refs/heads/master
|
addons/report_geraldo/lib/geraldo/site/newsite/django_1_0/django/utils/termcolors.py
|
73
|
"""
termcolors.py
"""
color_names = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
foreground = dict([(color_names[x], '3%s' % x) for x in range(8)])
background = dict([(color_names[x], '4%s' % x) for x in range(8)])
del color_names
RESET = '0'
opt_dict = {'bold': '1', 'underscore': '4', 'blink': '5', 'reverse': '7', 'conceal': '8'}
def colorize(text='', opts=(), **kwargs):
"""
Returns your text, enclosed in ANSI graphics codes.
Depends on the keyword arguments 'fg' and 'bg', and the contents of
the opts tuple/list.
Returns the RESET code if no parameters are given.
Valid colors:
'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
Valid options:
'bold'
'underscore'
'blink'
'reverse'
'conceal'
'noreset' - string will not be auto-terminated with the RESET code
Examples:
colorize('hello', fg='red', bg='blue', opts=('blink',))
colorize()
colorize('goodbye', opts=('underscore',))
print colorize('first line', fg='red', opts=('noreset',))
print 'this should be red too'
print colorize('and so should this')
print 'this should not be red'
"""
text = str(text)
code_list = []
if text == '' and len(opts) == 1 and opts[0] == 'reset':
return '\x1b[%sm' % RESET
for k, v in kwargs.iteritems():
if k == 'fg':
code_list.append(foreground[v])
elif k == 'bg':
code_list.append(background[v])
for o in opts:
if o in opt_dict:
code_list.append(opt_dict[o])
if 'noreset' not in opts:
text = text + '\x1b[%sm' % RESET
return ('\x1b[%sm' % ';'.join(code_list)) + text
def make_style(opts=(), **kwargs):
"""
Returns a function with default parameters for colorize()
Example:
bold_red = make_style(opts=('bold',), fg='red')
print bold_red('hello')
KEYWORD = make_style(fg='yellow')
COMMENT = make_style(fg='blue', opts=('bold',))
"""
return lambda text: colorize(text, opts, **kwargs)
|
sgraham/nope
|
refs/heads/master
|
build/apply_locales.py
|
295
|
#!/usr/bin/env python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO: remove this script when GYP has for loops
import sys
import optparse
def main(argv):
parser = optparse.OptionParser()
usage = 'usage: %s [options ...] format_string locale_list'
parser.set_usage(usage.replace('%s', '%prog'))
parser.add_option('-d', dest='dash_to_underscore', action="store_true",
default=False,
help='map "en-US" to "en" and "-" to "_" in locales')
(options, arglist) = parser.parse_args(argv)
if len(arglist) < 3:
print 'ERROR: need string and list of locales'
return 1
str_template = arglist[1]
locales = arglist[2:]
results = []
for locale in locales:
# For Cocoa to find the locale at runtime, it needs to use '_' instead
# of '-' (http://crbug.com/20441). Also, 'en-US' should be represented
# simply as 'en' (http://crbug.com/19165, http://crbug.com/25578).
if options.dash_to_underscore:
if locale == 'en-US':
locale = 'en'
locale = locale.replace('-', '_')
results.append(str_template.replace('ZZLOCALE', locale))
# Quote each element so filename spaces don't mess up GYP's attempt to parse
# it into a list.
print ' '.join(["'%s'" % x for x in results])
if __name__ == '__main__':
sys.exit(main(sys.argv))
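# Illustrative run (paths and locales are made up for the example):
#   apply_locales.py -d 'repack/ZZLOCALE.pak' en-US fr pt-BR
# prints:
#   'repack/en.pak' 'repack/fr.pak' 'repack/pt_BR.pak'
# i.e. ZZLOCALE is substituted per locale, 'en-US' collapses to 'en' and dashes
# become underscores because of -d, and each result is quoted for GYP.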
|
charlesvdv/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/py/testing/conftest.py
|
218
|
pytest_plugins = "pytester",
|
ligerzero459/paycoin
|
refs/heads/master
|
contrib/devtools/optimize-pngs.py
|
5
|
#!/usr/bin/env python
'''
Run this script every time you change one of the png files. Using pngcrush, it will optimize the png files, remove various color profiles, remove ancillary chunks (alla) and text chunks (text).
#pngcrush -brute -ow -rem gAMA -rem cHRM -rem iCCP -rem sRGB -rem alla -rem text
'''
import os
import sys
import subprocess
import hashlib
from PIL import Image
def file_hash(filename):
'''Return hash of raw file contents'''
with open(filename, 'rb') as f:
return hashlib.sha256(f.read()).hexdigest()
def content_hash(filename):
'''Return hash of RGBA contents of image'''
i = Image.open(filename)
i = i.convert('RGBA')
data = i.tobytes()
return hashlib.sha256(data).hexdigest()
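# The two hashes serve different purposes below: file_hash() shows whether
# pngcrush changed the bytes on disk at all, while content_hash() verifies that
# the decoded RGBA pixel data is identical before and after crushing.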
pngcrush = 'pngcrush'
git = 'git'
folders = ["src/qt/res/movies", "src/qt/res/icons", "share/pixmaps"]
basePath = subprocess.check_output([git, 'rev-parse', '--show-toplevel']).rstrip('\n')
totalSaveBytes = 0
noHashChange = True
outputArray = []
for folder in folders:
absFolder=os.path.join(basePath, folder)
for file in os.listdir(absFolder):
extension = os.path.splitext(file)[1]
if extension.lower() == '.png':
print("optimizing "+file+"..."),
file_path = os.path.join(absFolder, file)
fileMetaMap = {'file' : file, 'osize': os.path.getsize(file_path), 'sha256Old' : file_hash(file_path)};
fileMetaMap['contentHashPre'] = content_hash(file_path)
pngCrushOutput = ""
try:
pngCrushOutput = subprocess.check_output(
[pngcrush, "-brute", "-ow", "-rem", "gAMA", "-rem", "cHRM", "-rem", "iCCP", "-rem", "sRGB", "-rem", "alla", "-rem", "text", file_path],
stderr=subprocess.STDOUT).rstrip('\n')
except:
print "pngcrush is not installed, aborting..."
sys.exit(0)
#verify
if "Not a PNG file" in subprocess.check_output([pngcrush, "-n", "-v", file_path], stderr=subprocess.STDOUT):
print "PNG file "+file+" is corrupted after crushing, check out pngcursh version"
sys.exit(1)
fileMetaMap['sha256New'] = file_hash(file_path)
fileMetaMap['contentHashPost'] = content_hash(file_path)
if fileMetaMap['contentHashPre'] != fileMetaMap['contentHashPost']:
print "Image contents of PNG file "+file+" before and after crushing don't match"
sys.exit(1)
fileMetaMap['psize'] = os.path.getsize(file_path)
outputArray.append(fileMetaMap)
print("done\n"),
print "summary:\n+++++++++++++++++"
for fileDict in outputArray:
oldHash = fileDict['sha256Old']
newHash = fileDict['sha256New']
totalSaveBytes += fileDict['osize'] - fileDict['psize']
noHashChange = noHashChange and (oldHash == newHash)
print fileDict['file']+"\n size diff from: "+str(fileDict['osize'])+" to: "+str(fileDict['psize'])+"\n old sha256: "+oldHash+"\n new sha256: "+newHash+"\n"
print "completed. Checksum stable: "+str(noHashChange)+". Total reduction: "+str(totalSaveBytes)+" bytes"
|
legalsylvain/OpenUpgrade
|
refs/heads/master
|
addons/hr_holidays/wizard/hr_holidays_summary_department.py
|
44
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# $Id: account.py 1005 2005-07-25 08:41:42Z nicoe $
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_holidays_summary_dept(osv.osv_memory):
_name = 'hr.holidays.summary.dept'
_description = 'HR Leaves Summary Report By Department'
_columns = {
'date_from': fields.date('From', required=True),
'depts': fields.many2many('hr.department', 'summary_dept_rel', 'sum_id', 'dept_id', 'Department(s)'),
'holiday_type': fields.selection([('Approved','Approved'),('Confirmed','Confirmed'),('both','Both Approved and Confirmed')], 'Leave Type', required=True)
}
_defaults = {
'date_from': lambda *a: time.strftime('%Y-%m-01'),
'holiday_type': 'Approved'
}
def print_report(self, cr, uid, ids, context=None):
data = self.read(cr, uid, ids, [], context=context)[0]
if not data['depts']:
            raise osv.except_osv(_('Error!'), _('You have to select at least one Department and try again.'))
datas = {
'ids': [],
'model': 'ir.ui.menu',
'form': data
}
return {
'type': 'ir.actions.report.xml',
'report_name': 'holidays.summary',
'datas': datas,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
TarasRudnyk/scrapy
|
refs/heads/master
|
scrapy/utils/reqser.py
|
110
|
"""
Helper functions for serializing (and deserializing) requests.
"""
import six
from scrapy.http import Request
from scrapy.utils.python import to_unicode, to_native_str
def request_to_dict(request, spider=None):
"""Convert Request object to a dict.
If a spider is given, it will try to find out the name of the spider method
used in the callback and store that as the callback.
"""
cb = request.callback
if callable(cb):
cb = _find_method(spider, cb)
eb = request.errback
if callable(eb):
eb = _find_method(spider, eb)
d = {
'url': to_unicode(request.url), # urls should be safe (safe_string_url)
'callback': cb,
'errback': eb,
'method': request.method,
'headers': dict(request.headers),
'body': request.body,
'cookies': request.cookies,
'meta': request.meta,
'_encoding': request._encoding,
'priority': request.priority,
'dont_filter': request.dont_filter,
}
return d
def request_from_dict(d, spider=None):
"""Create Request object from a dict.
If a spider is given, it will try to resolve the callbacks looking at the
spider for methods with the same name.
"""
cb = d['callback']
if cb and spider:
cb = _get_method(spider, cb)
eb = d['errback']
if eb and spider:
eb = _get_method(spider, eb)
return Request(
url=to_native_str(d['url']),
callback=cb,
errback=eb,
method=d['method'],
headers=d['headers'],
body=d['body'],
cookies=d['cookies'],
meta=d['meta'],
encoding=d['_encoding'],
priority=d['priority'],
dont_filter=d['dont_filter'])
def _find_method(obj, func):
if obj:
try:
func_self = six.get_method_self(func)
except AttributeError: # func has no __self__
pass
else:
if func_self is obj:
return six.get_method_function(func).__name__
raise ValueError("Function %s is not a method of: %s" % (func, obj))
def _get_method(obj, name):
name = str(name)
try:
return getattr(obj, name)
except AttributeError:
raise ValueError("Method %r not found in: %s" % (name, obj))
|
fga-gpp-mds/2017.2-Receituario-Medico
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/env python
from setuptools import setup, find_packages
from pip.req import parse_requirements
# parse_requirements() returns generator of pip.req.InstallRequirement objects
install_reqs = parse_requirements("requirements.txt", session='hack')
# reqs is a list of requirement
# e.g. ['django==1.5.1', 'mezzanine==1.4.6']
reqs = [str(ir.req) for ir in install_reqs]
setup(name='ReceitaMais',
version='1.0',
      description="A simple Django project about doctor's prescription.",
      long_description="This project is from a college subject. A simple Django project about doctor's prescription.",
author='Ronyell Henrique & Thiago Nogueira',
install_requires=reqs,
license='MIT License',
platforms='Web',
author_email='ronyellhenrique@gmail.com, thiagonf10@gmail.com',
url='https://preskribe.herokuapp.com/',
packages=find_packages(),
)
|
mcgoddard/widgetr
|
refs/heads/master
|
env/Lib/site-packages/flask/testsuite/test_apps/config_package_app/__init__.py
|
1257
|
import os
import flask
here = os.path.abspath(os.path.dirname(__file__))
app = flask.Flask(__name__)
|
AWhetter/pacman
|
refs/heads/master
|
test/pacman/tests/sync050.py
|
28
|
self.description = "Install a virtual target (provided by a sync package)"
sp1 = pmpkg("pkg1")
sp1.provides = ["pkg2"]
self.addpkg2db("sync", sp1);
self.args = "-S %s" % sp1.name
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_EXIST=pkg1")
self.addrule("!PKG_EXIST=pkg2")
|
t0mk/ansible
|
refs/heads/devel
|
lib/ansible/modules/monitoring/sensu_check.py
|
25
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Anders Ingemann <aim@secoya.dk>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: sensu_check
short_description: Manage Sensu checks
version_added: 2.0
description:
- Manage the checks that should be run on a machine by I(Sensu).
- Most options do not have a default and will not be added to the check definition unless specified.
  - Default values other than for I(path), I(state), I(backup) and I(metric) are not enforced by this module;
  - they are listed only for your convenience.
options:
name:
description:
- The name of the check
- This is the key that is used to determine whether a check exists
required: true
state:
description:
- Whether the check should be present or not
choices: [ 'present', 'absent' ]
required: false
default: present
path:
description:
- Path to the json file of the check to be added/removed.
- Will be created if it does not exist (unless I(state=absent)).
- The parent folders need to exist when I(state=present), otherwise an error will be thrown
required: false
default: /etc/sensu/conf.d/checks.json
backup:
description:
- Create a backup file (if yes), including the timestamp information so
- you can get the original file back if you somehow clobbered it incorrectly.
choices: [ 'yes', 'no' ]
required: false
default: no
command:
description:
- Path to the sensu check to run (not required when I(state=absent))
required: true
handlers:
description:
- List of handlers to notify when the check fails
required: false
default: []
subscribers:
description:
- List of subscribers/channels this check should run for
- See sensu_subscribers to subscribe a machine to a channel
required: false
default: []
interval:
description:
- Check interval in seconds
required: false
default: null
timeout:
description:
- Timeout for the check
required: false
default: 10
handle:
description:
- Whether the check should be handled or not
choices: [ 'yes', 'no' ]
required: false
default: yes
subdue_begin:
description:
- When to disable handling of check failures
required: false
default: null
subdue_end:
description:
- When to enable handling of check failures
required: false
default: null
dependencies:
description:
- Other checks this check depends on, if dependencies fail,
- handling of this check will be disabled
required: false
default: []
metric:
description:
- Whether the check is a metric
choices: [ 'yes', 'no' ]
required: false
default: no
standalone:
description:
- Whether the check should be scheduled by the sensu client or server
- This option obviates the need for specifying the I(subscribers) option
choices: [ 'yes', 'no' ]
required: false
default: no
publish:
description:
- Whether the check should be scheduled at all.
- You can still issue it via the sensu api
choices: [ 'yes', 'no' ]
required: false
default: yes
occurrences:
description:
- Number of event occurrences before the handler should take action
required: false
default: 1
refresh:
description:
- Number of seconds handlers should wait before taking second action
required: false
default: null
aggregate:
description:
- Classifies the check as an aggregate check,
- making it available via the aggregate API
choices: [ 'yes', 'no' ]
required: false
default: no
low_flap_threshold:
description:
      - The low threshold for flap detection
required: false
default: null
high_flap_threshold:
description:
      - The high threshold for flap detection
required: false
default: null
custom:
version_added: "2.1"
description:
- A hash/dictionary of custom parameters for mixing to the configuration.
      - You can't overwrite other module parameters using this
required: false
default: {}
source:
version_added: "2.1"
description:
- The check source, used to create a JIT Sensu client for an external resource (e.g. a network switch).
required: false
default: null
requirements: [ ]
author: "Anders Ingemann (@andsens)"
'''
EXAMPLES = '''
# Fetch metrics about the CPU load every 60 seconds,
# the sensu server has a handler called 'relay' which forwards stats to graphite
- name: get cpu metrics
sensu_check:
name: cpu_load
command: /etc/sensu/plugins/system/cpu-mpstat-metrics.rb
metric: yes
handlers: relay
subscribers: common
interval: 60
# Check whether nginx is running
- name: check nginx process
sensu_check:
name: nginx_running
command: /etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid
handlers: default
subscribers: nginx
interval: 60
# Stop monitoring the disk capacity.
# Note that the check will still show up in the sensu dashboard,
# to remove it completely you need to issue a DELETE request to the sensu api.
- name: check disk
sensu_check:
name: check_disk_capacity
state: absent
'''
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
# Let snippet from module_utils/basic.py return a proper error in this case
pass
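# Sketch of the on-disk layout this module edits, inferred from how the code
# below reads and writes config['checks'][name] (the keys inside a check are
# just examples):
#   {"checks": {"<check name>": {"command": "...", "interval": 60, ...}}}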
def sensu_check(module, path, name, state='present', backup=False):
changed = False
reasons = []
stream = None
try:
try:
stream = open(path, 'r')
config = json.load(stream)
except IOError:
e = get_exception()
            if e.errno == 2:  # File not found, non-fatal
if state == 'absent':
reasons.append('file did not exist and state is `absent\'')
return changed, reasons
config = {}
else:
module.fail_json(msg=str(e))
except ValueError:
msg = '{path} contains invalid JSON'.format(path=path)
module.fail_json(msg=msg)
finally:
if stream:
stream.close()
if 'checks' not in config:
if state == 'absent':
reasons.append('`checks\' section did not exist and state is `absent\'')
return changed, reasons
config['checks'] = {}
changed = True
reasons.append('`checks\' section did not exist')
if state == 'absent':
if name in config['checks']:
del config['checks'][name]
changed = True
reasons.append('check was present and state is `absent\'')
if state == 'present':
if name not in config['checks']:
check = {}
config['checks'][name] = check
changed = True
reasons.append('check was absent and state is `present\'')
else:
check = config['checks'][name]
simple_opts = ['command',
'handlers',
'subscribers',
'interval',
'timeout',
'handle',
'dependencies',
'standalone',
'publish',
'occurrences',
'refresh',
'aggregate',
'low_flap_threshold',
'high_flap_threshold',
'source',
]
for opt in simple_opts:
if module.params[opt] is not None:
if opt not in check or check[opt] != module.params[opt]:
check[opt] = module.params[opt]
changed = True
reasons.append('`{opt}\' did not exist or was different'.format(opt=opt))
else:
if opt in check:
del check[opt]
changed = True
reasons.append('`{opt}\' was removed'.format(opt=opt))
if module.params['custom']:
        # Merge the user-supplied custom parameters into the check definition
        custom_params = module.params['custom']
        overwritten_fields = set(custom_params.keys()) & set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end'])
        if overwritten_fields:
            msg = 'You can\'t overwrite standard module parameters via "custom". You are trying to overwrite: {opt}'.format(opt=list(overwritten_fields))
module.fail_json(msg=msg)
for k,v in custom_params.items():
if k in config['checks'][name]:
if not config['checks'][name][k] == v:
changed = True
reasons.append('`custom param {opt}\' was changed'.format(opt=k))
else:
changed = True
reasons.append('`custom param {opt}\' was added'.format(opt=k))
check[k] = v
simple_opts += custom_params.keys()
# Remove obsolete custom params
for opt in set(config['checks'][name].keys()) - set(simple_opts + ['type','subdue','subdue_begin','subdue_end']):
changed = True
reasons.append('`custom param {opt}\' was deleted'.format(opt=opt))
del check[opt]
if module.params['metric']:
if 'type' not in check or check['type'] != 'metric':
check['type'] = 'metric'
changed = True
reasons.append('`type\' was not defined or not `metric\'')
if not module.params['metric'] and 'type' in check:
del check['type']
changed = True
reasons.append('`type\' was defined')
if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None:
subdue = {'begin': module.params['subdue_begin'],
'end': module.params['subdue_end'],
}
if 'subdue' not in check or check['subdue'] != subdue:
check['subdue'] = subdue
changed = True
reasons.append('`subdue\' did not exist or was different')
else:
if 'subdue' in check:
del check['subdue']
changed = True
reasons.append('`subdue\' was removed')
if changed and not module.check_mode:
if backup:
module.backup_local(path)
try:
try:
stream = open(path, 'w')
stream.write(json.dumps(config, indent=2) + '\n')
except IOError:
e = get_exception()
module.fail_json(msg=str(e))
finally:
if stream:
stream.close()
return changed, reasons
def main():
arg_spec = {'name': {'type': 'str', 'required': True},
'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'},
'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
'backup': {'type': 'bool', 'default': 'no'},
'command': {'type': 'str'},
'handlers': {'type': 'list'},
'subscribers': {'type': 'list'},
'interval': {'type': 'int'},
'timeout': {'type': 'int'},
'handle': {'type': 'bool'},
'subdue_begin': {'type': 'str'},
'subdue_end': {'type': 'str'},
'dependencies': {'type': 'list'},
'metric': {'type': 'bool', 'default': 'no'},
'standalone': {'type': 'bool'},
'publish': {'type': 'bool'},
'occurrences': {'type': 'int'},
'refresh': {'type': 'int'},
'aggregate': {'type': 'bool'},
'low_flap_threshold': {'type': 'int'},
'high_flap_threshold': {'type': 'int'},
'custom': {'type': 'dict'},
'source': {'type': 'str'},
}
required_together = [['subdue_begin', 'subdue_end']]
module = AnsibleModule(argument_spec=arg_spec,
required_together=required_together,
supports_check_mode=True)
if module.params['state'] != 'absent' and module.params['command'] is None:
module.fail_json(msg="missing required arguments: %s" % ",".join(['command']))
path = module.params['path']
name = module.params['name']
state = module.params['state']
backup = module.params['backup']
changed, reasons = sensu_check(module, path, name, state, backup)
module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons)
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
if __name__ == '__main__':
main()
|
dsajkl/123
|
refs/heads/master
|
common/lib/xmodule/xmodule/tests/test_conditional.py
|
37
|
import json
import unittest
from fs.memoryfs import MemoryFS
from mock import Mock, patch
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.error_module import NonStaffErrorDescriptor
from opaque_keys.edx.locations import SlashSeparatedCourseKey, Location
from xmodule.modulestore.xml import ImportSystem, XMLModuleStore, CourseLocationGenerator
from xmodule.conditional_module import ConditionalDescriptor
from xmodule.tests import DATA_DIR, get_test_system, get_test_descriptor_system
from xmodule.x_module import STUDENT_VIEW
ORG = 'test_org'
COURSE = 'conditional' # name of directory with course data
class DummySystem(ImportSystem):
@patch('xmodule.modulestore.xml.OSFS', lambda directory: MemoryFS())
def __init__(self, load_error_modules):
xmlstore = XMLModuleStore("data_dir", course_dirs=[], load_error_modules=load_error_modules)
super(DummySystem, self).__init__(
xmlstore=xmlstore,
course_id=SlashSeparatedCourseKey(ORG, COURSE, 'test_run'),
course_dir='test_dir',
error_tracker=Mock(),
parent_tracker=Mock(),
load_error_modules=load_error_modules,
)
def render_template(self, template, context):
raise Exception("Shouldn't be called")
class ConditionalFactory(object):
"""
A helper class to create a conditional module and associated source and child modules
to allow for testing.
"""
@staticmethod
def create(system, source_is_error_module=False):
"""
return a dict of modules: the conditional with a single source and a single child.
Keys are 'cond_module', 'source_module', and 'child_module'.
if the source_is_error_module flag is set, create a real ErrorModule for the source.
"""
descriptor_system = get_test_descriptor_system()
# construct source descriptor and module:
source_location = Location("edX", "conditional_test", "test_run", "problem", "SampleProblem", None)
if source_is_error_module:
# Make an error descriptor and module
source_descriptor = NonStaffErrorDescriptor.from_xml(
'some random xml data',
system,
id_generator=CourseLocationGenerator(SlashSeparatedCourseKey('edX', 'conditional_test', 'test_run')),
error_msg='random error message'
)
else:
source_descriptor = Mock()
source_descriptor.location = source_location
source_descriptor.runtime = descriptor_system
source_descriptor.render = lambda view, context=None: descriptor_system.render(source_descriptor, view, context)
# construct other descriptors:
child_descriptor = Mock()
child_descriptor._xmodule.student_view.return_value.content = u'<p>This is a secret</p>'
child_descriptor.student_view = child_descriptor._xmodule.student_view
child_descriptor.displayable_items.return_value = [child_descriptor]
child_descriptor.runtime = descriptor_system
child_descriptor.xmodule_runtime = get_test_system()
child_descriptor.render = lambda view, context=None: descriptor_system.render(child_descriptor, view, context)
child_descriptor.location = source_location.replace(category='html', name='child')
descriptor_system.load_item = {
child_descriptor.location: child_descriptor,
source_location: source_descriptor
}.get
# construct conditional module:
cond_location = Location("edX", "conditional_test", "test_run", "conditional", "SampleConditional", None)
field_data = DictFieldData({
'data': '<conditional/>',
'xml_attributes': {'attempted': 'true'},
'children': [child_descriptor.location],
})
cond_descriptor = ConditionalDescriptor(
descriptor_system,
field_data,
ScopeIds(None, None, cond_location, cond_location)
)
cond_descriptor.xmodule_runtime = system
system.get_module = lambda desc: desc
cond_descriptor.get_required_module_descriptors = Mock(return_value=[source_descriptor])
# return dict:
return {'cond_module': cond_descriptor,
'source_module': source_descriptor,
'child_module': child_descriptor}
class ConditionalModuleBasicTest(unittest.TestCase):
"""
Make sure that conditional module works, using mocks for
other modules.
"""
def setUp(self):
self.test_system = get_test_system()
def test_icon_class(self):
'''verify that get_icon_class works independent of condition satisfaction'''
modules = ConditionalFactory.create(self.test_system)
for attempted in ["false", "true"]:
for icon_class in ['other', 'problem', 'video']:
modules['source_module'].is_attempted = attempted
modules['child_module'].get_icon_class = lambda: icon_class
self.assertEqual(modules['cond_module'].get_icon_class(), icon_class)
def test_get_html(self):
modules = ConditionalFactory.create(self.test_system)
# because get_test_system returns the repr of the context dict passed to render_template,
# we reverse it here
html = modules['cond_module'].render(STUDENT_VIEW).content
expected = modules['cond_module'].xmodule_runtime.render_template('conditional_ajax.html', {
'ajax_url': modules['cond_module'].xmodule_runtime.ajax_url,
'element_id': u'i4x-edX-conditional_test-conditional-SampleConditional',
'depends': u'i4x-edX-conditional_test-problem-SampleProblem',
})
self.assertEquals(expected, html)
def test_handle_ajax(self):
modules = ConditionalFactory.create(self.test_system)
modules['source_module'].is_attempted = "false"
ajax = json.loads(modules['cond_module'].handle_ajax('', ''))
modules['cond_module'].save()
print "ajax: ", ajax
html = ajax['html']
self.assertFalse(any(['This is a secret' in item for item in html]))
# now change state of the capa problem to make it completed
modules['source_module'].is_attempted = "true"
ajax = json.loads(modules['cond_module'].handle_ajax('', ''))
modules['cond_module'].save()
print "post-attempt ajax: ", ajax
html = ajax['html']
self.assertTrue(any(['This is a secret' in item for item in html]))
def test_error_as_source(self):
'''
Check that handle_ajax works properly if the source is really an ErrorModule,
and that the condition is not satisfied.
'''
modules = ConditionalFactory.create(self.test_system, source_is_error_module=True)
ajax = json.loads(modules['cond_module'].handle_ajax('', ''))
modules['cond_module'].save()
html = ajax['html']
self.assertFalse(any(['This is a secret' in item for item in html]))
class ConditionalModuleXmlTest(unittest.TestCase):
"""
Make sure ConditionalModule works, by loading data in from an XML-defined course.
"""
@staticmethod
def get_system(load_error_modules=True):
'''Get a dummy system'''
return DummySystem(load_error_modules)
def setUp(self):
self.test_system = get_test_system()
def get_course(self, name):
"""Get a test course by directory name. If there's more than one, error."""
print "Importing {0}".format(name)
modulestore = XMLModuleStore(DATA_DIR, course_dirs=[name])
courses = modulestore.get_courses()
self.modulestore = modulestore
self.assertEquals(len(courses), 1)
return courses[0]
def test_conditional_module(self):
"""Make sure that conditional module works"""
print "Starting import"
course = self.get_course('conditional_and_poll')
print "Course: ", course
print "id: ", course.id
def inner_get_module(descriptor):
if isinstance(descriptor, Location):
location = descriptor
descriptor = self.modulestore.get_item(location, depth=None)
descriptor.xmodule_runtime = get_test_system()
descriptor.xmodule_runtime.get_module = inner_get_module
return descriptor
# edx - HarvardX
# cond_test - ER22x
location = Location("HarvardX", "ER22x", "2013_Spring", "conditional", "condone")
def replace_urls(text, staticfiles_prefix=None, replace_prefix='/static/', course_namespace=None):
return text
self.test_system.replace_urls = replace_urls
self.test_system.get_module = inner_get_module
module = inner_get_module(location)
print "module: ", module
print "module children: ", module.get_children()
print "module display items (children): ", module.get_display_items()
html = module.render(STUDENT_VIEW).content
print "html type: ", type(html)
print "html: ", html
html_expect = module.xmodule_runtime.render_template(
'conditional_ajax.html',
{
# Test ajax url is just usage-id / handler_name
'ajax_url': '{}/xmodule_handler'.format(location.to_deprecated_string()),
'element_id': u'i4x-HarvardX-ER22x-conditional-condone',
'depends': u'i4x-HarvardX-ER22x-problem-choiceprob'
}
)
self.assertEqual(html, html_expect)
gdi = module.get_display_items()
print "gdi=", gdi
ajax = json.loads(module.handle_ajax('', ''))
module.save()
print "ajax: ", ajax
html = ajax['html']
self.assertFalse(any(['This is a secret' in item for item in html]))
# Now change state of the capa problem to make it completed
inner_module = inner_get_module(location.replace(category="problem", name='choiceprob'))
inner_module.attempts = 1
# Save our modifications to the underlying KeyValueStore so they can be persisted
inner_module.save()
ajax = json.loads(module.handle_ajax('', ''))
module.save()
print "post-attempt ajax: ", ajax
html = ajax['html']
self.assertTrue(any(['This is a secret' in item for item in html]))
def test_conditional_module_with_empty_sources_list(self):
"""
If a ConditionalDescriptor is initialized with an empty sources_list, we assert that the sources_list is set
via generating UsageKeys from the values in xml_attributes['sources']
"""
dummy_system = Mock()
dummy_location = Location("edX", "conditional_test", "test_run", "conditional", "SampleConditional", None)
dummy_scope_ids = ScopeIds(None, None, dummy_location, dummy_location)
dummy_field_data = DictFieldData({
'data': '<conditional/>',
'xml_attributes': {'sources': 'i4x://HarvardX/ER22x/poll_question/T15_poll'},
'children': None,
})
conditional = ConditionalDescriptor(
dummy_system,
dummy_field_data,
dummy_scope_ids,
)
self.assertEqual(
conditional.sources_list[0],
conditional.location.course_key.make_usage_key_from_deprecated_string(conditional.xml_attributes['sources'])
)
def test_conditional_module_parse_sources(self):
dummy_system = Mock()
dummy_location = Location("edX", "conditional_test", "test_run", "conditional", "SampleConditional", None)
dummy_scope_ids = ScopeIds(None, None, dummy_location, dummy_location)
dummy_field_data = DictFieldData({
'data': '<conditional/>',
'xml_attributes': {'sources': 'i4x://HarvardX/ER22x/poll_question/T15_poll;i4x://HarvardX/ER22x/poll_question/T16_poll'},
'children': None,
})
conditional = ConditionalDescriptor(
dummy_system,
dummy_field_data,
dummy_scope_ids,
)
self.assertEqual(
conditional.parse_sources(conditional.xml_attributes),
['i4x://HarvardX/ER22x/poll_question/T15_poll', 'i4x://HarvardX/ER22x/poll_question/T16_poll']
)
|
pyinvoke/invocations
|
refs/heads/administrivia
|
invocations/packaging/release.py
|
1
|
"""
Python package release tasks.
This module assumes:
- you're using semantic versioning for your releases
- you maintain a file called ``$package/_version.py`` containing normal version
conventions (``__version_info__`` tuple and ``__version__`` string).
"""
from __future__ import unicode_literals, print_function
import getpass
import itertools
import logging
import os
import re
import sys
from glob import glob
from shutil import rmtree
from invoke.vendor.six import StringIO
from invoke.vendor.six import text_type, binary_type, PY2
from invoke.vendor.lexicon import Lexicon
from blessings import Terminal
from enum import Enum
from invoke import Collection, task, Exit
from releases.util import parse_changelog
from tabulate import tabulate
from .semantic_version_monkey import Version
from ..util import tmpdir
from ..console import confirm
debug = logging.getLogger("invocations.packaging.release").debug
# TODO: this could be a good module to test out a more class-centric method of
# organizing tasks. E.g.:
# - 'Checks'/readonly things like 'should_changelog' live in a base class
# - one subclass defines dry-run actions for the 'verbs', and is used for
# sanity checking or dry-running
# - another subclass defines actual, mutating actions for the 'verbs', and is
# used for actual release management
# - are those classes simply arbitrary tasky classes used *by*
# actual task functions exposing them; or are they the collections themselves
# (as per #347)?
# - if the latter, how should one "switch" between the subclasses when dry
# running vs real running?
# - what's the CLI "API" look like for that?
# - Different subcollections, e.g. `inv release.dry-run(.all/changelog/etc)`
# vs `inv release.all`?
# - Dry-run flag (which feels more natural/obvious/expected)? How
# would/should that flag affect collection/task loading/selection?
# - especially given task load concerns are typically part of core, but
# this dry-run-or-not behavior clearly doesn't want to be in core?
#
# State junk
#
# Blessings Terminal object for ANSI colorization.
# NOTE: mildly uncomfortable with the instance living at module level, but also
# pretty sure it's unlikely to change meaningfully over time, between
# threads/etc - and it'd be otherwise a PITA to cart around/re-instantiate.
t = Terminal()
check = "\u2714"
ex = "\u2718"
# Types of releases/branches
Release = Enum("Release", "BUGFIX FEATURE UNDEFINED")
# Actions to take for various components - done as enums whose values are
# useful one-line status outputs.
class Changelog(Enum):
OKAY = t.green(check + " no unreleased issues")
NEEDS_RELEASE = t.red(ex + " needs :release: entry")
class VersionFile(Enum):
OKAY = t.green(check + " version up to date")
NEEDS_BUMP = t.red(ex + " needs version bump")
class Tag(Enum):
OKAY = t.green(check + " all set")
NEEDS_CUTTING = t.red(ex + " needs cutting")
# Bits for testing branch names to determine release type
BUGFIX_RE = re.compile(r"^\d+\.\d+$")
BUGFIX_RELEASE_RE = re.compile(r"^\d+\.\d+\.\d+$")
# TODO: allow tweaking this if folks use different branch methodology:
# - same concept, different name, e.g. s/main/dev/
# - different concept entirely, e.g. no main-ish, only feature branches
FEATURE_RE = re.compile(r"^(main|master)$")
class UndefinedReleaseType(Exception):
pass
def _converge(c):
"""
Examine world state, returning data on what needs updating for release.
:param c: Invoke ``Context`` object or subclass.
:returns:
Two dicts (technically, dict subclasses, which allow attribute access),
``actions`` and ``state`` (in that order.)
``actions`` maps release component names to variables (usually class
constants) determining what action should be taken for that component:
- ``changelog``: members of `.Changelog` such as ``NEEDS_RELEASE`` or
``OKAY``.
- ``version``: members of `.VersionFile`.
``state`` contains the data used to calculate the actions, in case the
caller wants to do further analysis:
- ``branch``: the name of the checked-out Git branch.
- ``changelog``: the parsed project changelog, a `dict` of releases.
- ``release_type``: what type of release the branch appears to be (will
be a member of `.Release` such as ``Release.BUGFIX``.)
- ``latest_line_release``: the latest changelog release found for
current release type/line.
- ``latest_overall_release``: the absolute most recent release entry.
Useful for determining next minor/feature release.
- ``current_version``: the version string as found in the package's
``__version__``.
"""
#
# Data/state gathering
#
# Get data about current repo context: what branch are we on & what kind of
# release does it appear to represent?
branch, release_type = _release_line(c)
# Short-circuit if type is undefined; we can't do useful work for that.
if release_type is Release.UNDEFINED:
raise UndefinedReleaseType(
"You don't seem to be on a release-related branch; "
"why are you trying to cut a release?"
)
# Parse our changelog so we can tell what's released and what's not.
# TODO: below needs to go in something doc-y somewhere; having it in a
# non-user-facing subroutine docstring isn't visible enough.
"""
.. note::
Requires that one sets the ``packaging.changelog_file`` configuration
option; it should be a relative or absolute path to your
``changelog.rst`` (or whatever it's named in your project).
"""
# TODO: allow skipping changelog if not using Releases since we have no
# other good way of detecting whether a changelog needs/got an update.
# TODO: chdir to sphinx.source, import conf.py, look at
# releases_changelog_name - that way it will honor that setting and we can
# ditch this explicit one instead. (and the docstring above)
changelog = parse_changelog(
c.packaging.changelog_file, load_extensions=True
)
# Get latest appropriate changelog release and any unreleased issues, for
# current line
line_release, issues = _release_and_issues(changelog, branch, release_type)
# Also get latest overall release, sometimes that matters (usually only
# when latest *appropriate* release doesn't exist yet)
overall_release = _versions_from_changelog(changelog)[-1]
# Obtain the project's main package & its version data
current_version = load_version(c)
# Grab all git tags
tags = _get_tags(c)
state = Lexicon(
{
"branch": branch,
"release_type": release_type,
"changelog": changelog,
"latest_line_release": Version(line_release)
if line_release
else None,
"latest_overall_release": overall_release, # already a Version
"unreleased_issues": issues,
"current_version": Version(current_version),
"tags": tags,
}
)
# Version number determinations:
# - latest actually-released version
# - the next version after that for current branch
# - which of the two is the actual version we're looking to converge on,
# depends on current changelog state.
latest_version, next_version = _latest_and_next_version(state)
state.latest_version = latest_version
state.next_version = next_version
state.expected_version = latest_version
if state.unreleased_issues:
state.expected_version = next_version
#
# Logic determination / convergence
#
actions = Lexicon()
# Changelog: needs new release entry if there are any unreleased issues for
# current branch's line.
# TODO: annotate with number of released issues [of each type?] - so not
# just "up to date!" but "all set (will release 3 features & 5 bugs)"
actions.changelog = Changelog.OKAY
if release_type in (Release.BUGFIX, Release.FEATURE) and issues:
actions.changelog = Changelog.NEEDS_RELEASE
# Version file: simply whether version file equals the target version.
# TODO: corner case of 'version file is >1 release in the future', but
# that's still wrong, just would be a different 'bad' status output.
actions.version = VersionFile.OKAY
if state.current_version != state.expected_version:
actions.version = VersionFile.NEEDS_BUMP
# Git tag: similar to version file, except the check is existence of tag
# instead of comparison to file contents. We even reuse the
# 'expected_version' variable wholesale.
actions.tag = Tag.OKAY
if state.expected_version not in state.tags:
actions.tag = Tag.NEEDS_CUTTING
#
# Return
#
return actions, state
@task
def status(c):
"""
Print current release (version, changelog, tag, etc) status.
Doubles as a subroutine, returning the return values from its inner call to
``_converge`` (an ``(actions, state)`` two-tuple of Lexicons).
"""
# TODO: wants some holistic "you don't actually HAVE any changes to
# release" final status - i.e. all steps were at no-op status.
actions, state = _converge(c)
table = []
# NOTE: explicit 'sensible' sort (in rough order of how things are usually
# modified, and/or which depend on one another, e.g. tags are near the end)
for component in "changelog version tag".split():
table.append((component.capitalize(), actions[component].value))
print(tabulate(table))
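    # With everything converged, the printed table reads roughly like
    #   Changelog  ✔ no unreleased issues
    #   Version    ✔ version up to date
    #   Tag        ✔ all set
    # (exact layout and colors come from ``tabulate`` and the enum values above).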
return actions, state
# TODO: thought we had automatic trailing underscore stripping but...no?
@task(name="all", default=True)
def all_(c, dry_run=False):
"""
Catchall version-bump/tag/changelog/PyPI upload task.
:param bool dry_run:
Handed to all subtasks which themselves have a ``dry_run`` flag.
.. versionchanged:: 2.1
Expanded functionality to run ``publish`` and ``push`` as well as
``prepare``.
.. versionchanged:: 2.1
Added the ``dry_run`` flag.
"""
prepare(c, dry_run=dry_run)
publish(c, dry_run=dry_run)
push(c, dry_run=dry_run)
@task
def prepare(c, dry_run=False):
"""
Edit changelog & version, git commit, and git tag, to set up for release.
:param bool dry_run:
Whether to take any actual actions or just say what might occur.
Default: ``False``.
.. versionchanged:: 2.1
Added the ``dry_run`` parameter.
.. versionchanged:: 2.1
Generate annotated git tags instead of lightweight ones.
"""
# Print dry-run/status/actions-to-take data & grab programmatic result
# TODO: maybe expand the enum-based stuff to have values that split up
# textual description, command string, etc. See the TODO up by their
# definition too, re: just making them non-enum classes period.
# TODO: otherwise, we at least want derived eg changelog/version/etc paths
# transmitted from status() into here...
actions, state = status(c)
# TODO: unless nothing-to-do in which case just say that & exit 0
if not dry_run:
if not confirm("Take the above actions?"):
raise Exit("Aborting.")
# TODO: factor out what it means to edit a file:
# - $EDITOR or explicit expansion of it in case no shell involved
# - pty=True and hide=False, because otherwise things can be bad
# - what else?
# Changelog! (pty for non shite editing, eg vim sure won't like non-pty)
if actions.changelog is Changelog.NEEDS_RELEASE:
# TODO: identify top of list and inject a ready-made line? Requires vim
# assumption...GREAT opportunity for class/method based tasks!
cmd = "$EDITOR {.packaging.changelog_file}".format(c)
c.run(cmd, pty=True, hide=False, dry=dry_run)
# Version file!
if actions.version == VersionFile.NEEDS_BUMP:
version_file = os.path.join(
_find_package(c),
c.packaging.get("version_module", "_version") + ".py",
)
cmd = "$EDITOR {}".format(version_file)
c.run(cmd, pty=True, hide=False, dry=dry_run)
if actions.tag == Tag.NEEDS_CUTTING:
# Commit, if necessary, so the tag includes everything.
# NOTE: this strips out untracked files. effort.
cmd = 'git status --porcelain | egrep -v "^\\?"'
if c.run(cmd, hide=True, warn=True).ok:
c.run(
'git commit -am "Cut {}"'.format(state.expected_version),
hide=False,
dry=dry_run,
)
# Tag!
c.run(
'git tag -a {} -m ""'.format(state.expected_version),
hide=False,
dry=dry_run,
echo=True,
)
def _release_line(c):
"""
Examine current repo state to determine what type of release to prep.
:returns:
A two-tuple of ``(branch-name, line-type)`` where:
- ``branch-name`` is the current branch name, e.g. ``1.1``, ``main``,
``gobbledygook`` (or, usually, ``HEAD`` if not on a branch).
- ``line-type`` is a symbolic member of `.Release` representing what
"type" of release the line appears to be for:
- ``Release.BUGFIX`` if on a bugfix/stable release line, e.g.
``1.1``.
- ``Release.FEATURE`` if on a feature-release branch (typically
``main``).
- ``Release.UNDEFINED`` if neither of those appears to apply
(usually means on some unmerged feature/dev branch).
"""
# TODO: I don't _think_ this technically overlaps with Releases (because
# that only ever deals with changelog contents, and therefore full release
# version numbers) but in case it does, move it there sometime.
# TODO: this and similar calls in this module may want to be given an
# explicit pointer-to-git-repo option (i.e. if run from outside project
# context).
# TODO: major releases? or are they big enough events we don't need to
# bother with the script? Also just hard to gauge - when is main the next
# 1.x feature vs 2.0?
branch = c.run("git rev-parse --abbrev-ref HEAD", hide=True).stdout.strip()
type_ = Release.UNDEFINED
if BUGFIX_RE.match(branch):
type_ = Release.BUGFIX
if FEATURE_RE.match(branch):
type_ = Release.FEATURE
return branch, type_
def _latest_feature_bucket(changelog):
"""
Select 'latest'/'highest' unreleased feature bucket from changelog.
:returns: a string key from ``changelog``.
"""
unreleased = [x for x in changelog if x.startswith("unreleased_")]
return sorted(
unreleased, key=lambda x: int(x.split("_")[1]), reverse=True
)[0]
# TODO: this feels like it should live in Releases, though that would imply
# adding semantic_version as a dep there, grump
def _versions_from_changelog(changelog):
"""
Return all released versions from given ``changelog``, sorted.
:param dict changelog:
A changelog dict as returned by ``releases.util.parse_changelog``.
:returns: A sorted list of `semantic_version.Version` objects.
"""
versions = [Version(x) for x in changelog if BUGFIX_RELEASE_RE.match(x)]
return sorted(versions)
# TODO: may want to live in releases.util eventually
def _release_and_issues(changelog, branch, release_type):
"""
Return most recent branch-appropriate release, if any, and its contents.
:param dict changelog:
Changelog contents, as returned by ``releases.util.parse_changelog``.
:param str branch:
Branch name.
:param release_type:
Member of `Release`, e.g. `Release.FEATURE`.
:returns:
Two-tuple of release (``str``) and issues (``list`` of issue numbers.)
If there is no latest release for the given branch (e.g. if it's a
feature or main branch), it will be ``None``.
"""
# Bugfix lines just use the branch to find issues
bucket = branch
# Features need a bit more logic
if release_type is Release.FEATURE:
bucket = _latest_feature_bucket(changelog)
# Issues is simply what's in the bucket
issues = changelog[bucket]
# Latest release is undefined for feature lines
release = None
# And requires scanning changelog, for bugfix lines
if release_type is Release.BUGFIX:
versions = [text_type(x) for x in _versions_from_changelog(changelog)]
release = [x for x in versions if x.startswith(bucket)][-1]
return release, issues
def _get_tags(c):
"""
Return sorted list of release-style tags as semver objects.
"""
tags_ = []
for tagstr in c.run("git tag", hide=True).stdout.strip().split("\n"):
try:
tags_.append(Version(tagstr))
# Ignore anything non-semver; most of the time they'll be non-release
# tags, and even if they are, we can't reason about anything
# non-semver anyways.
# TODO: perhaps log these to DEBUG
except ValueError:
pass
# Version objects sort semantically
return sorted(tags_)
def _latest_and_next_version(state):
"""
Determine latest version for current branch, and its increment.
E.g. on the ``1.2`` branch, we take the latest ``1.2.x`` release and
increment its tertiary number, so e.g. if the previous release was
``1.2.2``, this function returns ``1.2.3``. If on ``main`` and latest
overall release was ``1.2.2``, it returns ``1.3.0``.
:param dict state:
The ``state`` dict as returned by / generated within `converge`.
:returns: 2-tuple of ``semantic_version.Version``.
"""
if state.release_type == Release.FEATURE:
previous_version = state.latest_overall_release
next_version = previous_version.next_minor()
else:
previous_version = state.latest_line_release
next_version = previous_version.next_patch()
return previous_version, next_version
def _find_package(c):
"""
Try to find 'the' One True Package for this project.
Mostly for obtaining the ``_version`` file within it.
Uses the ``packaging.package`` config setting if defined. If not defined,
fallback is to look for a single top-level Python package (directory
containing ``__init__.py``). (This search ignores a small blacklist of
directories like ``tests/``, ``vendor/`` etc.)
"""
# TODO: is there a way to get this from the same place setup.py does w/o
# setup.py barfing (since setup() runs at import time and assumes CLI use)?
configured_value = c.get("packaging", {}).get("package", None)
if configured_value:
return configured_value
# TODO: tests covering this stuff here (most logic tests simply supply
# config above)
packages = [
path
for path in os.listdir(".")
if (
os.path.isdir(path)
and os.path.exists(os.path.join(path, "__init__.py"))
and path not in ("tests", "integration", "sites", "vendor")
)
]
if not packages:
raise Exit("Unable to find a local Python package!")
if len(packages) > 1:
raise Exit("Found multiple Python packages: {!r}".format(packages))
return packages[0]
def load_version(c):
package_name = _find_package(c)
version_module = c.packaging.get("version_module", "_version")
# NOTE: have to explicitly give it a bytestr (Python 2) or unicode (Python
# 3) because https://bugs.python.org/issue21720 HOORAY
cast = binary_type if PY2 else text_type
package = __import__(package_name, fromlist=[cast(version_module)])
# TODO: explode nicely if it lacks a _version/etc, or a __version__
# TODO: make this a Version()?
return getattr(package, version_module).__version__
@task
def build(c, sdist=True, wheel=True, directory=None, python=None, clean=False):
"""
Build sdist and/or wheel archives, optionally in a temp base directory.
All parameters/flags honor config settings of the same name, under the
``packaging`` tree. E.g. say ``.configure({'packaging': {'wheel':
False}})`` to disable building wheel archives by default.
:param bool sdist:
Whether to build sdists/tgzs. Default: ``True``.
:param bool wheel:
Whether to build wheels (requires the ``wheel`` package from PyPI).
Default: ``True``.
:param str directory:
Allows specifying a specific directory in which to perform builds and
dist creation. Useful when running as a subroutine from ``publish``
which sets up a temporary directory.
Up to two subdirectories may be created within this directory: one for
builds (if building wheels), and one for the dist archives.
When ``None`` or another false-y value (which is the default), the
current working directory is used (and thus, local ``dist/`` and
``build/`` subdirectories).
:param str python:
Which Python binary to use when invoking ``setup.py``.
Defaults to ``"python"``.
If ``wheel=True``, then this Python must have ``wheel`` installed in
its default ``site-packages`` (or similar) location.
:param clean:
Whether to clean out the build and dist directories before building.
.. versionchanged:: 2.0
``clean`` now defaults to False instead of True, cleans both dist and
build dirs when True, and honors configuration.
.. versionchanged:: 2.0
``wheel`` now defaults to True instead of False.
"""
# Config hooks
config = c.config.get("packaging", {})
# Check bool flags to see if they were overridden by config.
# TODO: this wants something explicit at the Invoke layer, technically this
# prevents someone from giving eg --sdist on CLI to override a falsey
# config value for it.
if sdist is True and "sdist" in config:
sdist = config["sdist"]
if wheel is True and "wheel" in config:
wheel = config["wheel"]
if clean is False and "clean" in config:
clean = config["clean"]
if directory is None:
directory = config.get("directory", "")
if python is None:
python = config.get("python", "python") # buffalo buffalo
# Sanity
if not sdist and not wheel:
raise Exit(
"You said no sdists and no wheels..."
"what DO you want to build exactly?"
)
# Directory path/arg logic
dist_dir = os.path.join(directory, "dist")
dist_arg = "-d {}".format(dist_dir)
build_dir = os.path.join(directory, "build")
build_arg = "-b {}".format(build_dir)
# Clean
if clean:
for target in (dist_dir, build_dir):
rmtree(target, ignore_errors=True)
# Build
parts = [python, "setup.py"]
if sdist:
parts.extend(("sdist", dist_arg))
if wheel:
# Manually execute build in case we are using a custom build dir.
# Doesn't seem to be a way to tell bdist_wheel to do this directly.
parts.extend(("build", build_arg))
parts.extend(("bdist_wheel", dist_arg))
c.run(" ".join(parts))
def find_gpg(c):
for candidate in "gpg gpg1 gpg2".split():
if c.run("which {}".format(candidate), hide=True, warn=True).ok:
return candidate
@task
def publish(
c,
sdist=True,
wheel=True,
index=None,
sign=False,
dry_run=False,
directory=None,
dual_wheels=False,
alt_python=None,
check_desc=False,
):
"""
    Publish code to PyPI or index of choice. Wraps ``build`` and ``upload``.
This uses the ``twine`` command under the hood, both its pre-upload
``check`` subcommand (which verifies the archives to be uploaded, including
checking your PyPI readme) and the ``upload`` one.
All parameters save ``dry_run`` and ``directory`` honor config settings of
the same name, under the ``packaging`` tree. E.g. say
``.configure({'packaging': {'wheel': True}})`` to force building wheel
archives by default.
:param bool sdist:
Whether to upload sdists/tgzs. Default: ``True``.
:param bool wheel:
Whether to upload wheels (requires the ``wheel`` package from PyPI).
Default: ``True``.
:param str index:
Custom upload index/repository name. See ``upload`` help for details.
:param bool sign:
Whether to sign the built archive(s) via GPG.
:param bool dry_run:
Skip upload step if ``True``.
This also prevents cleanup of the temporary build/dist directories, so
you can examine the build artifacts.
Note that this does not skip the ``twine check`` step, just the final
upload.
:param str directory:
Base directory within which will live the ``dist/`` and ``build/``
directories.
Defaults to a temporary directory which is cleaned up after the run
finishes.
:param bool dual_wheels:
When ``True``, builds individual wheels for Python 2 and Python 3.
Useful for situations where you can't build universal wheels, but still
want to distribute for both interpreter versions.
Requires that you have a useful ``python3`` (or ``python2``, if you're
on Python 3 already) binary in your ``$PATH``. Also requires that this
other python have the ``wheel`` package installed in its
``site-packages``; usually this will mean the global site-packages for
that interpreter.
See also the ``alt_python`` argument.
:param str alt_python:
Path to the 'alternate' Python interpreter to use when
``dual_wheels=True``.
When ``None`` (the default) will be ``python3`` or ``python2``,
depending on the currently active interpreter.
:param bool check_desc:
Whether to run ``setup.py check -r -s`` (uses ``readme_renderer``)
before trying to publish - catches long_description bugs. Default:
``False``.
"""
# Don't hide by default, this step likes to be verbose most of the time.
c.config.run.hide = False
# Config hooks
# TODO: this pattern is too widespread. Really needs something in probably
# Executor that automatically does this on our behalf for any kwargs we
# indicate should be configurable
config = c.config.get("packaging", {})
if index is None and "index" in config:
index = config["index"]
if sign is False and "sign" in config:
sign = config["sign"]
if dual_wheels is False and "dual_wheels" in config:
dual_wheels = config["dual_wheels"]
if check_desc is False and "check_desc" in config:
check_desc = config["check_desc"]
# Initial sanity check, if needed. Will die usefully.
# TODO: remove next backwards incompat release, twine check replaces it
if check_desc:
c.run("python setup.py check -r -s")
# Build, into controlled temp dir (avoids attempting to re-upload old
# files)
with tmpdir(skip_cleanup=dry_run, explicit=directory) as tmp:
# Build default archives
build(c, sdist=sdist, wheel=wheel, directory=tmp)
# Build opposing interpreter archive, if necessary
# TODO: delete dual wheels when dropping Py2 support
if dual_wheels:
if not alt_python:
alt_python = "python2"
if sys.version_info[0] == 2:
alt_python = "python3"
build(c, sdist=False, wheel=True, directory=tmp, python=alt_python)
# Use twine's check command on built artifacts (at present this just
# validates long_description)
c.run("twine check {}".format(os.path.join(tmp, "dist", "*")))
# Do the thing! (Maybe.)
upload(c, directory=tmp, index=index, sign=sign, dry_run=dry_run)
def upload(c, directory, index=None, sign=False, dry_run=False):
"""
Upload (potentially also signing) all artifacts in ``directory``.
:param str index:
Custom upload index/repository name.
By default, uses whatever the invoked ``pip`` is configured to use.
Modify your ``pypirc`` file to add new named repositories.
:param bool sign:
Whether to sign the built archive(s) via GPG.
:param bool dry_run:
Skip actual publication step if ``True``.
This also prevents cleanup of the temporary build/dist directories, so
you can examine the build artifacts.
"""
# Obtain list of archive filenames, then ensure any wheels come first
# so their improved metadata is what PyPI sees initially (otherwise, it
# only honors the sdist's lesser data).
archives = list(
itertools.chain.from_iterable(
glob(os.path.join(directory, "dist", "*.{}".format(extension)))
for extension in ("whl", "tar.gz")
)
)
# Sign each archive in turn
# NOTE: twine has a --sign option but it's not quite flexible enough &
# doesn't allow you to dry-run or upload manually when API is borked...
if sign:
prompt = "Please enter GPG passphrase for signing: "
input_ = StringIO(getpass.getpass(prompt) + "\n")
gpg_bin = find_gpg(c)
if not gpg_bin:
raise Exit(
"You need to have one of `gpg`, `gpg1` or `gpg2` "
"installed to GPG-sign!"
)
for archive in archives:
cmd = "{} --detach-sign -a --passphrase-fd 0 {{}}".format(
gpg_bin
) # noqa
c.run(cmd.format(archive), in_stream=input_)
input_.seek(0) # So it can be replayed by subsequent iterations
# Upload
parts = ["twine", "upload"]
if index:
parts.append("--repository {}".format(index))
paths = archives[:]
if sign:
paths.append(os.path.join(directory, "dist", "*.asc"))
parts.extend(paths)
cmd = " ".join(parts)
if dry_run:
print("Would publish via: {}".format(cmd))
print("Files that would be published:")
c.run("ls -l {}".format(" ".join(paths)))
else:
c.run(cmd)
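# For reference (paths purely illustrative), a dry run with a custom index and
# signing enabled would print a command shaped roughly like:
#
#   twine upload --repository testpypi /tmp/x/dist/pkg-1.0-py2.py3-none-any.whl \
#       /tmp/x/dist/pkg-1.0.tar.gz /tmp/x/dist/*.asc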
@task
def push(c, dry_run=False):
"""
Push current branch and tags to default Git remote.
"""
kwargs = dict(echo=True) if dry_run else dict()
opts = " --dry-run --no-verify" if dry_run else ""
c.run("git push --follow-tags{}".format(opts), **kwargs)
# TODO: still need time to solve the 'just myself pls' problem
ns = Collection("release", all_, status, prepare, build, publish, push)
# Hide stdout by default, preferring to explicitly enable it when necessary.
ns.configure({"run": {"hide": "stdout"}})
|
faner-father/tushare
|
refs/heads/master
|
tushare/stock/trading.py
|
5
|
# -*- coding:utf-8 -*-
"""
Trading data interfaces
Created on 2014/07/31
@author: Jimmy Liu
@group : waditu
@contact: jimmysoa@sina.cn
"""
from __future__ import division
import time
import json
import lxml.html
from lxml import etree
import pandas as pd
import numpy as np
from tushare.stock import cons as ct
import re
from pandas.compat import StringIO
from tushare.util import dateu as du
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def get_hist_data(code=None, start=None, end=None,
ktype='D', retry_count=3,
pause=0.001):
"""
获取个股历史交易记录
Parameters
------
code:string
股票代码 e.g. 600848
start:string
开始日期 format:YYYY-MM-DD 为空时取到API所提供的最早日期数据
end:string
结束日期 format:YYYY-MM-DD 为空时取到最近一个交易日数据
ktype:string
数据类型,D=日k线 W=周 M=月 5=5分钟 15=15分钟 30=30分钟 60=60分钟,默认为D
retry_count : int, 默认 3
如遇网络等问题重复执行的次数
pause : int, 默认 0
重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题
return
-------
DataFrame
属性:日期 ,开盘价, 最高价, 收盘价, 最低价, 成交量, 价格变动 ,涨跌幅,5日均价,10日均价,20日均价,5日均量,10日均量,20日均量,换手率
"""
symbol = _code_to_symbol(code)
url = ''
if ktype.upper() in ct.K_LABELS:
url = ct.DAY_PRICE_URL%(ct.P_TYPE['http'], ct.DOMAINS['ifeng'],
ct.K_TYPE[ktype.upper()], symbol)
elif ktype in ct.K_MIN_LABELS:
url = ct.DAY_PRICE_MIN_URL%(ct.P_TYPE['http'], ct.DOMAINS['ifeng'],
symbol, ktype)
else:
raise TypeError('ktype input error.')
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(url)
lines = urlopen(request, timeout = 10).read()
if len(lines) < 15: #no data
return None
except Exception as e:
print(e)
else:
js = json.loads(lines.decode('utf-8') if ct.PY3 else lines)
cols = []
if (code in ct.INDEX_LABELS) & (ktype.upper() in ct.K_LABELS):
cols = ct.INX_DAY_PRICE_COLUMNS
else:
cols = ct.DAY_PRICE_COLUMNS
if len(js['record'][0]) == 14:
cols = ct.INX_DAY_PRICE_COLUMNS
df = pd.DataFrame(js['record'], columns=cols)
if ktype.upper() in ['D', 'W', 'M']:
df = df.applymap(lambda x: x.replace(u',', u''))
for col in cols[1:]:
df[col] = df[col].astype(float)
if start is not None:
df = df[df.date >= start]
if end is not None:
df = df[df.date <= end]
if (code in ct.INDEX_LABELS) & (ktype in ct.K_MIN_LABELS):
df = df.drop('turnover', axis=1)
df = df.set_index('date')
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
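# Hypothetical usage sketch (codes and dates are illustrative only):
#
#   df = get_hist_data('600848', start='2015-01-05', end='2015-01-09')  # daily bars
#   wk = get_hist_data('600848', ktype='W')                             # weekly bars
#   m5 = get_hist_data('600848', ktype='5')                             # 5-minute bars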
def _parsing_dayprice_json(pageNum=1):
"""
处理当日行情分页数据,格式为json
Parameters
------
pageNum:页码
return
-------
DataFrame 当日所有股票交易数据(DataFrame)
"""
ct._write_console()
request = Request(ct.SINA_DAY_PRICE_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['jv'], pageNum))
text = urlopen(request, timeout=10).read()
if text == 'null':
return None
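    # The endpoint returns a JavaScript object literal with unquoted keys; the
    # substitutions below quote those keys so the text becomes valid JSON.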
reg = re.compile(r'\,(.*?)\:')
text = reg.sub(r',"\1":', text.decode('gbk') if ct.PY3 else text)
text = text.replace('"{symbol', '{"symbol')
text = text.replace('{symbol', '{"symbol"')
if ct.PY3:
jstr = json.dumps(text)
else:
jstr = json.dumps(text, encoding='GBK')
js = json.loads(jstr)
df = pd.DataFrame(pd.read_json(js, dtype={'code':object}),
columns=ct.DAY_TRADING_COLUMNS)
df = df.drop('symbol', axis=1)
df = df.ix[df.volume > 0]
return df
def get_tick_data(code=None, date=None, retry_count=3, pause=0.001):
"""
获取分笔数据
Parameters
------
code:string
股票代码 e.g. 600848
date:string
日期 format:YYYY-MM-DD
retry_count : int, 默认 3
如遇网络等问题重复执行的次数
pause : int, 默认 0
重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题
return
-------
DataFrame 当日所有股票交易数据(DataFrame)
属性:成交时间、成交价格、价格变动,成交手、成交金额(元),买卖类型
"""
if code is None or len(code)!=6 or date is None:
return None
symbol = _code_to_symbol(code)
for _ in range(retry_count):
time.sleep(pause)
try:
re = Request(ct.TICK_PRICE_URL % (ct.P_TYPE['http'], ct.DOMAINS['sf'], ct.PAGES['dl'],
date, symbol))
lines = urlopen(re, timeout=10).read()
lines = lines.decode('GBK')
if len(lines) < 100:
return None
df = pd.read_table(StringIO(lines), names=ct.TICK_COLUMNS,
skiprows=[0])
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
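# Hypothetical usage sketch (date is illustrative only):
#
#   ticks = get_tick_data('600848', date='2015-01-09')
#   # column layout comes from ct.TICK_COLUMNS (trade time, price, change,
#   # volume in lots, amount in CNY, buy/sell type per the docstring above)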
def get_today_ticks(code=None, retry_count=3, pause=0.001):
"""
获取当日分笔明细数据
Parameters
------
code:string
股票代码 e.g. 600848
retry_count : int, 默认 3
如遇网络等问题重复执行的次数
pause : int, 默认 0
重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题
return
-------
DataFrame 当日所有股票交易数据(DataFrame)
属性:成交时间、成交价格、价格变动,成交手、成交金额(元),买卖类型
"""
if code is None or len(code)!=6 :
return None
symbol = _code_to_symbol(code)
date = du.today()
try:
request = Request(ct.TODAY_TICKS_PAGE_URL % (ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['jv'], date,
symbol))
data_str = urlopen(request, timeout=10).read()
data_str = data_str.decode('GBK')
data_str = data_str[1:-1]
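        # The payload is a JS object literal with bare identifiers; evaluating it
        # against a dict subclass that maps unknown names to themselves yields a
        # Python dict, which is then normalised via a json dumps/loads round trip.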
data_str = eval(data_str, type('Dummy', (dict,),
dict(__getitem__ = lambda s, n:n))())
data_str = json.dumps(data_str)
data_str = json.loads(data_str)
pages = len(data_str['detailPages'])
data = pd.DataFrame()
ct._write_head()
for pNo in range(1, pages):
data = data.append(_today_ticks(symbol, date, pNo,
retry_count, pause), ignore_index=True)
except Exception as er:
print(str(er))
return data
def _today_ticks(symbol, tdate, pageNo, retry_count, pause):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
html = lxml.html.parse(ct.TODAY_TICKS_URL % (ct.P_TYPE['http'],
ct.DOMAINS['vsf'], ct.PAGES['t_ticks'],
symbol, tdate, pageNo
))
res = html.xpath('//table[@id=\"datatbl\"]/tbody/tr')
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
sarr = sarr.replace('--', '0')
df = pd.read_html(StringIO(sarr), parse_dates=False)[0]
df.columns = ct.TODAY_TICK_COLUMNS
df['pchange'] = df['pchange'].map(lambda x : x.replace('%', ''))
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def get_today_all():
"""
一次性获取最近一个日交易日所有股票的交易数据
return
-------
DataFrame
属性:代码,名称,涨跌幅,现价,开盘价,最高价,最低价,最日收盘价,成交量,换手率
"""
ct._write_head()
df = _parsing_dayprice_json(1)
if df is not None:
for i in range(2, ct.PAGE_NUM[0]):
newdf = _parsing_dayprice_json(i)
df = df.append(newdf, ignore_index=True)
return df
def get_realtime_quotes(symbols=None):
"""
获取实时交易数据 getting real time quotes data
用于跟踪交易情况(本次执行的结果-上一次执行的数据)
Parameters
------
symbols : string, array-like object (list, tuple, Series).
return
-------
DataFrame 实时交易数据
属性:0:name,股票名字
1:open,今日开盘价
2:pre_close,昨日收盘价
3:price,当前价格
4:high,今日最高价
5:low,今日最低价
6:bid,竞买价,即“买一”报价
7:ask,竞卖价,即“卖一”报价
8:volumn,成交量 maybe you need do volumn/100
9:amount,成交金额(元 CNY)
10:b1_v,委买一(笔数 bid volume)
11:b1_p,委买一(价格 bid price)
12:b2_v,“买二”
13:b2_p,“买二”
14:b3_v,“买三”
15:b3_p,“买三”
16:b4_v,“买四”
17:b4_p,“买四”
18:b5_v,“买五”
19:b5_p,“买五”
20:a1_v,委卖一(笔数 ask volume)
21:a1_p,委卖一(价格 ask price)
...
30:date,日期;
31:time,时间;
"""
symbols_list = ''
if isinstance(symbols, list) or isinstance(symbols, set) or isinstance(symbols, tuple) or isinstance(symbols, pd.Series):
for code in symbols:
symbols_list += _code_to_symbol(code) + ','
else:
symbols_list = _code_to_symbol(symbols)
symbols_list = symbols_list[:-1] if len(symbols_list) > 8 else symbols_list
request = Request(ct.LIVE_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['sinahq'],
_random(), symbols_list))
text = urlopen(request,timeout=10).read()
text = text.decode('GBK')
reg = re.compile(r'\="(.*?)\";')
data = reg.findall(text)
regSym = re.compile(r'(?:sh|sz)(.*?)\=')
syms = regSym.findall(text)
data_list = []
syms_list = []
for index, row in enumerate(data):
if len(row)>1:
data_list.append([astr for astr in row.split(',')])
syms_list.append(syms[index])
if len(syms_list) == 0:
return None
df = pd.DataFrame(data_list, columns=ct.LIVE_DATA_COLS)
df = df.drop('s', axis=1)
df['code'] = syms_list
ls = [cls for cls in df.columns if '_v' in cls]
for txt in ls:
df[txt] = df[txt].map(lambda x : x[:-2])
return df
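# Hypothetical usage sketch:
#
#   rt = get_realtime_quotes('600848')              # a single code
#   rt = get_realtime_quotes(['600848', '000001'])  # several codes in one call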
def get_h_data(code, start=None, end=None, autype='qfq',
index=False, retry_count=3, pause=0.001):
    '''
    Fetch adjusted (forward/backward) historical price data.
    Parameters
    ------
      code:string
                  stock code, e.g. 600848
      start:string
                  start date, format YYYY-MM-DD; when empty, defaults to this day one year ago
      end:string
                  end date, format YYYY-MM-DD; when empty, defaults to today
      autype:string
                  adjustment type: qfq=forward-adjusted, hfq=backward-adjusted, None=unadjusted; default qfq
      retry_count : int, default 3
                  number of retries on network or similar failures
      pause : float, default 0.001
                 seconds to pause between retries, to avoid problems caused by requests sent too quickly
    return
    -------
      DataFrame
          date   trading date (index)
          open   opening price
          high   highest price
          close  closing price
          low    lowest price
          volume trading volume
          amount turnover amount
    '''
start = du.today_last_year() if start is None else start
end = du.today() if end is None else end
qs = du.get_quarts(start, end)
qt = qs[0]
ct._write_head()
data = _parse_fq_data(_get_index_url(index, code, qt), index,
retry_count, pause)
if len(qs)>1:
for d in range(1, len(qs)):
qt = qs[d]
ct._write_console()
df = _parse_fq_data(_get_index_url(index, code, qt), index,
retry_count, pause)
data = data.append(df, ignore_index=True)
if len(data) == 0 or len(data[(data.date>=start)&(data.date<=end)]) == 0:
return None
data = data.drop_duplicates('date')
if index:
data = data[(data.date>=start) & (data.date<=end)]
data = data.set_index('date')
data = data.sort_index(ascending=False)
return data
if autype == 'hfq':
data = data.drop('factor', axis=1)
data = data[(data.date>=start) & (data.date<=end)]
for label in ['open', 'high', 'close', 'low']:
data[label] = data[label].map(ct.FORMAT)
data[label] = data[label].astype(float)
data = data.set_index('date')
data = data.sort_index(ascending = False)
return data
else:
if autype == 'qfq':
data = data.drop('factor', axis=1)
df = _parase_fq_factor(code, start, end)
df = df.drop_duplicates('date')
df = df.sort('date', ascending=False)
frow = df.head(1)
rt = get_realtime_quotes(code)
if rt is None:
return None
if ((float(rt['high']) == 0) & (float(rt['low']) == 0)):
preClose = float(rt['pre_close'])
else:
if du.is_holiday(du.today()):
preClose = float(rt['price'])
else:
if (du.get_hour() > 9) & (du.get_hour() < 18):
preClose = float(rt['pre_close'])
else:
preClose = float(rt['price'])
rate = float(frow['factor']) / preClose
data = data[(data.date >= start) & (data.date <= end)]
for label in ['open', 'high', 'low', 'close']:
data[label] = data[label] / rate
data[label] = data[label].map(ct.FORMAT)
data[label] = data[label].astype(float)
data = data.set_index('date')
data = data.sort_index(ascending = False)
return data
else:
for label in ['open', 'high', 'close', 'low']:
data[label] = data[label] / data['factor']
data = data.drop('factor', axis=1)
data = data[(data.date>=start) & (data.date<=end)]
for label in ['open', 'high', 'close', 'low']:
data[label] = data[label].map(ct.FORMAT)
data = data.set_index('date')
data = data.sort_index(ascending=False)
data = data.astype(float)
return data
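# Hypothetical usage sketch (autype values follow the docstring above):
#
#   qfq = get_h_data('600848')                # forward-adjusted, last year to today
#   hfq = get_h_data('600848', autype='hfq')  # backward-adjusted
#   raw = get_h_data('600848', autype=None)   # unadjusted prices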
def _parase_fq_factor(code, start, end):
symbol = _code_to_symbol(code)
request = Request(ct.HIST_FQ_FACTOR_URL%(ct.P_TYPE['http'],
ct.DOMAINS['vsf'], symbol))
text = urlopen(request, timeout=10).read()
text = text[1:len(text)-1]
text = text.decode('utf-8') if ct.PY3 else text
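    # Quote the bare keys so the payload parses as JSON, then turn the
    # underscore-separated date keys (e.g. 2015_01_09) back into dashed dates.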
text = text.replace('{_', '{"')
text = text.replace('total', '"total"')
text = text.replace('data', '"data"')
text = text.replace(':"', '":"')
text = text.replace('",_', '","')
text = text.replace('_', '-')
text = json.loads(text)
df = pd.DataFrame({'date':list(text['data'].keys()), 'factor':list(text['data'].values())})
df['date'] = df['date'].map(_fun_except) # for null case
if df['date'].dtypes == np.object:
df['date'] = df['date'].astype(np.datetime64)
df = df.drop_duplicates('date')
df['factor'] = df['factor'].astype(float)
return df
def _fun_except(x):
if len(x) > 10:
return x[-10:]
else:
return x
def _parse_fq_data(url, index, retry_count, pause):
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(url)
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath('//table[@id=\"FundHoldSharesTable\"]')
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
df = pd.read_html(sarr, skiprows = [0, 1])[0]
if len(df) == 0:
return pd.DataFrame()
if index:
df.columns = ct.HIST_FQ_COLS[0:7]
else:
df.columns = ct.HIST_FQ_COLS
if df['date'].dtypes == np.object:
df['date'] = df['date'].astype(np.datetime64)
df = df.drop_duplicates('date')
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def get_index():
"""
获取大盘指数行情
return
-------
DataFrame
code:指数代码
name:指数名称
change:涨跌幅
open:开盘价
preclose:昨日收盘价
close:收盘价
high:最高价
low:最低价
volume:成交量(手)
amount:成交金额(亿元)
"""
request = Request(ct.INDEX_HQ_URL%(ct.P_TYPE['http'],
ct.DOMAINS['sinahq']))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
text = text.replace('var hq_str_sh', '').replace('var hq_str_sz', '')
text = text.replace('";', '').replace('"', '').replace('=', ',')
text = '%s%s'%(ct.INDEX_HEADER, text)
df = pd.read_csv(StringIO(text), sep=',', thousands=',')
df['change'] = (df['close'] / df['preclose'] - 1 ) * 100
df['amount'] = df['amount'] / 100000000
df['change'] = df['change'].map(ct.FORMAT)
df['amount'] = df['amount'].map(ct.FORMAT)
df = df[ct.INDEX_COLS]
df['code'] = df['code'].map(lambda x:str(x).zfill(6))
df['change'] = df['change'].astype(float)
df['amount'] = df['amount'].astype(float)
return df
def _get_index_url(index, code, qt):
if index:
url = ct.HIST_INDEX_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
code, qt[0], qt[1])
else:
url = ct.HIST_FQ_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
code, qt[0], qt[1])
return url
def get_hists(symbols, start=None, end=None,
ktype='D', retry_count=3,
pause=0.001):
"""
批量获取历史行情数据,具体参数和返回数据类型请参考get_hist_data接口
"""
df = pd.DataFrame()
if isinstance(symbols, list) or isinstance(symbols, set) or isinstance(symbols, tuple) or isinstance(symbols, pd.Series):
for symbol in symbols:
data = get_hist_data(symbol, start=start, end=end,
ktype=ktype, retry_count=retry_count,
pause=pause)
data['code'] = symbol
df = df.append(data, ignore_index=True)
return df
else:
return None
def _random(n=13):
from random import randint
start = 10**(n-1)
end = (10**n)-1
return str(randint(start, end))
def _code_to_symbol(code):
"""
生成symbol代码标志
"""
if code in ct.INDEX_LABELS:
return ct.INDEX_LIST[code]
else:
if len(code) != 6 :
return ''
else:
return 'sh%s'%code if code[:1] in ['5', '6'] else 'sz%s'%code
|
Karaage-Cluster/karaage-debian
|
refs/heads/master
|
karaage/legacy/people/south_migrations/0021_remove_old_tables.py
|
3
|
# -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
from django.db import connection
class Migration(SchemaMigration):
@staticmethod
def delete_table(name):
cursor = connection.cursor()
if name in connection.introspection.get_table_list(cursor):
db.delete_table(name)
def forwards(self, orm):
self.delete_table('auth_user_groups')
self.delete_table('auth_user_user_permissions')
self.delete_table('auth_group_permissions')
self.delete_table('auth_group')
self.delete_table('auth_message')
self.delete_table('auth_permission')
self.delete_table('auth_user')
if not db.dry_run:
orm['contenttypes.contenttype'].objects.filter(app_label='auth').delete()
self.delete_table('placard_counters')
if not db.dry_run:
orm['contenttypes.contenttype'].objects.filter(app_label='placard').delete()
def backwards(self, orm):
raise RuntimeError("Cannot reverse this migration.")
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'institutes.institute': {
'Meta': {'ordering': "['name']", 'object_name': 'Institute', 'db_table': "'institute'"},
'delegates': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'delegate'", 'to': "orm['people.Person']", 'through': "orm['institutes.InstituteDelegate']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'saml_entityid': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'institutes.institutedelegate': {
'Meta': {'object_name': 'InstituteDelegate', 'db_table': "'institutedelegate'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['institutes.Institute']"}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Person']"}),
'send_email': ('django.db.models.fields.BooleanField', [], {})
},
'people.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'groups'", 'symmetrical': 'False', 'to': "orm['people.Person']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'people.person': {
'Meta': {'ordering': "['full_name', 'short_name']", 'object_name': 'Person', 'db_table': "'person'"},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'approved_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_approver'", 'null': 'True', 'to': "orm['people.Person']"}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'date_approved': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_deleted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_deletor'", 'null': 'True', 'to': "orm['people.Person']"}),
'department': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'db_index': 'True'}),
'expires': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['institutes.Institute']"}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_systemuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_usage': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'legacy_ldap_password': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'login_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'saml_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'supervisor': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['contenttypes', 'people']
|
arank/mxnet
|
refs/heads/master
|
example/rcnn/rcnn/symbol/symbol_vgg.py
|
15
|
import mxnet as mx
import proposal
import proposal_target
from rcnn.config import config
def get_vgg_conv(data):
"""
shared convolutional layers
:param data: Symbol
:return: Symbol
"""
# group 1
conv1_1 = mx.symbol.Convolution(
data=data, kernel=(3, 3), pad=(1, 1), num_filter=64, workspace=2048, name="conv1_1")
relu1_1 = mx.symbol.Activation(data=conv1_1, act_type="relu", name="relu1_1")
conv1_2 = mx.symbol.Convolution(
data=relu1_1, kernel=(3, 3), pad=(1, 1), num_filter=64, workspace=2048, name="conv1_2")
relu1_2 = mx.symbol.Activation(data=conv1_2, act_type="relu", name="relu1_2")
pool1 = mx.symbol.Pooling(
data=relu1_2, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool1")
# group 2
conv2_1 = mx.symbol.Convolution(
data=pool1, kernel=(3, 3), pad=(1, 1), num_filter=128, workspace=2048, name="conv2_1")
relu2_1 = mx.symbol.Activation(data=conv2_1, act_type="relu", name="relu2_1")
conv2_2 = mx.symbol.Convolution(
data=relu2_1, kernel=(3, 3), pad=(1, 1), num_filter=128, workspace=2048, name="conv2_2")
relu2_2 = mx.symbol.Activation(data=conv2_2, act_type="relu", name="relu2_2")
pool2 = mx.symbol.Pooling(
data=relu2_2, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool2")
# group 3
conv3_1 = mx.symbol.Convolution(
data=pool2, kernel=(3, 3), pad=(1, 1), num_filter=256, workspace=2048, name="conv3_1")
relu3_1 = mx.symbol.Activation(data=conv3_1, act_type="relu", name="relu3_1")
conv3_2 = mx.symbol.Convolution(
data=relu3_1, kernel=(3, 3), pad=(1, 1), num_filter=256, workspace=2048, name="conv3_2")
relu3_2 = mx.symbol.Activation(data=conv3_2, act_type="relu", name="relu3_2")
conv3_3 = mx.symbol.Convolution(
data=relu3_2, kernel=(3, 3), pad=(1, 1), num_filter=256, workspace=2048, name="conv3_3")
relu3_3 = mx.symbol.Activation(data=conv3_3, act_type="relu", name="relu3_3")
pool3 = mx.symbol.Pooling(
data=relu3_3, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool3")
# group 4
conv4_1 = mx.symbol.Convolution(
data=pool3, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name="conv4_1")
relu4_1 = mx.symbol.Activation(data=conv4_1, act_type="relu", name="relu4_1")
conv4_2 = mx.symbol.Convolution(
data=relu4_1, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name="conv4_2")
relu4_2 = mx.symbol.Activation(data=conv4_2, act_type="relu", name="relu4_2")
conv4_3 = mx.symbol.Convolution(
data=relu4_2, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name="conv4_3")
relu4_3 = mx.symbol.Activation(data=conv4_3, act_type="relu", name="relu4_3")
pool4 = mx.symbol.Pooling(
data=relu4_3, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool4")
# group 5
conv5_1 = mx.symbol.Convolution(
data=pool4, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name="conv5_1")
relu5_1 = mx.symbol.Activation(data=conv5_1, act_type="relu", name="relu5_1")
conv5_2 = mx.symbol.Convolution(
data=relu5_1, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name="conv5_2")
relu5_2 = mx.symbol.Activation(data=conv5_2, act_type="relu", name="relu5_2")
conv5_3 = mx.symbol.Convolution(
data=relu5_2, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name="conv5_3")
relu5_3 = mx.symbol.Activation(data=conv5_3, act_type="relu", name="relu5_3")
return relu5_3
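# A minimal sketch (not part of the original file) of checking the feature map
# produced by the shared layers; four 2x2 max-poolings give a spatial size of
# input/16, i.e. a feature stride of 16:
#
#   conv_feat = get_vgg_conv(mx.symbol.Variable(name="data"))
#   _, out_shapes, _ = conv_feat.infer_shape(data=(1, 3, 224, 224))
#   # out_shapes[0] == (1, 512, 14, 14)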
def get_vgg_rcnn(num_classes=config.NUM_CLASSES):
"""
Fast R-CNN with VGG 16 conv layers
:param num_classes: used to determine output size
:return: Symbol
"""
data = mx.symbol.Variable(name="data")
rois = mx.symbol.Variable(name='rois')
label = mx.symbol.Variable(name='label')
bbox_target = mx.symbol.Variable(name='bbox_target')
bbox_weight = mx.symbol.Variable(name='bbox_weight')
# reshape input
rois = mx.symbol.Reshape(data=rois, shape=(-1, 5), name='rois_reshape')
label = mx.symbol.Reshape(data=label, shape=(-1, ), name='label_reshape')
bbox_target = mx.symbol.Reshape(data=bbox_target, shape=(-1, 4 * num_classes), name='bbox_target_reshape')
bbox_weight = mx.symbol.Reshape(data=bbox_weight, shape=(-1, 4 * num_classes), name='bbox_weight_reshape')
# shared convolutional layers
relu5_3 = get_vgg_conv(data)
# Fast R-CNN
pool5 = mx.symbol.ROIPooling(
name='roi_pool5', data=relu5_3, rois=rois, pooled_size=(7, 7), spatial_scale=1.0 / config.RCNN_FEAT_STRIDE)
# group 6
flatten = mx.symbol.Flatten(data=pool5, name="flatten")
fc6 = mx.symbol.FullyConnected(data=flatten, num_hidden=4096, name="fc6")
relu6 = mx.symbol.Activation(data=fc6, act_type="relu", name="relu6")
drop6 = mx.symbol.Dropout(data=relu6, p=0.5, name="drop6")
# group 7
fc7 = mx.symbol.FullyConnected(data=drop6, num_hidden=4096, name="fc7")
relu7 = mx.symbol.Activation(data=fc7, act_type="relu", name="relu7")
drop7 = mx.symbol.Dropout(data=relu7, p=0.5, name="drop7")
# classification
cls_score = mx.symbol.FullyConnected(name='cls_score', data=drop7, num_hidden=num_classes)
cls_prob = mx.symbol.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='batch')
# bounding box regression
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=drop7, num_hidden=num_classes * 4)
bbox_loss_ = bbox_weight * mx.symbol.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / config.TRAIN.BATCH_ROIS)
# reshape output
cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(config.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_loss = mx.symbol.Reshape(data=bbox_loss, shape=(config.TRAIN.BATCH_IMAGES, -1, 4 * num_classes), name='bbox_loss_reshape')
# group output
group = mx.symbol.Group([cls_prob, bbox_loss])
return group
def get_vgg_rcnn_test(num_classes=config.NUM_CLASSES):
"""
Fast R-CNN Network with VGG
:param num_classes: used to determine output size
:return: Symbol
"""
data = mx.symbol.Variable(name="data")
rois = mx.symbol.Variable(name='rois')
# reshape rois
rois = mx.symbol.Reshape(data=rois, shape=(-1, 5), name='rois_reshape')
# shared convolutional layer
relu5_3 = get_vgg_conv(data)
# Fast R-CNN
pool5 = mx.symbol.ROIPooling(
name='roi_pool5', data=relu5_3, rois=rois, pooled_size=(7, 7), spatial_scale=1.0 / config.RCNN_FEAT_STRIDE)
# group 6
flatten = mx.symbol.Flatten(data=pool5, name="flatten")
fc6 = mx.symbol.FullyConnected(data=flatten, num_hidden=4096, name="fc6")
relu6 = mx.symbol.Activation(data=fc6, act_type="relu", name="relu6")
drop6 = mx.symbol.Dropout(data=relu6, p=0.5, name="drop6")
# group 7
fc7 = mx.symbol.FullyConnected(data=drop6, num_hidden=4096, name="fc7")
relu7 = mx.symbol.Activation(data=fc7, act_type="relu", name="relu7")
drop7 = mx.symbol.Dropout(data=relu7, p=0.5, name="drop7")
# classification
cls_score = mx.symbol.FullyConnected(name='cls_score', data=drop7, num_hidden=num_classes)
cls_prob = mx.symbol.softmax(name='cls_prob', data=cls_score)
# bounding box regression
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=drop7, num_hidden=num_classes * 4)
# reshape output
cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(config.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_pred = mx.symbol.Reshape(data=bbox_pred, shape=(config.TEST.BATCH_IMAGES, -1, 4 * num_classes), name='bbox_pred_reshape')
# group output
group = mx.symbol.Group([cls_prob, bbox_pred])
return group
def get_vgg_rpn(num_anchors=config.NUM_ANCHORS):
"""
Region Proposal Network with VGG
:param num_anchors: used to determine output size
:return: Symbol
"""
data = mx.symbol.Variable(name="data")
label = mx.symbol.Variable(name='label')
bbox_target = mx.symbol.Variable(name='bbox_target')
bbox_weight = mx.symbol.Variable(name='bbox_weight')
# shared convolutional layers
relu5_3 = get_vgg_conv(data)
# RPN
rpn_conv = mx.symbol.Convolution(
data=relu5_3, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
rpn_cls_score = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_bbox_pred = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
# prepare rpn data
rpn_cls_score_reshape = mx.symbol.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
# classification
cls_prob = mx.symbol.SoftmaxOutput(data=rpn_cls_score_reshape, label=label, multi_output=True,
normalization='valid', use_ignore=True, ignore_label=-1, name="cls_prob")
# bounding box regression
bbox_loss_ = bbox_weight * mx.symbol.smooth_l1(name='bbox_loss_', scalar=3.0, data=(rpn_bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / config.TRAIN.RPN_BATCH_SIZE)
# group output
group = mx.symbol.Group([cls_prob, bbox_loss])
return group
def get_vgg_rpn_test(num_anchors=config.NUM_ANCHORS):
"""
Region Proposal Network with VGG
:param num_anchors: used to determine output size
:return: Symbol
"""
data = mx.symbol.Variable(name="data")
im_info = mx.symbol.Variable(name="im_info")
# shared convolutional layers
relu5_3 = get_vgg_conv(data)
# RPN
rpn_conv = mx.symbol.Convolution(
data=relu5_3, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
rpn_cls_score = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_bbox_pred = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
# ROI Proposal
rpn_cls_score_reshape = mx.symbol.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
rpn_cls_prob = mx.symbol.SoftmaxActivation(
data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_prob")
rpn_cls_prob_reshape = mx.symbol.Reshape(
data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_reshape')
if config.TEST.CXX_PROPOSAL:
group = mx.contrib.symbol.Proposal(
cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois', output_score=True,
feature_stride=config.RPN_FEAT_STRIDE, scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TEST.PROPOSAL_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TEST.PROPOSAL_POST_NMS_TOP_N,
threshold=config.TEST.PROPOSAL_NMS_THRESH, rpn_min_size=config.TEST.PROPOSAL_MIN_SIZE)
else:
group = mx.symbol.Custom(
cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois', output_score=True,
op_type='proposal', feat_stride=config.RPN_FEAT_STRIDE,
scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TEST.PROPOSAL_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TEST.PROPOSAL_POST_NMS_TOP_N,
threshold=config.TEST.PROPOSAL_NMS_THRESH, rpn_min_size=config.TEST.PROPOSAL_MIN_SIZE)
# rois = group[0]
# score = group[1]
return group
def get_vgg_test(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS):
"""
Faster R-CNN test with VGG 16 conv layers
:param num_classes: used to determine output size
:param num_anchors: used to determine output size
:return: Symbol
"""
data = mx.symbol.Variable(name="data")
im_info = mx.symbol.Variable(name="im_info")
# shared convolutional layers
relu5_3 = get_vgg_conv(data)
# RPN
rpn_conv = mx.symbol.Convolution(
data=relu5_3, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
rpn_cls_score = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_bbox_pred = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
# ROI Proposal
rpn_cls_score_reshape = mx.symbol.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
rpn_cls_prob = mx.symbol.SoftmaxActivation(
data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_prob")
rpn_cls_prob_reshape = mx.symbol.Reshape(
data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_reshape')
if config.TEST.CXX_PROPOSAL:
rois = mx.contrib.symbol.Proposal(
cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
feature_stride=config.RPN_FEAT_STRIDE, scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TEST.RPN_POST_NMS_TOP_N,
threshold=config.TEST.RPN_NMS_THRESH, rpn_min_size=config.TEST.RPN_MIN_SIZE)
else:
rois = mx.symbol.Custom(
cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
op_type='proposal', feat_stride=config.RPN_FEAT_STRIDE,
scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TEST.RPN_POST_NMS_TOP_N,
threshold=config.TEST.RPN_NMS_THRESH, rpn_min_size=config.TEST.RPN_MIN_SIZE)
# Fast R-CNN
pool5 = mx.symbol.ROIPooling(
name='roi_pool5', data=relu5_3, rois=rois, pooled_size=(7, 7), spatial_scale=1.0 / config.RCNN_FEAT_STRIDE)
# group 6
flatten = mx.symbol.Flatten(data=pool5, name="flatten")
fc6 = mx.symbol.FullyConnected(data=flatten, num_hidden=4096, name="fc6")
relu6 = mx.symbol.Activation(data=fc6, act_type="relu", name="relu6")
drop6 = mx.symbol.Dropout(data=relu6, p=0.5, name="drop6")
# group 7
fc7 = mx.symbol.FullyConnected(data=drop6, num_hidden=4096, name="fc7")
relu7 = mx.symbol.Activation(data=fc7, act_type="relu", name="relu7")
drop7 = mx.symbol.Dropout(data=relu7, p=0.5, name="drop7")
# classification
cls_score = mx.symbol.FullyConnected(name='cls_score', data=drop7, num_hidden=num_classes)
cls_prob = mx.symbol.softmax(name='cls_prob', data=cls_score)
# bounding box regression
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=drop7, num_hidden=num_classes * 4)
# reshape output
cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(config.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_pred = mx.symbol.Reshape(data=bbox_pred, shape=(config.TEST.BATCH_IMAGES, -1, 4 * num_classes), name='bbox_pred_reshape')
# group output
group = mx.symbol.Group([rois, cls_prob, bbox_pred])
return group
def get_vgg_train(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS):
"""
Faster R-CNN end-to-end with VGG 16 conv layers
:param num_classes: used to determine output size
:param num_anchors: used to determine output size
:return: Symbol
"""
data = mx.symbol.Variable(name="data")
im_info = mx.symbol.Variable(name="im_info")
gt_boxes = mx.symbol.Variable(name="gt_boxes")
rpn_label = mx.symbol.Variable(name='label')
rpn_bbox_target = mx.symbol.Variable(name='bbox_target')
rpn_bbox_weight = mx.symbol.Variable(name='bbox_weight')
# shared convolutional layers
relu5_3 = get_vgg_conv(data)
# RPN layers
rpn_conv = mx.symbol.Convolution(
data=relu5_3, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
rpn_cls_score = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_bbox_pred = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
# prepare rpn data
rpn_cls_score_reshape = mx.symbol.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
# classification
rpn_cls_prob = mx.symbol.SoftmaxOutput(data=rpn_cls_score_reshape, label=rpn_label, multi_output=True,
normalization='valid', use_ignore=True, ignore_label=-1, name="rpn_cls_prob")
# bounding box regression
rpn_bbox_loss_ = rpn_bbox_weight * mx.symbol.smooth_l1(name='rpn_bbox_loss_', scalar=3.0, data=(rpn_bbox_pred - rpn_bbox_target))
rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss_, grad_scale=1.0 / config.TRAIN.RPN_BATCH_SIZE)
# ROI proposal
rpn_cls_act = mx.symbol.SoftmaxActivation(
data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_act")
rpn_cls_act_reshape = mx.symbol.Reshape(
data=rpn_cls_act, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_act_reshape')
if config.TRAIN.CXX_PROPOSAL:
rois = mx.contrib.symbol.Proposal(
cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
feature_stride=config.RPN_FEAT_STRIDE, scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TRAIN.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TRAIN.RPN_POST_NMS_TOP_N,
threshold=config.TRAIN.RPN_NMS_THRESH, rpn_min_size=config.TRAIN.RPN_MIN_SIZE)
else:
rois = mx.symbol.Custom(
cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
op_type='proposal', feat_stride=config.RPN_FEAT_STRIDE,
scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TRAIN.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TRAIN.RPN_POST_NMS_TOP_N,
threshold=config.TRAIN.RPN_NMS_THRESH, rpn_min_size=config.TRAIN.RPN_MIN_SIZE)
# ROI proposal target
gt_boxes_reshape = mx.symbol.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape')
group = mx.symbol.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target',
num_classes=num_classes, batch_images=config.TRAIN.BATCH_IMAGES,
batch_rois=config.TRAIN.BATCH_ROIS, fg_fraction=config.TRAIN.FG_FRACTION)
rois = group[0]
label = group[1]
bbox_target = group[2]
bbox_weight = group[3]
# Fast R-CNN
pool5 = mx.symbol.ROIPooling(
name='roi_pool5', data=relu5_3, rois=rois, pooled_size=(7, 7), spatial_scale=1.0 / config.RCNN_FEAT_STRIDE)
# group 6
flatten = mx.symbol.Flatten(data=pool5, name="flatten")
fc6 = mx.symbol.FullyConnected(data=flatten, num_hidden=4096, name="fc6")
relu6 = mx.symbol.Activation(data=fc6, act_type="relu", name="relu6")
drop6 = mx.symbol.Dropout(data=relu6, p=0.5, name="drop6")
# group 7
fc7 = mx.symbol.FullyConnected(data=drop6, num_hidden=4096, name="fc7")
relu7 = mx.symbol.Activation(data=fc7, act_type="relu", name="relu7")
drop7 = mx.symbol.Dropout(data=relu7, p=0.5, name="drop7")
# classification
cls_score = mx.symbol.FullyConnected(name='cls_score', data=drop7, num_hidden=num_classes)
cls_prob = mx.symbol.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='batch')
# bounding box regression
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=drop7, num_hidden=num_classes * 4)
bbox_loss_ = bbox_weight * mx.symbol.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / config.TRAIN.BATCH_ROIS)
# reshape output
label = mx.symbol.Reshape(data=label, shape=(config.TRAIN.BATCH_IMAGES, -1), name='label_reshape')
cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(config.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_loss = mx.symbol.Reshape(data=bbox_loss, shape=(config.TRAIN.BATCH_IMAGES, -1, 4 * num_classes), name='bbox_loss_reshape')
group = mx.symbol.Group([rpn_cls_prob, rpn_bbox_loss, cls_prob, bbox_loss, mx.symbol.BlockGrad(label)])
return group
|
wangyang59/tf_models
|
refs/heads/master
|
slim/nets/inception_v1_test.py
|
54
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nets.inception_v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from nets import inception
slim = tf.contrib.slim
class InceptionV1Test(tf.test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
mixed_6c, end_points = inception.inception_v1_base(inputs)
self.assertTrue(mixed_6c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_6c.get_shape().as_list(),
[batch_size, 7, 7, 1024])
expected_endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b',
'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c',
'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2',
'Mixed_5b', 'Mixed_5c']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 224, 224
endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d',
'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b',
'Mixed_5c']
for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default():
inputs = tf.random_uniform((batch_size, height, width, 3))
out_tensor, end_points = inception.inception_v1_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'InceptionV1/' + endpoint))
self.assertItemsEqual(endpoints[:index+1], end_points)
def testBuildAndCheckAllEndPointsUptoMixed5c(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v1_base(inputs,
final_endpoint='Mixed_5c')
endpoints_shapes = {'Conv2d_1a_7x7': [5, 112, 112, 64],
'MaxPool_2a_3x3': [5, 56, 56, 64],
'Conv2d_2b_1x1': [5, 56, 56, 64],
'Conv2d_2c_3x3': [5, 56, 56, 192],
'MaxPool_3a_3x3': [5, 28, 28, 192],
'Mixed_3b': [5, 28, 28, 256],
'Mixed_3c': [5, 28, 28, 480],
'MaxPool_4a_3x3': [5, 14, 14, 480],
'Mixed_4b': [5, 14, 14, 512],
'Mixed_4c': [5, 14, 14, 512],
'Mixed_4d': [5, 14, 14, 512],
'Mixed_4e': [5, 14, 14, 528],
'Mixed_4f': [5, 14, 14, 832],
'MaxPool_5a_2x2': [5, 7, 7, 832],
'Mixed_5b': [5, 7, 7, 832],
'Mixed_5c': [5, 7, 7, 1024]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope(inception.inception_v1_arg_scope()):
inception.inception_v1_base(inputs)
total_params, _ = slim.model_analyzer.analyze_vars(
slim.get_model_variables())
self.assertAlmostEqual(5607184, total_params)
def testHalfSizeImages(self):
batch_size = 5
height, width = 112, 112
inputs = tf.random_uniform((batch_size, height, width, 3))
mixed_5c, _ = inception.inception_v1_base(inputs)
self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_5c.get_shape().as_list(),
[batch_size, 4, 4, 1024])
def testUnknownImageShape(self):
tf.reset_default_graph()
batch_size = 2
height, width = 224, 224
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
tf.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testUnknowBatchSize(self):
batch_size = 1
height, width = 224, 224
num_classes = 1000
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random_uniform((batch_size, height, width, 3))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = inception.inception_v1(eval_inputs, num_classes,
is_training=False)
predictions = tf.argmax(logits, 1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 224, 224
num_classes = 1000
train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
inception.inception_v1(train_inputs, num_classes)
eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_v1(eval_inputs, num_classes, reuse=True)
predictions = tf.argmax(logits, 1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = tf.random_uniform([1, 224, 224, 3])
logits, _ = inception.inception_v1(images,
num_classes=num_classes,
spatial_squeeze=False)
with self.test_session() as sess:
tf.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
if __name__ == '__main__':
tf.test.main()
|
simonpatrick/bite-project
|
refs/heads/master
|
tools/bugs/server/appengine/handlers/bugs/urls.py
|
17
|
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Get a set of bugs based on url."""
__author__ = 'jason.stredwick@gmail.com (Jason Stredwick)'
import logging
import webapp2
from bugs import kind
from bugs.models.url_bug_map import get_bugs
from common.handlers import base
class Error(base.Error):
pass
class UrlsHandler(base.BaseHandler):
"""Get bug entries based on url."""
# Disable 'Invalid method name' lint error.
# pylint: disable-msg=C6409
def post(self):
"""Get bugs for the given urls.
Raises:
Error: Raised upon failure.
"""
logging.info('UrlBugMap handler; bugs.handlers.bugs.urls.UrlsHandler')
try:
data = self.GetData(kind.Kind.URLS)
mappings = get_bugs.GetBugs(data['urls'])
self.WriteResponse({'kind': kind.Kind.URL_BUG_MAP, 'mappings': mappings})
except get_bugs.Error, e:
raise Error('Failed to retrieve bugs for Url to Bug map: %s\n' % e,
code=400)
except base.Error, e:
raise Error(e)
routes = [
webapp2.Route(r'/bugs/urls', handler=UrlsHandler, name='bugs_urls',
methods=['POST'])
]
app = webapp2.WSGIApplication(routes, debug=True)
|
kaji-project/shinken
|
refs/heads/kaji
|
test/test_passive_pollers.py
|
18
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from shinken_test import *
class GoodArbiter(ArbiterLink):
# To lie about satellites
def ping(self):
print "Dummy OK for", self.get_name()
self.set_alive()
def have_conf(self, i):
return True
def do_not_run(self):
pass
class GoodScheduler(SchedulerLink):
# To lie about satellites
def ping(self):
print "Dummy OK for", self.get_name()
self.set_alive()
def have_conf(self, i):
return True
def put_conf(self, conf):
return True
class BadScheduler(SchedulerLink):
def ping(self):
print "Dummy bad ping", self.get_name()
self.add_failed_check_attempt()
def have_conf(self, i):
return False
class GoodPoller(PollerLink):
# To lie about satellites
def ping(self):
print "Dummy OK for", self.get_name()
self.set_alive()
def put_conf(self, conf):
return True
class BadPoller(PollerLink):
def ping(self):
print "Dummy bad ping", self.get_name()
self.add_failed_check_attempt()
class GoodReactionner(ReactionnerLink):
# To lie about satellites
def ping(self):
print "Dummy OK for", self.get_name()
self.set_alive()
def put_conf(self, conf):
return True
class BadReactionner(ReactionnerLink):
def ping(self):
print "Dummy bad ping", self.get_name()
self.add_failed_check_attempt()
class GoodBroker(BrokerLink):
# To lie about satellites
def ping(self):
print "Dummy OK for", self.get_name()
self.set_alive()
def put_conf(self, conf):
return True
class BadBroker(BrokerLink):
def ping(self):
print "Dummy bad ping", self.get_name()
self.add_failed_check_attempt()
class TestPassivePoller(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_passive_pollers.cfg')
def test_simple_passive_pollers(self):
print "The dispatcher", self.dispatcher
# dummy for the arbiter
for a in self.conf.arbiters:
a.__class__ = GoodArbiter
print "Preparing schedulers"
scheduler1 = self.conf.schedulers.find_by_name('scheduler-all-1')
self.assertIsNot(scheduler1, None)
scheduler1.__class__ = GoodScheduler
scheduler2 = self.conf.schedulers.find_by_name('scheduler-all-2')
self.assertIsNot(scheduler2, None)
scheduler2.__class__ = BadScheduler
# Poller 1 is normal, 2 and 3 are passives
print "Preparing pollers"
poller1 = self.conf.pollers.find_by_name('poller-all-1')
self.assertIsNot(poller1, None)
poller1.__class__ = GoodPoller
print poller1.__dict__
self.assertEqual(False, poller1.passive)
poller2 = self.conf.pollers.find_by_name('poller-all-2')
self.assertIsNot(poller2, None)
poller2.__class__ = GoodPoller
self.assertEqual(True, poller2.passive)
poller3 = self.conf.pollers.find_by_name('poller-all-3')
self.assertIsNot(poller3, None)
poller3.__class__ = GoodPoller
self.assertEqual(True, poller3.passive)
print "Preparing reactionners"
reactionner1 = self.conf.reactionners.find_by_name('reactionner-all-1')
self.assertIsNot(reactionner1, None)
reactionner1.__class__ = GoodReactionner
reactionner2 = self.conf.reactionners.find_by_name('reactionner-all-2')
self.assertIsNot(reactionner2, None)
reactionner2.__class__ = BadReactionner
print "Preparing brokers"
broker1 = self.conf.brokers.find_by_name('broker-all-1')
self.assertIsNot(broker1, None)
broker1.__class__ = GoodBroker
broker2 = self.conf.brokers.find_by_name('broker-all-2')
self.assertIsNot(broker2, None)
broker2.__class__ = BadBroker
# Ping all elements. Should have 1 as OK, 2 as
# one bad attempt (3 max)
self.dispatcher.check_alive()
# Check good values
self.assertEqual(True, scheduler1.alive)
self.assertEqual(0, scheduler1.attempt)
self.assertEqual(True, scheduler1.reachable)
# still alive, just unreach
self.assertEqual(True, scheduler2.alive)
self.assertEqual(1, scheduler2.attempt)
self.assertEqual(False, scheduler2.reachable)
# and others satellites too
self.assertEqual(True, poller1.alive)
self.assertEqual(0, poller1.attempt)
self.assertEqual(True, poller1.reachable)
# still alive, just unreach
self.assertEqual(True, poller2.alive)
self.assertEqual(0, poller2.attempt)
self.assertEqual(True, poller2.reachable)
# and others satellites too
self.assertEqual(True, reactionner1.alive)
self.assertEqual(0, reactionner1.attempt)
self.assertEqual(True, reactionner1.reachable)
# still alive, just unreach
self.assertEqual(True, reactionner2.alive)
self.assertEqual(1, reactionner2.attempt)
self.assertEqual(False, reactionner2.reachable)
# and others satellites too
self.assertEqual(True, broker1.alive)
self.assertEqual(0, broker1.attempt)
self.assertEqual(True, broker1.reachable)
# still alive, just unreach
self.assertEqual(True, broker2.alive)
self.assertEqual(1, broker2.attempt)
self.assertEqual(False, broker2.reachable)
time.sleep(60)
        ### Now add another attempt, still alive, but attempt=2/3
self.dispatcher.check_alive()
# Check good values
self.assertEqual(True, scheduler1.alive)
self.assertEqual(0, scheduler1.attempt)
self.assertEqual(True, scheduler1.reachable)
# still alive, just unreach
self.assertEqual(True, scheduler2.alive)
#import pdb; pdb.set_trace()
self.assertEqual(2, scheduler2.attempt)
self.assertEqual(False, scheduler2.reachable)
# and others satellites too
self.assertEqual(True, poller1.alive)
self.assertEqual(0, poller1.attempt)
self.assertEqual(True, poller1.reachable)
# poller-2 is passive but it answers, so no failed attempt
self.assertEqual(True, poller2.alive)
self.assertEqual(0, poller2.attempt)
self.assertEqual(True, poller2.reachable)
# and the other satellites too
self.assertEqual(True, reactionner1.alive)
self.assertEqual(0, reactionner1.attempt)
self.assertEqual(True, reactionner1.reachable)
# still alive, just unreach
self.assertEqual(True, reactionner2.alive)
self.assertEqual(2, reactionner2.attempt)
self.assertEqual(False, reactionner2.reachable)
# and the other satellites too
self.assertEqual(True, broker1.alive)
self.assertEqual(0, broker1.attempt)
self.assertEqual(True, broker1.reachable)
# still alive, just unreach
self.assertEqual(True, broker2.alive)
self.assertEqual(2, broker2.attempt)
self.assertEqual(False, broker2.reachable)
time.sleep(60)
### Third failed check: the bad "-2" satellites hit 3/3 attempts and are marked dead
self.dispatcher.check_alive()
# Check good values
self.assertEqual(True, scheduler1.alive)
self.assertEqual(0, scheduler1.attempt)
self.assertEqual(True, scheduler1.reachable)
# scheduler-2 is now dead after 3 failed attempts
self.assertEqual(False, scheduler2.alive)
self.assertEqual(3, scheduler2.attempt)
self.assertEqual(False, scheduler2.reachable)
# and the other satellites too
self.assertEqual(True, poller1.alive)
self.assertEqual(0, poller1.attempt)
self.assertEqual(True, poller1.reachable)
# poller-2 is passive but it answers, so no failed attempt
self.assertEqual(True, poller2.alive)
self.assertEqual(0, poller2.attempt)
self.assertEqual(True, poller2.reachable)
# and the other satellites too
self.assertEqual(True, reactionner1.alive)
self.assertEqual(0, reactionner1.attempt)
self.assertEqual(True, reactionner1.reachable)
# reactionner-2 is now dead after 3 failed attempts
self.assertEqual(False, reactionner2.alive)
self.assertEqual(3, reactionner2.attempt)
self.assertEqual(False, reactionner2.reachable)
# and the other satellites too
self.assertEqual(True, broker1.alive)
self.assertEqual(0, broker1.attempt)
self.assertEqual(True, broker1.reachable)
# broker-2 is now dead after 3 failed attempts
self.assertEqual(False, broker2.alive)
self.assertEqual(3, broker2.attempt)
self.assertEqual(False, broker2.reachable)
# Now we check how we should dispatch confs
self.dispatcher.check_dispatch()
# the dispatch should not be fully OK (some satellites are dead)
self.assertEqual(False, self.dispatcher.dispatch_ok)
# Now we really dispatch them!
self.dispatcher.dispatch()
self.assert_any_log_match('Dispatch OK of conf in scheduler scheduler-all-1')
self.assert_any_log_match('Dispatch OK of configuration 0 to reactionner reactionner-all-1')
self.assert_any_log_match('Dispatch OK of configuration 0 to poller poller-all-1')
self.assert_any_log_match('Dispatch OK of configuration 0 to broker broker-all-1')
self.clear_logs()
# And check that the configurations were really dispatched as expected
for r in self.conf.realms:
for cfg in r.confs.values():
self.assertEqual(True, cfg.is_assigned)
self.assertEqual(scheduler1, cfg.assigned_to)
if __name__ == '__main__':
unittest.main()
|
MSeifert04/numpy
|
refs/heads/master
|
numpy/distutils/fcompiler/ibm.py
|
8
|
from __future__ import division, absolute_import, print_function
import os
import re
import sys
import subprocess
from numpy.distutils.fcompiler import FCompiler
from numpy.distutils.exec_command import find_executable
from numpy.distutils.misc_util import make_temp_file
from distutils import log
compilers = ['IBMFCompiler']
class IBMFCompiler(FCompiler):
compiler_type = 'ibm'
description = 'IBM XL Fortran Compiler'
version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P<version>[^\s*]*)'
#IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004
executables = {
'version_cmd' : ["<F77>", "-qversion"],
'compiler_f77' : ["xlf"],
'compiler_fix' : ["xlf90", "-qfixed"],
'compiler_f90' : ["xlf90"],
'linker_so' : ["xlf95"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
def get_version(self,*args,**kwds):
version = FCompiler.get_version(self,*args,**kwds)
if version is None and sys.platform.startswith('aix'):
# use lslpp to find out xlf version
lslpp = find_executable('lslpp')
xlf = find_executable('xlf')
if os.path.exists(xlf) and os.path.exists(lslpp):
try:
o = subprocess.check_output([lslpp, '-Lc', 'xlfcmp'])
except (OSError, subprocess.CalledProcessError):
pass
else:
m = re.search(r'xlfcmp:(?P<version>\d+([.]\d+)+)', o)
if m: version = m.group('version')
xlf_dir = '/etc/opt/ibmcmp/xlf'
if version is None and os.path.isdir(xlf_dir):
# linux:
# If the output of xlf does not contain version info
# (that's the case with xlf 8.1, for instance) then
# let's try another method:
l = sorted(os.listdir(xlf_dir))
l.reverse()
l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))]
if l:
from distutils.version import LooseVersion
self.version = version = LooseVersion(l[0])
return version
def get_flags(self):
return ['-qextname']
def get_flags_debug(self):
return ['-g']
def get_flags_linker_so(self):
opt = []
if sys.platform=='darwin':
opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress')
else:
opt.append('-bshared')
version = self.get_version(ok_status=[0, 40])
if version is not None:
if sys.platform.startswith('aix'):
xlf_cfg = '/etc/xlf.cfg'
else:
xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version
fo, new_cfg = make_temp_file(suffix='_xlf.cfg')
log.info('Creating '+new_cfg)
with open(xlf_cfg, 'r') as fi:
crt1_match = re.compile(r'\s*crt\s*[=]\s*(?P<path>.*)/crt1.o').match
for line in fi:
m = crt1_match(line)
if m:
fo.write('crt = %s/bundle1.o\n' % (m.group('path')))
else:
fo.write(line)
fo.close()
opt.append('-F'+new_cfg)
return opt
def get_flags_opt(self):
return ['-O3']
if __name__ == '__main__':
from numpy.distutils import customized_fcompiler
log.set_verbosity(2)
print(customized_fcompiler(compiler='ibm').get_version())
|
prodromou87/gem5
|
refs/heads/master
|
tests/quick/se/02.insttest/test.py
|
56
|
# Copyright (c) 2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
root.system.cpu[0].workload = LiveProcess(cmd = 'insttest',
executable = binpath('insttest'))
|
charleszheng44/cctools
|
refs/heads/master
|
weaver/src/examples/functions.py
|
13
|
stat = ParseFunction('stat {IN} > {OUT}')
file = stat('/etc/hosts', '{basename}.stat')
stat(file, '{FULL}.stat', collect=True)
Define('MYVAR1', 1)
Export(['MYVAR1', 'MYVAR2'])
env = ParseFunction('env > {OUT}', environment={'MYVAR2': 2})
env(outputs='env0.txt')
env(outputs='env1.txt', environment={'MYVAR3': 3})
|
lategoodbye/linux-lcd6610
|
refs/heads/master
|
scripts/gdb/linux/cpus.py
|
997
|
#
# gdb helper commands and functions for Linux kernel debugging
#
# per-cpu tools
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import tasks, utils
MAX_CPUS = 4096
def get_current_cpu():
if utils.get_gdbserver_type() == utils.GDBSERVER_QEMU:
return gdb.selected_thread().num - 1
elif utils.get_gdbserver_type() == utils.GDBSERVER_KGDB:
tid = gdb.selected_thread().ptid[2]
if tid > (0x100000000 - MAX_CPUS - 2):
return 0x100000000 - tid - 2
else:
return tasks.get_thread_info(tasks.get_task_by_pid(tid))['cpu']
else:
raise gdb.GdbError("Sorry, obtaining the current CPU is not yet "
"supported with this gdb server.")
def per_cpu(var_ptr, cpu):
if cpu == -1:
cpu = get_current_cpu()
if utils.is_target_arch("sparc:v9"):
offset = gdb.parse_and_eval(
"trap_block[{0}].__per_cpu_base".format(str(cpu)))
else:
try:
offset = gdb.parse_and_eval(
"__per_cpu_offset[{0}]".format(str(cpu)))
except gdb.error:
# !CONFIG_SMP case
offset = 0
pointer = var_ptr.cast(utils.get_long_type()) + offset
return pointer.cast(var_ptr.type).dereference()
cpu_mask = {}
def cpu_mask_invalidate(event):
global cpu_mask
cpu_mask = {}
gdb.events.stop.disconnect(cpu_mask_invalidate)
if hasattr(gdb.events, 'new_objfile'):
gdb.events.new_objfile.disconnect(cpu_mask_invalidate)
def cpu_list(mask_name):
global cpu_mask
mask = None
if mask_name in cpu_mask:
mask = cpu_mask[mask_name]
if mask is None:
mask = gdb.parse_and_eval(mask_name + ".bits")
if hasattr(gdb, 'events'):
cpu_mask[mask_name] = mask
gdb.events.stop.connect(cpu_mask_invalidate)
if hasattr(gdb.events, 'new_objfile'):
gdb.events.new_objfile.connect(cpu_mask_invalidate)
bits_per_entry = mask[0].type.sizeof * 8
num_entries = mask.type.sizeof * 8 / bits_per_entry
entry = -1
bits = 0
while True:
while bits == 0:
entry += 1
if entry == num_entries:
return
bits = mask[entry]
if bits != 0:
bit = 0
break
while bits & 1 == 0:
bits >>= 1
bit += 1
cpu = entry * bits_per_entry + bit
bits >>= 1
bit += 1
yield cpu
class PerCpu(gdb.Function):
"""Return per-cpu variable.
$lx_per_cpu("VAR"[, CPU]): Return the per-cpu variable called VAR for the
given CPU number. If CPU is omitted, the CPU of the current context is used.
Note that VAR has to be quoted as string."""
def __init__(self):
super(PerCpu, self).__init__("lx_per_cpu")
def invoke(self, var_name, cpu=-1):
var_ptr = gdb.parse_and_eval("&" + var_name.string())
return per_cpu(var_ptr, cpu)
PerCpu()
class LxCurrentFunc(gdb.Function):
"""Return current task.
$lx_current([CPU]): Return the per-cpu task variable for the given CPU
number. If CPU is omitted, the CPU of the current context is used."""
def __init__(self):
super(LxCurrentFunc, self).__init__("lx_current")
def invoke(self, cpu=-1):
var_ptr = gdb.parse_and_eval("¤t_task")
return per_cpu(var_ptr, cpu).dereference()
LxCurrentFunc()
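# A hedged usage sketch: the convenience functions registered above are meant to
# be called from a gdb session attached to a Linux kernel with these scripts
# loaded, e.g.
#
#   (gdb) p $lx_current().pid
#   (gdb) p $lx_per_cpu("runqueues", 1).nr_running
#
# The helper below is only an illustrative wrapper around cpu_list(); the mask
# symbol name is an assumption and may differ between kernel versions.
def _online_cpus_example():
    # Walk the online-CPU mask and collect each set CPU number.
    return list(cpu_list("__cpu_online_mask"))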
|
django-nonrel/django-nonrel
|
refs/heads/develop
|
django/conf/locale/id/formats.py
|
355
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j N Y'
DATETIME_FORMAT = "j N Y, G.i.s"
TIME_FORMAT = 'G.i.s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y G.i.s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d-%m-%y', '%d/%m/%y', # '25-10-09', '25/10/09'
'%d-%m-%Y', '%d/%m/%Y', # '25-10-2009', '25/10/2009'
'%d %b %Y', # '25 Oct 2006',
'%d %B %Y', # '25 October 2006'
)
TIME_INPUT_FORMATS = (
'%H.%M.%S', # '14.30.59'
'%H.%M', # '14.30'
)
DATETIME_INPUT_FORMATS = (
'%d-%m-%Y %H.%M.%S', # '25-10-2009 14.30.59'
'%d-%m-%Y %H.%M', # '25-10-2009 14.30'
'%d-%m-%Y', # '25-10-2009'
'%d-%m-%y %H.%M.%S', # '25-10-09 14.30.59'
'%d-%m-%y %H.%M', # '25-10-09 14.30'
'%d-%m-%y', # '25-10-09'
'%m/%d/%y %H.%M.%S', # '10/25/06 14.30.59'
'%m/%d/%y %H.%M', # '10/25/06 14.30'
'%m/%d/%y', # '10/25/06'
'%m/%d/%Y %H.%M.%S', # '10/25/2009 14.30.59'
'%m/%d/%Y %H.%M', # '10/25/2009 14.30'
'%m/%d/%Y', # '10/25/2009'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
nox/servo
|
refs/heads/master
|
components/script/dom/bindings/codegen/parser/runtests.py
|
146
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os, sys
import glob
import optparse
import traceback
import WebIDL
class TestHarness(object):
def __init__(self, test, verbose):
self.test = test
self.verbose = verbose
self.printed_intro = False
def start(self):
if self.verbose:
self.maybe_print_intro()
def finish(self):
if self.verbose or self.printed_intro:
print("Finished test %s" % self.test)
def maybe_print_intro(self):
if not self.printed_intro:
print("Starting test %s" % self.test)
self.printed_intro = True
def test_pass(self, msg):
if self.verbose:
print("TEST-PASS | %s" % msg)
def test_fail(self, msg):
self.maybe_print_intro()
print("TEST-UNEXPECTED-FAIL | %s" % msg)
def ok(self, condition, msg):
if condition:
self.test_pass(msg)
else:
self.test_fail(msg)
def check(self, a, b, msg):
if a == b:
self.test_pass(msg)
else:
self.test_fail(msg)
print("\tGot %s expected %s" % (a, b))
def run_tests(tests, verbose):
testdir = os.path.join(os.path.dirname(__file__), 'tests')
if not tests:
tests = glob.iglob(os.path.join(testdir, "*.py"))
sys.path.append(testdir)
for test in tests:
(testpath, ext) = os.path.splitext(os.path.basename(test))
_test = __import__(testpath, globals(), locals(), ['WebIDLTest'])
harness = TestHarness(test, verbose)
harness.start()
try:
_test.WebIDLTest.__call__(WebIDL.Parser(), harness)
except Exception, ex:
print("TEST-UNEXPECTED-FAIL | Unhandled exception in test %s: %s" % (testpath, ex))
traceback.print_exc()
finally:
harness.finish()
if __name__ == '__main__':
usage = """%prog [OPTIONS] [TESTS]
Where TESTS are relative to the tests directory."""
parser = optparse.OptionParser(usage=usage)
parser.add_option('-q', '--quiet', action='store_false', dest='verbose', default=True,
help="Don't print passing tests.")
options, tests = parser.parse_args()
run_tests(tests, verbose=options.verbose)
|
lailongwei/llbc
|
refs/heads/master
|
tools/building_script/lu_postbuild.py
|
1
|
# -*- coding: utf-8 -*-
"""
lullbc编译后执行脚本
"""
import sys
from os import path as op
import shutil
from c import Cfg
def main():
# 复制llbc.lua到目标目录
target_path = sys.argv[3]
is_debug = sys.argv[2].upper() == 'DEBUG' or False
src = op.join(Cfg.getscriptpath(), 'llbc.lua')
if not is_debug:
target = op.join(target_path, 'llbc.lua')
else:
target = op.join(target_path, 'llbc_debug.lua')
print 'Copy [{0}] to [{1}]...'.format(src, target)
shutil.move(src, target)
print 'Done!'
if __name__ == '__main__':
main()
|
vvuk/servo
|
refs/heads/master
|
components/script/dom/bindings/codegen/parser/tests/test_enum_duplicate_values.py
|
276
|
import WebIDL
def WebIDLTest(parser, harness):
try:
parser.parse("""
enum TestEnumDuplicateValue {
"",
""
};
""")
harness.ok(False, "Should have thrown!")
except:
harness.ok(True, "Enum TestEnumDuplicateValue should throw")
|
heeraj123/oh-mainline
|
refs/heads/master
|
vendor/packages/Django/tests/regressiontests/views/generic_urls.py
|
44
|
# -*- coding:utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import patterns, url
from django.views.generic import RedirectView
from . import views
from .models import Article, DateArticle, UrlArticle
date_based_info_dict = {
'queryset': Article.objects.all(),
'date_field': 'date_created',
'month_format': '%m',
}
object_list_dict = {
'queryset': Article.objects.all(),
'paginate_by': 2,
}
object_list_no_paginate_by = {
'queryset': Article.objects.all(),
}
numeric_days_info_dict = dict(date_based_info_dict, day_format='%d')
date_based_datefield_info_dict = dict(date_based_info_dict, queryset=DateArticle.objects.all())
urlpatterns = patterns('',
(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),
(r'^accounts/logout/$', 'django.contrib.auth.views.logout'),
# Special URLs for particular regression cases.
url('^中文/$', 'regressiontests.views.views.redirect'),
url('^中文/target/$', 'regressiontests.views.views.index_page'),
)
# redirects, both temporary and permanent, with non-ASCII targets
urlpatterns += patterns('',
('^nonascii_redirect/$', RedirectView.as_view(
url='/中文/target/', permanent=False)),
('^permanent_nonascii_redirect/$', RedirectView.as_view(
url='/中文/target/', permanent=True)),
)
urlpatterns += patterns('regressiontests.views.views',
(r'^shortcuts/render_to_response/$', 'render_to_response_view'),
(r'^shortcuts/render_to_response/request_context/$', 'render_to_response_view_with_request_context'),
(r'^shortcuts/render_to_response/content_type/$', 'render_to_response_view_with_content_type'),
(r'^shortcuts/render/$', 'render_view'),
(r'^shortcuts/render/base_context/$', 'render_view_with_base_context'),
(r'^shortcuts/render/content_type/$', 'render_view_with_content_type'),
(r'^shortcuts/render/status/$', 'render_view_with_status'),
(r'^shortcuts/render/current_app/$', 'render_view_with_current_app'),
(r'^shortcuts/render/current_app_conflict/$', 'render_view_with_current_app_conflict'),
)
|
franekp/ankidict
|
refs/heads/master
|
ankidict/thirdparty/cherrypy/lib/httputil.py
|
6
|
"""HTTP library functions.
This module contains functions for building an HTTP application
framework: any one, not just one whose name starts with "Ch". ;) If you
reference any modules from some popular framework inside *this* module,
FuManChu will personally hang you up by your thumbs and submit you
to a public caning.
"""
from binascii import b2a_base64
import six
from cherrypy._cpcompat import BaseHTTPRequestHandler, HTTPDate, ntob, ntou
from cherrypy._cpcompat import basestring, iteritems
from cherrypy._cpcompat import reversed, sorted, unquote_qs
response_codes = BaseHTTPRequestHandler.responses.copy()
# From https://github.com/cherrypy/cherrypy/issues/361
response_codes[500] = ('Internal Server Error',
'The server encountered an unexpected condition '
'which prevented it from fulfilling the request.')
response_codes[503] = ('Service Unavailable',
'The server is currently unable to handle the '
'request due to a temporary overloading or '
'maintenance of the server.')
import re
from cgi import parse_header
def urljoin(*atoms):
"""Return the given path \*atoms, joined into a single URL.
This will correctly join a SCRIPT_NAME and PATH_INFO into the
original URL, even if either atom is blank.
"""
url = "/".join([x for x in atoms if x])
while "//" in url:
url = url.replace("//", "/")
# Special-case the final url of "", and return "/" instead.
return url or "/"
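# A minimal sketch of how urljoin() behaves; the atoms below are invented
# example values, not anything mandated by the surrounding module.
def _urljoin_example():
    # SCRIPT_NAME and PATH_INFO pieces are joined and duplicate slashes collapsed.
    assert urljoin("/app", "/users/", "42") == "/app/users/42"
    # Blank atoms disappear, and an entirely empty join yields "/".
    assert urljoin("", "") == "/"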
def urljoin_bytes(*atoms):
"""Return the given path *atoms, joined into a single URL.
This will correctly join a SCRIPT_NAME and PATH_INFO into the
original URL, even if either atom is blank.
"""
url = ntob("/").join([x for x in atoms if x])
while ntob("//") in url:
url = url.replace(ntob("//"), ntob("/"))
# Special-case the final url of "", and return "/" instead.
return url or ntob("/")
def protocol_from_http(protocol_str):
"""Return a protocol tuple from the given 'HTTP/x.y' string."""
return int(protocol_str[5]), int(protocol_str[7])
def get_ranges(headervalue, content_length):
"""Return a list of (start, stop) indices from a Range header, or None.
Each (start, stop) tuple will be composed of two ints, which are suitable
for use in a slicing operation. That is, the header "Range: bytes=3-6",
if applied against a Python string, is requesting resource[3:7]. This
function will return the list [(3, 7)].
If this function returns an empty list, you should return HTTP 416.
"""
if not headervalue:
return None
result = []
bytesunit, byteranges = headervalue.split("=", 1)
for brange in byteranges.split(","):
start, stop = [x.strip() for x in brange.split("-", 1)]
if start:
if not stop:
stop = content_length - 1
start, stop = int(start), int(stop)
if start >= content_length:
# From rfc 2616 sec 14.16:
# "If the server receives a request (other than one
# including an If-Range request-header field) with an
# unsatisfiable Range request-header field (that is,
# all of whose byte-range-spec values have a first-byte-pos
# value greater than the current length of the selected
# resource), it SHOULD return a response code of 416
# (Requested range not satisfiable)."
continue
if stop < start:
# From rfc 2616 sec 14.16:
# "If the server ignores a byte-range-spec because it
# is syntactically invalid, the server SHOULD treat
# the request as if the invalid Range header field
# did not exist. (Normally, this means return a 200
# response containing the full entity)."
return None
result.append((start, stop + 1))
else:
if not stop:
# See rfc quote above.
return None
# Negative subscript (last N bytes)
#
# RFC 2616 Section 14.35.1:
# If the entity is shorter than the specified suffix-length,
# the entire entity-body is used.
if int(stop) > content_length:
result.append((0, content_length))
else:
result.append((content_length - int(stop), content_length))
return result
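# A small illustration of get_ranges(); the header values and content lengths
# are invented inputs chosen to mirror the docstring above.
def _get_ranges_example():
    # "bytes=3-6" against an 8-byte resource asks for resource[3:7].
    assert get_ranges("bytes=3-6", 8) == [(3, 7)]
    # A suffix range ("last 4 bytes") of a 10-byte resource is resource[6:10].
    assert get_ranges("bytes=-4", 10) == [(6, 10)]
    # A range starting past the end is unsatisfiable: the empty list means 416.
    assert get_ranges("bytes=500-600", 8) == []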
class HeaderElement(object):
"""An element (with parameters) from an HTTP header's element list."""
def __init__(self, value, params=None):
self.value = value
if params is None:
params = {}
self.params = params
def __cmp__(self, other):
return cmp(self.value, other.value)
def __lt__(self, other):
return self.value < other.value
def __str__(self):
p = [";%s=%s" % (k, v) for k, v in iteritems(self.params)]
return str("%s%s" % (self.value, "".join(p)))
def __bytes__(self):
return ntob(self.__str__())
def __unicode__(self):
return ntou(self.__str__())
def parse(elementstr):
"""Transform 'token;key=val' to ('token', {'key': 'val'})."""
initial_value, params = parse_header(elementstr)
return initial_value, params
parse = staticmethod(parse)
def from_str(cls, elementstr):
"""Construct an instance from a string of the form 'token;key=val'."""
ival, params = cls.parse(elementstr)
return cls(ival, params)
from_str = classmethod(from_str)
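# A quick, hedged sketch of HeaderElement parsing; the header fragment below is
# just an example value, not a fixture used elsewhere in this module.
def _header_element_example():
    elem = HeaderElement.from_str('attachment; filename="report.csv"')
    assert elem.value == "attachment"
    assert elem.params == {"filename": "report.csv"}
    # __str__ re-serialises the element as 'token;key=val'.
    assert str(elem) == "attachment;filename=report.csv"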
q_separator = re.compile(r'; *q *=')
class AcceptElement(HeaderElement):
"""An element (with parameters) from an Accept* header's element list.
AcceptElement objects are comparable; the more-preferred object will be
"less than" the less-preferred object. They are also therefore sortable;
if you sort a list of AcceptElement objects, they will be listed in
priority order; the most preferred value will be first. Yes, it should
have been the other way around, but it's too late to fix now.
"""
def from_str(cls, elementstr):
qvalue = None
# The first "q" parameter (if any) separates the initial
# media-range parameter(s) (if any) from the accept-params.
atoms = q_separator.split(elementstr, 1)
media_range = atoms.pop(0).strip()
if atoms:
# The qvalue for an Accept header can have extensions. The other
# headers cannot, but it's easier to parse them as if they did.
qvalue = HeaderElement.from_str(atoms[0].strip())
media_type, params = cls.parse(media_range)
if qvalue is not None:
params["q"] = qvalue
return cls(media_type, params)
from_str = classmethod(from_str)
def qvalue(self):
val = self.params.get("q", "1")
if isinstance(val, HeaderElement):
val = val.value
return float(val)
qvalue = property(qvalue, doc="The qvalue, or priority, of this value.")
def __cmp__(self, other):
diff = cmp(self.qvalue, other.qvalue)
if diff == 0:
diff = cmp(str(self), str(other))
return diff
def __lt__(self, other):
if self.qvalue == other.qvalue:
return str(self) < str(other)
else:
return self.qvalue < other.qvalue
RE_HEADER_SPLIT = re.compile(',(?=(?:[^"]*"[^"]*")*[^"]*$)')
def header_elements(fieldname, fieldvalue):
"""Return a sorted HeaderElement list from a comma-separated header string.
"""
if not fieldvalue:
return []
result = []
for element in RE_HEADER_SPLIT.split(fieldvalue):
if fieldname.startswith("Accept") or fieldname == 'TE':
hv = AcceptElement.from_str(element)
else:
hv = HeaderElement.from_str(element)
result.append(hv)
return list(reversed(sorted(result)))
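# A hedged sketch of header_elements() ordering; the Accept value is a made-up
# example, not anything required by this module.
def _header_elements_example():
    elems = header_elements("Accept", "text/plain;q=0.5, application/json")
    # The returned list is most-preferred first: the implicit q=1 beats q=0.5.
    assert [e.value for e in elems] == ["application/json", "text/plain"]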
def decode_TEXT(value):
r"""Decode :rfc:`2047` TEXT (e.g. "=?utf-8?q?f=C3=BCr?=" -> "f\xfcr")."""
try:
# Python 3
from email.header import decode_header
except ImportError:
from email.Header import decode_header
atoms = decode_header(value)
decodedvalue = ""
for atom, charset in atoms:
if charset is not None:
atom = atom.decode(charset)
decodedvalue += atom
return decodedvalue
def valid_status(status):
"""Return legal HTTP status Code, Reason-phrase and Message.
The status arg must be an int, or a str that begins with an int.
If status is an int, or a str and no reason-phrase is supplied,
a default reason-phrase will be provided.
"""
if not status:
status = 200
status = str(status)
parts = status.split(" ", 1)
if len(parts) == 1:
# No reason supplied.
code, = parts
reason = None
else:
code, reason = parts
reason = reason.strip()
try:
code = int(code)
except ValueError:
raise ValueError("Illegal response status from server "
"(%s is non-numeric)." % repr(code))
if code < 100 or code > 599:
raise ValueError("Illegal response status from server "
"(%s is out of range)." % repr(code))
if code not in response_codes:
# code is unknown but not illegal
default_reason, message = "", ""
else:
default_reason, message = response_codes[code]
if reason is None:
reason = default_reason
return code, reason, message
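# A brief sketch of valid_status(); the inputs are illustrative only.
def _valid_status_example():
    code, reason, message = valid_status(404)
    # An int status gets the default reason-phrase for that code.
    assert (code, reason) == (404, "Not Found")
    # A custom reason-phrase in the status string is preserved.
    assert valid_status("503 Catching Breath")[:2] == (503, "Catching Breath")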
# NOTE: the parse_qs functions that follow are modified version of those
# in the python3.0 source - we need to pass through an encoding to the unquote
# method, but the default parse_qs function doesn't allow us to. These do.
def _parse_qs(qs, keep_blank_values=0, strict_parsing=0, encoding='utf-8'):
"""Parse a query given as a string argument.
Arguments:
qs: URL-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
URL encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
strict_parsing: flag indicating what to do with parsing errors. If
false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
Returns a dict, as G-d intended.
"""
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
d = {}
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError("bad query field: %r" % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = unquote_qs(nv[0], encoding)
value = unquote_qs(nv[1], encoding)
if name in d:
if not isinstance(d[name], list):
d[name] = [d[name]]
d[name].append(value)
else:
d[name] = value
return d
image_map_pattern = re.compile(r"[0-9]+,[0-9]+")
def parse_query_string(query_string, keep_blank_values=True, encoding='utf-8'):
"""Build a params dictionary from a query_string.
Duplicate key/value pairs in the provided query_string will be
returned as {'key': [val1, val2, ...]}. Single key/values will
be returned as strings: {'key': 'value'}.
"""
if image_map_pattern.match(query_string):
# Server-side image map. Map the coords to 'x' and 'y'
# (like CGI::Request does).
pm = query_string.split(",")
pm = {'x': int(pm[0]), 'y': int(pm[1])}
else:
pm = _parse_qs(query_string, keep_blank_values, encoding=encoding)
return pm
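# A short, hedged example of parse_query_string(); the query strings are
# arbitrary sample inputs.
def _parse_query_string_example():
    # Duplicate keys collapse into a list, single keys stay plain strings.
    assert parse_query_string("a=1&a=2&b=3") == {"a": ["1", "2"], "b": "3"}
    # A bare "x,y" pair is treated as a server-side image map.
    assert parse_query_string("12,34") == {"x": 12, "y": 34}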
class CaseInsensitiveDict(dict):
"""A case-insensitive dict subclass.
Each key is changed on entry to str(key).title().
"""
def __getitem__(self, key):
return dict.__getitem__(self, str(key).title())
def __setitem__(self, key, value):
dict.__setitem__(self, str(key).title(), value)
def __delitem__(self, key):
dict.__delitem__(self, str(key).title())
def __contains__(self, key):
return dict.__contains__(self, str(key).title())
def get(self, key, default=None):
return dict.get(self, str(key).title(), default)
if hasattr({}, 'has_key'):
def has_key(self, key):
return str(key).title() in self
def update(self, E):
for k in E.keys():
self[str(k).title()] = E[k]
def fromkeys(cls, seq, value=None):
newdict = cls()
for k in seq:
newdict[str(k).title()] = value
return newdict
fromkeys = classmethod(fromkeys)
def setdefault(self, key, x=None):
key = str(key).title()
try:
return self[key]
except KeyError:
self[key] = x
return x
def pop(self, key, default):
return dict.pop(self, str(key).title(), default)
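# A tiny sketch of the title-casing behaviour described in the docstring above;
# the header name is an arbitrary example.
def _case_insensitive_dict_example():
    d = CaseInsensitiveDict()
    d["content-TYPE"] = "text/html"
    # Every spelling of the key reaches the same Title-Cased entry.
    assert d["Content-Type"] == "text/html"
    assert "CONTENT-TYPE" in d and list(d.keys()) == ["Content-Type"]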
# From RFC 2616 section 2.2:
# "TEXT = <any OCTET except CTLs, but including LWS>
#
# A CRLF is allowed in the definition of TEXT only as part of a header
# field continuation. It is expected that the folding LWS will be
# replaced with a single SP before interpretation of the TEXT value."
if str == bytes:
header_translate_table = ''.join([chr(i) for i in xrange(256)])
header_translate_deletechars = ''.join(
[chr(i) for i in xrange(32)]) + chr(127)
else:
header_translate_table = None
header_translate_deletechars = bytes(range(32)) + bytes([127])
class HeaderMap(CaseInsensitiveDict):
"""A dict subclass for HTTP request and response headers.
Each key is changed on entry to str(key).title(). This allows headers
to be case-insensitive and avoid duplicates.
Values are header values (decoded according to :rfc:`2047` if necessary).
"""
protocol = (1, 1)
encodings = ["ISO-8859-1"]
# Someday, when http-bis is done, this will probably get dropped
# since few servers, clients, or intermediaries do it. But until then,
# we're going to obey the spec as is.
# "Words of *TEXT MAY contain characters from character sets other than
# ISO-8859-1 only when encoded according to the rules of RFC 2047."
use_rfc_2047 = True
def elements(self, key):
"""Return a sorted list of HeaderElements for the given header."""
key = str(key).title()
value = self.get(key)
return header_elements(key, value)
def values(self, key):
"""Return a sorted list of HeaderElement.value for the given header."""
return [e.value for e in self.elements(key)]
def output(self):
"""Transform self into a list of (name, value) tuples."""
return list(self.encode_header_items(self.items()))
def encode_header_items(cls, header_items):
"""
Prepare the sequence of name, value tuples into a form suitable for
transmitting on the wire for HTTP.
"""
for k, v in header_items:
if isinstance(k, six.text_type):
k = cls.encode(k)
if not isinstance(v, basestring):
v = str(v)
if isinstance(v, six.text_type):
v = cls.encode(v)
# See header_translate_* constants above.
# Replace only if you really know what you're doing.
k = k.translate(header_translate_table,
header_translate_deletechars)
v = v.translate(header_translate_table,
header_translate_deletechars)
yield (k, v)
encode_header_items = classmethod(encode_header_items)
def encode(cls, v):
"""Return the given header name or value, encoded for HTTP output."""
for enc in cls.encodings:
try:
return v.encode(enc)
except UnicodeEncodeError:
continue
if cls.protocol == (1, 1) and cls.use_rfc_2047:
# Encode RFC-2047 TEXT
# (e.g. u"\u8200" -> "=?utf-8?b?6IiA?=").
# We do our own here instead of using the email module
# because we never want to fold lines--folding has
# been deprecated by the HTTP working group.
v = b2a_base64(v.encode('utf-8'))
return (ntob('=?utf-8?b?') + v.strip(ntob('\n')) + ntob('?='))
raise ValueError("Could not encode header part %r using "
"any of the encodings %r." %
(v, cls.encodings))
encode = classmethod(encode)
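# A hedged illustration of HeaderMap.elements()/values(); the Accept-Charset
# value below is a made-up example.
def _header_map_example():
    h = HeaderMap()
    h["Accept-Charset"] = "iso-8859-5, unicode-1-1;q=0.8"
    # elements() parses and sorts the header, most preferred first;
    # values() returns just the element values in that order.
    assert h.values("Accept-Charset") == ["iso-8859-5", "unicode-1-1"]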
class Host(object):
"""An internet address.
name
Should be the client's host name. If not available (because no DNS
lookup is performed), the IP address should be used instead.
"""
ip = "0.0.0.0"
port = 80
name = "unknown.tld"
def __init__(self, ip, port, name=None):
self.ip = ip
self.port = port
if name is None:
name = ip
self.name = name
def __repr__(self):
return "httputil.Host(%r, %r, %r)" % (self.ip, self.port, self.name)
|
Azulinho/ansible
|
refs/heads/devel
|
test/units/modules/network/netscaler/test_netscaler_server.py
|
16
|
# Copyright (c) 2017 Citrix Systems
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from ansible.compat.tests.mock import patch, Mock, MagicMock, call
from units.modules.utils import set_module_args
from .netscaler_module import TestModule, nitro_base_patcher
import sys
if sys.version_info[:2] != (2, 6):
import requests
class TestNetscalerServerModule(TestModule):
@classmethod
def setUpClass(cls):
class MockException(Exception):
pass
cls.MockException = MockException
m = MagicMock()
cls.server_mock = MagicMock()
cls.server_mock.__class__ = MagicMock(add=Mock())
nssrc_modules_mock = {
'nssrc.com.citrix.netscaler.nitro.resource.config.basic': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.basic.server': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.basic.server.server': cls.server_mock,
}
cls.nitro_specific_patcher = patch.dict(sys.modules, nssrc_modules_mock)
cls.nitro_base_patcher = nitro_base_patcher
@classmethod
def tearDownClass(cls):
cls.nitro_base_patcher.stop()
cls.nitro_specific_patcher.stop()
def setUp(self):
super(TestNetscalerServerModule, self).setUp()
self.nitro_base_patcher.start()
self.nitro_specific_patcher.start()
# Setup minimal required arguments to pass AnsibleModule argument parsing
def tearDown(self):
super(TestNetscalerServerModule, self).tearDown()
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
def test_graceful_nitro_api_import_error(self):
# Stop nitro api patching to cause ImportError
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
from ansible.modules.network.netscaler import netscaler_server
self.module = netscaler_server
result = self.failed()
self.assertEqual(result['msg'], 'Could not load nitro python sdk')
def test_graceful_nitro_error_on_login(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_server
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
client_mock = Mock()
client_mock.login = Mock(side_effect=MockException)
m = Mock(return_value=client_mock)
with patch('ansible.modules.network.netscaler.netscaler_server.get_nitro_client', m):
with patch('ansible.modules.network.netscaler.netscaler_server.nitro_exception', MockException):
self.module = netscaler_server
result = self.failed()
self.assertTrue(result['msg'].startswith('nitro exception'), msg='nitro exception during login not handled properly')
def test_graceful_no_connection_error(self):
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_server
class MockException(Exception):
pass
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.ConnectionError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_server',
get_nitro_client=m,
nitro_exception=MockException,
):
self.module = netscaler_server
result = self.failed()
self.assertTrue(result['msg'].startswith('Connection error'), msg='Connection error was not handled gracefully')
def test_graceful_login_error(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_server
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
class MockException(Exception):
pass
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.SSLError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_server',
get_nitro_client=m,
nitro_exception=MockException,
):
self.module = netscaler_server
result = self.failed()
self.assertTrue(result['msg'].startswith('SSL Error'), msg='SSL Error was not handled gracefully')
def test_save_config_called_on_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_server
client_mock = Mock()
m = Mock(return_value=client_mock)
server_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_server',
get_nitro_client=m,
server_exists=Mock(side_effect=[False, True]),
ConfigProxy=Mock(return_value=server_proxy_mock),
do_state_change=Mock(return_value=Mock(errorcode=0))
):
self.module = netscaler_server
self.exited()
self.assertIn(call.save_config(), client_mock.mock_calls)
def test_save_config_called_on_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_server
client_mock = Mock()
m = Mock(return_value=client_mock)
server_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_server',
get_nitro_client=m,
server_exists=Mock(side_effect=[True, False]),
ConfigProxy=Mock(return_value=server_proxy_mock),
do_state_change=Mock(return_value=Mock(errorcode=0))
):
self.module = netscaler_server
self.exited()
self.assertIn(call.save_config(), client_mock.mock_calls)
def test_save_config_not_called_on_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_server
client_mock = Mock()
m = Mock(return_value=client_mock)
server_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_server',
get_nitro_client=m,
server_exists=Mock(side_effect=[False, True]),
ConfigProxy=Mock(return_value=server_proxy_mock),
do_state_change=Mock(return_value=Mock(errorcode=0))
):
self.module = netscaler_server
self.exited()
self.assertNotIn(call.save_config(), client_mock.mock_calls)
def test_save_config_not_called_on_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='absent',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_server
client_mock = Mock()
m = Mock(return_value=client_mock)
server_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_server',
get_nitro_client=m,
server_exists=Mock(side_effect=[True, False]),
ConfigProxy=Mock(return_value=server_proxy_mock),
do_state_change=Mock(return_value=Mock(errorcode=0))
):
self.module = netscaler_server
self.exited()
self.assertNotIn(call.save_config(), client_mock.mock_calls)
def test_do_state_change_fail(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_server
client_mock = Mock()
m = Mock(return_value=client_mock)
server_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_server',
nitro_exception=self.MockException,
get_nitro_client=m,
server_exists=Mock(side_effect=[True, False]),
ConfigProxy=Mock(return_value=server_proxy_mock),
do_state_change=Mock(return_value=Mock(errorcode=1, message='Failed on purpose'))
):
self.module = netscaler_server
result = self.failed()
self.assertEqual(result['msg'], 'Error when setting disabled state. errorcode: 1 message: Failed on purpose')
def test_new_server_execution_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_server
client_mock = Mock()
m = Mock(return_value=client_mock)
server_proxy_attrs = {
'diff_object.return_value': {},
}
server_proxy_mock = Mock()
server_proxy_mock.configure_mock(**server_proxy_attrs)
config_proxy_mock = Mock(return_value=server_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_server',
get_nitro_client=m,
server_exists=Mock(side_effect=[False, True]),
server_identical=Mock(side_effect=[True]),
ConfigProxy=config_proxy_mock,
do_state_change=Mock(return_value=Mock(errorcode=0))
):
self.module = netscaler_server
self.exited()
server_proxy_mock.assert_has_calls([call.add()])
def test_modified_server_execution_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_server
client_mock = Mock()
m = Mock(return_value=client_mock)
server_proxy_attrs = {
'diff_object.return_value': {},
}
server_proxy_mock = Mock()
server_proxy_mock.configure_mock(**server_proxy_attrs)
config_proxy_mock = Mock(return_value=server_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_server',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
server_exists=Mock(side_effect=[True, True]),
server_identical=Mock(side_effect=[False, True]),
ConfigProxy=config_proxy_mock,
do_state_change=Mock(return_value=Mock(errorcode=0))
):
self.module = netscaler_server
self.exited()
server_proxy_mock.assert_has_calls([call.update()])
def test_absent_server_execution_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_server
client_mock = Mock()
m = Mock(return_value=client_mock)
server_proxy_attrs = {
'diff_object.return_value': {},
}
server_proxy_mock = Mock()
server_proxy_mock.configure_mock(**server_proxy_attrs)
config_proxy_mock = Mock(return_value=server_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_server',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
server_exists=Mock(side_effect=[True, False]),
server_identical=Mock(side_effect=[False, True]),
ConfigProxy=config_proxy_mock,
do_state_change=Mock(return_value=Mock(errorcode=0))
):
self.module = netscaler_server
self.exited()
server_proxy_mock.assert_has_calls([call.delete()])
def test_present_server_identical_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_server
client_mock = Mock()
m = Mock(return_value=client_mock)
server_proxy_attrs = {
'diff_object.return_value': {},
}
server_proxy_mock = Mock()
server_proxy_mock.configure_mock(**server_proxy_attrs)
config_proxy_mock = Mock(return_value=server_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_server',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
server_exists=Mock(side_effect=[True, True]),
server_identical=Mock(side_effect=[True, True]),
ConfigProxy=config_proxy_mock,
do_state_change=Mock(return_value=Mock(errorcode=0))
):
self.module = netscaler_server
self.exited()
server_proxy_mock.assert_not_called()
def test_absent_server_noop_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_server
client_mock = Mock()
m = Mock(return_value=client_mock)
server_proxy_attrs = {
'diff_object.return_value': {},
}
server_proxy_mock = Mock()
server_proxy_mock.configure_mock(**server_proxy_attrs)
config_proxy_mock = Mock(return_value=server_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_server',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
server_exists=Mock(side_effect=[False, False]),
server_identical=Mock(side_effect=[False, False]),
ConfigProxy=config_proxy_mock,
do_state_change=Mock(return_value=Mock(errorcode=0))
):
self.module = netscaler_server
self.exited()
server_proxy_mock.assert_not_called()
def test_present_server_failed_update(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_server
client_mock = Mock()
m = Mock(return_value=client_mock)
server_proxy_attrs = {
'diff_object.return_value': {},
}
server_proxy_mock = Mock()
server_proxy_mock.configure_mock(**server_proxy_attrs)
config_proxy_mock = Mock(return_value=server_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_server',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
server_exists=Mock(side_effect=[True, True]),
server_identical=Mock(side_effect=[False, False]),
ConfigProxy=config_proxy_mock,
do_state_change=Mock(return_value=Mock(errorcode=0))
):
self.module = netscaler_server
result = self.failed()
self.assertEqual(result['msg'], 'Server is not configured according to parameters given')
self.assertTrue(result['failed'])
def test_present_server_failed_create(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_server
client_mock = Mock()
m = Mock(return_value=client_mock)
server_proxy_attrs = {
'diff_object.return_value': {},
}
server_proxy_mock = Mock()
server_proxy_mock.configure_mock(**server_proxy_attrs)
config_proxy_mock = Mock(return_value=server_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_server',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
server_exists=Mock(side_effect=[False, False]),
server_identical=Mock(side_effect=[False, False]),
ConfigProxy=config_proxy_mock,
do_state_change=Mock(return_value=Mock(errorcode=0))
):
self.module = netscaler_server
result = self.failed()
self.assertEqual(result['msg'], 'Server does not seem to exist')
self.assertTrue(result['failed'])
def test_present_server_update_immutable_attribute(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_server
client_mock = Mock()
m = Mock(return_value=client_mock)
server_proxy_attrs = {
'diff_object.return_value': {},
}
server_proxy_mock = Mock()
server_proxy_mock.configure_mock(**server_proxy_attrs)
config_proxy_mock = Mock(return_value=server_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_server',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=['domain']),
server_exists=Mock(side_effect=[True, True]),
server_identical=Mock(side_effect=[False, False]),
ConfigProxy=config_proxy_mock,
do_state_change=Mock(return_value=Mock(errorcode=0))
):
self.module = netscaler_server
result = self.failed()
self.assertEqual(result['msg'], 'Cannot update immutable attributes [\'domain\']')
self.assertTrue(result['failed'])
def test_absent_server_failed_delete(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_server
client_mock = Mock()
m = Mock(return_value=client_mock)
server_proxy_attrs = {
'diff_object.return_value': {},
}
server_proxy_mock = Mock()
server_proxy_mock.configure_mock(**server_proxy_attrs)
config_proxy_mock = Mock(return_value=server_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_server',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
server_exists=Mock(side_effect=[True, True]),
server_identical=Mock(side_effect=[False, False]),
ConfigProxy=config_proxy_mock,
do_state_change=Mock(return_value=Mock(errorcode=0))
):
self.module = netscaler_server
result = self.failed()
self.assertEqual(result['msg'], 'Server seems to be present')
self.assertTrue(result['failed'])
def test_graceful_nitro_exception_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_server
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
m = Mock(side_effect=MockException)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_server',
server_exists=m,
nitro_exception=MockException
):
self.module = netscaler_server
result = self.failed()
self.assertTrue(
result['msg'].startswith('nitro exception'),
msg='Nitro exception not caught on operation absent'
)
def test_graceful_nitro_exception_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_server
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
m = Mock(side_effect=MockException)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_server',
server_exists=m,
nitro_exception=MockException
):
self.module = netscaler_server
result = self.failed()
self.assertTrue(
result['msg'].startswith('nitro exception'),
msg='Nitro exception not caught on operation absent'
)
|
AzCiS/autorest
|
refs/heads/master
|
src/client/Python/msrestazure/msrestazure/azure_configuration.py
|
13
|
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
try:
from configparser import NoOptionError
except ImportError:
from ConfigParser import NoOptionError
from .version import msrestazure_version
from msrest import Configuration
from msrest.exceptions import raise_with_traceback
class AzureConfiguration(Configuration):
"""Azure specific client configuration.
:param str base_url: REST Service base URL.
:param str filepath: Path to an existing config file (optional).
"""
def __init__(self, base_url, filepath=None):
super(AzureConfiguration, self).__init__(base_url, filepath)
self.long_running_operation_timeout = 30
self.add_user_agent("msrest_azure/{}".format(msrestazure_version))
def save(self, filepath):
"""Save current configuration to file.
:param str filepath: Path to save file to.
:raises: ValueError if supplied filepath cannot be written to.
:rtype: None
"""
self._config.add_section("Azure")
self._config.set("Azure",
"long_running_operation_timeout",
self.long_running_operation_timeout)
return super(AzureConfiguration, self).save(filepath)
def load(self, filepath):
"""Load configuration from existing file.
:param str filepath: Path to existing config file.
:raises: ValueError if supplied config file is invalid.
:rtype: None
"""
try:
self._config.read(filepath)
self.long_running_operation_timeout = self._config.getint(
"Azure", "long_running_operation_timeout")
except (ValueError, EnvironmentError, NoOptionError):
msg = "Supplied config file incompatible"
raise_with_traceback(ValueError, msg)
finally:
self._clear_config()
return super(AzureConfiguration, self).load(filepath)
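# A minimal, hedged usage sketch; the base URL and file name below are
# placeholder values, not anything required by this module.
def _azure_configuration_example():
    config = AzureConfiguration("https://management.azure.com")
    config.long_running_operation_timeout = 60
    # save() persists the [Azure] section alongside the base msrest settings;
    # load() restores it and raises ValueError for an incompatible file.
    config.save("azure_client.cfg")
    restored = AzureConfiguration("https://management.azure.com")
    restored.load("azure_client.cfg")
    return restored.long_running_operation_timeout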
|
iamkingmaker/trading-with-python
|
refs/heads/master
|
cookbook/ib_streamQuotes.py
|
77
|
'''
Copyright: Jev Kuznetsov
License: BSD
Demonstration of how to stream quotes from IB.
This script will subscribe to SPY and stream quotes to the screen for 10 seconds.
'''
from time import sleep
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
def price_tick_handler(msg):
""" function to handle price ticks """
print msg
#--------------main script------------------
tws = ibConnection() # create connection object
tws.register(price_tick_handler, message.TickPrice) # register handler
tws.connect() # connect to API
#-------create contract and subscribe to data
c = Contract()
c.m_symbol = "SPY"
c.m_secType= "STK"
c.m_exchange = "SMART"
c.m_currency = "USD"
tws.reqMktData(1,c,"",False) # request market data
#-------print data for 10 seconds, then close
sleep(10)
print 'All done'
tws.disconnect()
|
vbannai/neutron
|
refs/heads/master
|
neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py
|
3
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, Cisco Systems, Inc.
# @author: Edgar Magana, Cisco Systems, Inc.
# @author: Arvind Somya, Cisco Systems, Inc. (asomya@cisco.com)
#
"""
PlugIn for Nexus OS driver
"""
import logging
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.plugins.cisco.common import cisco_constants as const
from neutron.plugins.cisco.common import cisco_exceptions as cisco_exc
from neutron.plugins.cisco.common import config as conf
from neutron.plugins.cisco.db import network_db_v2 as cdb
from neutron.plugins.cisco.db import nexus_db_v2 as nxos_db
from neutron.plugins.cisco import l2device_plugin_base
LOG = logging.getLogger(__name__)
class NexusPlugin(l2device_plugin_base.L2DevicePluginBase):
"""Nexus PlugIn Main Class."""
_networks = {}
def __init__(self):
"""Extract configuration parameters from the configuration file."""
self._client = importutils.import_object(conf.CISCO.nexus_driver)
LOG.debug(_("Loaded driver %s"), conf.CISCO.nexus_driver)
self._nexus_switches = conf.get_device_dictionary()
def create_network(self, network, attachment):
"""Create or update a network when an attachment is changed.
This method is not invoked at the usual plugin create_network() time.
Instead, it is invoked on create/update port.
:param network: Network on which the port operation is happening
:param attachment: Details about the owner of the port
Create a VLAN in the appropriate switch/port, and configure the
appropriate interfaces for this VLAN.
"""
LOG.debug(_("NexusPlugin:create_network() called"))
# Grab the switch IPs and ports for this host
host_connections = []
host = attachment['host_name']
for switch_type, switch_ip, attr in self._nexus_switches:
if str(attr) == str(host):
port = self._nexus_switches[switch_type, switch_ip, attr]
# Get ether type for port, assume an ethernet type
# if none specified.
if ':' in port:
etype, port_id = port.split(':')
else:
etype, port_id = 'ethernet', port
host_connections.append((switch_ip, etype, port_id))
if not host_connections:
raise cisco_exc.NexusComputeHostNotConfigured(host=host)
vlan_id = network[const.NET_VLAN_ID]
vlan_name = network[const.NET_VLAN_NAME]
auto_create = True
auto_trunk = True
if cdb.is_provider_vlan(vlan_id):
vlan_name = ''.join([conf.CISCO.provider_vlan_name_prefix,
str(vlan_id)])
auto_create = conf.CISCO.provider_vlan_auto_create
auto_trunk = conf.CISCO.provider_vlan_auto_trunk
# Check if this network is already in the DB
for switch_ip, etype, port_id in host_connections:
vlan_created = False
vlan_trunked = False
eport_id = '%s:%s' % (etype, port_id)
# Check for switch vlan bindings
try:
# This vlan has already been created on this switch
# via another operation, like SVI bindings.
nxos_db.get_nexusvlan_binding(vlan_id, switch_ip)
vlan_created = True
auto_create = False
except cisco_exc.NexusPortBindingNotFound:
# No changes, proceed as normal
pass
try:
nxos_db.get_port_vlan_switch_binding(eport_id, vlan_id,
switch_ip)
except cisco_exc.NexusPortBindingNotFound:
if auto_create and auto_trunk:
# Create vlan and trunk vlan on the port
LOG.debug(_("Nexus: create & trunk vlan %s"), vlan_name)
self._client.create_and_trunk_vlan(
switch_ip, vlan_id, vlan_name, etype, port_id)
vlan_created = True
vlan_trunked = True
elif auto_create:
# Create vlan but do not trunk it on the port
LOG.debug(_("Nexus: create vlan %s"), vlan_name)
self._client.create_vlan(switch_ip, vlan_id, vlan_name)
vlan_created = True
elif auto_trunk:
# Only trunk vlan on the port
LOG.debug(_("Nexus: trunk vlan %s"), vlan_name)
self._client.enable_vlan_on_trunk_int(
switch_ip, vlan_id, etype, port_id)
vlan_trunked = True
try:
instance = attachment[const.INSTANCE_ID]
nxos_db.add_nexusport_binding(eport_id, str(vlan_id),
switch_ip, instance)
except Exception:
with excutils.save_and_reraise_exception():
# Add binding failed, roll back any vlan creation/enabling
if vlan_created and vlan_trunked:
LOG.debug(_("Nexus: delete & untrunk vlan %s"),
vlan_name)
self._client.delete_and_untrunk_vlan(switch_ip,
vlan_id,
etype, port_id)
elif vlan_created:
LOG.debug(_("Nexus: delete vlan %s"), vlan_name)
self._client.delete_vlan(switch_ip, vlan_id)
elif vlan_trunked:
LOG.debug(_("Nexus: untrunk vlan %s"), vlan_name)
self._client.disable_vlan_on_trunk_int(switch_ip,
vlan_id,
etype,
port_id)
net_id = network[const.NET_ID]
new_net_dict = {const.NET_ID: net_id,
const.NET_NAME: network[const.NET_NAME],
const.NET_PORTS: {},
const.NET_VLAN_NAME: vlan_name,
const.NET_VLAN_ID: vlan_id}
self._networks[net_id] = new_net_dict
return new_net_dict
def add_router_interface(self, vlan_name, vlan_id, subnet_id,
gateway_ip, router_id):
"""Create VLAN SVI on the Nexus switch."""
# Find a switch to create the SVI on
switch_ip = self._find_switch_for_svi()
if not switch_ip:
raise cisco_exc.NoNexusSviSwitch()
# Check if this vlan exists on the switch already
try:
nxos_db.get_nexusvlan_binding(vlan_id, switch_ip)
except cisco_exc.NexusPortBindingNotFound:
# Create vlan and trunk vlan on the port
self._client.create_and_trunk_vlan(
switch_ip, vlan_id, vlan_name, etype=None, nexus_port=None)
# Check if a router interface has already been created
try:
nxos_db.get_nexusvm_bindings(vlan_id, router_id)
raise cisco_exc.SubnetInterfacePresent(subnet_id=subnet_id,
router_id=router_id)
except cisco_exc.NexusPortBindingNotFound:
self._client.create_vlan_svi(switch_ip, vlan_id, gateway_ip)
nxos_db.add_nexusport_binding('router', str(vlan_id),
switch_ip, router_id)
return True
def remove_router_interface(self, vlan_id, router_id):
"""Remove VLAN SVI from the Nexus Switch."""
# Grab switch_ip from database
switch_ip = nxos_db.get_nexusvm_bindings(vlan_id,
router_id)[0].switch_ip
# Delete the SVI interface from the switch
self._client.delete_vlan_svi(switch_ip, vlan_id)
# Invoke delete_port to delete this row
# And delete vlan if required
return self.delete_port(router_id, vlan_id)
def _find_switch_for_svi(self):
"""Get a switch to create the SVI on."""
LOG.debug(_("Grabbing a switch to create SVI"))
nexus_switches = self._client.nexus_switches
if conf.CISCO.svi_round_robin:
LOG.debug(_("Using round robin to create SVI"))
switch_dict = dict(
(switch_ip, 0) for switch_ip, _ in nexus_switches)
try:
bindings = nxos_db.get_nexussvi_bindings()
# Build a switch dictionary with weights
for binding in bindings:
switch_ip = binding.switch_ip
if switch_ip not in switch_dict:
switch_dict[switch_ip] = 1
else:
switch_dict[switch_ip] += 1
# Search for the lowest value in the dict
if switch_dict:
switch_ip = min(switch_dict, key=switch_dict.get)
return switch_ip
except cisco_exc.NexusPortBindingNotFound:
pass
LOG.debug(_("No round robin or zero weights, using first switch"))
# Return the first switch in the config
return conf.first_device_ip
def delete_network(self, tenant_id, net_id, **kwargs):
"""Delete network.
Not applicable to Nexus plugin. Defined here to satisfy abstract
method requirements.
"""
LOG.debug(_("NexusPlugin:delete_network() called")) # pragma no cover
def update_network(self, tenant_id, net_id, **kwargs):
"""Update the properties of a particular Virtual Network.
Not applicable to Nexus plugin. Defined here to satisfy abstract
method requirements.
"""
LOG.debug(_("NexusPlugin:update_network() called")) # pragma no cover
def create_port(self, tenant_id, net_id, port_state, port_id, **kwargs):
"""Create port.
Not applicable to Nexus plugin. Defined here to satisfy abstract
method requirements.
"""
LOG.debug(_("NexusPlugin:create_port() called")) # pragma no cover
def delete_port(self, device_id, vlan_id):
"""Delete port.
        Delete port bindings from the database and scan whether the network
        is still required on the trunked interfaces.
"""
LOG.debug(_("NexusPlugin:delete_port() called"))
# Delete DB row(s) for this port
try:
rows = nxos_db.get_nexusvm_bindings(vlan_id, device_id)
except cisco_exc.NexusPortBindingNotFound:
return
auto_delete = True
auto_untrunk = True
if cdb.is_provider_vlan(vlan_id):
auto_delete = conf.CISCO.provider_vlan_auto_create
auto_untrunk = conf.CISCO.provider_vlan_auto_trunk
LOG.debug(_("delete_network(): provider vlan %s"), vlan_id)
instance_id = False
for row in rows:
instance_id = row['instance_id']
switch_ip = row.switch_ip
etype, nexus_port = '', ''
if row['port_id'] == 'router':
etype, nexus_port = 'vlan', row['port_id']
auto_untrunk = False
else:
etype, nexus_port = row['port_id'].split(':')
nxos_db.remove_nexusport_binding(row.port_id, row.vlan_id,
row.switch_ip,
row.instance_id)
# Check whether there are any remaining instances using this
# vlan on this Nexus port.
try:
nxos_db.get_port_vlan_switch_binding(row.port_id,
row.vlan_id,
row.switch_ip)
except cisco_exc.NexusPortBindingNotFound:
try:
if nexus_port and auto_untrunk:
# Untrunk the vlan from this Nexus interface
self._client.disable_vlan_on_trunk_int(
switch_ip, row.vlan_id, etype, nexus_port)
# Check whether there are any remaining instances
# using this vlan on the Nexus switch.
if auto_delete:
try:
nxos_db.get_nexusvlan_binding(row.vlan_id,
row.switch_ip)
except cisco_exc.NexusPortBindingNotFound:
# Delete this vlan from this switch
self._client.delete_vlan(switch_ip, row.vlan_id)
except Exception:
# The delete vlan operation on the Nexus failed,
# so this delete_port request has failed. For
# consistency, roll back the Nexus database to what
# it was before this request.
with excutils.save_and_reraise_exception():
nxos_db.add_nexusport_binding(row.port_id,
row.vlan_id,
row.switch_ip,
row.instance_id)
return instance_id
def update_port(self, tenant_id, net_id, port_id, port_state, **kwargs):
"""Update port.
Not applicable to Nexus plugin. Defined here to satisfy abstract
method requirements.
"""
LOG.debug(_("NexusPlugin:update_port() called")) # pragma no cover
def plug_interface(self, tenant_id, net_id, port_id, remote_interface_id,
**kwargs):
"""Plug interfaces.
Not applicable to Nexus plugin. Defined here to satisfy abstract
method requirements.
"""
LOG.debug(_("NexusPlugin:plug_interface() called")) # pragma no cover
def unplug_interface(self, tenant_id, net_id, port_id, **kwargs):
"""Unplug interface.
Not applicable to Nexus plugin. Defined here to satisfy abstract
method requirements.
"""
LOG.debug(_("NexusPlugin:unplug_interface() called")
) # pragma no cover
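# Illustrative note (added for clarity, not part of the original plugin):
# create_network() above reads network[const.NET_ID], network[const.NET_NAME],
# network[const.NET_VLAN_ID] and network[const.NET_VLAN_NAME], together with
# attachment['host_name'] and attachment[const.INSTANCE_ID]; callers are
# expected to supply dicts shaped that way.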
|
ismaelliang/GitRebaseDemo
|
refs/heads/master
|
file_5.py
|
1
|
print('file_5')
|
SGenheden/lammps
|
refs/heads/master
|
tools/moltemplate/common/amber/amberparm_improper_to_lt.py
|
19
|
#!/usr/bin/env python
import sys
lines_gaff = sys.stdin.readlines()
improper_style_name = 'cvff'
sys.stdout.write(' write_once("In Settings") {\n')
for i in range(0, len(lines_gaff)):
line = lines_gaff[i]
atypes = line[:11].split('-')
atype1 = atypes[0].strip()
atype2 = atypes[1].strip()
atype3 = atypes[2].strip()
atype4 = atypes[3].strip()
at1 = atype1.replace('X','*')
at2 = atype2.replace('X','*')
at3 = atype3.replace('X','*')
at4 = atype4.replace('X','*')
impropertype = '@improper:'+atype1+'-'+atype2+'-'+atype3+'-'+atype4
#sys.stdout.write(' '+impropertype+' @atom:'+at1+' @atom:'+at2+' @atom:'+at3+' @atom:'+at4+'\n')
# Oops. This is incorrect.
# In moltemplate, the central atom is the first atom,
# In "gaff.dat", the central atom is the third atom
# http://archive.ambermd.org/201307/0519.html
#impropertype = '@improper:'+atype3+'-'+atype1+'-'+atype2+'-'+atype4
tokens= line[11:].split()
Kn = float(tokens[0])
dn = float(tokens[1])
n = int(float(tokens[2]))
comments=' '.join(tokens[3:])
if (dn < 0.001):
sys.stdout.write(' improper_coeff '+impropertype+' '+improper_style_name+' '+str(Kn)+' 1 '+str(n)+' # '+comments+'\n')
elif (179.999 < abs(dn) < 180.001):
sys.stdout.write(' improper_coeff '+impropertype+' '+improper_style_name+' '+str(Kn)+' -1 '+str(n)+' # '+comments+'\n')
else:
sys.stderr.write('Error: Illegal bondImproper parameters:\n'
                         ' As of 2013-8-03, LAMMPS does not have an improper style\n'
' which can handle impropers with gamma != 0 or 180\n')
exit(-1)
sys.stdout.write(' } # (end of improper_coeffs)\n')
sys.stdout.write('\n')
sys.stdout.write(' write_once("Data Impropers By Type (gaff_imp.py)") {\n')
for i in range(0, len(lines_gaff)):
line = lines_gaff[i]
atypes = line[:11].split('-')
atype1 = atypes[0].strip()
atype2 = atypes[1].strip()
atype3 = atypes[2].strip()
atype4 = atypes[3].strip()
at1 = atype1.replace('X','*')
at2 = atype2.replace('X','*')
at3 = atype3.replace('X','*')
at4 = atype4.replace('X','*')
impropertype = '@improper:'+atype1+'-'+atype2+'-'+atype3+'-'+atype4
sys.stdout.write(' '+impropertype+' @atom:'+at1+' @atom:'+at2+' @atom:'+at3+' @atom:'+at4+'\n')
# The improper-angle is the angle between the planes
    # defined by at1,at2,at3, and at2,at3,at4
# and we list the atoms in this order.
# NOTE: In "gaff.dat", the central atom is the third atom (at3)
# so we have to take this into account when matching atom order.
# http://archive.ambermd.org/201307/0519.html
sys.stdout.write(' } # (end of Impropers By Type)\n')
sys.stdout.write('\n')
# NOTE: AMBER documentation is not clear how the improper angle is defined.
# It's not clear if we should be using the dihedral angle between
# planes I-J-K and J-K-L. As of 2014-4, improper_style cvff does this.
# Even if we create improper interactions with the angle defined between
# the wrong planes, at least the minima should be the same
# (0 degrees or 180 degrees).
# So I'm not too worried we are getting this detail wrong long as
# we generate new impropers realizing that the 3rd atom (K) is the
# central atom (according to AMBER conventions).
#
# http://structbio.vanderbilt.edu/archives/amber-archive/2007/0408.php
#
# Currently, we only apply improper torsional angles for atoms
# in a planar conformations. Is it clear?
# Junmei
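# Illustrative trace (added for clarity, not part of the original script),
# assuming a typical gaff.dat improper line such as:
#   "X -X -c -o          1.1         180.          2.           JCC,7,(1986),230"
# With dn == 180 the first loop writes the cvff coefficient with sign flag -1:
#   improper_coeff @improper:X-X-c-o cvff 1.1 -1 2 # JCC,7,(1986),230
# and the second loop writes ('X' wildcards become '*'):
#   @improper:X-X-c-o @atom:* @atom:* @atom:c @atom:o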
|
trungnt13/scikit-learn
|
refs/heads/master
|
sklearn/linear_model/tests/test_ridge.py
|
130
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-6).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:3])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
    # test that it can work with both dense and sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd')
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # we give a small weight to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
clf = RidgeClassifier(class_weight='balanced')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='balanced')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for clf in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = clf()
clf1.fit(iris.data, iris.target)
clf2 = clf(class_weight='balanced')
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Check that sample_weight and class_weight are multiplicative
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
    # we give a small weight to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
    rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
cv = KFold(n_samples, 5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
cv=cv)
gs.fit(X, y)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
|
interaction-lab/cordial-public
|
refs/heads/master
|
cordial_tts/src/cordial_tts/__init__.py
|
1
|
from cordial_tts import CoRDialTTS
|
JaneliaSciComp/hybridizer
|
refs/heads/master
|
tests/dispense_volumes_plot.py
|
4
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import matplotlib.pyplot as plot
import numpy
from numpy.polynomial.polynomial import polyfit,polyadd,Polynomial
import argparse
INCHES_PER_ML = 0.078
VOLTS_PER_ADC_UNIT = 0.0049
def load_numpy_data(path):
with open(path,'r') as fid:
header = fid.readline().rstrip().split(',')
dt = numpy.dtype({'names':header,'formats':['S25']*len(header)})
numpy_data = numpy.loadtxt(path,dtype=dt,delimiter=",",skiprows=1)
return numpy_data
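# Expected CSV layout (an illustrative sketch inferred from the column
# handling below, not part of the original script): a header row containing
# 'dispense_goal', 'initial_weight' and one column per cylinder, e.g.
#   dispense_goal,initial_weight,cylinder_a,cylinder_b
#   10,1.2,9.8,10.1
# Columns whose names contain 'jumps' or 'adc' are ignored for the bar plot;
# the cylinder names used here are examples only.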
# -----------------------------------------------------------------------------------------
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("data_file_path",help="Path to csv data file.")
parser.add_argument("plot_title",help="Plot title.")
args = parser.parse_args()
data_file_path = args.data_file_path
plot_title = args.plot_title
print("Data File Path: {0}".format(data_file_path))
print("Plot title: {0}".format(plot_title))
# Load data
dispense_data = load_numpy_data(data_file_path)
cylinders = list(dispense_data.dtype.names)
cylinders.remove('dispense_goal')
cylinders.remove('initial_weight')
cylinders = [cylinder for cylinder in cylinders if 'jumps' not in cylinder and 'adc' not in cylinder]
print(cylinders)
cylinder_count = len(cylinders)
print(cylinder_count)
dispense_goals = numpy.int16(dispense_data['dispense_goal'])
dispense_goal_set = list(set(dispense_goals))
dispense_goal_set.sort(reverse=True)
print(dispense_goal_set)
goal_count = len(dispense_goal_set)
print(goal_count)
index = numpy.arange(goal_count)
index = index*cylinder_count
bar_width = 0.35
fig, ax = plot.subplots()
opacity = 0.6
error_config = {'ecolor': '0.3'}
colors = ['b','g','r','c','m','y','k','b']
for cylinder_n in range(cylinder_count):
cylinder_means = []
cylinder_stds = []
for dispense_goal in dispense_goal_set:
goal_data = dispense_data[dispense_goals==dispense_goal]
cylinder_data = numpy.float64(goal_data[cylinders[cylinder_n]])
cylinder_mean = numpy.mean(cylinder_data)
cylinder_means.append(cylinder_mean)
cylinder_std = numpy.std(cylinder_data)
cylinder_stds.append(cylinder_std)
print(cylinder_n)
print(cylinder_means)
print(cylinder_stds)
print('')
plot.bar(index+bar_width*(cylinder_n),
cylinder_means,
bar_width,
alpha=opacity,
color=colors[cylinder_n],
yerr=cylinder_stds,
error_kw=error_config,
label=cylinders[cylinder_n])
plot.xlabel('Dispense Volume Goal (ml)')
plot.ylabel('Dispense Volume Measured (ml)')
plot.title('ELF Dispense Test: ' + plot_title)
plot.xticks(index+(bar_width*cylinder_count/2),dispense_goal_set)
plot.legend()
plot.grid(True)
plot.ylim((0,11))
plot.yticks(numpy.arange(0,11,1.0))
plot.tight_layout()
plot.show()
|
arju88nair/projectCulminate
|
refs/heads/master
|
venv/lib/python3.5/site-packages/pylint/test/input/func_w0401_package/__init__.py
|
20
|
"""Our big package."""
__revision__ = None
|
cristianocs/DAS
|
refs/heads/master
|
texto/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/_compat.py
|
901
|
# Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# flake8: noqa
if PY3:
string_types = str,
else:
string_types = basestring,
def with_metaclass(meta, *bases):
"""
Create a base class with a metaclass.
"""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
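# Illustrative usage sketch (added for clarity, not part of the vendored
# module): with_metaclass() lets a class declare a metaclass in a way that
# works on both Python 2 and Python 3. The names _Meta and _Demo below are
# example names only.
if __name__ == "__main__":
    class _Meta(type):
        def __new__(mcs, name, bases, d):
            # mark every class created through this metaclass
            d["created_by_meta"] = True
            return super(_Meta, mcs).__new__(mcs, name, bases, d)
    class _Demo(with_metaclass(_Meta, object)):
        pass
    assert _Demo.created_by_meta
    print("with_metaclass demo ok")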
|
HydrelioxGitHub/home-assistant
|
refs/heads/dev
|
homeassistant/components/sensor/worldclock.py
|
8
|
"""
Support for showing the time in a different time zone.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.worldclock/
"""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_NAME, CONF_TIME_ZONE)
import homeassistant.util.dt as dt_util
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Worldclock Sensor'
ICON = 'mdi:clock'
TIME_STR_FORMAT = '%H:%M'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_TIME_ZONE): cv.time_zone,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the World clock sensor."""
name = config.get(CONF_NAME)
time_zone = dt_util.get_time_zone(config.get(CONF_TIME_ZONE))
async_add_entities([WorldClockSensor(time_zone, name)], True)
class WorldClockSensor(Entity):
"""Representation of a World clock sensor."""
def __init__(self, time_zone, name):
"""Initialize the sensor."""
self._name = name
self._time_zone = time_zone
self._state = None
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return ICON
async def async_update(self):
"""Get the time and updates the states."""
self._state = dt_util.now(time_zone=self._time_zone).strftime(
TIME_STR_FORMAT)
|
netpocket/webui
|
refs/heads/master
|
app/bower_components/js-beautify/python/cssbeautifier/tests/test.py
|
1
|
import unittest
import cssbeautifier
class CSSBeautifierTest(unittest.TestCase):
def resetOptions(self):
self.options = cssbeautifier.default_options()
self.options.indent_size = 1
self.options.indent_char = '\t'
self.options.selector_separator_newline = True
self.options.end_with_newline = True
def testBasics(self):
self.resetOptions()
t = self.decodesto
t("", "\n")
t(".tabs{}", ".tabs {}\n")
t(".tabs{color:red}", ".tabs {\n\tcolor: red\n}\n")
t(".tabs{color:rgb(255, 255, 0)}", ".tabs {\n\tcolor: rgb(255, 255, 0)\n}\n")
t(".tabs{background:url('back.jpg')}", ".tabs {\n\tbackground: url('back.jpg')\n}\n")
t("#bla, #foo{color:red}", "#bla,\n#foo {\n\tcolor: red\n}\n")
t("@media print {.tab{}}", "@media print {\n\t.tab {}\n}\n")
def testComments(self):
self.resetOptions()
t = self.decodesto
t("/* test */", "/* test */\n\n")
t(".tabs{/* test */}", ".tabs {\n\t/* test */\n}\n")
t("/* header */.tabs {}", "/* header */\n\n.tabs {}\n")
def testSeperateSelectors(self):
self.resetOptions()
t = self.decodesto
t("#bla, #foo{color:red}", "#bla,\n#foo {\n\tcolor: red\n}\n")
t("a, img {padding: 0.2px}", "a,\nimg {\n\tpadding: 0.2px\n}\n")
def testOptions(self):
self.resetOptions()
self.options.indent_size = 2
self.options.indent_char = ' '
self.options.selector_separator_newline = False
t = self.decodesto
t("#bla, #foo{color:green}", "#bla, #foo {\n color: green\n}\n")
t("@media print {.tab{}}", "@media print {\n .tab {}\n}\n")
t("#bla, #foo{color:black}", "#bla, #foo {\n color: black\n}\n")
def decodesto(self, input, expectation=None):
self.assertEqual(
cssbeautifier.beautify(input, self.options), expectation or input)
if __name__ == '__main__':
unittest.main()
|
chaffra/sympy
|
refs/heads/master
|
examples/beginner/differentiation.py
|
106
|
#!/usr/bin/env python
"""Differentiation example
Demonstrates some differentiation operations.
"""
import sympy
from sympy import pprint
def main():
a = sympy.Symbol('a')
b = sympy.Symbol('b')
e = (a + 2*b)**5
print("\nExpression : ")
print()
pprint(e)
print("\n\nDifferentiating w.r.t. a:")
print()
pprint(e.diff(a))
print("\n\nDifferentiating w.r.t. b:")
print()
pprint(e.diff(b))
print("\n\nSecond derivative of the above result w.r.t. a:")
print()
pprint(e.diff(b).diff(a, 2))
print("\n\nExpanding the above result:")
print()
pprint(e.expand().diff(b).diff(a, 2))
print()
if __name__ == "__main__":
main()
|
kchodorow/tensorflow
|
refs/heads/master
|
tensorflow/python/debug/wrappers/grpc_wrapper.py
|
30
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Debugger wrapper session that sends debug data to file:// URLs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Google-internal import(s).
from tensorflow.python.debug.wrappers import framework
class GrpcDebugWrapperSession(framework.NonInteractiveDebugWrapperSession):
"""Debug Session wrapper that send debug data to gRPC stream(s)."""
_GRPC_URL_PREFIX = "grpc://"
def __init__(self,
sess,
grpc_debug_server_addresses,
watch_fn=None,
log_usage=True):
"""Constructor of DumpingDebugWrapperSession.
Args:
sess: The TensorFlow `Session` object being wrapped.
grpc_debug_server_addresses: (`str` or `list` of `str`) Single or a list
of the gRPC debug server addresses, in the format of
<host:port>, without the "grpc://" prefix. For example:
"localhost:7000",
["localhost:7000", "192.168.0.2:8000"]
watch_fn: (`Callable`) A Callable that can be used to define per-run
debug ops and watched tensors. See the doc of
`NonInteractiveDebugWrapperSession.__init__()` for details.
log_usage: (`bool`) whether the usage of this class is to be logged.
Raises:
TypeError: If `grpc_debug_server_addresses` is not a `str` or a `list`
of `str`.
"""
if log_usage:
pass # No logging for open-source.
framework.NonInteractiveDebugWrapperSession.__init__(
self, sess, watch_fn=watch_fn)
if isinstance(grpc_debug_server_addresses, str):
self._grpc_debug_server_urls = [
self._GRPC_URL_PREFIX + grpc_debug_server_addresses
]
elif isinstance(grpc_debug_server_addresses, list):
self._grpc_debug_server_urls = []
for address in grpc_debug_server_addresses:
if not isinstance(address, str):
raise TypeError(
"Expected type str in list grpc_debug_server_addresses, "
"received type %s" % type(address))
self._grpc_debug_server_urls.append(self._GRPC_URL_PREFIX + address)
else:
raise TypeError(
"Expected type str or list in grpc_debug_server_addresses, "
"received type %s" % type(grpc_debug_server_addresses))
def _prepare_run_debug_urls(self, fetches, feed_dict):
"""Implementation of abstract method in superclass.
    See doc of `NonInteractiveDebugWrapperSession._prepare_run_debug_urls()`
for details.
Args:
fetches: Same as the `fetches` argument to `Session.run()`
feed_dict: Same as the `feed_dict` argument to `Session.run()`
Returns:
      debug_urls: (`str` or `list` of `str`) grpc:// debug URLs to be used in
this `Session.run()` call.
"""
return self._grpc_debug_server_urls
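# Illustrative usage sketch (added for clarity, not part of the original
# module); "localhost:7000" is an example address only and must point at a
# running gRPC debug server:
#
#   sess = tf.Session()
#   sess = GrpcDebugWrapperSession(sess, "localhost:7000")
#   sess.run(fetches)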
|
jnerin/ansible
|
refs/heads/devel
|
lib/ansible/plugins/strategy/linear.py
|
3
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
strategy: linear
short_description: Executes tasks in a linear fashion
description:
- Task execution is in lockstep per host batch as defined by C(serial) (default all).
Up to the fork limit of hosts will execute each task at the same time and then
the next series of hosts until the batch is done, before going on to the next task.
version_added: "2.0"
notes:
- This was the default Ansible behaviour before 'strategy plugins' were introduced in 2.0.
author: Ansible Core Team
'''
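# Illustrative play snippet (added for clarity, not part of the original
# module): the strategy is selected per play with the `strategy` keyword, and
# `serial` controls the host batch size described above; values are example
# choices only.
#
#   - hosts: all
#     strategy: linear
#     serial: 2
#     tasks:
#       - ping: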
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_text
from ansible.playbook.block import Block
from ansible.playbook.included_file import IncludedFile
from ansible.playbook.task import Task
from ansible.plugins.loader import action_loader
from ansible.plugins.strategy import StrategyBase
from ansible.template import Templar
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class StrategyModule(StrategyBase):
def _get_next_task_lockstep(self, hosts, iterator):
'''
Returns a list of (host, task) tuples, where the task may
be a noop task to keep the iterator in lock step across
all hosts.
'''
noop_task = Task()
noop_task.action = 'meta'
noop_task.args['_raw_params'] = 'noop'
noop_task.set_loader(iterator._play._loader)
host_tasks = {}
display.debug("building list of next tasks for hosts")
for host in hosts:
host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True)
display.debug("done building task lists")
num_setups = 0
num_tasks = 0
num_rescue = 0
num_always = 0
display.debug("counting tasks in each state of execution")
host_tasks_to_run = [(host, state_task)
for host, state_task in iteritems(host_tasks)
if state_task and state_task[1]]
if host_tasks_to_run:
try:
lowest_cur_block = min(
(s.cur_block for h, (s, t) in host_tasks_to_run
if s.run_state != PlayIterator.ITERATING_COMPLETE))
except ValueError:
lowest_cur_block = None
else:
# empty host_tasks_to_run will just run till the end of the function
# without ever touching lowest_cur_block
lowest_cur_block = None
for (k, v) in host_tasks_to_run:
(s, t) = v
if s.cur_block > lowest_cur_block:
# Not the current block, ignore it
continue
if s.run_state == PlayIterator.ITERATING_SETUP:
num_setups += 1
elif s.run_state == PlayIterator.ITERATING_TASKS:
num_tasks += 1
elif s.run_state == PlayIterator.ITERATING_RESCUE:
num_rescue += 1
elif s.run_state == PlayIterator.ITERATING_ALWAYS:
num_always += 1
display.debug("done counting tasks in each state of execution:\n\tnum_setups: %s\n\tnum_tasks: %s\n\tnum_rescue: %s\n\tnum_always: %s" % (num_setups,
num_tasks,
num_rescue,
num_always))
def _advance_selected_hosts(hosts, cur_block, cur_state):
'''
This helper returns the task for all hosts in the requested
state, otherwise they get a noop dummy task. This also advances
the state of the host, since the given states are determined
while using peek=True.
'''
# we return the values in the order they were originally
# specified in the given hosts array
rvals = []
display.debug("starting to advance hosts")
for host in hosts:
host_state_task = host_tasks.get(host.name)
if host_state_task is None:
continue
(s, t) = host_state_task
if t is None:
continue
if s.run_state == cur_state and s.cur_block == cur_block:
new_t = iterator.get_next_task_for_host(host)
rvals.append((host, t))
else:
rvals.append((host, noop_task))
display.debug("done advancing hosts to next task")
return rvals
# if any hosts are in ITERATING_SETUP, return the setup task
# while all other hosts get a noop
if num_setups:
display.debug("advancing hosts in ITERATING_SETUP")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_SETUP)
# if any hosts are in ITERATING_TASKS, return the next normal
# task for these hosts, while all other hosts get a noop
if num_tasks:
display.debug("advancing hosts in ITERATING_TASKS")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_TASKS)
# if any hosts are in ITERATING_RESCUE, return the next rescue
# task for these hosts, while all other hosts get a noop
if num_rescue:
display.debug("advancing hosts in ITERATING_RESCUE")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_RESCUE)
# if any hosts are in ITERATING_ALWAYS, return the next always
# task for these hosts, while all other hosts get a noop
if num_always:
display.debug("advancing hosts in ITERATING_ALWAYS")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_ALWAYS)
# at this point, everything must be ITERATING_COMPLETE, so we
# return None for all hosts in the list
display.debug("all hosts are done, so returning None's for all hosts")
return [(host, None) for host in hosts]
def run(self, iterator, play_context):
'''
The linear strategy is simple - get the next task and queue
it for all hosts, then wait for the queue to drain before
moving on to the next task
'''
        # iterate over each task, while there is one left to run
result = self._tqm.RUN_OK
work_to_do = True
while work_to_do and not self._tqm._terminated:
try:
display.debug("getting the remaining hosts for this loop")
hosts_left = self.get_hosts_left(iterator)
display.debug("done getting the remaining hosts for this loop")
# queue up this task for each host in the inventory
callback_sent = False
work_to_do = False
host_results = []
host_tasks = self._get_next_task_lockstep(hosts_left, iterator)
# skip control
skip_rest = False
choose_step = True
# flag set if task is set to any_errors_fatal
any_errors_fatal = False
results = []
for (host, task) in host_tasks:
if not task:
continue
if self._tqm._terminated:
break
run_once = False
work_to_do = True
# test to see if the task across all hosts points to an action plugin which
# sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
# will only send this task to the first host in the list.
try:
action = action_loader.get(task.action, class_only=True)
except KeyError:
# we don't care here, because the action may simply not have a
# corresponding action plugin
action = None
# check to see if this task should be skipped, due to it being a member of a
# role which has already run (and whether that role allows duplicate execution)
if task._role and task._role.has_run(host):
# If there is no metadata, the default behavior is to not allow duplicates,
# if there is metadata, check to see if the allow_duplicates flag was set to true
if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
display.debug("'%s' skipped because role has already run" % task)
continue
if task.action == 'meta':
# for the linear strategy, we run meta tasks just once and for
# all hosts currently being iterated over rather than one host
results.extend(self._execute_meta(task, play_context, iterator, host))
if task.args.get('_raw_params', None) != 'noop':
run_once = True
else:
# handle step if needed, skip meta actions as they are used internally
if self._step and choose_step:
if self._take_step(task):
choose_step = False
else:
skip_rest = True
break
display.debug("getting variables")
task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=task)
self.add_tqm_variables(task_vars, play=iterator._play)
templar = Templar(loader=self._loader, variables=task_vars)
display.debug("done getting variables")
run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)
if (task.any_errors_fatal or run_once) and not task.ignore_errors:
any_errors_fatal = True
if not callback_sent:
display.debug("sending task start callback, copying the task so we can template it temporarily")
saved_name = task.name
display.debug("done copying, going to template now")
try:
task.name = to_text(templar.template(task.name, fail_on_undefined=False), nonstring='empty')
display.debug("done templating")
except:
# just ignore any errors during task name templating,
# we don't care if it just shows the raw name
display.debug("templating failed for some reason")
display.debug("here goes the callback...")
self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
task.name = saved_name
callback_sent = True
display.debug("sending task start callback")
self._blocked_hosts[host.get_name()] = True
self._queue_task(host, task, task_vars, play_context)
del task_vars
# if we're bypassing the host loop, break out now
if run_once:
break
results += self._process_pending_results(iterator, max_passes=max(1, int(len(self._tqm._workers) * 0.1)))
# go to next host/task group
if skip_rest:
continue
display.debug("done queuing things up, now waiting for results queue to drain")
if self._pending_results > 0:
results += self._wait_on_pending_results(iterator)
host_results.extend(results)
self.update_active_connections(results)
try:
included_files = IncludedFile.process_include_results(
host_results,
iterator=iterator,
loader=self._loader,
variable_manager=self._variable_manager
)
except AnsibleError as e:
# this is a fatal error, so we abort here regardless of block state
return self._tqm.RUN_ERROR
include_failure = False
if len(included_files) > 0:
display.debug("we have included files to process")
# A noop task for use in padding dynamic includes
noop_task = Task()
noop_task.action = 'meta'
noop_task.args['_raw_params'] = 'noop'
noop_task.set_loader(iterator._play._loader)
display.debug("generating all_blocks data")
all_blocks = dict((host, []) for host in hosts_left)
display.debug("done generating all_blocks data")
for included_file in included_files:
display.debug("processing included file: %s" % included_file._filename)
# included hosts get the task list while those excluded get an equal-length
# list of noop tasks, to make sure that they continue running in lock-step
try:
if included_file._is_role:
new_ir = included_file._task.copy()
new_ir.vars.update(included_file._args)
new_blocks, handler_blocks = new_ir.get_block_list(
play=iterator._play,
variable_manager=self._variable_manager,
loader=self._loader,
)
self._tqm.update_handler_list([handler for handler_block in handler_blocks for handler in handler_block.block])
else:
new_blocks = self._load_included_file(included_file, iterator=iterator)
display.debug("iterating over new_blocks loaded from include file")
for new_block in new_blocks:
task_vars = self._variable_manager.get_vars(
play=iterator._play,
task=included_file._task,
)
display.debug("filtering new block on tags")
final_block = new_block.filter_tagged_tasks(play_context, task_vars)
display.debug("done filtering new block on tags")
noop_block = Block(parent_block=task._parent)
noop_block.block = [noop_task for t in new_block.block]
noop_block.always = [noop_task for t in new_block.always]
noop_block.rescue = [noop_task for t in new_block.rescue]
for host in hosts_left:
if host in included_file._hosts:
all_blocks[host].append(final_block)
else:
all_blocks[host].append(noop_block)
display.debug("done iterating over new_blocks loaded from include file")
except AnsibleError as e:
for host in included_file._hosts:
self._tqm._failed_hosts[host.name] = True
iterator.mark_host_failed(host)
display.error(to_text(e), wrap_text=False)
include_failure = True
continue
# finally go through all of the hosts and append the
# accumulated blocks to their list of tasks
display.debug("extending task lists for all hosts with included blocks")
for host in hosts_left:
iterator.add_tasks(host, all_blocks[host])
display.debug("done extending task lists")
display.debug("done processing included files")
display.debug("results queue empty")
display.debug("checking for any_errors_fatal")
failed_hosts = []
unreachable_hosts = []
for res in results:
if res.is_failed() and iterator.is_failed(res._host):
failed_hosts.append(res._host.name)
elif res.is_unreachable():
unreachable_hosts.append(res._host.name)
# if any_errors_fatal and we had an error, mark all hosts as failed
if any_errors_fatal and (len(failed_hosts) > 0 or len(unreachable_hosts) > 0):
dont_fail_states = frozenset([iterator.ITERATING_RESCUE, iterator.ITERATING_ALWAYS])
for host in hosts_left:
(s, _) = iterator.get_next_task_for_host(host, peek=True)
if s.run_state not in dont_fail_states or \
s.run_state == iterator.ITERATING_RESCUE and s.fail_state & iterator.FAILED_RESCUE != 0:
self._tqm._failed_hosts[host.name] = True
result |= self._tqm.RUN_FAILED_BREAK_PLAY
display.debug("done checking for any_errors_fatal")
display.debug("checking for max_fail_percentage")
if iterator._play.max_fail_percentage is not None and len(results) > 0:
percentage = iterator._play.max_fail_percentage / 100.0
if (len(self._tqm._failed_hosts) / iterator.batch_size) > percentage:
for host in hosts_left:
# don't double-mark hosts, or the iterator will potentially
# fail them out of the rescue/always states
if host.name not in failed_hosts:
self._tqm._failed_hosts[host.name] = True
iterator.mark_host_failed(host)
self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
result |= self._tqm.RUN_FAILED_BREAK_PLAY
display.debug('(%s failed / %s total )> %s max fail' % (len(self._tqm._failed_hosts), iterator.batch_size, percentage))
display.debug("done checking for max_fail_percentage")
display.debug("checking to see if all hosts have failed and the running result is not ok")
if result != self._tqm.RUN_OK and len(self._tqm._failed_hosts) >= len(hosts_left):
display.debug("^ not ok, so returning result now")
self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
return result
display.debug("done checking to see if all hosts have failed")
except (IOError, EOFError) as e:
display.debug("got IOError/EOFError in task loop: %s" % e)
# most likely an abort, return failed
return self._tqm.RUN_UNKNOWN_ERROR
# run the base class run() method, which executes the cleanup function
# and runs any outstanding handlers which have been triggered
return super(StrategyModule, self).run(iterator, play_context, result)
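# Illustrative sketch, not part of the original strategy: the max_fail_percentage
# handling in run() above breaks the play once the failed-host ratio exceeds the
# play's threshold. The helper below restates that arithmetic in isolation; its
# name is hypothetical and nothing in Ansible calls it.
def _max_fail_percentage_exceeded(num_failed, batch_size, max_fail_percentage):
    # e.g. num_failed=3, batch_size=10, max_fail_percentage=25 -> 0.3 > 0.25 -> True
    return (num_failed / float(batch_size)) > (max_fail_percentage / 100.0)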
|
ApsOps/zulip
|
refs/heads/master
|
zerver/test_hooks.py
|
103
|
# -*- coding: utf-8 -*-
from zerver.lib.test_helpers import AuthedTestCase
from zerver.lib.test_runner import slow
from zerver.models import Message
import ujson
class JiraHookTests(AuthedTestCase):
def send_jira_message(self, action):
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
url = "/api/v1/external/jira?api_key=%s" % (api_key,)
return self.send_json_payload(email,
url,
self.fixture_data('jira', action),
stream_name="jira",
content_type="application/json")
def test_unknown(self):
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
url = "/api/v1/external/jira?api_key=%s" % (api_key,)
result = self.client.post(url, self.fixture_data('jira', 'unknown'),
stream_name="jira",
content_type="application/json")
self.assert_json_error(result, 'Unknown JIRA event type')
def test_custom_stream(self):
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
action = 'created'
url = "/api/v1/external/jira?api_key=%s&stream=jira_custom" % (api_key,)
msg = self.send_json_payload(email, url,
self.fixture_data('jira', action),
stream_name="jira_custom",
content_type="application/json")
self.assertEqual(msg.subject, "BUG-15: New bug with hook")
self.assertEqual(msg.content, """Leo Franchi **created** [BUG-15](http://lfranchi.com:8080/browse/BUG-15) priority Major, assigned to **no one**:
> New bug with hook""")
def test_created(self):
msg = self.send_jira_message('created')
self.assertEqual(msg.subject, "BUG-15: New bug with hook")
self.assertEqual(msg.content, """Leo Franchi **created** [BUG-15](http://lfranchi.com:8080/browse/BUG-15) priority Major, assigned to **no one**:
> New bug with hook""")
def test_created_assignee(self):
msg = self.send_jira_message('created_assignee')
self.assertEqual(msg.subject, "TEST-4: Test Created Assignee")
self.assertEqual(msg.content, """Leonardo Franchi [Administrator] **created** [TEST-4](https://zulipp.atlassian.net/browse/TEST-4) priority Major, assigned to **Leonardo Franchi [Administrator]**:
> Test Created Assignee""")
def test_commented(self):
msg = self.send_jira_message('commented')
self.assertEqual(msg.subject, "BUG-15: New bug with hook")
self.assertEqual(msg.content, """Leo Franchi **updated** [BUG-15](http://lfranchi.com:8080/browse/BUG-15) (assigned to @**Othello, the Moor of Venice**):
Adding a comment. Oh, what a comment it is!
""")
def test_commented_markup(self):
msg = self.send_jira_message('commented_markup')
self.assertEqual(msg.subject, "TEST-7: Testing of rich text")
self.assertEqual(msg.content, """Leonardo Franchi [Administrator] **updated** [TEST-7](https://zulipp.atlassian.net/browse/TEST-7):\n\n\nThis is a comment that likes to **exercise** a lot of _different_ `conventions` that `jira uses`.\r\n\r\n~~~\n\r\nthis code is not highlighted, but monospaced\r\n\n~~~\r\n\r\n~~~\n\r\ndef python():\r\n print "likes to be formatted"\r\n\n~~~\r\n\r\n[http://www.google.com](http://www.google.com) is a bare link, and [Google](http://www.google.com) is given a title.\r\n\r\nThanks!\r\n\r\n~~~ quote\n\r\nSomeone said somewhere\r\n\n~~~\n""")
def test_deleted(self):
msg = self.send_jira_message('deleted')
self.assertEqual(msg.subject, "BUG-15: New bug with hook")
self.assertEqual(msg.content, "Leo Franchi **deleted** [BUG-15](http://lfranchi.com:8080/browse/BUG-15)!")
def test_reassigned(self):
msg = self.send_jira_message('reassigned')
self.assertEqual(msg.subject, "BUG-15: New bug with hook")
self.assertEqual(msg.content, """Leo Franchi **updated** [BUG-15](http://lfranchi.com:8080/browse/BUG-15) (assigned to @**Othello, the Moor of Venice**):
* Changed assignee from **None** to @**Othello, the Moor of Venice**
""")
def test_reopened(self):
msg = self.send_jira_message('reopened')
self.assertEqual(msg.subject, "BUG-7: More cowbell polease")
self.assertEqual(msg.content, """Leo Franchi **updated** [BUG-7](http://lfranchi.com:8080/browse/BUG-7) (assigned to @**Othello, the Moor of Venice**):
* Changed resolution from **Fixed** to **None**
* Changed status from **Resolved** to **Reopened**
Re-opened yeah!
""")
def test_resolved(self):
msg = self.send_jira_message('resolved')
self.assertEqual(msg.subject, "BUG-13: Refreshing the page loses the user's current posi...")
self.assertEqual(msg.content, """Leo Franchi **updated** [BUG-13](http://lfranchi.com:8080/browse/BUG-13) (assigned to @**Othello, the Moor of Venice**):
* Changed status from **Open** to **Resolved**
* Changed assignee from **None** to @**Othello, the Moor of Venice**
* Changed resolution from **None** to **Fixed**
Fixed it, finally!
""")
def test_workflow_postfunction(self):
msg = self.send_jira_message('postfunction_hook')
self.assertEqual(msg.subject, "TEST-5: PostTest")
self.assertEqual(msg.content, """Leo Franchi [Administrator] **transitioned** [TEST-5](https://lfranchi-test.atlassian.net/browse/TEST-5) from Resolved to Reopened""")
def test_workflow_postfunction_started(self):
msg = self.send_jira_message('postfunction_started')
self.assertEqual(msg.subject, "TEST-7: Gluttony of Post Functions")
self.assertEqual(msg.content, """Leo Franchi [Administrator] **transitioned** [TEST-7](https://lfranchi-test.atlassian.net/browse/TEST-7) from Open to Underway""")
def test_workflow_postfunction_resolved(self):
msg = self.send_jira_message('postfunction_resolved')
self.assertEqual(msg.subject, "TEST-7: Gluttony of Post Functions")
self.assertEqual(msg.content, """Leo Franchi [Administrator] **transitioned** [TEST-7](https://lfranchi-test.atlassian.net/browse/TEST-7) from Open to Resolved""")
def test_mention(self):
msg = self.send_jira_message('watch_mention_updated')
self.assertEqual(msg.subject, "TEST-5: Lunch Decision Needed")
self.assertEqual(msg.content, """Leonardo Franchi [Administrator] **updated** [TEST-5](https://zulipp.atlassian.net/browse/TEST-5) (assigned to @**Othello, the Moor of Venice**):
Making a comment, @**Othello, the Moor of Venice** is watching this issue
""")
def test_priority_updated(self):
msg = self.send_jira_message('updated_priority')
self.assertEqual(msg.subject, "TEST-1: Fix That")
self.assertEqual(msg.content, """Leonardo Franchi [Administrator] **updated** [TEST-1](https://zulipp.atlassian.net/browse/TEST-1) (assigned to **leo@zulip.com**):
* Changed priority from **Critical** to **Major**
""")
class BeanstalkHookTests(AuthedTestCase):
def send_beanstalk_message(self, action):
email = "hamlet@zulip.com"
data = {'payload': self.fixture_data('beanstalk', action)}
return self.send_json_payload(email, "/api/v1/external/beanstalk",
data,
stream_name="commits",
**self.api_auth(email))
def test_git_single(self):
msg = self.send_beanstalk_message('git_singlecommit')
self.assertEqual(msg.subject, "work-test")
self.assertEqual(msg.content, """Leo Franchi [pushed](http://lfranchi-svn.beanstalkapp.com/work-test) to branch master
* [e50508d](http://lfranchi-svn.beanstalkapp.com/work-test/changesets/e50508df): add some stuff
""")
@slow(0.20, "lots of queries")
def test_git_multiple(self):
msg = self.send_beanstalk_message('git_multiple')
self.assertEqual(msg.subject, "work-test")
self.assertEqual(msg.content, """Leo Franchi [pushed](http://lfranchi-svn.beanstalkapp.com/work-test) to branch master
* [edf529c](http://lfranchi-svn.beanstalkapp.com/work-test/changesets/edf529c7): Added new file
* [c2a191b](http://lfranchi-svn.beanstalkapp.com/work-test/changesets/c2a191b9): Filled in new file with some stuff
* [2009815](http://lfranchi-svn.beanstalkapp.com/work-test/changesets/20098158): More work to fix some bugs
""")
def test_svn_addremove(self):
msg = self.send_beanstalk_message('svn_addremove')
self.assertEqual(msg.subject, "svn r3")
self.assertEqual(msg.content, """Leo Franchi pushed [revision 3](http://lfranchi-svn.beanstalkapp.com/work-test/changesets/3):
> Removed a file and added another one!""")
def test_svn_changefile(self):
msg = self.send_beanstalk_message('svn_changefile')
self.assertEqual(msg.subject, "svn r2")
self.assertEqual(msg.content, """Leo Franchi pushed [revision 2](http://lfranchi-svn.beanstalkapp.com/work-test/changesets/2):
> Added some code""")
class GithubV1HookTests(AuthedTestCase):
push_content = """zbenjamin [pushed](https://github.com/zbenjamin/zulip-test/compare/4f9adc4777d5...b95449196980) to branch master
* [48c329a](https://github.com/zbenjamin/zulip-test/commit/48c329a0b68a9a379ff195ee3f1c1f4ab0b2a89e): Add baz
* [06ebe5f](https://github.com/zbenjamin/zulip-test/commit/06ebe5f472a32f6f31fd2a665f0c7442b69cce72): Baz needs to be longer
* [b954491](https://github.com/zbenjamin/zulip-test/commit/b95449196980507f08209bdfdc4f1d611689b7a8): Final edit to baz, I swear
"""
def test_spam_branch_is_ignored(self):
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
stream = 'commits'
data = ujson.loads(self.fixture_data('github', 'v1_push'))
data.update({'email': email,
'api-key': api_key,
'branches': 'dev,staging',
'stream': stream,
'payload': ujson.dumps(data['payload'])})
url = '/api/v1/external/github'
# We subscribe to the stream in this test, even though
# it won't get written, to avoid failing for the wrong
# reason.
self.subscribe_to_stream(email, stream)
prior_count = Message.objects.count()
result = self.client.post(url, data)
self.assert_json_success(result)
after_count = Message.objects.count()
self.assertEqual(prior_count, after_count)
def basic_test(self, fixture_name, stream_name, expected_subject, expected_content, send_stream=False, branches=None):
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
data = ujson.loads(self.fixture_data('github', 'v1_' + fixture_name))
data.update({'email': email,
'api-key': api_key,
'payload': ujson.dumps(data['payload'])})
if send_stream:
data['stream'] = stream_name
if branches is not None:
data['branches'] = branches
msg = self.send_json_payload(email, "/api/v1/external/github",
data,
stream_name=stream_name)
self.assertEqual(msg.subject, expected_subject)
self.assertEqual(msg.content, expected_content)
def test_user_specified_branches(self):
self.basic_test('push', 'my_commits', 'zulip-test', self.push_content,
send_stream=True, branches="master,staging")
def test_user_specified_stream(self):
# Around May 2013 the github webhook started to specify the stream.
# Before then, the stream was hard coded to "commits".
self.basic_test('push', 'my_commits', 'zulip-test', self.push_content,
send_stream=True)
def test_legacy_hook(self):
self.basic_test('push', 'commits', 'zulip-test', self.push_content)
def test_issues_opened(self):
self.basic_test('issues_opened', 'issues',
"zulip-test: issue 5: The frobnicator doesn't work",
"zbenjamin opened [issue 5](https://github.com/zbenjamin/zulip-test/issues/5)\n\n~~~ quote\nI tried changing the widgets, but I got:\r\n\r\nPermission denied: widgets are immutable\n~~~")
def test_issue_comment(self):
self.basic_test('issue_comment', 'issues',
"zulip-test: issue 5: The frobnicator doesn't work",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/issues/5#issuecomment-23374280) on [issue 5](https://github.com/zbenjamin/zulip-test/issues/5)\n\n~~~ quote\nWhoops, I did something wrong.\r\n\r\nI'm sorry.\n~~~")
def test_issues_closed(self):
self.basic_test('issues_closed', 'issues',
"zulip-test: issue 5: The frobnicator doesn't work",
"zbenjamin closed [issue 5](https://github.com/zbenjamin/zulip-test/issues/5)")
def test_pull_request_opened(self):
self.basic_test('pull_request_opened', 'commits',
"zulip-test: pull request 7: Counting is hard.",
"lfaraone opened [pull request 7](https://github.com/zbenjamin/zulip-test/pull/7)\n\n~~~ quote\nOmitted something I think?\n~~~")
def test_pull_request_closed(self):
self.basic_test('pull_request_closed', 'commits',
"zulip-test: pull request 7: Counting is hard.",
"zbenjamin closed [pull request 7](https://github.com/zbenjamin/zulip-test/pull/7)")
def test_pull_request_synchronize(self):
self.basic_test('pull_request_synchronize', 'commits',
"zulip-test: pull request 13: Even more cowbell.",
"zbenjamin synchronized [pull request 13](https://github.com/zbenjamin/zulip-test/pull/13)")
def test_pull_request_comment(self):
self.basic_test('pull_request_comment', 'commits',
"zulip-test: pull request 9: Less cowbell.",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/pull/9#issuecomment-24771110) on [pull request 9](https://github.com/zbenjamin/zulip-test/pull/9)\n\n~~~ quote\nYeah, who really needs more cowbell than we already have?\n~~~")
def test_pull_request_comment_user_specified_stream(self):
self.basic_test('pull_request_comment', 'my_commits',
"zulip-test: pull request 9: Less cowbell.",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/pull/9#issuecomment-24771110) on [pull request 9](https://github.com/zbenjamin/zulip-test/pull/9)\n\n~~~ quote\nYeah, who really needs more cowbell than we already have?\n~~~",
send_stream=True)
def test_commit_comment(self):
self.basic_test('commit_comment', 'commits',
"zulip-test: commit 7c994678d2f98797d299abed852d3ff9d0834533",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533#commitcomment-4252302)\n\n~~~ quote\nAre we sure this is enough cowbell?\n~~~")
def test_commit_comment_line(self):
self.basic_test('commit_comment_line', 'commits',
"zulip-test: commit 7c994678d2f98797d299abed852d3ff9d0834533",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533#commitcomment-4252307) on `cowbell`, line 13\n\n~~~ quote\nThis line adds /unlucky/ cowbell (because of its line number). We should remove it.\n~~~")
class GithubV2HookTests(AuthedTestCase):
push_content = """zbenjamin [pushed](https://github.com/zbenjamin/zulip-test/compare/4f9adc4777d5...b95449196980) to branch master
* [48c329a](https://github.com/zbenjamin/zulip-test/commit/48c329a0b68a9a379ff195ee3f1c1f4ab0b2a89e): Add baz
* [06ebe5f](https://github.com/zbenjamin/zulip-test/commit/06ebe5f472a32f6f31fd2a665f0c7442b69cce72): Baz needs to be longer
* [b954491](https://github.com/zbenjamin/zulip-test/commit/b95449196980507f08209bdfdc4f1d611689b7a8): Final edit to baz, I swear
"""
def test_spam_branch_is_ignored(self):
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
stream = 'commits'
data = ujson.loads(self.fixture_data('github', 'v2_push'))
data.update({'email': email,
'api-key': api_key,
'branches': 'dev,staging',
'stream': stream,
'payload': ujson.dumps(data['payload'])})
url = '/api/v1/external/github'
# We subscribe to the stream in this test, even though
# it won't get written, to avoid failing for the wrong
# reason.
self.subscribe_to_stream(email, stream)
prior_count = Message.objects.count()
result = self.client.post(url, data)
self.assert_json_success(result)
after_count = Message.objects.count()
self.assertEqual(prior_count, after_count)
def basic_test(self, fixture_name, stream_name, expected_subject, expected_content, send_stream=False, branches=None):
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
data = ujson.loads(self.fixture_data('github', 'v2_' + fixture_name))
data.update({'email': email,
'api-key': api_key,
'payload': ujson.dumps(data['payload'])})
if send_stream:
data['stream'] = stream_name
if branches is not None:
data['branches'] = branches
msg = self.send_json_payload(email, "/api/v1/external/github",
data,
stream_name=stream_name)
self.assertEqual(msg.subject, expected_subject)
self.assertEqual(msg.content, expected_content)
def test_user_specified_branches(self):
self.basic_test('push', 'my_commits', 'zulip-test', self.push_content,
send_stream=True, branches="master,staging")
def test_user_specified_stream(self):
# Around May 2013 the github webhook started to specify the stream.
# Before then, the stream was hard coded to "commits".
self.basic_test('push', 'my_commits', 'zulip-test', self.push_content,
send_stream=True)
def test_legacy_hook(self):
self.basic_test('push', 'commits', 'zulip-test', self.push_content)
def test_issues_opened(self):
self.basic_test('issues_opened', 'issues',
"zulip-test: issue 5: The frobnicator doesn't work",
"zbenjamin opened [issue 5](https://github.com/zbenjamin/zulip-test/issues/5)\n\n~~~ quote\nI tried changing the widgets, but I got:\r\n\r\nPermission denied: widgets are immutable\n~~~")
def test_issue_comment(self):
self.basic_test('issue_comment', 'issues',
"zulip-test: issue 5: The frobnicator doesn't work",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/issues/5#issuecomment-23374280) on [issue 5](https://github.com/zbenjamin/zulip-test/issues/5)\n\n~~~ quote\nWhoops, I did something wrong.\r\n\r\nI'm sorry.\n~~~")
def test_issues_closed(self):
self.basic_test('issues_closed', 'issues',
"zulip-test: issue 5: The frobnicator doesn't work",
"zbenjamin closed [issue 5](https://github.com/zbenjamin/zulip-test/issues/5)")
def test_pull_request_opened(self):
self.basic_test('pull_request_opened', 'commits',
"zulip-test: pull request 7: Counting is hard.",
"lfaraone opened [pull request 7](https://github.com/zbenjamin/zulip-test/pull/7)\n\n~~~ quote\nOmitted something I think?\n~~~")
def test_pull_request_closed(self):
self.basic_test('pull_request_closed', 'commits',
"zulip-test: pull request 7: Counting is hard.",
"zbenjamin closed [pull request 7](https://github.com/zbenjamin/zulip-test/pull/7)")
def test_pull_request_synchronize(self):
self.basic_test('pull_request_synchronize', 'commits',
"zulip-test: pull request 13: Even more cowbell.",
"zbenjamin synchronized [pull request 13](https://github.com/zbenjamin/zulip-test/pull/13)")
def test_pull_request_comment(self):
self.basic_test('pull_request_comment', 'commits',
"zulip-test: pull request 9: Less cowbell.",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/pull/9#issuecomment-24771110) on [pull request 9](https://github.com/zbenjamin/zulip-test/pull/9)\n\n~~~ quote\nYeah, who really needs more cowbell than we already have?\n~~~")
def test_pull_request_comment_user_specified_stream(self):
self.basic_test('pull_request_comment', 'my_commits',
"zulip-test: pull request 9: Less cowbell.",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/pull/9#issuecomment-24771110) on [pull request 9](https://github.com/zbenjamin/zulip-test/pull/9)\n\n~~~ quote\nYeah, who really needs more cowbell than we already have?\n~~~",
send_stream=True)
def test_commit_comment(self):
self.basic_test('commit_comment', 'commits',
"zulip-test: commit 7c994678d2f98797d299abed852d3ff9d0834533",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533#commitcomment-4252302)\n\n~~~ quote\nAre we sure this is enough cowbell?\n~~~")
def test_commit_comment_line(self):
self.basic_test('commit_comment_line', 'commits',
"zulip-test: commit 7c994678d2f98797d299abed852d3ff9d0834533",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533#commitcomment-4252307) on `cowbell`, line 13\n\n~~~ quote\nThis line adds /unlucky/ cowbell (because of its line number). We should remove it.\n~~~")
class PivotalV3HookTests(AuthedTestCase):
def send_pivotal_message(self, name):
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
return self.send_json_payload(email, "/api/v1/external/pivotal?api_key=%s&stream=%s" % (api_key,"pivotal"),
self.fixture_data('pivotal', name, file_type='xml'),
stream_name="pivotal",
content_type="application/xml")
def test_accepted(self):
msg = self.send_pivotal_message('accepted')
self.assertEqual(msg.subject, 'My new Feature story')
self.assertEqual(msg.content, 'Leo Franchi accepted "My new Feature story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48276573)')
def test_commented(self):
msg = self.send_pivotal_message('commented')
self.assertEqual(msg.subject, 'Comment added')
self.assertEqual(msg.content, 'Leo Franchi added comment: "FIX THIS NOW" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48276573)')
def test_created(self):
msg = self.send_pivotal_message('created')
self.assertEqual(msg.subject, 'My new Feature story')
self.assertEqual(msg.content, 'Leo Franchi added "My new Feature story" \
(unscheduled feature):\n\n~~~ quote\nThis is my long description\n~~~\n\n \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48276573)')
def test_delivered(self):
msg = self.send_pivotal_message('delivered')
self.assertEqual(msg.subject, 'Another new story')
self.assertEqual(msg.content, 'Leo Franchi delivered "Another new story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48278289)')
def test_finished(self):
msg = self.send_pivotal_message('finished')
self.assertEqual(msg.subject, 'Another new story')
self.assertEqual(msg.content, 'Leo Franchi finished "Another new story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48278289)')
def test_moved(self):
msg = self.send_pivotal_message('moved')
self.assertEqual(msg.subject, 'My new Feature story')
self.assertEqual(msg.content, 'Leo Franchi edited "My new Feature story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48276573)')
def test_rejected(self):
msg = self.send_pivotal_message('rejected')
self.assertEqual(msg.subject, 'Another new story')
self.assertEqual(msg.content, 'Leo Franchi rejected "Another new story" with comments: \
"Not good enough, sorry" [(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48278289)')
def test_started(self):
msg = self.send_pivotal_message('started')
self.assertEqual(msg.subject, 'Another new story')
self.assertEqual(msg.content, 'Leo Franchi started "Another new story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48278289)')
def test_created_estimate(self):
msg = self.send_pivotal_message('created_estimate')
self.assertEqual(msg.subject, 'Another new story')
self.assertEqual(msg.content, 'Leo Franchi added "Another new story" \
(unscheduled feature worth 2 story points):\n\n~~~ quote\nSome loong description\n~~~\n\n \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48278289)')
def test_type_changed(self):
msg = self.send_pivotal_message('type_changed')
self.assertEqual(msg.subject, 'My new Feature story')
self.assertEqual(msg.content, 'Leo Franchi edited "My new Feature story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48276573)')
class PivotalV5HookTests(AuthedTestCase):
def send_pivotal_message(self, name):
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
return self.send_json_payload(email, "/api/v1/external/pivotal?api_key=%s&stream=%s" % (api_key,"pivotal"),
self.fixture_data('pivotal', "v5_" + name, file_type='json'),
stream_name="pivotal",
content_type="application/xml")
def test_accepted(self):
msg = self.send_pivotal_message('accepted')
self.assertEqual(msg.subject, '#63486316: Story of the Year')
self.assertEqual(msg.content, """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story of the Year](http://www.pivotaltracker.com/story/show/63486316):
* state changed from **unstarted** to **accepted**
""")
def test_commented(self):
msg = self.send_pivotal_message('commented')
self.assertEqual(msg.subject, '#63486316: Story of the Year')
self.assertEqual(msg.content, """Leo Franchi added a comment to [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story of the Year](http://www.pivotaltracker.com/story/show/63486316):
~~~quote
A comment on the story
~~~""")
def test_created(self):
msg = self.send_pivotal_message('created')
self.assertEqual(msg.subject, '#63495662: Story that I created')
self.assertEqual(msg.content, """Leo Franchi created bug: [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story that I created](http://www.pivotaltracker.com/story/show/63495662)
* State is **unscheduled**
* Description is
> What a description""")
def test_delivered(self):
msg = self.send_pivotal_message('delivered')
self.assertEqual(msg.subject, '#63486316: Story of the Year')
self.assertEqual(msg.content, """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story of the Year](http://www.pivotaltracker.com/story/show/63486316):
* state changed from **accepted** to **delivered**
""")
def test_finished(self):
msg = self.send_pivotal_message('finished')
self.assertEqual(msg.subject, '#63486316: Story of the Year')
self.assertEqual(msg.content, """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story of the Year](http://www.pivotaltracker.com/story/show/63486316):
* state changed from **delivered** to **accepted**
""")
def test_moved(self):
msg = self.send_pivotal_message('moved')
self.assertEqual(msg.subject, '#63496066: Pivotal Test')
self.assertEqual(msg.content, """Leo Franchi moved [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Pivotal Test](http://www.pivotaltracker.com/story/show/63496066) from **unstarted** to **unscheduled**""")
def test_rejected(self):
msg = self.send_pivotal_message('rejected')
self.assertEqual(msg.subject, '#63486316: Story of the Year')
self.assertEqual(msg.content, """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story of the Year](http://www.pivotaltracker.com/story/show/63486316):
* Comment added:
~~~quote
Try again next time
~~~
* state changed from **delivered** to **rejected**
""")
def test_started(self):
msg = self.send_pivotal_message('started')
self.assertEqual(msg.subject, '#63495972: Fresh Story')
self.assertEqual(msg.content, """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Fresh Story](http://www.pivotaltracker.com/story/show/63495972):
* state changed from **unstarted** to **started**
""")
def test_created_estimate(self):
msg = self.send_pivotal_message('created_estimate')
self.assertEqual(msg.subject, '#63496066: Pivotal Test')
self.assertEqual(msg.content, """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Pivotal Test](http://www.pivotaltracker.com/story/show/63496066):
* estimate is now **3 points**
""")
def test_type_changed(self):
msg = self.send_pivotal_message('type_changed')
self.assertEqual(msg.subject, '#63496066: Pivotal Test')
self.assertEqual(msg.content, """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Pivotal Test](http://www.pivotaltracker.com/story/show/63496066):
* estimate changed from 3 to **0 points**
* type changed from **feature** to **bug**
""")
class NewRelicHookTests(AuthedTestCase):
def send_new_relic_message(self, name):
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
return self.send_json_payload(email, "/api/v1/external/newrelic?api_key=%s&stream=%s" % (api_key,"newrelic"),
self.fixture_data('newrelic', name, file_type='txt'),
stream_name="newrelic",
content_type="application/x-www-form-urlencoded")
def test_alert(self):
msg = self.send_new_relic_message('alert')
self.assertEqual(msg.subject, "Apdex score fell below critical level of 0.90")
self.assertEqual(msg.content, 'Alert opened on [application name]: \
Apdex score fell below critical level of 0.90\n\
[View alert](https://rpm.newrelc.com/accounts/[account_id]/applications/[application_id]/incidents/[incident_id])')
def test_deployment(self):
msg = self.send_new_relic_message('deployment')
self.assertEqual(msg.subject, 'Test App deploy')
self.assertEqual(msg.content, '`1242` deployed by **Zulip Test**\n\
Description sent via curl\n\nChangelog string')
class StashHookTests(AuthedTestCase):
def test_stash_message(self):
"""
Messages are generated by Stash on a `git push`.
The subject describes the repo and Stash "project". The
content describes the commits pushed.
"""
email = "hamlet@zulip.com"
msg = self.send_json_payload(
email, "/api/v1/external/stash?stream=commits",
self.fixture_data("stash", "push", file_type="json"),
stream_name="commits",
content_type="application/x-www-form-urlencoded",
**self.api_auth(email))
self.assertEqual(msg.subject, u"Secret project/Operation unicorn: master")
self.assertEqual(msg.content, """`f259e90` was pushed to **master** in **Secret project/Operation unicorn** with:
* `f259e90`: Updating poms ...""")
class FreshdeskHookTests(AuthedTestCase):
def generate_webhook_response(self, fixture):
"""
Helper function to handle the webhook boilerplate.
"""
email = "hamlet@zulip.com"
return self.send_json_payload(
email, "/api/v1/external/freshdesk?stream=freshdesk",
self.fixture_data("freshdesk", fixture, file_type="json"),
stream_name="freshdesk",
content_type="application/x-www-form-urlencoded",
**self.api_auth(email))
def test_ticket_creation(self):
"""
Messages are generated on ticket creation through Freshdesk's
"Dispatch'r" service.
"""
msg = self.generate_webhook_response("ticket_created")
self.assertEqual(msg.subject, u"#11: Test ticket subject ☃")
self.assertEqual(msg.content, u"""Requester ☃ Bob <requester-bob@example.com> created [ticket #11](http://test1234zzz.freshdesk.com/helpdesk/tickets/11):
~~~ quote
Test ticket description ☃.
~~~
Type: **Incident**
Priority: **High**
Status: **Pending**""")
def test_status_change(self):
"""
Messages are generated when a ticket's status changes through
Freshdesk's "Observer" service.
"""
msg = self.generate_webhook_response("status_changed")
self.assertEqual(msg.subject, u"#11: Test ticket subject ☃")
self.assertEqual(msg.content, """Requester Bob <requester-bob@example.com> updated [ticket #11](http://test1234zzz.freshdesk.com/helpdesk/tickets/11):
Status: **Resolved** => **Waiting on Customer**""")
def test_priority_change(self):
"""
Messages are generated when a ticket's priority changes through
Freshdesk's "Observer" service.
"""
msg = self.generate_webhook_response("priority_changed")
self.assertEqual(msg.subject, u"#11: Test ticket subject")
self.assertEqual(msg.content, """Requester Bob <requester-bob@example.com> updated [ticket #11](http://test1234zzz.freshdesk.com/helpdesk/tickets/11):
Priority: **High** => **Low**""")
def note_change(self, fixture, note_type):
"""
Messages are generated when a note gets added to a ticket through
Freshdesk's "Observer" service.
"""
msg = self.generate_webhook_response(fixture)
self.assertEqual(msg.subject, u"#11: Test ticket subject")
self.assertEqual(msg.content, """Requester Bob <requester-bob@example.com> added a %s note to [ticket #11](http://test1234zzz.freshdesk.com/helpdesk/tickets/11).""" % (note_type,))
def test_private_note_change(self):
self.note_change("private_note", "private")
def test_public_note_change(self):
self.note_change("public_note", "public")
def test_inline_image(self):
"""
Freshdesk sends us descriptions as HTML, so we have to make the
descriptions Zulip markdown-friendly while still doing our best to
preserve links and images.
"""
msg = self.generate_webhook_response("inline_images")
self.assertEqual(msg.subject, u"#12: Not enough ☃ guinea pigs")
self.assertIn("[guinea_pig.png](http://cdn.freshdesk.com/data/helpdesk/attachments/production/12744808/original/guinea_pig.png)", msg.content)
class ZenDeskHookTests(AuthedTestCase):
def generate_webhook_response(self, ticket_title='User can\'t login',
ticket_id=54, message='Message',
stream_name='zendesk'):
data = {
'ticket_title': ticket_title,
'ticket_id': ticket_id,
'message': message,
'stream': stream_name,
}
email = 'hamlet@zulip.com'
self.subscribe_to_stream(email, stream_name)
result = self.client.post('/api/v1/external/zendesk', data,
**self.api_auth(email))
self.assert_json_success(result)
# Check the correct message was sent
msg = Message.objects.filter().order_by('-id')[0]
self.assertEqual(msg.sender.email, email)
return msg
def test_subject(self):
msg = self.generate_webhook_response(ticket_id=4, ticket_title="Test ticket")
self.assertEqual(msg.subject, '#4: Test ticket')
def test_long_subject(self):
msg = self.generate_webhook_response(ticket_id=4, ticket_title="Test ticket" + '!' * 80)
self.assertEqual(msg.subject, '#4: Test ticket' + '!' * 42 + '...')
def test_content(self):
msg = self.generate_webhook_response(message='New comment:\n> It is better\n* here')
self.assertEqual(msg.content, 'New comment:\n> It is better\n* here')
class PagerDutyHookTests(AuthedTestCase):
def send_webhook(self, data, stream_name, topic=None):
email = 'hamlet@zulip.com'
self.subscribe_to_stream(email, stream_name)
api_key = self.get_api_key(email)
if topic:
url = '/api/v1/external/pagerduty?api_key=%s&stream=%s&topic=%s' % (api_key, stream_name, topic)
else:
url = '/api/v1/external/pagerduty?api_key=%s&stream=%s' % (api_key, stream_name)
result = self.client.post(url, ujson.dumps(data), content_type="application/json")
self.assert_json_success(result)
# Check the correct message was sent
msg = Message.objects.filter().order_by('-id')[0]
self.assertEqual(msg.sender.email, email)
return msg
def test_trigger(self):
data = ujson.loads(self.fixture_data('pagerduty', 'trigger'))
msg = self.send_webhook(data, 'pagerduty')
self.assertEqual(msg.subject, 'incident 3')
self.assertEqual(
msg.content,
':unhealthy_heart: Incident [3](https://zulip-test.pagerduty.com/incidents/P140S4Y) triggered by [Test service](https://zulip-test.pagerduty.com/services/PIL5CUQ) and assigned to [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>foo'
)
def test_unacknowledge(self):
data = ujson.loads(self.fixture_data('pagerduty', 'unacknowledge'))
msg = self.send_webhook(data, 'pagerduty')
self.assertEqual(msg.subject, 'incident 3')
self.assertEqual(
msg.content,
':unhealthy_heart: Incident [3](https://zulip-test.pagerduty.com/incidents/P140S4Y) unacknowledged by [Test service](https://zulip-test.pagerduty.com/services/PIL5CUQ) and assigned to [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>foo'
)
def test_resolved(self):
data = ujson.loads(self.fixture_data('pagerduty', 'resolved'))
msg = self.send_webhook(data, 'pagerduty')
self.assertEqual(msg.subject, 'incident 1')
self.assertEqual(
msg.content,
':healthy_heart: Incident [1](https://zulip-test.pagerduty.com/incidents/PO1XIJ5) resolved by [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>It is on fire'
)
def test_auto_resolved(self):
data = ujson.loads(self.fixture_data('pagerduty', 'auto_resolved'))
msg = self.send_webhook(data, 'pagerduty')
self.assertEqual(msg.subject, 'incident 2')
self.assertEqual(
msg.content,
':healthy_heart: Incident [2](https://zulip-test.pagerduty.com/incidents/PX7K9J2) resolved\n\n>new'
)
def test_acknowledge(self):
data = ujson.loads(self.fixture_data('pagerduty', 'acknowledge'))
msg = self.send_webhook(data, 'pagerduty')
self.assertEqual(msg.subject, 'incident 1')
self.assertEqual(
msg.content,
':average_heart: Incident [1](https://zulip-test.pagerduty.com/incidents/PO1XIJ5) acknowledged by [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>It is on fire'
)
def test_no_subject(self):
data = ujson.loads(self.fixture_data('pagerduty', 'mp_fail'))
msg = self.send_webhook(data, 'pagerduty')
self.assertEqual(msg.subject, 'incident 48219')
self.assertEqual(
msg.content,
u':healthy_heart: Incident [48219](https://dropbox.pagerduty.com/incidents/PJKGZF9) resolved\n\n>mp_error_block_down_critical\u2119\u01b4'
)
def test_explicit_subject(self):
data = ujson.loads(self.fixture_data('pagerduty', 'acknowledge'))
msg = self.send_webhook(data, 'pagerduty', topic="my+cool+topic")
self.assertEqual(msg.subject, 'my cool topic')
self.assertEqual(
msg.content,
':average_heart: Incident [1](https://zulip-test.pagerduty.com/incidents/PO1XIJ5) acknowledged by [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>It is on fire'
)
def test_bad_message(self):
data = {'messages': [{'type': 'incident.triggered'}]}
msg = self.send_webhook(data, 'pagerduty')
self.assertEqual(msg.subject, 'pagerduty')
self.assertEqual(
msg.content,
'Unknown pagerduty message\n``` py\n{u\'type\': u\'incident.triggered\'}\n```'
)
def test_unknown_message_type(self):
data = {'messages': [{'type': 'foo'}]}
msg = self.send_webhook(data, 'pagerduty')
self.assertEqual(msg.subject, 'pagerduty')
self.assertEqual(
msg.content,
'Unknown pagerduty message\n``` py\n{u\'type\': u\'foo\'}\n```'
)
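# Illustrative sketch, not part of the original tests: most hook tests above post to
# a URL of the form /api/v1/external/<integration>?api_key=...&stream=..., so a
# hypothetical helper for building those URLs could look like this.
def _example_webhook_url(integration, api_key, stream=None):
    url = "/api/v1/external/%s?api_key=%s" % (integration, api_key)
    if stream is not None:
        url += "&stream=%s" % (stream,)
    return url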
|
jorgeer/brewing
|
refs/heads/master
|
tests/factories.py
|
1
|
# -*- coding: utf-8 -*-
from factory import Sequence, PostGenerationMethodCall
from factory.alchemy import SQLAlchemyModelFactory
from brewing.user.models import User
from brewing.database import db
class BaseFactory(SQLAlchemyModelFactory):
class Meta:
abstract = True
sqlalchemy_session = db.session
class UserFactory(BaseFactory):
username = Sequence(lambda n: "user{0}".format(n))
email = Sequence(lambda n: "user{0}@example.com".format(n))
password = PostGenerationMethodCall('set_password', 'example')
active = True
class Meta:
model = User
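# A minimal usage sketch, not part of the original module: assuming a test run with
# an application context and the db session configured above, a factory call might
# look like this. The function name and the check_password() call are assumptions.
def _example_user_factory_usage():
    user = UserFactory()       # builds a User and adds it to db.session
    db.session.commit()        # flush the row so its generated attributes are final
    assert user.username.startswith("user")
    assert user.email.endswith("@example.com")
    assert user.check_password("example")  # assumes the User model exposes check_password()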
|
dleicht/PSB
|
refs/heads/master
|
apiclient/schema.py
|
149
|
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Schema processing for discovery based APIs
Schemas holds an APIs discovery schemas. It can return those schema as
deserialized JSON objects, or pretty print them as prototype objects that
conform to the schema.
For example, given the schema:
schema = \"\"\"{
"Foo": {
"type": "object",
"properties": {
"etag": {
"type": "string",
"description": "ETag of the collection."
},
"kind": {
"type": "string",
"description": "Type of the collection ('calendar#acl').",
"default": "calendar#acl"
},
"nextPageToken": {
"type": "string",
"description": "Token used to access the next
page of this result. Omitted if no further results are available."
}
}
}
}\"\"\"
s = Schemas(schema)
print s.prettyPrintByName('Foo')
Produces the following output:
{
"nextPageToken": "A String", # Token used to access the
# next page of this result. Omitted if no further results are available.
"kind": "A String", # Type of the collection ('calendar#acl').
"etag": "A String", # ETag of the collection.
},
The constructor takes a discovery document in which to look up named schema.
"""
# TODO(jcgregorio) support format, enum, minimum, maximum
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import copy
from oauth2client import util
from oauth2client.anyjson import simplejson
class Schemas(object):
"""Schemas for an API."""
def __init__(self, discovery):
"""Constructor.
Args:
discovery: object, Deserialized discovery document from which we pull
out the named schema.
"""
self.schemas = discovery.get('schemas', {})
# Cache of pretty printed schemas.
self.pretty = {}
@util.positional(2)
def _prettyPrintByName(self, name, seen=None, dent=0):
"""Get pretty printed object prototype from the schema name.
Args:
name: string, Name of schema in the discovery document.
seen: list of string, Names of schema already seen. Used to handle
recursive definitions.
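dent: int, Initial indentation depth.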
Returns:
string, A string that contains a prototype object with
comments that conforms to the given schema.
"""
if seen is None:
seen = []
if name in seen:
# Do not fall into an infinite loop over recursive definitions.
return '# Object with schema name: %s' % name
seen.append(name)
if name not in self.pretty:
self.pretty[name] = _SchemaToStruct(self.schemas[name],
seen, dent=dent).to_str(self._prettyPrintByName)
seen.pop()
return self.pretty[name]
def prettyPrintByName(self, name):
"""Get pretty printed object prototype from the schema name.
Args:
name: string, Name of schema in the discovery document.
Returns:
string, A string that contains a prototype object with
comments that conforms to the given schema.
"""
# Return with trailing comma and newline removed.
return self._prettyPrintByName(name, seen=[], dent=1)[:-2]
@util.positional(2)
def _prettyPrintSchema(self, schema, seen=None, dent=0):
"""Get pretty printed object prototype of schema.
Args:
schema: object, Parsed JSON schema.
seen: list of string, Names of schema already seen. Used to handle
recursive definitions.
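dent: int, Initial indentation depth.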
Returns:
string, A string that contains a prototype object with
comments that conforms to the given schema.
"""
if seen is None:
seen = []
return _SchemaToStruct(schema, seen, dent=dent).to_str(self._prettyPrintByName)
def prettyPrintSchema(self, schema):
"""Get pretty printed object prototype of schema.
Args:
schema: object, Parsed JSON schema.
Returns:
string, A string that contains a prototype object with
comments that conforms to the given schema.
"""
# Return with trailing comma and newline removed.
return self._prettyPrintSchema(schema, dent=1)[:-2]
def get(self, name):
"""Get deserialized JSON schema from the schema name.
Args:
name: string, Schema name.
"""
return self.schemas[name]
class _SchemaToStruct(object):
"""Convert schema to a prototype object."""
@util.positional(3)
def __init__(self, schema, seen, dent=0):
"""Constructor.
Args:
schema: object, Parsed JSON schema.
seen: list, List of names of schema already seen while parsing. Used to
handle recursive definitions.
dent: int, Initial indentation depth.
"""
# The result of this parsing kept as list of strings.
self.value = []
# The final value of the parsing.
self.string = None
# The parsed JSON schema.
self.schema = schema
# Indentation level.
self.dent = dent
# Method that when called returns a prototype object for the schema with
# the given name.
self.from_cache = None
# List of names of schema already seen while parsing.
self.seen = seen
def emit(self, text):
"""Add text as a line to the output.
Args:
text: string, Text to output.
"""
self.value.extend([" " * self.dent, text, '\n'])
def emitBegin(self, text):
"""Add text to the output, but with no line terminator.
Args:
text: string, Text to output.
"""
self.value.extend([" " * self.dent, text])
def emitEnd(self, text, comment):
"""Add text and comment to the output with line terminator.
Args:
text: string, Text to output.
comment: string, Python comment.
"""
if comment:
divider = '\n' + ' ' * (self.dent + 2) + '# '
lines = comment.splitlines()
lines = [x.rstrip() for x in lines]
comment = divider.join(lines)
self.value.extend([text, ' # ', comment, '\n'])
else:
self.value.extend([text, '\n'])
def indent(self):
"""Increase indentation level."""
self.dent += 1
def undent(self):
"""Decrease indentation level."""
self.dent -= 1
def _to_str_impl(self, schema):
"""Prototype object based on the schema, in Python code with comments.
Args:
schema: object, Parsed JSON schema file.
Returns:
Prototype object based on the schema, in Python code with comments.
"""
stype = schema.get('type')
if stype == 'object':
self.emitEnd('{', schema.get('description', ''))
self.indent()
if 'properties' in schema:
for pname, pschema in schema.get('properties', {}).iteritems():
self.emitBegin('"%s": ' % pname)
self._to_str_impl(pschema)
elif 'additionalProperties' in schema:
self.emitBegin('"a_key": ')
self._to_str_impl(schema['additionalProperties'])
self.undent()
self.emit('},')
elif '$ref' in schema:
schemaName = schema['$ref']
description = schema.get('description', '')
s = self.from_cache(schemaName, seen=self.seen)
parts = s.splitlines()
self.emitEnd(parts[0], description)
for line in parts[1:]:
self.emit(line.rstrip())
elif stype == 'boolean':
value = schema.get('default', 'True or False')
self.emitEnd('%s,' % str(value), schema.get('description', ''))
elif stype == 'string':
value = schema.get('default', 'A String')
self.emitEnd('"%s",' % str(value), schema.get('description', ''))
elif stype == 'integer':
value = schema.get('default', '42')
self.emitEnd('%s,' % str(value), schema.get('description', ''))
elif stype == 'number':
value = schema.get('default', '3.14')
self.emitEnd('%s,' % str(value), schema.get('description', ''))
elif stype == 'null':
self.emitEnd('None,', schema.get('description', ''))
elif stype == 'any':
self.emitEnd('"",', schema.get('description', ''))
elif stype == 'array':
self.emitEnd('[', schema.get('description'))
self.indent()
self.emitBegin('')
self._to_str_impl(schema['items'])
self.undent()
self.emit('],')
else:
self.emit('Unknown type! %s' % stype)
self.emitEnd('', '')
self.string = ''.join(self.value)
return self.string
def to_str(self, from_cache):
"""Prototype object based on the schema, in Python code with comments.
Args:
from_cache: callable(name, seen), Callable that retrieves an object
prototype for a schema with the given name. Seen is a list of schema
names already seen as we recursively descend the schema definition.
Returns:
Prototype object based on the schema, in Python code with comments.
The lines of the code will all be properly indented.
"""
self.from_cache = from_cache
return self._to_str_impl(self.schema)
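# A minimal usage sketch, not part of the original module: Schemas is constructed
# from a deserialized discovery document and can pretty print any schema named in
# its "schemas" section. The tiny discovery dict below is illustrative only.
def _example_schemas_usage():
  discovery = {
    'schemas': {
      'Foo': {
        'type': 'object',
        'properties': {
          'etag': {
            'type': 'string',
            'description': 'ETag of the collection.',
          },
        },
      },
    },
  }
  s = Schemas(discovery)
  # Returns the prototype object as Python source with inline comments.
  return s.prettyPrintByName('Foo')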
|
qiankunshe/sky_engine
|
refs/heads/master
|
sky/tools/webkitpy/common/net/buildbot/buildbot_unittest.py
|
24
|
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.net.buildbot import BuildBot, Builder, Build
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models import test_failures
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
class BuilderTest(unittest.TestCase):
def _mock_test_result(self, testname):
return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])
def _install_fetch_build(self, failure):
def _mock_fetch_build(build_number):
build = Build(
builder=self.builder,
build_number=build_number,
revision=build_number + 1000,
is_green=build_number < 4
)
return build
self.builder._fetch_build = _mock_fetch_build
def setUp(self):
self.buildbot = BuildBot()
self.builder = Builder(u"Test Builder \u2661", self.buildbot)
self._install_fetch_build(lambda build_number: ["test1", "test2"])
def test_latest_layout_test_results(self):
self.builder.fetch_layout_test_results = lambda results_url: LayoutTestResults(None)
self.builder.accumulated_results_url = lambda: "http://dummy_url.org"
self.assertTrue(self.builder.latest_layout_test_results())
def test_build_caching(self):
self.assertEqual(self.builder.build(10), self.builder.build(10))
def test_build_and_revision_for_filename(self):
expectations = {
"r47483 (1)/" : (47483, 1),
"r47483 (1).zip" : (47483, 1),
"random junk": None,
}
for filename, revision_and_build in expectations.items():
self.assertEqual(self.builder._revision_and_build_for_filename(filename), revision_and_build)
def test_file_info_list_to_revision_to_build_list(self):
file_info_list = [
{"filename": "r47483 (1)/"},
{"filename": "r47483 (1).zip"},
{"filename": "random junk"},
]
builds_and_revisions_list = [(47483, 1), (47483, 1)]
self.assertEqual(self.builder._file_info_list_to_revision_to_build_list(file_info_list), builds_and_revisions_list)
def test_fetch_build(self):
buildbot = BuildBot()
builder = Builder(u"Test Builder \u2661", buildbot)
def mock_fetch_build_dictionary(self, build_number):
build_dictionary = {
"sourceStamp": {
"revision": None, # revision=None means a trunk build started from the force-build button on the builder page.
},
"number": int(build_number),
# Intentionally missing the 'results' key, meaning it's a "pass" build.
}
return build_dictionary
buildbot._fetch_build_dictionary = mock_fetch_build_dictionary
self.assertIsNotNone(builder._fetch_build(1))
class BuildBotTest(unittest.TestCase):
_example_one_box_status = '''
<table>
<tr>
<td class="box"><a href="builders/Windows%20Debug%20%28Tests%29">Windows Debug (Tests)</a></td>
<td align="center" class="LastBuild box success"><a href="builders/Windows%20Debug%20%28Tests%29/builds/3693">47380</a><br />build<br />successful</td>
<td align="center" class="Activity building">building<br />ETA in<br />~ 14 mins<br />at 13:40</td>
<tr>
<td class="box"><a href="builders/SnowLeopard%20Intel%20Release">SnowLeopard Intel Release</a></td>
<td class="LastBuild box" >no build</td>
<td align="center" class="Activity building">building<br />< 1 min</td>
<tr>
<td class="box"><a href="builders/Qt%20Linux%20Release">Qt Linux Release</a></td>
<td align="center" class="LastBuild box failure"><a href="builders/Qt%20Linux%20Release/builds/654">47383</a><br />failed<br />compile-webkit</td>
<td align="center" class="Activity idle">idle<br />3 pending</td>
<tr>
<td class="box"><a href="builders/Qt%20Windows%2032-bit%20Debug">Qt Windows 32-bit Debug</a></td>
<td align="center" class="LastBuild box failure"><a href="builders/Qt%20Windows%2032-bit%20Debug/builds/2090">60563</a><br />failed<br />failed<br />slave<br />lost</td>
<td align="center" class="Activity building">building<br />ETA in<br />~ 5 mins<br />at 08:25</td>
</table>
'''
_expected_example_one_box_parsings = [
{
'is_green': True,
'build_number' : 3693,
'name': u'Windows Debug (Tests)',
'built_revision': 47380,
'activity': 'building',
'pending_builds': 0,
},
{
'is_green': False,
'build_number' : None,
'name': u'SnowLeopard Intel Release',
'built_revision': None,
'activity': 'building',
'pending_builds': 0,
},
{
'is_green': False,
'build_number' : 654,
'name': u'Qt Linux Release',
'built_revision': 47383,
'activity': 'idle',
'pending_builds': 3,
},
{
'is_green': True,
'build_number' : 2090,
'name': u'Qt Windows 32-bit Debug',
'built_revision': 60563,
'activity': 'building',
'pending_builds': 0,
},
]
def test_status_parsing(self):
buildbot = BuildBot()
soup = BeautifulSoup(self._example_one_box_status)
status_table = soup.find("table")
input_rows = status_table.findAll('tr')
for x in range(len(input_rows)):
status_row = input_rows[x]
expected_parsing = self._expected_example_one_box_parsings[x]
builder = buildbot._parse_builder_status_from_row(status_row)
# Make sure we aren't parsing more or less than we expect
self.assertEqual(builder.keys(), expected_parsing.keys())
for key, expected_value in expected_parsing.items():
self.assertEqual(builder[key], expected_value, ("Builder %d parse failure for key: %s: Actual='%s' Expected='%s'" % (x, key, builder[key], expected_value)))
def test_builder_with_name(self):
buildbot = BuildBot()
builder = buildbot.builder_with_name("Test Builder")
self.assertEqual(builder.name(), "Test Builder")
self.assertEqual(builder.url(), "http://build.webkit.org/builders/Test%20Builder")
self.assertEqual(builder.url_encoded_name(), "Test%20Builder")
self.assertEqual(builder.results_url(), "http://build.webkit.org/results/Test%20Builder")
# Override _fetch_build_dictionary function to not touch the network.
def mock_fetch_build_dictionary(self, build_number):
build_dictionary = {
"sourceStamp": {
"revision" : 2 * build_number,
},
"number" : int(build_number),
"results" : build_number % 2, # 0 means pass
}
return build_dictionary
buildbot._fetch_build_dictionary = mock_fetch_build_dictionary
build = builder.build(10)
self.assertEqual(build.builder(), builder)
self.assertEqual(build.url(), "http://build.webkit.org/builders/Test%20Builder/builds/10")
self.assertEqual(build.results_url(), "http://build.webkit.org/results/Test%20Builder/r20%20%2810%29")
self.assertEqual(build.revision(), 20)
self.assertTrue(build.is_green())
build = build.previous_build()
self.assertEqual(build.builder(), builder)
self.assertEqual(build.url(), "http://build.webkit.org/builders/Test%20Builder/builds/9")
self.assertEqual(build.results_url(), "http://build.webkit.org/results/Test%20Builder/r18%20%289%29")
self.assertEqual(build.revision(), 18)
self.assertFalse(build.is_green())
self.assertIsNone(builder.build(None))
_example_directory_listing = '''
<h1>Directory listing for /results/SnowLeopard Intel Leaks/</h1>
<table>
<tr class="alt">
<th>Filename</th>
<th>Size</th>
<th>Content type</th>
<th>Content encoding</th>
</tr>
<tr class="directory ">
<td><a href="r47483%20%281%29/"><b>r47483 (1)/</b></a></td>
<td><b></b></td>
<td><b>[Directory]</b></td>
<td><b></b></td>
</tr>
<tr class="file alt">
<td><a href="r47484%20%282%29.zip">r47484 (2).zip</a></td>
<td>89K</td>
<td>[application/zip]</td>
<td></td>
</tr>
'''
_expected_files = [
{
"filename" : "r47483 (1)/",
"size" : "",
"type" : "[Directory]",
"encoding" : "",
},
{
"filename" : "r47484 (2).zip",
"size" : "89K",
"type" : "[application/zip]",
"encoding" : "",
},
]
def test_parse_build_to_revision_map(self):
buildbot = BuildBot()
files = buildbot._parse_twisted_directory_listing(self._example_directory_listing)
self.assertEqual(self._expected_files, files)
_fake_builder_page = '''
<body>
<div class="content">
<h1>Some Builder</h1>
<p>(<a href="../waterfall?show=Some Builder">view in waterfall</a>)</p>
<div class="column">
<h2>Recent Builds:</h2>
<table class="info">
<tr>
<th>Time</th>
<th>Revision</th>
<th>Result</th> <th>Build #</th>
<th>Info</th>
</tr>
<tr class="alt">
<td>Jan 10 15:49</td>
<td><span class="revision" title="Revision 104643"><a href="http://trac.webkit.org/changeset/104643">104643</a></span></td>
<td class="success">failure</td> <td><a href=".../37604">#37604</a></td>
<td class="left">Build successful</td>
</tr>
<tr class="">
<td>Jan 10 15:32</td>
<td><span class="revision" title="Revision 104636"><a href="http://trac.webkit.org/changeset/104636">104636</a></span></td>
<td class="success">failure</td> <td><a href=".../37603">#37603</a></td>
<td class="left">Build successful</td>
</tr>
<tr class="alt">
<td>Jan 10 15:18</td>
<td><span class="revision" title="Revision 104635"><a href="http://trac.webkit.org/changeset/104635">104635</a></span></td>
<td class="success">success</td> <td><a href=".../37602">#37602</a></td>
<td class="left">Build successful</td>
</tr>
<tr class="">
<td>Jan 10 14:51</td>
<td><span class="revision" title="Revision 104633"><a href="http://trac.webkit.org/changeset/104633">104633</a></span></td>
<td class="failure">failure</td> <td><a href=".../37601">#37601</a></td>
<td class="left">Failed compile-webkit</td>
</tr>
</table>
</body>'''
_fake_builder_page_without_success = '''
<body>
<table>
<tr class="alt">
<td>Jan 10 15:49</td>
<td><span class="revision" title="Revision 104643"><a href="http://trac.webkit.org/changeset/104643">104643</a></span></td>
<td class="success">failure</td>
</tr>
<tr class="">
<td>Jan 10 15:32</td>
<td><span class="revision" title="Revision 104636"><a href="http://trac.webkit.org/changeset/104636">104636</a></span></td>
<td class="success">failure</td>
</tr>
<tr class="alt">
<td>Jan 10 15:18</td>
<td><span class="revision" title="Revision 104635"><a href="http://trac.webkit.org/changeset/104635">104635</a></span></td>
<td class="success">failure</td>
</tr>
<tr class="">
<td>Jan 10 11:58</td>
<td><span class="revision" title="Revision ??"><a href="http://trac.webkit.org/changeset/%3F%3F">??</a></span></td>
<td class="retry">retry</td>
</tr>
<tr class="">
<td>Jan 10 14:51</td>
<td><span class="revision" title="Revision 104633"><a href="http://trac.webkit.org/changeset/104633">104633</a></span></td>
<td class="failure">failure</td>
</tr>
</table>
</body>'''
def test_revisions_for_builder(self):
buildbot = BuildBot()
buildbot._fetch_builder_page = lambda builder: builder.page
builder_with_success = Builder('Some builder', None)
builder_with_success.page = self._fake_builder_page
self.assertEqual(buildbot._revisions_for_builder(builder_with_success), [(104643, False), (104636, False), (104635, True), (104633, False)])
builder_without_success = Builder('Some builder', None)
builder_without_success.page = self._fake_builder_page_without_success
self.assertEqual(buildbot._revisions_for_builder(builder_without_success), [(104643, False), (104636, False), (104635, False), (104633, False)])
def test_find_green_revision(self):
buildbot = BuildBot()
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (3, True)],
'Builder 2': [(1, True), (3, False)],
'Builder 3': [(1, True), (3, True)],
}), 1)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, False), (3, True)],
'Builder 2': [(1, True), (3, True)],
'Builder 3': [(1, True), (3, True)],
}), 3)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (2, True)],
'Builder 2': [(1, False), (2, True), (3, True)],
'Builder 3': [(1, True), (3, True)],
}), None)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (2, True)],
'Builder 2': [(1, True), (2, True), (3, True)],
'Builder 3': [(1, True), (3, True)],
}), 2)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, False), (2, True)],
'Builder 2': [(1, True), (3, True)],
'Builder 3': [(1, True), (3, True)],
}), None)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (3, True)],
'Builder 2': [(1, False), (2, True), (3, True), (4, True)],
'Builder 3': [(2, True), (4, True)],
}), 3)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (3, True)],
'Builder 2': [(1, False), (2, True), (3, True), (4, False)],
'Builder 3': [(2, True), (4, True)],
}), None)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (3, True)],
'Builder 2': [(1, False), (2, True), (3, True), (4, False)],
'Builder 3': [(2, True), (3, True), (4, True)],
}), 3)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (2, True)],
'Builder 2': [],
'Builder 3': [(1, True), (2, True)],
}), None)
self.assertEqual(buildbot._find_green_revision({
'Builder 1': [(1, True), (3, False), (5, True), (10, True), (12, False)],
'Builder 2': [(1, True), (3, False), (7, True), (9, True), (12, False)],
'Builder 3': [(1, True), (3, True), (7, True), (11, False), (12, True)],
}), 7)
def _fetch_build(self, build_number):
if build_number == 5:
return "correct build"
return "wrong build"
def _fetch_revision_to_build_map(self):
return {'r5': 5, 'r2': 2, 'r3': 3}
def test_latest_cached_build(self):
b = Builder('builder', BuildBot())
b._fetch_build = self._fetch_build
b._fetch_revision_to_build_map = self._fetch_revision_to_build_map
self.assertEqual("correct build", b.latest_cached_build())
def results_url(self):
return "some-url"
def test_results_zip_url(self):
b = Build(None, 123, 123, False)
b.results_url = self.results_url
self.assertEqual("some-url.zip", b.results_zip_url())
|
russellb/powerline
|
refs/heads/develop
|
powerline/lib/debug.py
|
35
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import gc
import sys
from types import FrameType
from itertools import chain
# From http://code.activestate.com/recipes/523004-find-cyclical-references/
def print_cycles(objects, outstream=sys.stdout, show_progress=False):
'''Find reference cycles
:param list objects:
A list of objects to find cycles in. It is often useful to pass in
gc.garbage to find the cycles that are preventing some objects from
being garbage collected.
:param file outstream:
The stream for output.
:param bool show_progress:
If True, print the number of objects reached as they are found.
'''
def print_path(path):
for i, step in enumerate(path):
# next “wraps around”
next = path[(i + 1) % len(path)]
outstream.write(' %s -- ' % str(type(step)))
written = False
if isinstance(step, dict):
for key, val in step.items():
if val is next:
outstream.write('[%s]' % repr(key))
written = True
break
if key is next:
outstream.write('[key] = %s' % repr(val))
written = True
break
elif isinstance(step, (list, tuple)):
for i, item in enumerate(step):
if item is next:
outstream.write('[%d]' % i)
written = True
elif getattr(type(step), '__getattribute__', None) in (object.__getattribute__, type.__getattribute__):
for attr in chain(dir(step), getattr(step, '__dict__', ())):
if getattr(step, attr, None) is next:
try:
outstream.write('%r.%s' % (step, attr))
except TypeError:
outstream.write('.%s' % (attr,))
written = True
break
if not written:
outstream.write(repr(step))
outstream.write(' ->\n')
outstream.write('\n')
def recurse(obj, start, all, current_path):
if show_progress:
outstream.write('%d\r' % len(all))
all[id(obj)] = None
referents = gc.get_referents(obj)
for referent in referents:
# If we’ve found our way back to the start, this is
# a cycle, so print it out
if referent is start:
try:
outstream.write('Cyclic reference: %r\n' % referent)
except TypeError:
try:
outstream.write('Cyclic reference: %i (%r)\n' % (id(referent), type(referent)))
except TypeError:
outstream.write('Cyclic reference: %i\n' % id(referent))
print_path(current_path)
# Don’t go back through the original list of objects, or
# through temporary references to the object, since those
# are just an artifact of the cycle detector itself.
elif referent is objects or isinstance(referent, FrameType):
continue
# We haven’t seen this object before, so recurse
elif id(referent) not in all:
recurse(referent, start, all, current_path + (obj,))
for obj in objects:
# We are not interested in non-powerline cyclic references
try:
if not type(obj).__module__.startswith('powerline'):
continue
except AttributeError:
continue
recurse(obj, obj, {}, ())
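# Illustrative usage sketch (not part of the original module): enable
# gc.DEBUG_SAVEALL so unreachable objects are kept in gc.garbage, force a
# collection, then report any cycles among powerline objects.
if __name__ == '__main__':
    gc.set_debug(gc.DEBUG_SAVEALL)
    gc.collect()
    print_cycles(gc.garbage)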
|
luxnovalabs/enjigo_door
|
refs/heads/master
|
web_interface/django/contrib/formtools/tests/urls.py
|
195
|
"""
This is a URLconf to be loaded by tests.py. Add any URLs needed for tests only.
"""
from __future__ import absolute_import
from django.conf.urls import patterns, url
from django.contrib.formtools.tests import TestFormPreview, TestWizardClass
from django.contrib.formtools.tests.forms import (ContactWizard, Page1, Page2,
Page3, TestForm, WizardPageOneForm, WizardPageTwoForm, WizardPageThreeForm)
urlpatterns = patterns('',
url(r'^preview/', TestFormPreview(TestForm)),
url(r'^wizard1/$', TestWizardClass(
[WizardPageOneForm, WizardPageTwoForm, WizardPageThreeForm])),
url(r'^wizard2/$', ContactWizard([Page1, Page2, Page3])),
)
|
cngo-github/nupic
|
refs/heads/master
|
examples/opf/clients/hotgym/anomaly/one_gym/nupic_anomaly_output.py
|
49
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Provides two classes with the same signature for writing data out of NuPIC
models.
(This is a component of the One Hot Gym Anomaly Tutorial.)
"""
import csv
from collections import deque
from abc import ABCMeta, abstractmethod
from nupic.algorithms import anomaly_likelihood
# Try to import matplotlib, but we don't have to.
try:
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.dates import date2num, DateFormatter
except ImportError:
pass
WINDOW = 300
HIGHLIGHT_ALPHA = 0.3
ANOMALY_HIGHLIGHT_COLOR = 'red'
WEEKEND_HIGHLIGHT_COLOR = 'yellow'
ANOMALY_THRESHOLD = 0.9
class NuPICOutput(object):
__metaclass__ = ABCMeta
def __init__(self, name):
self.name = name
self.anomalyLikelihoodHelper = anomaly_likelihood.AnomalyLikelihood()
@abstractmethod
def write(self, timestamp, value, predicted, anomalyScore):
pass
@abstractmethod
def close(self):
pass
class NuPICFileOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICFileOutput, self).__init__(*args, **kwargs)
self.outputFiles = []
self.outputWriters = []
self.lineCount = 0
headerRow = [
'timestamp', 'kw_energy_consumption', 'prediction',
'anomaly_score', 'anomaly_likelihood'
]
outputFileName = "%s_out.csv" % self.name
print "Preparing to output %s data to %s" % (self.name, outputFileName)
self.outputFile = open(outputFileName, "w")
self.outputWriter = csv.writer(self.outputFile)
self.outputWriter.writerow(headerRow)
def write(self, timestamp, value, predicted, anomalyScore):
if timestamp is not None:
anomalyLikelihood = self.anomalyLikelihoodHelper.anomalyProbability(
value, anomalyScore, timestamp
)
outputRow = [timestamp, value, predicted, anomalyScore, anomalyLikelihood]
self.outputWriter.writerow(outputRow)
self.lineCount += 1
def close(self):
self.outputFile.close()
print "Done. Wrote %i data lines to %s." % (self.lineCount, self.name)
def extractWeekendHighlights(dates):
weekendsOut = []
weekendSearch = [5, 6]
weekendStart = None
for i, date in enumerate(dates):
if date.weekday() in weekendSearch:
if weekendStart is None:
# Mark start of weekend
weekendStart = i
else:
if weekendStart is not None:
# Mark end of weekend
weekendsOut.append((
weekendStart, i, WEEKEND_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA
))
weekendStart = None
# Cap it off if we're still in the middle of a weekend
if weekendStart is not None:
weekendsOut.append((
weekendStart, len(dates)-1, WEEKEND_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA
))
return weekendsOut
def extractAnomalyIndices(anomalyLikelihood):
anomaliesOut = []
anomalyStart = None
for i, likelihood in enumerate(anomalyLikelihood):
if likelihood >= ANOMALY_THRESHOLD:
if anomalyStart is None:
# Mark start of anomaly
anomalyStart = i
else:
if anomalyStart is not None:
# Mark end of anomaly
anomaliesOut.append((
anomalyStart, i, ANOMALY_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA
))
anomalyStart = None
# Cap it off if we're still in the middle of an anomaly
if anomalyStart is not None:
anomaliesOut.append((
anomalyStart, len(anomalyLikelihood)-1,
ANOMALY_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA
))
return anomaliesOut
class NuPICPlotOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICPlotOutput, self).__init__(*args, **kwargs)
# Turn matplotlib interactive mode on.
plt.ion()
self.dates = []
self.convertedDates = []
self.value = []
self.allValues = []
self.predicted = []
self.anomalyScore = []
self.anomalyLikelihood = []
self.actualLine = None
self.predictedLine = None
self.anomalyScoreLine = None
self.anomalyLikelihoodLine = None
self.linesInitialized = False
self._chartHighlights = []
fig = plt.figure(figsize=(16, 10))
gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
self._mainGraph = fig.add_subplot(gs[0, 0])
plt.title(self.name)
plt.ylabel('KW Energy Consumption')
plt.xlabel('Date')
self._anomalyGraph = fig.add_subplot(gs[1])
plt.ylabel('Percentage')
plt.xlabel('Date')
# Maximizes window
mng = plt.get_current_fig_manager()
mng.resize(*mng.window.maxsize())
plt.tight_layout()
def initializeLines(self, timestamp):
print "initializing %s" % self.name
anomalyRange = (0.0, 1.0)
self.dates = deque([timestamp] * WINDOW, maxlen=WINDOW)
self.convertedDates = deque(
[date2num(date) for date in self.dates], maxlen=WINDOW
)
self.value = deque([0.0] * WINDOW, maxlen=WINDOW)
self.predicted = deque([0.0] * WINDOW, maxlen=WINDOW)
self.anomalyScore = deque([0.0] * WINDOW, maxlen=WINDOW)
self.anomalyLikelihood = deque([0.0] * WINDOW, maxlen=WINDOW)
actualPlot, = self._mainGraph.plot(self.dates, self.value)
self.actualLine = actualPlot
predictedPlot, = self._mainGraph.plot(self.dates, self.predicted)
self.predictedLine = predictedPlot
self._mainGraph.legend(tuple(['actual', 'predicted']), loc=3)
anomalyScorePlot, = self._anomalyGraph.plot(
self.dates, self.anomalyScore, 'm'
)
anomalyScorePlot.axes.set_ylim(anomalyRange)
self.anomalyScoreLine = anomalyScorePlot
anomalyLikelihoodPlot, = self._anomalyGraph.plot(
self.dates, self.anomalyScore, 'r'
)
anomalyLikelihoodPlot.axes.set_ylim(anomalyRange)
self.anomalyLikelihoodLine = anomalyLikelihoodPlot
self._anomalyGraph.legend(
tuple(['anomaly score', 'anomaly likelihood']), loc=3
)
dateFormatter = DateFormatter('%m/%d %H:%M')
self._mainGraph.xaxis.set_major_formatter(dateFormatter)
self._anomalyGraph.xaxis.set_major_formatter(dateFormatter)
self._mainGraph.relim()
self._mainGraph.autoscale_view(True, True, True)
self.linesInitialized = True
def highlightChart(self, highlights, chart):
for highlight in highlights:
# Each highlight contains [start-index, stop-index, color, alpha]
self._chartHighlights.append(chart.axvspan(
self.convertedDates[highlight[0]], self.convertedDates[highlight[1]],
color=highlight[2], alpha=highlight[3]
))
def write(self, timestamp, value, predicted, anomalyScore):
# We need the first timestamp to initialize the lines at the right X value,
# so do that check first.
if not self.linesInitialized:
self.initializeLines(timestamp)
anomalyLikelihood = self.anomalyLikelihoodHelper.anomalyProbability(
value, anomalyScore, timestamp
)
self.dates.append(timestamp)
self.convertedDates.append(date2num(timestamp))
self.value.append(value)
self.allValues.append(value)
self.predicted.append(predicted)
self.anomalyScore.append(anomalyScore)
self.anomalyLikelihood.append(anomalyLikelihood)
# Update main chart data
self.actualLine.set_xdata(self.convertedDates)
self.actualLine.set_ydata(self.value)
self.predictedLine.set_xdata(self.convertedDates)
self.predictedLine.set_ydata(self.predicted)
# Update anomaly chart data
self.anomalyScoreLine.set_xdata(self.convertedDates)
self.anomalyScoreLine.set_ydata(self.anomalyScore)
self.anomalyLikelihoodLine.set_xdata(self.convertedDates)
self.anomalyLikelihoodLine.set_ydata(self.anomalyLikelihood)
# Remove previous highlighted regions
for poly in self._chartHighlights:
poly.remove()
self._chartHighlights = []
weekends = extractWeekendHighlights(self.dates)
anomalies = extractAnomalyIndices(self.anomalyLikelihood)
# Highlight weekends in main chart
self.highlightChart(weekends, self._mainGraph)
# Highlight anomalies in anomaly chart
self.highlightChart(anomalies, self._anomalyGraph)
maxValue = max(self.allValues)
self._mainGraph.relim()
self._mainGraph.axes.set_ylim(0, maxValue + (maxValue * 0.02))
self._mainGraph.relim()
self._mainGraph.autoscale_view(True, scaley=False)
self._anomalyGraph.relim()
self._anomalyGraph.autoscale_view(True, True, True)
plt.draw()
def close(self):
plt.ioff()
plt.show()
NuPICOutput.register(NuPICFileOutput)
NuPICOutput.register(NuPICPlotOutput)
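# Illustrative usage sketch (not part of the tutorial code; "results" is a
# hypothetical iterable of (timestamp, value, predicted, anomalyScore) rows
# produced by a NuPIC model loop):
#
#   output = NuPICFileOutput("my-gym")
#   for timestamp, value, predicted, anomalyScore in results:
#       output.write(timestamp, value, predicted, anomalyScore)
#   output.close()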
|
balanced/PyXB
|
refs/heads/master
|
pyxb/namespace/archive.py
|
3
|
# -*- coding: utf-8 -*-
# Copyright 2009-2013, Peter A. Bigot
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain a
# copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Classes and global objects related to archiving U{XML
Namespaces<http://www.w3.org/TR/2006/REC-xml-names-20060816/index.html>}."""
import pyxb
import os
import os.path
import pyxb.utils.utility
import logging
_log = logging.getLogger(__name__)
PathEnvironmentVariable = 'PYXB_ARCHIVE_PATH'
"""Environment variable from which default path to pre-loaded namespaces is
read. The value should be a colon-separated list of absolute paths. The
character C{&} at the start of a member of the list is replaced by the path to
the directory where the C{pyxb} modules are found, including a trailing C{/}.
For example, use C{&pyxb/bundles//} to enable search of any archive bundled
with PyXB.
@note: If you put a path separator between C{&} and the following path, this
will cause the substitution to be ignored."""
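# Illustrative value (not from the original source): search a local directory
# first, then any archives bundled with PyXB:
#   PYXB_ARCHIVE_PATH=/home/user/pyxb-archives:&pyxb/bundles//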
DefaultArchivePrefix = os.path.realpath(os.path.join(os.path.dirname( __file__), '../..'))
"""The default archive prefix, substituted for C{&} in C{PYXB_ARCHIVE_PATH}."""
def GetArchivePath ():
"""Return the archive path as defined by the L{PathEnvironmentVariable},
or C{None} if that variable is not defined."""
return os.environ.get(PathEnvironmentVariable)
# Stuff required for pickling
import cPickle as pickle
import re
class NamespaceArchive (object):
"""Represent a file from which one or more namespaces can be read, or to
which they will be written."""
# A code used to identify the format of the archive, so we don't
# mis-interpret its contents.
# YYYYMMDDHHMM
__PickleFormat = '200907190858'
@classmethod
def _AnonymousCategory (cls):
"""The category name to use when storing references to anonymous type
definitions. For example, attribute definitions defined within an
attribute use in a model group definition that can be referenced from a
different namespace."""
return cls.__AnonymousCategory
__AnonymousCategory = '_anonymousTypeDefinition'
@classmethod
def PicklingArchive (cls):
"""Return a reference to a set specifying the namespace instances that
are being archived.
This is needed to determine whether a component must be serialized as
a reference."""
# NB: Use root class explicitly. If we use cls, when this is invoked
# by subclasses it gets mangled using the subclass name so the one
# defined in this class is not found
return NamespaceArchive.__PicklingArchive
# Class variable recording the namespace that is currently being
# pickled. Used to prevent storing components that belong to
# other namespaces. Should be None unless within an invocation of
# SaveToFile.
__PicklingArchive = None
__NamespaceArchives = None
"""A mapping from generation UID to NamespaceArchive instances."""
def discard (self):
"""Remove this archive from the set of available archives.
This is invoked when an archive contains a namespace that the user has
specified should not be loaded."""
del self.__NamespaceArchives[self.generationUID()]
for ns in self.__namespaces:
ns._removeArchive(self)
@classmethod
def __GetArchiveInstance (cls, archive_file, stage=None):
"""Return a L{NamespaceArchive} instance associated with the given file.
To the extent possible, the same file accessed through different paths
returns the same L{NamespaceArchive} instance.
"""
nsa = NamespaceArchive(archive_path=archive_file, stage=cls._STAGE_uid)
rv = cls.__NamespaceArchives.get(nsa.generationUID(), nsa)
if rv == nsa:
cls.__NamespaceArchives[rv.generationUID()] = rv
rv._readToStage(stage)
return rv
__ArchivePattern_re = re.compile('\.wxs$')
@classmethod
def PreLoadArchives (cls, archive_path=None, reset=False):
"""Scan for available archives, associating them with namespaces.
This only validates potential archive contents; it does not load
namespace data from the archives.
@keyword archive_path: A list of files or directories in which
namespace archives can be found. The entries are separated by
os.pathsep, which is a colon on POSIX platforms and a semi-colon on
Windows. See L{PathEnvironmentVariable}. Defaults to
L{GetArchivePath()}. If not defaulted, C{reset} will be forced to
C{True}. For any directory in the path, all files ending with
C{.wxs} are examined.
@keyword reset: If C{False} (default), the most recently read set of
archives is returned; if C{True}, the archive path is re-scanned and the
namespace associations validated.
"""
from pyxb.namespace import builtin
reset = reset or (archive_path is not None) or (cls.__NamespaceArchives is None)
if reset:
# Get a list of pre-existing archives, initializing the map if
# this is the first time through.
if cls.__NamespaceArchives is None:
cls.__NamespaceArchives = { }
existing_archives = set(cls.__NamespaceArchives.itervalues())
archive_set = set()
# Ensure we have an archive path. If not, don't do anything.
if archive_path is None:
archive_path = GetArchivePath()
if archive_path is not None:
# Get archive instances for everything in the archive path
candidate_files = pyxb.utils.utility.GetMatchingFiles(archive_path, cls.__ArchivePattern_re,
default_path_wildcard='+', default_path=GetArchivePath(),
prefix_pattern='&', prefix_substituend=DefaultArchivePrefix)
for afn in candidate_files:
try:
nsa = cls.__GetArchiveInstance(afn, stage=cls._STAGE_readModules)
archive_set.add(nsa)
except pickle.UnpicklingError:
_log.exception('Cannot unpickle archive %s', afn)
except pyxb.NamespaceArchiveError:
_log.exception('Cannot process archive %s', afn)
# Do this for two reasons: first, to get an iterable that won't
# cause problems when we remove unresolvable archives from
# archive_set; and second to aid with forced dependency inversion
# testing
ordered_archives = sorted(list(archive_set), key=lambda _a: _a.archivePath())
ordered_archives.reverse()
# Create a graph that identifies dependencies between the archives
archive_map = { }
for a in archive_set:
archive_map[a.generationUID()] = a
archive_graph = pyxb.utils.utility.Graph()
for a in ordered_archives:
prereqs = a._unsatisfiedModulePrerequisites()
if 0 < len(prereqs):
for p in prereqs:
if builtin.BuiltInObjectUID == p:
continue
da = archive_map.get(p)
if da is None:
_log.warning('%s depends on unavailable archive %s', a, p)
archive_set.remove(a)
else:
archive_graph.addEdge(a, da)
else:
archive_graph.addRoot(a)
# Verify that there are no dependency loops.
archive_scc = archive_graph.sccOrder()
for scc in archive_scc:
if 1 < len(scc):
raise pyxb.LogicError("Cycle in archive dependencies. How'd you do that?\n " + "\n ".join([ _a.archivePath() for _a in scc ]))
archive = scc[0]
if not (archive in archive_set):
archive.discard()
existing_archives.remove(archive)
continue
#archive._readToStage(cls._STAGE_COMPLETE)
# Discard any archives that we used to know about but now aren't
# supposed to. @todo make this friendlier in the case of archives
# we've already incorporated.
for archive in existing_archives.difference(archive_set):
_log.info('Discarding excluded archive %s', archive)
archive.discard()
def archivePath (self):
"""Path to the file in which this namespace archive is stored."""
return self.__archivePath
__archivePath = None
def generationUID (self):
"""The unique identifier for the generation that produced this archive."""
return self.__generationUID
__generationUID = None
def isLoadable (self):
"""Return C{True} iff it is permissible to load the archive.
Archives created for output cannot be loaded."""
return self.__isLoadable
__isLoadable = None
def __locateModuleRecords (self):
self.__moduleRecords = set()
namespaces = set()
for ns in pyxb.namespace.utility.AvailableNamespaces():
# @todo allow these; right now it's usually the XML
# namespace and we're not prepared to reconcile
# redefinitions of those components.
if ns.isUndeclaredNamespace():
continue
mr = ns.lookupModuleRecordByUID(self.generationUID())
if mr is not None:
namespaces.add(ns)
mr.prepareForArchive(self)
self.__moduleRecords.add(mr)
self.__namespaces.update(namespaces)
def moduleRecords (self):
"""Return the set of L{module records <ModuleRecord>} stored in this
archive.
Each module record represents the components of one namespace as captured by
the generation that produced this archive."""
return self.__moduleRecords
__moduleRecords = None
@classmethod
def ForPath (cls, archive_file):
"""Return the L{NamespaceArchive} instance that can be found at the
given path."""
return cls.__GetArchiveInstance(archive_file)
# States in the finite automaton that is used to read archive contents.
_STAGE_UNOPENED = 0 # Haven't even checked for existence
_STAGE_uid = 1 # Verified archive exists, obtained generation UID from it
_STAGE_readModules = 2 # Read module records from archive, which includes UID dependences
_STAGE_validateModules = 3 # Verified pre-requisites for module loading
_STAGE_readComponents = 4 # Extracted components from archive and integrated into namespaces
_STAGE_COMPLETE = _STAGE_readComponents
def _stage (self):
return self.__stage
__stage = None
def __init__ (self, archive_path=None, generation_uid=None, loadable=True, stage=None):
"""Create a new namespace archive.
If C{namespaces} is given, this is an output archive.
If C{namespaces} is absent, this is an input archive.
@raise IOError: error attempting to read the archive file
@raise pickle.UnpicklingError: something is wrong with the format of the library
"""
self.__namespaces = set()
if generation_uid is not None:
if archive_path:
raise pyxb.LogicError('NamespaceArchive: cannot define both namespaces and archive_path')
self.__generationUID = generation_uid
self.__locateModuleRecords()
elif archive_path is not None:
if generation_uid is not None:
raise pyxb.LogicError('NamespaceArchive: cannot provide generation_uid with archive_path')
self.__archivePath = archive_path
self.__stage = self._STAGE_UNOPENED
self.__isLoadable = loadable
if self.__isLoadable:
if stage is None:
stage = self._STAGE_readModules
self._readToStage(stage)
else:
pass
def add (self, namespace):
"""Add the given namespace to the set that is to be stored in this archive."""
if namespace.isAbsentNamespace():
raise pyxb.NamespaceArchiveError('Cannot archive absent namespace')
self.__namespaces.add(namespace)
def update (self, namespace_set):
"""Add the given namespaces to the set that is to be stored in this archive."""
[ self.add(_ns) for _ns in namespace_set ]
def namespaces (self):
"""Set of namespaces that can be read from this archive."""
return self.__namespaces
__namespaces = None
def __createPickler (self, output):
if isinstance(output, basestring):
output = open(output, 'wb')
pickler = pickle.Pickler(output, -1)
# The format of the archive
pickler.dump(NamespaceArchive.__PickleFormat)
# The UID for the set
assert self.generationUID() is not None
pickler.dump(self.generationUID())
return pickler
def __createUnpickler (self):
unpickler = pickle.Unpickler(open(self.__archivePath, 'rb'))
fmt = unpickler.load()
if self.__PickleFormat != fmt:
raise pyxb.NamespaceArchiveError('Archive format is %s, require %s' % (fmt, self.__PickleFormat))
self.__generationUID = unpickler.load()
return unpickler
def __readModules (self, unpickler):
mrs = unpickler.load()
assert isinstance(mrs, set), 'Expected set got %s from %s' % (type(mrs), self.archivePath())
if self.__moduleRecords is None:
for mr in mrs.copy():
mr2 = mr.namespace().lookupModuleRecordByUID(mr.generationUID())
if mr2 is not None:
mr2._setFromOther(mr, self)
mrs.remove(mr)
self.__moduleRecords = set()
assert 0 == len(self.__namespaces)
for mr in mrs:
mr._setArchive(self)
ns = mr.namespace()
ns.addModuleRecord(mr)
self.__namespaces.add(ns)
self.__moduleRecords.add(mr)
else:
# Verify the archive still has what was in it when we created this.
for mr in mrs:
mr2 = mr.namespace().lookupModuleRecordByUID(mr.generationUID())
if not (mr2 in self.__moduleRecords):
raise pyxb.NamespaceArchiveError('Lost module record %s %s from %s' % (mr.namespace(), mr.generationUID(), self.archivePath()))
def _unsatisfiedModulePrerequisites (self):
prereq_uids = set()
for mr in self.__moduleRecords:
prereq_uids.update(mr.dependsOnExternal())
return prereq_uids
def __validatePrerequisites (self, stage):
from pyxb.namespace import builtin
prereq_uids = self._unsatisfiedModulePrerequisites()
for uid in prereq_uids:
if builtin.BuiltInObjectUID == uid:
continue
depends_on = self.__NamespaceArchives.get(uid)
if depends_on is None:
raise pyxb.NamespaceArchiveError('%s: archive depends on unavailable archive %s' % (self.archivePath(), uid))
depends_on._readToStage(stage)
def __validateModules (self):
self.__validatePrerequisites(self._STAGE_validateModules)
for mr in self.__moduleRecords:
ns = mr.namespace()
for base_uid in mr.dependsOnExternal():
xmr = ns.lookupModuleRecordByUID(base_uid)
if xmr is None:
raise pyxb.NamespaceArchiveError('Module %s depends on external module %s, not available in archive path' % (mr.generationUID(), base_uid))
if not xmr.isIncorporated():
_log.info('Need to incorporate data from %s', xmr)
else:
_log.info('Have required base data %s', xmr)
for origin in mr.origins():
for (cat, names) in origin.categoryMembers().iteritems():
if not (cat in ns.categories()):
continue
cross_objects = names.intersection(ns.categoryMap(cat).iterkeys())
if 0 < len(cross_objects):
raise pyxb.NamespaceArchiveError('Archive %s namespace %s module %s origin %s archive/active conflict on category %s: %s' % (self.__archivePath, ns, mr, origin, cat, " ".join(cross_objects)))
_log.info('%s no conflicts on %d names', cat, len(names))
def __readComponentSet (self, unpickler):
self.__validatePrerequisites(self._STAGE_readComponents)
for n in range(len(self.__moduleRecords)):
ns = unpickler.load()
mr = ns.lookupModuleRecordByUID(self.generationUID())
assert mr in self.__moduleRecords
assert not mr.isIncorporated()
objects = unpickler.load()
mr._loadCategoryObjects(objects)
__unpickler = None
def _readToStage (self, stage):
if self.__stage is None:
raise pyxb.NamespaceArchiveError('Attempt to read from invalid archive %s' % (self,))
try:
while self.__stage < stage:
if self.__stage < self._STAGE_uid:
self.__unpickler = self.__createUnpickler()
self.__stage = self._STAGE_uid
continue
if self.__stage < self._STAGE_readModules:
assert self.__unpickler is not None
self.__readModules(self.__unpickler)
self.__stage = self._STAGE_readModules
continue
if self.__stage < self._STAGE_validateModules:
self.__validateModules()
self.__stage = self._STAGE_validateModules
continue
if self.__stage < self._STAGE_readComponents:
assert self.__unpickler is not None
self.__stage = self._STAGE_readComponents
self.__readComponentSet(self.__unpickler)
self.__unpickler = None
continue
raise pyxb.LogicError('Too many stages (at %s, want %s)' % (self.__stage, stage))
except:
self.__stage = None
self.__unpickler = None
raise
def readNamespaces (self):
"""Read all the components from this archive, integrating them into
their respective namespaces."""
self._readToStage(self._STAGE_COMPLETE)
def writeNamespaces (self, output):
"""Store the namespaces into the archive.
@param output: An instance substitutable for a writable file, or the
name of a file to write to.
"""
import sys
assert NamespaceArchive.__PicklingArchive is None
NamespaceArchive.__PicklingArchive = self
assert self.__moduleRecords is not None
# Recalculate the record/object associations: we didn't assign
# anonymous names to the indeterminate scope objects because they
# weren't needed for bindings, but they are needed in the archive.
for mr in self.__moduleRecords:
mr.namespace()._associateOrigins(mr)
try:
# See http://bugs.python.org/issue3338
recursion_limit = sys.getrecursionlimit()
sys.setrecursionlimit(10 * recursion_limit)
pickler = self.__createPickler(output)
assert isinstance(self.__moduleRecords, set)
pickler.dump(self.__moduleRecords)
for mr in self.__moduleRecords:
pickler.dump(mr.namespace())
pickler.dump(mr.categoryObjects())
finally:
sys.setrecursionlimit(recursion_limit)
NamespaceArchive.__PicklingArchive = None
def __str__ (self):
archive_path = self.__archivePath
if archive_path is None:
archive_path = '??'
return 'NSArchive@%s' % (archive_path,)
class _ArchivableObject_mixin (pyxb.cscRoot):
"""Mix-in to any object that can be stored in a namespace within an archive."""
# Need to set this per category item
__objectOrigin = None
def _objectOrigin (self):
return self.__objectOrigin
def _setObjectOrigin (self, object_origin, override=False):
if (self.__objectOrigin is not None) and (not override):
if self.__objectOrigin != object_origin:
raise pyxb.LogicError('Inconsistent origins for object %s: %s %s' % (self, self.__objectOrigin, object_origin))
else:
self.__objectOrigin = object_origin
def _prepareForArchive (self, archive):
#assert self.__objectOrigin is not None
if self._objectOrigin() is not None:
return getattr(super(_ArchivableObject_mixin, self), '_prepareForArchive_csc', lambda *_args,**_kw: self)(self._objectOrigin().moduleRecord())
assert not isinstance(self, pyxb.xmlschema.structures._NamedComponent_mixin)
def _updateFromOther_csc (self, other):
return getattr(super(_ArchivableObject_mixin, self), '_updateFromOther_csc', lambda *_args,**_kw: self)(other)
def _updateFromOther (self, other):
"""Update this instance with additional information provided by the other instance.
This is used, for example, when a built-in type is already registered
in the namespace, but we've processed the corresponding schema and
have obtained more details."""
assert self != other
return self._updateFromOther_csc(other)
def _allowUpdateFromOther (self, other):
from pyxb.namespace import builtin
assert self._objectOrigin()
return builtin.BuiltInObjectUID == self._objectOrigin().generationUID()
class _NamespaceArchivable_mixin (pyxb.cscRoot):
"""Encapsulate the operations and data relevant to archiving namespaces.
This class mixes-in to L{pyxb.namespace.Namespace}"""
def _reset (self):
"""CSC extension to reset fields of a Namespace.
This one handles category-related data."""
getattr(super(_NamespaceArchivable_mixin, self), '_reset', lambda *args, **kw: None)()
self.__loadedFromArchive = None
self.__wroteToArchive = None
self.__active = False
self.__moduleRecordMap = {}
def _loadedFromArchive (self):
return self.__loadedFromArchive
__wroteToArchive = None
__loadedFromArchive = None
def isActive (self, empty_inactive=False):
if self.__isActive and empty_inactive:
for (ct, cm) in self._categoryMap().iteritems():
if 0 < len(cm):
return True
return False
return self.__isActive
def _activate (self):
self.__isActive = True
__isActive = None
def __init__ (self, *args, **kw):
super(_NamespaceArchivable_mixin, self).__init__(*args, **kw)
def _setLoadedFromArchive (self, archive):
self.__loadedFromArchive = archive
self._activate()
def _setWroteToArchive (self, archive):
self.__wroteToArchive = archive
def _removeArchive (self, archive):
# Yes, I do want this to raise KeyError if the archive is not present
mr = self.__moduleRecordMap[archive.generationUID()]
assert not mr.isIncorporated(), 'Removing archive %s after incorporation' % (archive.archivePath(),)
del self.__moduleRecordMap[archive.generationUID()]
def isLoadable (self):
"""Return C{True} iff the component model for this namespace can be
loaded from a namespace archive."""
for mr in self.moduleRecords():
if mr.isLoadable():
return True
return False
def isImportAugmentable (self):
"""Return C{True} iff the component model for this namespace may be
extended by import directives.
This is the case if the namespace has been marked with
L{setImportAugmentable}, or if there is no archive or built-in that
provides a component model for the namespace."""
if self.__isImportAugmentable:
return True
for mr in self.moduleRecords():
if mr.isLoadable() or mr.isIncorporated():
return False
return True
def setImportAugmentable (self, value=True):
self.__isImportAugmentable = value
__isImportAugmentable = False
def loadableFrom (self):
"""Return the list of archives from which components for this
namespace can be loaded."""
rv = []
for mr in self.moduleRecords():
if mr.isLoadable():
rv.append(mr.archive())
return rv
def moduleRecords (self):
return self.__moduleRecordMap.values()
__moduleRecordMap = None
def addModuleRecord (self, module_record):
assert isinstance(module_record, ModuleRecord)
assert not (module_record.generationUID() in self.__moduleRecordMap)
self.__moduleRecordMap[module_record.generationUID()] = module_record
return module_record
def lookupModuleRecordByUID (self, generation_uid, create_if_missing=False, *args, **kw):
rv = self.__moduleRecordMap.get(generation_uid)
if (rv is None) and create_if_missing:
rv = self.addModuleRecord(ModuleRecord(self, generation_uid, *args, **kw))
return rv
def _setState_csc (self, kw):
#assert not self.__isActive, 'ERROR: State set for active namespace %s' % (self,)
return getattr(super(_NamespaceArchivable_mixin, self), '_getState_csc', lambda _kw: _kw)(kw)
def markNotLoadable (self):
"""Prevent loading this namespace from an archive.
This marks all archives in which the namespace appears, whether
publicly or privately, as not loadable."""
if self._loadedFromArchive():
raise pyxb.NamespaceError(self, 'cannot mark not loadable when already loaded')
for mr in self.moduleRecords():
mr._setIsLoadable(False)
class ModuleRecord (pyxb.utils.utility.PrivateTransient_mixin):
__PrivateTransient = set()
def namespace (self):
return self.__namespace
__namespace = None
def archive (self):
return self.__archive
def _setArchive (self, archive):
self.__archive = archive
return self
__archive = None
__PrivateTransient.add('archive')
def isPublic (self):
return self.__isPublic
def _setIsPublic (self, is_public):
self.__isPublic = is_public
return self
__isPublic = None
def isIncorporated (self):
return self.__isIncorporated or (self.archive() is None)
def markIncorporated (self):
assert self.__isLoadable
self.__isIncorporated = True
self.__isLoadable = False
return self
__isIncorporated = None
__PrivateTransient.add('isIncorporated')
def isLoadable (self):
return self.__isLoadable and (self.archive() is not None)
def _setIsLoadable (self, is_loadable):
self.__isLoadable = is_loadable
return self
__isLoadable = None
def generationUID (self):
return self.__generationUID
__generationUID = None
def origins (self):
return self.__originMap.values()
def addOrigin (self, origin):
assert isinstance(origin, _ObjectOrigin)
assert not (origin.signature() in self.__originMap)
self.__originMap[origin.signature()] = origin
return origin
def lookupOriginBySignature (self, signature):
return self.__originMap.get(signature)
def _setOrigins (self, origins):
if self.__originMap is None:
self.__originMap = {}
else:
self.__originMap.clear()
[ self.addOrigin(_o) for _o in origins ]
return self
__originMap = None
def hasMatchingOrigin (self, **kw):
for origin in self.origins():
if origin.match(**kw):
return True
return False
def modulePath (self):
return self.__modulePath
def setModulePath (self, module_path):
assert (module_path is None) or isinstance(module_path, basestring)
self.__modulePath = module_path
return self
__modulePath = None
def referencedNamespaces (self):
return self.__referencedNamespaces
def _setReferencedNamespaces (self, referenced_namespaces):
self.__referencedNamespaces.update(referenced_namespaces)
return self
def referenceNamespace (self, namespace):
self.__referencedNamespaces.add(namespace)
return namespace
__referencedNamespaces = None
__constructedLocally = False
__PrivateTransient.add('constructedLocally')
def __init__ (self, namespace, generation_uid, **kw):
from pyxb.namespace import builtin
super(ModuleRecord, self).__init__()
self.__namespace = namespace
assert (generation_uid != builtin.BuiltInObjectUID) or namespace.isBuiltinNamespace()
self.__isPublic = kw.get('is_public', False)
self.__isIncorporated = kw.get('is_incorporated', False)
self.__isLoadable = kw.get('is_loadable', True)
assert isinstance(generation_uid, pyxb.utils.utility.UniqueIdentifier)
self.__generationUID = generation_uid
self.__modulePath = kw.get('module_path')
self.__originMap = {}
self.__referencedNamespaces = set()
self.__categoryObjects = { }
self.__constructedLocally = True
self.__dependsOnExternal = set()
def _setFromOther (self, other, archive):
if (not self.__constructedLocally) or other.__constructedLocally:
raise pyxb.ImplementationError('Module record update requires local to be updated from archive')
assert self.__generationUID == other.__generationUID
assert self.__archive is None
self.__isPublic = other.__isPublic
assert not self.__isIncorporated
self.__isLoadable = other.__isLoadable
self.__modulePath = other.__modulePath
self.__originMap.update(other.__originMap)
self.__referencedNamespaces.update(other.__referencedNamespaces)
if not (other.__categoryObjects is None):
self.__categoryObjects.update(other.__categoryObjects)
self.__dependsOnExternal.update(other.__dependsOnExternal)
self._setArchive(archive)
def categoryObjects (self):
return self.__categoryObjects
def resetCategoryObjects (self):
self.__categoryObjects.clear()
for origin in self.origins():
origin.resetCategoryMembers()
def _addCategoryObject (self, category, name, obj):
obj._prepareForArchive(self)
self.__categoryObjects.setdefault(category, {})[name] = obj
def _loadCategoryObjects (self, category_objects):
assert self.__categoryObjects is None
assert not self.__constructedLocally
ns = self.namespace()
ns.configureCategories(category_objects.iterkeys())
for (cat, obj_map) in category_objects.iteritems():
current_map = ns.categoryMap(cat)
for (local_name, component) in obj_map.iteritems():
existing_component = current_map.get(local_name)
if existing_component is None:
current_map[local_name] = component
elif existing_component._allowUpdateFromOther(component):
existing_component._updateFromOther(component)
else:
raise pyxb.NamespaceError(self, 'Load attempted to override %s %s in %s' % (cat, local_name, self.namespace()))
self.markIncorporated()
__categoryObjects = None
__PrivateTransient.add('categoryObjects')
def dependsOnExternal (self):
return self.__dependsOnExternal
__dependsOnExternal = None
def prepareForArchive (self, archive):
assert self.archive() is None
self._setArchive(archive)
ns = self.namespace()
self.__dependsOnExternal.clear()
for mr in ns.moduleRecords():
if mr != self:
_log.info('This gen depends on %s', mr)
self.__dependsOnExternal.add(mr.generationUID())
for obj in ns._namedObjects().union(ns.components()):
if isinstance(obj, _ArchivableObject_mixin):
if obj._objectOrigin():
obj._prepareForArchive(self)
def completeGenerationAssociations (self):
self.namespace()._transferReferencedNamespaces(self)
self.namespace()._associateOrigins(self)
def __str__ (self):
return 'MR[%s]@%s' % (self.generationUID(), self.namespace())
class _ObjectOrigin (pyxb.utils.utility.PrivateTransient_mixin, pyxb.cscRoot):
"""Marker class for objects that can serve as an origin for an object in a
namespace."""
__PrivateTransient = set()
def signature (self):
return self.__signature
__signature = None
def moduleRecord (self):
return self.__moduleRecord
__moduleRecord = None
def namespace (self):
return self.moduleRecord().namespace()
def generationUID (self):
return self.moduleRecord().generationUID()
def __init__ (self, namespace, generation_uid, **kw):
self.__signature = kw.pop('signature', None)
super(_ObjectOrigin, self).__init__(**kw)
self.__moduleRecord = namespace.lookupModuleRecordByUID(generation_uid, create_if_missing=True, **kw)
self.__moduleRecord.addOrigin(self)
self.__categoryMembers = { }
self.__categoryObjectMap = { }
def resetCategoryMembers (self):
self.__categoryMembers.clear()
self.__categoryObjectMap.clear()
self.__originatedObjects = None
def addCategoryMember (self, category, name, obj):
self.__categoryMembers.setdefault(category, set()).add(name)
self.__categoryObjectMap.setdefault(category, {})[name] = obj
self.__moduleRecord._addCategoryObject(category, name, obj)
def categoryMembers (self):
return self.__categoryMembers
def originatedObjects (self):
if self.__originatedObjects is None:
components = set()
[ components.update(_v.itervalues()) for _v in self.__categoryObjectMap.itervalues() ]
self.__originatedObjects = frozenset(components)
return self.__originatedObjects
# The set of category names associated with objects. Don't throw this
# away and use categoryObjectMap.keys() instead: that's transient, and we
# need this to have a value when read from an archive.
__categoryMembers = None
# Map from category name to a map from an object name to the object
__categoryObjectMap = None
__PrivateTransient.add('categoryObjectMap')
# The set of objects that originated at this origin
__originatedObjects = None
__PrivateTransient.add('originatedObjects')
class _SchemaOrigin (_ObjectOrigin):
"""Holds the data regarding components derived from a single schema.
Coupled to a particular namespace through the
L{_NamespaceComponentAssociation_mixin}.
"""
__PrivateTransient = set()
def __setDefaultKW (self, kw):
schema = kw.get('schema')
if schema is not None:
assert not ('location' in kw)
kw['location'] = schema.location()
assert not ('signature' in kw)
kw['signature'] = schema.signature()
assert not ('generation_uid' in kw)
kw['generation_uid'] = schema.generationUID()
assert not ('namespace' in kw)
kw['namespace'] = schema.targetNamespace()
assert not ('version' in kw)
kw['version'] = schema.schemaAttribute('version')
def match (self, **kw):
"""Determine whether this record matches the parameters.
@keyword schema: a L{pyxb.xmlschema.structures.Schema} instance from
which the other parameters are obtained.
@keyword location: a schema location (URI)
@keyword signature: a schema signature
@return: C{True} iff I{either} C{location} or C{signature} matches."""
self.__setDefaultKW(kw)
location = kw.get('location')
if (location is not None) and (self.location() == location):
return True
signature = kw.get('signature')
if (signature is not None) and (self.signature() == signature):
return True
return False
def location (self):
return self.__location
__location = None
def schema (self):
return self.__schema
__schema = None
__PrivateTransient.add('schema')
def version (self):
return self.__version
__version = None
def __init__ (self, **kw):
self.__setDefaultKW(kw)
self.__schema = kw.pop('schema', None)
self.__location = kw.pop('location', None)
self.__version = kw.pop('version', None)
super(_SchemaOrigin, self).__init__(kw.pop('namespace'), kw.pop('generation_uid'), **kw)
def __str__ (self):
rv = [ '_SchemaOrigin(%s@%s' % (self.namespace(), self.location()) ]
if self.version() is not None:
rv.append(',version=%s' % (self.version(),))
rv.append(')')
return ''.join(rv)
class NamespaceDependencies (object):
def rootNamespaces (self):
return self.__rootNamespaces
__rootNamespaces = None
def namespaceGraph (self, reset=False):
if reset or (self.__namespaceGraph is None):
self.__namespaceGraph = pyxb.utils.utility.Graph()
for ns in self.rootNamespaces():
self.__namespaceGraph.addRoot(ns)
# Make sure all referenced namespaces have valid components
need_check = self.__rootNamespaces.copy()
done_check = set()
while 0 < len(need_check):
ns = need_check.pop()
ns.validateComponentModel()
self.__namespaceGraph.addNode(ns)
for rns in ns.referencedNamespaces().union(ns.importedNamespaces()):
self.__namespaceGraph.addEdge(ns, rns)
if not rns in done_check:
need_check.add(rns)
if not ns.hasSchemaComponents():
_log.warning('Referenced %s has no schema components', ns.uri())
done_check.add(ns)
assert done_check == self.__namespaceGraph.nodes()
return self.__namespaceGraph
__namespaceGraph = None
def namespaceOrder (self, reset=False):
return self.namespaceGraph(reset).sccOrder()
def siblingsFromGraph (self, reset=False):
siblings = set()
ns_graph = self.namespaceGraph(reset)
for ns in self.__rootNamespaces:
ns_siblings = ns_graph.sccMap().get(ns)
if ns_siblings is not None:
siblings.update(ns_siblings)
else:
siblings.add(ns)
return siblings
def siblingNamespaces (self):
if self.__siblingNamespaces is None:
self.__siblingNamespaces = self.siblingsFromGraph()
return self.__siblingNamespaces
def setSiblingNamespaces (self, sibling_namespaces):
self.__siblingNamespaces = sibling_namespaces
__siblingNamespaces = None
def dependentNamespaces (self, reset=False):
return self.namespaceGraph(reset).nodes()
def componentGraph (self, reset=False):
if reset or (self.__componentGraph is None):
self.__componentGraph = pyxb.utils.utility.Graph()
all_components = set()
for ns in self.siblingNamespaces():
[ all_components.add(_c) for _c in ns.components() if _c.hasBinding() ]
need_visit = all_components.copy()
while 0 < len(need_visit):
c = need_visit.pop()
self.__componentGraph.addNode(c)
for cd in c.bindingRequires(include_lax=True):
if cd in all_components:
self.__componentGraph.addEdge(c, cd)
return self.__componentGraph
__componentGraph = None
def componentOrder (self, reset=False):
return self.componentGraph(reset).sccOrder()
def __init__ (self, **kw):
namespace_set = set(kw.get('namespace_set', []))
namespace = kw.get('namespace')
if namespace is not None:
namespace_set.add(namespace)
if 0 == len(namespace_set):
raise pyxb.LogicError('NamespaceDependencies requires at least one root namespace')
self.__rootNamespaces = namespace_set
## Local Variables:
## fill-column:78
## End:
|
sudheerchintala/LearnEraPlatForm
|
refs/heads/master
|
lms/djangoapps/branding/views.py
|
19
|
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import Http404
from django.shortcuts import redirect
from django_future.csrf import ensure_csrf_cookie
from edxmako.shortcuts import render_to_response
import student.views
import courseware.views
from microsite_configuration import microsite
from edxmako.shortcuts import marketing_link
from util.cache import cache_if_anonymous
@ensure_csrf_cookie
@cache_if_anonymous
def index(request):
'''
Redirects to main page -- info page if user authenticated, or marketing if not
'''
if settings.COURSEWARE_ENABLED and request.user.is_authenticated():
return redirect(reverse('dashboard'))
if settings.FEATURES.get('AUTH_USE_CERTIFICATES'):
from external_auth.views import ssl_login
# Set next URL to dashboard if it isn't set to avoid
# caching a redirect to / that causes a redirect loop on logout
if not request.GET.get('next'):
req_new = request.GET.copy()
req_new['next'] = reverse('dashboard')
request.GET = req_new
return ssl_login(request)
enable_mktg_site = microsite.get_value(
'ENABLE_MKTG_SITE',
settings.FEATURES.get('ENABLE_MKTG_SITE', False)
)
if enable_mktg_site:
return redirect(settings.MKTG_URLS.get('ROOT'))
domain = request.META.get('HTTP_HOST')
# keep specialized logic for Edge until we can migrate Edge over to fully use
# microsite definitions
if domain and 'edge.edx.org' in domain:
return redirect(reverse("signin_user"))
# we do not expect this case to be reached in cases where
# marketing and edge are enabled
return student.views.index(request, user=request.user)
@ensure_csrf_cookie
@cache_if_anonymous
def courses(request):
"""
Render the "find courses" page. If the marketing site is enabled, redirect
to that. Otherwise, if subdomain branding is on, this is the university
profile page. Otherwise, it's the edX courseware.views.courses page
"""
enable_mktg_site = microsite.get_value(
'ENABLE_MKTG_SITE',
settings.FEATURES.get('ENABLE_MKTG_SITE', False)
)
if enable_mktg_site:
return redirect(marketing_link('COURSES'), permanent=True)
if not settings.FEATURES.get('COURSES_ARE_BROWSABLE'):
raise Http404
# we do not expect this case to be reached in cases where
# marketing is enabled or the courses are not browsable
return courseware.views.courses(request)
|
hazelnusse/sympy-old
|
refs/heads/master
|
sympy/core/interval.py
|
3
|
from basic import Basic
from sympify import _sympify
class Interval(Basic):
def __new__(cls, start, end, **assumptions):
start = _sympify(start)
end = _sympify(end)
return Basic.__new__(cls, start, end, **assumptions)
@property
def start(self):
return self._args[0]
@property
def end(self):
return self._args[1]
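# Illustrative usage (assumed import path; endpoints are sympified on construction):
#   >>> from sympy.core.interval import Interval
#   >>> i = Interval(1, 2)
#   >>> i.start, i.end
#   (1, 2)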
|
taylorschimek/WhatAChore
|
refs/heads/master
|
useraccounts/models.py
|
1
|
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.db import models
from django.utils.translation import ugettext_lazy as _
class MyUserManager(BaseUserManager):
"""
A custom user manager to deal with emails as unique identifiers for auth instead of usernames.
"""
def _create_user(self, email, password, **extra_fields):
"""
Creates and saves a User with the given email and password.
"""
if not email:
raise ValueError('The Email must be set')
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save()
return user
def create_superuser(self, email, password, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(email, password, **extra_fields)
class User(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(unique=True, null=True)
doNotEmail = models.BooleanField(default=False)
is_staff = models.BooleanField(
_('staff status'),
default=False,
help_text=_('Designates whether the user can log into this site.'),
)
is_active = models.BooleanField(
_('active'),
default=True,
help_text=_(
'Designates whether this user should be treated as active. '
'Unselect this instead of deleting accounts.'
),
)
welcomed = models.BooleanField(default=False)
USERNAME_FIELD = 'email'
objects = MyUserManager()
def __str__(self):
return self.email
def get_full_name(self):
return self.email
def get_short_name(self):
return self.email
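# Illustrative usage sketch (assumes this app is in INSTALLED_APPS and the
# project settings point at it):
#   # settings.py
#   AUTH_USER_MODEL = 'useraccounts.User'
#   # e.g. in a shell or data migration
#   admin = User.objects.create_superuser('admin@example.com', 's3cret-pass')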
|
YudinYury/Python_Netology_homework
|
refs/heads/master
|
less_3_3_classwork.py
|
1
|
"""lesson_3_3_classwork «Requests Lib and HTTP-requests»
"""
import chardet
import json
import os
import requests
import sys
def translate_it(text, lang='en-ru'):
"""
YANDEX translation plugin
docs: https://tech.yandex.ru/translate/doc/dg/reference/translate-docpage/
https://translate.yandex.net/api/v1.5/tr.json/translate ?
key=<API key>
& text=<text to translate>
& lang=<translation direction>
& [format=<text format>]
& [options=<translation options>]
& [callback=<callback function name>]
:param text: <str> text for translation.
:return: <str> translated text.
"""
url = 'https://translate.yandex.net/api/v1.5/tr.json/translate'
key = 'trnsl.1.1.20161025T233221Z.47834a66fd7895d0.a95fd4bfde5c1794fa433453956bd261eae80152'
params = {
'key': key,
'lang': lang,
'text': text,
}
response_raw = requests.get(url, params=params)
# print('Body =', response_raw.content)
response = response_raw.json()
# response = requests.get(url, params=params).json()
return ' '.join(response.get('text', []))
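# Illustrative call (assumes the API key above is valid and has quota):
#   translated = translate_it('Hello, world!', lang='en-ru')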
def translate_file(source_file, destination_file, source_lang, destination_lang='ru'):
"""
translate file with YANDEX translation plugin
:param source_file: <str> source file name for translation
:param destination_file: <str> destination file name for translation
:param source_lang: <str> source language (from)
:param destination_lang: <str> destination language (to)
:return: None
"""
# lang = 'ru-en'
lang = source_lang + '-' + destination_lang
with open(source_file, 'rb') as f:  # read the file as raw bytes
text = f.read()
result = chardet.detect(text)
source_text = text.decode(result['encoding'])
text_translated = translate_it(source_text, lang)
# print(text_translated)
with open(destination_file, 'w') as f:
f.write(text_translated)
return
def find_dir(dir_name):
_, dir_name = os.path.split(dir_name)
return dir_name in os.listdir()
def make_new_dir(dir_name):
if find_dir(dir_name):
pass
# print('dir_name already exist')
else:
os.mkdir(os.path.normpath(dir_name))
def main():
source_path = '3.2-requests'
source_lang = ''
destination_lang = 'ru'
source_dir_path = os.path.normpath(os.path.abspath(source_path))
destination_dir_path = os.path.normpath(source_dir_path + '_translated')
make_new_dir(destination_dir_path)
file_names_list = os.listdir(source_dir_path)
file_names_list = [x for x in file_names_list if x.endswith('.txt')]
for file_name in file_names_list:
source_lang, _ = file_name.split('.')
source_lang = source_lang.lower()
source_file = os.path.normpath(os.path.join(source_dir_path, file_name))
# print(source_file)
destination_file = os.path.normpath(os.path.join(destination_dir_path, file_name))
translate_file(source_file, destination_file, source_lang)
    print('Translation results are saved in the folder "{}"'.format(destination_dir_path))
if __name__ == '__main__':
main()
|
cerdmann-pivotal/azure-quickstart-templates
|
refs/heads/master
|
bosh-setup/scripts/setup_env.py
|
10
|
#!/usr/bin/env python
import json
import netaddr
import os
import random
import re
import requests
import sys
import base64
from azure.storage.blob import AppendBlobService
from azure.storage.table import TableService
import azure.mgmt.network
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.network import NetworkManagementClient, NetworkManagementClientConfiguration
def prepare_storage(settings):
default_storage_account_name = settings["DEFAULT_STORAGE_ACCOUNT_NAME"]
storage_access_key = settings["DEFAULT_STORAGE_ACCESS_KEY"]
endpoint_suffix = settings["SERVICE_HOST_BASE"]
blob_service = AppendBlobService(account_name=default_storage_account_name, account_key=storage_access_key, endpoint_suffix=endpoint_suffix)
blob_service.create_container('bosh')
blob_service.create_container(
container_name='stemcell',
public_access='blob'
)
    # Prepare the table for storing metadata of the storage account and stemcells
table_service = TableService(account_name=default_storage_account_name, account_key=storage_access_key, endpoint_suffix=endpoint_suffix)
table_service.create_table('stemcells')
# file_path: String. The path to the file in which some configs starting with 'REPLACE_WITH_' need to be replaced with the actual values.
# keys: Array. The keys indicate which configs should be replaced in the file.
# values: Dict. Key-value pairs indicate which configs should be replaced by what values.
def render_file(file_path, keys, values):
try:
with open(file_path, 'r') as tmpfile:
contents = tmpfile.read()
for key in keys:
contents = re.compile(re.escape("REPLACE_WITH_{0}".format(key))).sub(values[key], contents)
with open(file_path, 'w') as tmpfile:
tmpfile.write(contents)
return True
except Exception as e:
print("render_file - {0}: {1}".format(file_path, e.strerror))
return False
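
# Hedged illustration (not part of the original script): how render_file swaps
# "REPLACE_WITH_<KEY>" placeholders; the file name and value below are hypothetical.
def _render_file_example():
    with open('/tmp/example.yml', 'w') as f:
        f.write('vnet: REPLACE_WITH_VNET_NAME\n')
    # after this call /tmp/example.yml reads "vnet: boshvnet-crp"
    render_file('/tmp/example.yml', ['VNET_NAME'], {'VNET_NAME': 'boshvnet-crp'})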
def render_bosh_manifest(settings):
with open('bosh.pub', 'r') as tmpfile:
ssh_public_key = tmpfile.read().strip()
ip = netaddr.IPNetwork(settings['SUBNET_ADDRESS_RANGE_FOR_BOSH'])
gateway_ip = str(ip[1])
bosh_director_ip = str(ip[4])
ntp_servers_maps = {
"AzureCloud": "0.north-america.pool.ntp.org",
"AzureChinaCloud": "1.cn.pool.ntp.org, 1.asia.pool.ntp.org, 0.asia.pool.ntp.org",
"AzureUSGovernment": "0.north-america.pool.ntp.org"
}
environment = settings["ENVIRONMENT"]
ntp_servers = ntp_servers_maps[environment]
postgres_address_maps = {
"AzureCloud": "127.0.0.1",
"AzureChinaCloud": bosh_director_ip,
"AzureUSGovernment": "127.0.0.1"
}
postgres_address = postgres_address_maps[environment]
keys = [
"SUBNET_ADDRESS_RANGE_FOR_BOSH",
"SECONDARY_DNS",
"VNET_NAME",
"SUBNET_NAME_FOR_BOSH",
"DNS_RECURSOR",
"SUBSCRIPTION_ID",
"DEFAULT_STORAGE_ACCOUNT_NAME",
"RESOURCE_GROUP_NAME",
"KEEP_UNREACHABLE_VMS",
"TENANT_ID",
"CLIENT_ID",
"CLIENT_SECRET",
"BOSH_PUBLIC_IP",
"NSG_NAME_FOR_BOSH",
"BOSH_RELEASE_URL",
"BOSH_RELEASE_SHA1",
"BOSH_AZURE_CPI_RELEASE_URL",
"BOSH_AZURE_CPI_RELEASE_SHA1",
"DYNAMIC_STEMCELL_URL",
"DYNAMIC_STEMCELL_SHA1",
"ENVIRONMENT",
"BOSH_VM_SIZE",
"SSH_PUBLIC_KEY",
"GATEWAY_IP",
"BOSH_DIRECTOR_IP",
"NTP_SERVERS",
"POSTGRES_ADDRESS"
]
values = settings.copy()
values["SSH_PUBLIC_KEY"] = ssh_public_key
values["GATEWAY_IP"] = gateway_ip
values["BOSH_DIRECTOR_IP"] = bosh_director_ip
values["NTP_SERVERS"] = ntp_servers
values["POSTGRES_ADDRESS"] = postgres_address
render_file("bosh.yml", keys, values)
return bosh_director_ip
def get_cloud_foundry_configuration(scenario, settings, bosh_director_ip):
dns_maps = {
"AzureCloud": "168.63.129.16\n - {0}".format(settings["SECONDARY_DNS"]),
"AzureChinaCloud": bosh_director_ip,
"AzureUSGovernment": "168.63.129.16\n - {0}".format(settings["SECONDARY_DNS"])
}
config = {}
config["DNS"] = dns_maps[settings["ENVIRONMENT"]]
config["SYSTEM_DOMAIN"] = "{0}.xip.io".format(settings["CLOUD_FOUNDRY_PUBLIC_IP"])
keys = [
"VNET_NAME",
"SUBNET_NAME_FOR_CLOUD_FOUNDRY",
"CLOUD_FOUNDRY_PUBLIC_IP",
"NSG_NAME_FOR_CLOUD_FOUNDRY",
"ENVIRONMENT",
"DEFAULT_STORAGE_ACCOUNT_NAME",
"DEFAULT_STORAGE_ACCESS_KEY"
]
for key in keys:
config[key] = settings[key]
return config
def render_cloud_foundry_manifest(settings, bosh_director_ip):
for scenario in ["single-vm-cf", "multiple-vm-cf"]:
cloudfoundry_template = "{0}.yml".format(scenario)
config = get_cloud_foundry_configuration(scenario, settings, bosh_director_ip)
render_file(cloudfoundry_template, config.keys(), config)
def render_bosh_deployment_cmd(bosh_director_ip):
keys = ["BOSH_DIRECOT_IP"]
values = {}
values["BOSH_DIRECOT_IP"] = bosh_director_ip
render_file("deploy_bosh.sh", keys, values)
def render_cloud_foundry_deployment_cmd(settings):
keys = [
"STATIC_STEMCELL_URL",
"STATIC_STEMCELL_SHA1",
"STATIC_CF_RELEASE_URL",
"STATIC_CF_RELEASE_SHA1",
"STATIC_DIEGO_RELEASE_URL",
"STATIC_DIEGO_RELEASE_SHA1",
"STATIC_GARDEN_RELEASE_URL",
"STATIC_GARDEN_RELEASE_SHA1",
"STATIC_CFLINUXFS2_RELEASE_URL",
"STATIC_CFLINUXFS2_RELEASE_SHA1",
"DYNAMIC_STEMCELL_URL",
"DYNAMIC_STEMCELL_SHA1",
"DYNAMIC_CF_RELEASE_URL",
"DYNAMIC_CF_RELEASE_SHA1",
"DYNAMIC_DIEGO_RELEASE_URL",
"DYNAMIC_DIEGO_RELEASE_SHA1",
"DYNAMIC_GARDEN_RELEASE_URL",
"DYNAMIC_GARDEN_RELEASE_SHA1",
"DYNAMIC_CFLINUXFS2_RELEASE_URL",
"DYNAMIC_CFLINUXFS2_RELEASE_SHA1"
]
render_file("deploy_cloudfoundry.sh", keys, settings)
def get_settings():
settings = dict()
config_file = sys.argv[4]
with open(config_file) as f:
settings = json.load(f)
settings['TENANT_ID'] = sys.argv[1]
settings['CLIENT_ID'] = sys.argv[2]
settings['CLIENT_SECRET'] = base64.b64decode(sys.argv[3])
print "tenant_id: {0}xxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx".format(settings['TENANT_ID'][0:4])
print "client_id: {0}xxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx".format(settings['CLIENT_ID'][0:4])
print "The length of client_secret is {0}".format(len(settings['CLIENT_SECRET']))
return settings
def main():
settings = get_settings()
with open('settings', "w") as tmpfile:
tmpfile.write(json.dumps(settings, indent=4, sort_keys=True))
prepare_storage(settings)
bosh_director_ip = render_bosh_manifest(settings)
render_bosh_deployment_cmd(bosh_director_ip)
render_cloud_foundry_manifest(settings, bosh_director_ip)
render_cloud_foundry_deployment_cmd(settings)
if __name__ == "__main__":
main()
|
v4hn/ecto
|
refs/heads/master
|
python/ecto/sphinx/breathe/renderer/rst/doxygen/index.py
|
1
|
from ecto.sphinx.breathe.renderer.rst.doxygen.base import Renderer
class DoxygenTypeSubRenderer(Renderer):
def render(self):
nodelist = []
# Process all the compound children
for compound in self.data_object.get_compound():
compound_renderer = self.renderer_factory.create_renderer(self.data_object, compound)
nodelist.extend(compound_renderer.render())
return nodelist
class CompoundTypeSubRenderer(Renderer):
def __init__(self, compound_parser, *args):
Renderer.__init__(self, *args)
self.compound_parser = compound_parser
def create_target(self, refid):
return self.target_handler.create_target(refid)
def create_domain_id(self):
return ""
def render(self):
refid = "%s%s" % (self.project_info.name(), self.data_object.refid)
nodelist = self.create_target(refid)
domain_id = self.create_domain_id()
# Read in the corresponding xml file and process
file_data = self.compound_parser.parse(self.data_object.refid)
lines = []
# Check if there is template information and format it as desired
if file_data.compounddef.templateparamlist:
renderer = self.renderer_factory.create_renderer(
file_data.compounddef,
file_data.compounddef.templateparamlist
)
template = [
self.node_factory.Text("template < ")
]
template.extend(renderer.render())
template.append(self.node_factory.Text(" >"))
lines.append(self.node_factory.line("", *template))
# Set up the title and a reference for it (refid)
kind = self.node_factory.emphasis(text=self.data_object.kind)
name = self.node_factory.strong(text=self.data_object.name)
# Add blank string at the start otherwise for some reason it renders
# the emphasis tags around the kind in plain text
lines.append(
self.node_factory.line(
"",
self.node_factory.Text(""),
kind,
self.node_factory.Text(" "),
name
)
)
if file_data.compounddef.includes:
for include in file_data.compounddef.includes:
renderer = self.renderer_factory.create_renderer(
file_data.compounddef,
include
)
result = renderer.render()
if result:
lines.append(
self.node_factory.line(
"",
self.node_factory.Text(""),
*result
)
)
nodelist.append(
self.node_factory.line_block(
"",
*lines
)
)
data_renderer = self.renderer_factory.create_renderer(self.data_object, file_data)
nodelist.extend(data_renderer.render())
return nodelist
class ClassCompoundTypeSubRenderer(CompoundTypeSubRenderer):
def create_target(self, refid):
self.domain_handler.create_class_target(self.data_object)
return CompoundTypeSubRenderer.create_target(self, refid)
def create_domain_id(self):
return self.domain_handler.create_class_id(self.data_object)
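
# Hedged sketch (not part of the original module): other compound kinds can reuse the
# same two hooks shown above; the domain-id prefix here is a hypothetical illustration.
class _NamespaceCompoundTypeSubRenderer(CompoundTypeSubRenderer):
    def create_domain_id(self):
        # prefix the compound name so the generated target id stays unique per kind
        return "namespace-%s" % self.data_object.name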
|
ComNets-Bremen/Python-Course
|
refs/heads/master
|
examples/09_enumerateExample.py
|
1
|
#!/usr/bin/env python3
"""
Enumerate an object
Jens Dede, 2019, jd@comnets.uni-bremen.de
"""
objects = ["Apple", "Lemon", "Banana"]
for o in objects:
print(o)
# I know the object but not the position / number...
print(5*"*")
# Enumerate returns the position (i.e. an integer) and the object
for n, o in enumerate(objects):
print("Pos", n, "Object", o)
if n==1:
print("After second entry!!")
|
SAM-IT-SA/odoo
|
refs/heads/8.0
|
addons/pad_project/project_task.py
|
433
|
# -*- coding: utf-8 -*-
from openerp.tools.translate import _
from openerp.osv import fields, osv
class task(osv.osv):
_name = "project.task"
_inherit = ["project.task",'pad.common']
_columns = {
'description_pad': fields.char('Description PAD', pad_content_field='description')
}
|
kidaa/avmplus
|
refs/heads/master
|
generate.py
|
8
|
#!/usr/bin/env python
# -*- Mode: Python; indent-tabs-mode: nil -*-
# vi: set ts=4 sw=4 expandtab:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# generate.py - generate all exactgc tracers
import sys,os
rootdir = os.path.dirname(__file__)
sys.path.append(rootdir)
import utils.exactgc
utils.exactgc.gen_builtins(rootdir + '/generated')
utils.exactgc.gen_shell(rootdir + '/generated')
|
waynenilsen/statsmodels
|
refs/heads/master
|
statsmodels/sandbox/tools/mctools.py
|
33
|
'''Helper class for Monte Carlo Studies for (currently) statistical tests
Most of it should also be usable for Bootstrap, and for MC for estimators.
Takes the sample generator, dgp, and the statistical results, statistic,
as functions in the arguments.
Author: Josef Perktold (josef-pktd)
License: BSD-3
TODOs, Design
-------------
If we only care about univariate analysis, i.e. the marginals when the statistic
returns more than one value, then we only need to store the sorted mcres, not the
original res. Do we want to extend to multivariate analysis?
Use a distribution function to keep track of MC results, ECDF, non-parametric?
Large parts are similar to a 2d array of independent multivariate random
variables. The joint distribution is not used (yet).
I guess this is currently only for one-sided test statistics; e.g., for
two-sided tests based on the t or normal distribution, use the absolute value.
'''
from __future__ import print_function
from statsmodels.compat.python import lrange
import numpy as np
from statsmodels.iolib.table import SimpleTable
#copied from stattools
class StatTestMC(object):
"""class to run Monte Carlo study on a statistical test'''
TODO
print(summary, for quantiles and for histogram
draft in trying out script log
Parameters
----------
dgp : callable
Function that generates the data to be used in Monte Carlo that should
return a new sample with each call
statistic : callable
Function that calculates the test statistic, which can return either
a single statistic or a 1d array_like (tuple, list, ndarray).
see also statindices in description of run
Attributes
----------
many methods store intermediate results
self.mcres : ndarray (nrepl, nreturns) or (nrepl, len(statindices))
Monte Carlo results stored by run
Notes
-----
.. Warning::
This is (currently) designed for a single call to run. If run is
called a second time with different arguments, then some attributes might
not be updated, and, therefore, not correspond to the same run.
.. Warning::
        Under Construction, don't expect stability in API or implementation
Examples
--------
Define a function that defines our test statistic:
def lb(x):
s,p = acorr_ljungbox(x, lags=4)
return np.r_[s, p]
Note lb returns eight values.
    Define a random sample generator, for example 500 independent, normally
    distributed observations in a sample:
def normalnoisesim(nobs=500, loc=0.0):
return (loc+np.random.randn(nobs))
Create instance and run Monte Carlo. Using statindices=list(range(4)) means that
    only the first four values of the return of the statistic (lb) are stored
in the Monte Carlo results.
mc1 = StatTestMC(normalnoisesim, lb)
mc1.run(5000, statindices=list(range(4)))
Most of the other methods take an idx which indicates for which columns
the results should be presented, e.g.
    print(mc1.cdf(crit, [1,2,3])[1])
"""
def __init__(self, dgp, statistic):
self.dgp = dgp #staticmethod(dgp) #no self
self.statistic = statistic # staticmethod(statistic) #no self
def run(self, nrepl, statindices=None, dgpargs=[], statsargs=[]):
'''run the actual Monte Carlo and save results
Parameters
----------
nrepl : int
number of Monte Carlo repetitions
statindices : None or list of integers
determines which values of the return of the statistic
functions are stored in the Monte Carlo. Default None
means the entire return. If statindices is a list of
integers, then it will be used as index into the return.
dgpargs : tuple
optional parameters for the DGP
statsargs : tuple
optional parameters for the statistics function
Returns
-------
None, all results are attached
'''
self.nrepl = nrepl
self.statindices = statindices
self.dgpargs = dgpargs
self.statsargs = statsargs
dgp = self.dgp
statfun = self.statistic # name ?
#introspect len of return of statfun,
#possible problems with ndim>1, check ValueError
mcres0 = statfun(dgp(*dgpargs), *statsargs)
self.nreturn = nreturns = len(np.ravel(mcres0))
#single return statistic
if statindices is None:
#self.nreturn = nreturns = 1
mcres = np.zeros(nrepl)
mcres[0] = mcres0
            for ii in range(1, nrepl-1, nreturns):
x = dgp(*dgpargs) #(1e-4+np.random.randn(nobs)).cumsum()
#should I ravel?
mcres[ii] = statfun(x, *statsargs) #unitroot_adf(x, 2,trendorder=0, autolag=None)
#more than one return statistic
else:
self.nreturn = nreturns = len(statindices)
self.mcres = mcres = np.zeros((nrepl, nreturns))
mcres[0] = [mcres0[i] for i in statindices]
for ii in range(1, nrepl-1):
x = dgp(*dgpargs) #(1e-4+np.random.randn(nobs)).cumsum()
ret = statfun(x, *statsargs)
mcres[ii] = [ret[i] for i in statindices]
self.mcres = mcres
def histogram(self, idx=None, critval=None):
'''calculate histogram values
does not do any plotting
I don't remember what I wanted here, looks similar to the new cdf
method, but this also does a binned pdf (self.histo)
'''
if self.mcres.ndim == 2:
if not idx is None:
mcres = self.mcres[:,idx]
else:
raise ValueError('currently only 1 statistic at a time')
else:
mcres = self.mcres
if critval is None:
histo = np.histogram(mcres, bins=10)
else:
if not critval[0] == -np.inf:
bins=np.r_[-np.inf, critval, np.inf]
if not critval[0] == -np.inf:
bins=np.r_[bins, np.inf]
histo = np.histogram(mcres,
bins=np.r_[-np.inf, critval, np.inf])
self.histo = histo
self.cumhisto = np.cumsum(histo[0])*1./self.nrepl
self.cumhistoreversed = np.cumsum(histo[0][::-1])[::-1]*1./self.nrepl
return histo, self.cumhisto, self.cumhistoreversed
#use cache decorator instead
def get_mc_sorted(self):
if not hasattr(self, 'mcressort'):
self.mcressort = np.sort(self.mcres, axis=0)
return self.mcressort
def quantiles(self, idx=None, frac=[0.01, 0.025, 0.05, 0.1, 0.975]):
'''calculate quantiles of Monte Carlo results
similar to ppf
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
frac : array_like, float
Defines which quantiles should be calculated. For example a frac
of 0.1 finds the 10% quantile, x such that cdf(x)=0.1
Returns
-------
frac : ndarray
same values as input, TODO: I should drop this again ?
quantiles : ndarray, (len(frac), len(idx))
the quantiles with frac in rows and idx variables in columns
Notes
-----
rename to ppf ? make frac required
change sequence idx, frac
'''
if self.mcres.ndim == 2:
if not idx is None:
mcres = self.mcres[:,idx]
else:
raise ValueError('currently only 1 statistic at a time')
else:
mcres = self.mcres
self.frac = frac = np.asarray(frac)
mc_sorted = self.get_mc_sorted()[:,idx]
return frac, mc_sorted[(self.nrepl*frac).astype(int)]
def cdf(self, x, idx=None):
'''calculate cumulative probabilities of Monte Carlo results
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
frac : array_like, float
Defines which quantiles should be calculated. For example a frac
of 0.1 finds the 10% quantile, x such that cdf(x)=0.1
Returns
-------
x : ndarray
same as input, TODO: I should drop this again ?
probs : ndarray, (len(x), len(idx))
the quantiles with frac in rows and idx variables in columns
'''
idx = np.atleast_1d(idx).tolist() #assure iterable, use list ?
# if self.mcres.ndim == 2:
# if not idx is None:
# mcres = self.mcres[:,idx]
# else:
# raise ValueError('currently only 1 statistic at a time')
# else:
# mcres = self.mcres
mc_sorted = self.get_mc_sorted()
x = np.asarray(x)
#TODO:autodetect or explicit option ?
if x.ndim > 1 and x.shape[1]==len(idx):
use_xi = True
else:
use_xi = False
x_ = x #alias
probs = []
for i,ix in enumerate(idx):
if use_xi:
x_ = x[:,i]
probs.append(np.searchsorted(mc_sorted[:,ix], x_)/float(self.nrepl))
probs = np.asarray(probs).T
return x, probs
def plot_hist(self, idx, distpdf=None, bins=50, ax=None, kwds=None):
'''plot the histogram against a reference distribution
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
distpdf : callable
probability density function of reference distribution
bins : integer or array_like
used unchanged for matplotlibs hist call
ax : TODO: not implemented yet
kwds : None or tuple of dicts
extra keyword options to the calls to the matplotlib functions,
            first dictionary is for hist, second dictionary for the plot of the
reference distribution
Returns
-------
None
'''
if kwds is None:
kwds = ({},{})
if self.mcres.ndim == 2:
if not idx is None:
mcres = self.mcres[:,idx]
else:
raise ValueError('currently only 1 statistic at a time')
else:
mcres = self.mcres
lsp = np.linspace(mcres.min(), mcres.max(), 100)
import matplotlib.pyplot as plt
#I don't want to figure this out now
# if ax=None:
# fig = plt.figure()
# ax = fig.addaxis()
fig = plt.figure()
plt.hist(mcres, bins=bins, normed=True, **kwds[0])
plt.plot(lsp, distpdf(lsp), 'r', **kwds[1])
def summary_quantiles(self, idx, distppf, frac=[0.01, 0.025, 0.05, 0.1, 0.975],
varnames=None, title=None):
'''summary table for quantiles (critical values)
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
distppf : callable
            percent point function (inverse cdf) of the reference distribution
            TODO: use `crit` values instead or additionally, see summary_cdf
frac : array_like, float
            probabilities for which the quantiles (critical values) are calculated
varnames : None, or list of strings
optional list of variable names, same length as idx
Returns
-------
table : instance of SimpleTable
            use `print(table)` to see results
'''
idx = np.atleast_1d(idx) #assure iterable, use list ?
quant, mcq = self.quantiles(idx, frac=frac)
#not sure whether this will work with single quantile
#crit = stats.chi2([2,4]).ppf(np.atleast_2d(quant).T)
crit = distppf(np.atleast_2d(quant).T)
mml=[]
for i, ix in enumerate(idx): #TODO: hardcoded 2 ?
mml.extend([mcq[:,i], crit[:,i]])
#mmlar = np.column_stack(mml)
mmlar = np.column_stack([quant] + mml)
#print(mmlar.shape
if title:
title = title +' Quantiles (critical values)'
else:
title='Quantiles (critical values)'
#TODO use stub instead
if varnames is None:
varnames = ['var%d' % i for i in range(mmlar.shape[1]//2)]
headers = ['\nprob'] + ['%s\n%s' % (i, t) for i in varnames for t in ['mc', 'dist']]
return SimpleTable(mmlar,
txt_fmt={'data_fmts': ["%#6.3f"]+["%#10.4f"]*(mmlar.shape[1]-1)},
title=title,
headers=headers)
def summary_cdf(self, idx, frac, crit, varnames=None, title=None):
        '''summary table for the cumulative distribution function
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
frac : array_like, float
            probabilities corresponding to the crit values (first column of the table)
crit : array_like
values for which cdf is calculated
varnames : None, or list of strings
optional list of variable names, same length as idx
Returns
-------
table : instance of SimpleTable
            use `print(table)` to see results
'''
idx = np.atleast_1d(idx) #assure iterable, use list ?
mml=[]
#TODO:need broadcasting in cdf
for i in range(len(idx)):
#print(i, mc1.cdf(crit[:,i], [idx[i]])[1].ravel()
mml.append(self.cdf(crit[:,i], [idx[i]])[1].ravel())
#mml = self.cdf(crit, idx)[1]
#mmlar = np.column_stack(mml)
#print(mml[0].shape, np.shape(frac)
mmlar = np.column_stack([frac] + mml)
#print(mmlar.shape
if title:
            title = title + ' Probabilities'
else:
title='Probabilities'
#TODO use stub instead
#headers = ['\nprob'] + ['var%d\n%s' % (i, t) for i in range(mmlar.shape[1]-1) for t in ['mc']]
if varnames is None:
varnames = ['var%d' % i for i in range(mmlar.shape[1]-1)]
headers = ['prob'] + varnames
return SimpleTable(mmlar,
txt_fmt={'data_fmts': ["%#6.3f"]+["%#10.4f"]*(np.array(mml).shape[1]-1)},
title=title,
headers=headers)
if __name__ == '__main__':
from scipy import stats
from statsmodels.iolib.table import SimpleTable
from statsmodels.sandbox.stats.diagnostic import (
acorr_ljungbox, unitroot_adf)
def randwalksim(nobs=100, drift=0.0):
return (drift+np.random.randn(nobs)).cumsum()
def normalnoisesim(nobs=500, loc=0.0):
return (loc+np.random.randn(nobs))
def adf20(x):
return unitroot_adf(x, 2,trendorder=0, autolag=None)
# print('\nResults with MC class'
# mc1 = StatTestMC(randwalksim, adf20)
# mc1.run(1000)
# print(mc1.histogram(critval=[-3.5, -3.17, -2.9 , -2.58, 0.26])
# print(mc1.quantiles()
print('\nLjung Box')
from statsmodels.sandbox.stats.diagnostic import acorr_ljungbox
def lb4(x):
s,p = acorr_ljungbox(x, lags=4)
return s[-1], p[-1]
def lb1(x):
s,p = acorr_ljungbox(x, lags=1)
return s[0], p[0]
def lb(x):
s,p = acorr_ljungbox(x, lags=4)
return np.r_[s, p]
print('Results with MC class')
mc1 = StatTestMC(normalnoisesim, lb)
mc1.run(10000, statindices=lrange(8))
print(mc1.histogram(1, critval=[0.01, 0.025, 0.05, 0.1, 0.975]))
print(mc1.quantiles(1))
print(mc1.quantiles(0))
print(mc1.histogram(0))
#print(mc1.summary_quantiles([1], stats.chi2([2]).ppf, title='acorr_ljungbox')
print(mc1.summary_quantiles([1,2,3], stats.chi2([2,3,4]).ppf,
varnames=['lag 1', 'lag 2', 'lag 3'],
title='acorr_ljungbox'))
print(mc1.cdf(0.1026, 1))
print(mc1.cdf(0.7278, 3))
print(mc1.cdf(0.7278, [1,2,3]))
frac = [0.01, 0.025, 0.05, 0.1, 0.975]
crit = stats.chi2([2,4]).ppf(np.atleast_2d(frac).T)
print(mc1.summary_cdf([1,3], frac, crit, title='acorr_ljungbox'))
crit = stats.chi2([2,3,4]).ppf(np.atleast_2d(frac).T)
print(mc1.summary_cdf([1,2,3], frac, crit,
varnames=['lag 1', 'lag 2', 'lag 3'],
title='acorr_ljungbox'))
print(mc1.cdf(crit, [1,2,3])[1].shape)
#fixed broadcasting in cdf Done 2d only
'''
>>> mc1.cdf(crit[:,0], [1])[1].shape
(5, 1)
>>> mc1.cdf(crit[:,0], [1,3])[1].shape
(5, 2)
>>> mc1.cdf(crit[:,:], [1,3])[1].shape
(2, 5, 2)
'''
doplot=0
if doplot:
import matplotlib.pyplot as plt
mc1.plot_hist(0,stats.chi2(2).pdf) #which pdf
plt.show()
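
# Hedged follow-up sketch (not part of the original module, kept commented out so the
# script's behaviour is unchanged): the same class works for any array-returning
# statistic, e.g. a Monte Carlo of the sample mean and standard deviation under the
# normal DGP defined above.
#
#   mc_mean = StatTestMC(normalnoisesim, lambda x: np.r_[x.mean(), x.std()])
#   mc_mean.run(1000, statindices=[0, 1])
#   print(mc_mean.quantiles(0))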
|
andim27/magiccamp
|
refs/heads/master
|
django/contrib/gis/db/backends/spatialite/introspection.py
|
401
|
from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.sqlite3.introspection import DatabaseIntrospection, FlexibleFieldLookupDict
class GeoFlexibleFieldLookupDict(FlexibleFieldLookupDict):
"""
    Subclass that updates the `base_data_types_reverse` dict
    for geometry field types.
"""
base_data_types_reverse = FlexibleFieldLookupDict.base_data_types_reverse.copy()
base_data_types_reverse.update(
{'point' : 'GeometryField',
'linestring' : 'GeometryField',
'polygon' : 'GeometryField',
'multipoint' : 'GeometryField',
'multilinestring' : 'GeometryField',
'multipolygon' : 'GeometryField',
'geometrycollection' : 'GeometryField',
})
class SpatiaLiteIntrospection(DatabaseIntrospection):
data_types_reverse = GeoFlexibleFieldLookupDict()
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# Querying the `geometry_columns` table to get additional metadata.
cursor.execute('SELECT "coord_dimension", "srid", "type" '
'FROM "geometry_columns" '
'WHERE "f_table_name"=%s AND "f_geometry_column"=%s',
(table_name, geo_col))
row = cursor.fetchone()
if not row:
raise Exception('Could not find a geometry column for "%s"."%s"' %
(table_name, geo_col))
# OGRGeomType does not require GDAL and makes it easy to convert
# from OGC geom type name to Django field.
field_type = OGRGeomType(row[2]).django
# Getting any GeometryField keyword arguments that are not the default.
dim = row[0]
srid = row[1]
field_params = {}
if srid != 4326:
field_params['srid'] = srid
if isinstance(dim, basestring) and 'Z' in dim:
field_params['dim'] = 3
finally:
cursor.close()
return field_type, field_params
|
mementum/bfpy
|
refs/heads/master
|
src/httxlib/httxobject.py
|
4
|
#!/usr/bin/env python
# -*- coding: latin-1; py-indent-offset:4 -*-
################################################################################
#
# This file is part of HttxLib
#
# HttxLib is an HTTP(s) Python library suited multithreaded/multidomain
# applications
#
# Copyright (C) 2010-2011 Daniel Rodriguez (aka Daniel Rodriksson)
# Copyright (C) 2011 Sensible Odds Ltd
#
# You can learn more and contact the author at:
#
# http://code.google.com/p/httxlib/
#
# HttxLib is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HttxLib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HttxLib. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
'''
Base httxlib object that ensures all objects receive a Lock
'''
try:
from threading import Lock
except:
from dummy_threading import Lock
class HttxObject(object):
'''
Base HttxLib object to provide threading synchronization
via means of an automatically instantiated lock object
@ivar lock: The Lock object.
@type lock: threading.Lock
'''
def __init__(self):
'''
Constructor
It initializes a threading lock
'''
self.lock = Lock()
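
# Hedged illustration (not part of HttxLib): a subclass using the inherited lock to
# guard shared state; the attribute and method names below are hypothetical.
class _CountingHttxObject(HttxObject):
    def __init__(self):
        HttxObject.__init__(self)
        self.count = 0

    def increment(self):
        # the lock created in HttxObject.__init__ serialises concurrent updates
        with self.lock:
            self.count += 1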
|
t0mk/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/ovirt/ovirt_quotas.py
|
8
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ovirt_quotas
short_description: Module to manage datacenter quotas in oVirt
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage datacenter quotas in oVirt"
options:
name:
description:
- "Name of the the quota to manage."
required: true
state:
description:
- "Should the quota be present/absent."
choices: ['present', 'absent']
default: present
datacenter:
description:
- "Name of the datacenter where quota should be managed."
required: true
description:
description:
- "Description of the the quota to manage."
cluster_threshold:
description:
- "Cluster threshold(soft limit) defined in percentage (0-100)."
cluster_grace:
description:
- "Cluster grace(hard limit) defined in percentage (1-100)."
storage_threshold:
description:
- "Storage threshold(soft limit) defined in percentage (0-100)."
storage_grace:
description:
- "Storage grace(hard limit) defined in percentage (1-100)."
clusters:
description:
- "List of dictionary of cluster limits, which is valid to specific cluster."
- "If cluster isn't spefied it's valid to all clusters in system:"
- "C(cluster) - Name of the cluster."
- "C(memory) - Memory limit (in GiB)."
- "C(cpu) - CPU limit."
storages:
description:
- "List of dictionary of storage limits, which is valid to specific storage."
- "If storage isn't spefied it's valid to all storages in system:"
- "C(storage) - Name of the storage."
- "C(size) - Size limit (in GiB)."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Add cluster quota to cluster cluster1 with memory limit 20GiB and CPU limit to 10:
ovirt_quotas:
name: quota1
datacenter: dcX
clusters:
- name: cluster1
memory: 20
cpu: 10
# Add cluster quota to all clusters with memory limit 30GiB and CPU limit to 15:
ovirt_quotas:
name: quota2
datacenter: dcX
clusters:
- memory: 30
cpu: 15
# Add storage quota to storage data1 with size limit to 100GiB
ovirt_quotas:
name: quota3
datacenter: dcX
storage_grace: 40
storage_threshold: 60
storages:
- name: data1
size: 100
# Remove quota quota1 (Note the quota must not be assigned to any VM/disk):
ovirt_quotas:
state: absent
datacenter: dcX
name: quota1
'''
RETURN = '''
id:
description: ID of the quota which is managed
returned: On success if quota is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
quota:
description: "Dictionary of all the quota attributes. Quota attributes can be found on your oVirt instance
at following url: https://ovirt.example.com/ovirt-engine/api/model#types/quota."
returned: On success if quota is found.
'''
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
get_link_name,
ovirt_full_argument_spec,
search_by_name,
)
class QuotasModule(BaseModule):
def build_entity(self):
return otypes.Quota(
description=self._module.params['description'],
name=self._module.params['name'],
storage_hard_limit_pct=self._module.params.get('storage_grace'),
storage_soft_limit_pct=self._module.params.get('storage_threshold'),
cluster_hard_limit_pct=self._module.params.get('cluster_grace'),
cluster_soft_limit_pct=self._module.params.get('cluster_threshold'),
)
def update_storage_limits(self, entity):
new_limits = {}
for storage in self._module.params.get('storages'):
new_limits[storage.get('name', '')] = {
'size': storage.get('size'),
}
old_limits = {}
sd_limit_service = self._service.service(entity.id).quota_storage_limits_service()
for limit in sd_limit_service.list():
storage = get_link_name(self._connection, limit.storage_domain) if limit.storage_domain else ''
old_limits[storage] = {
'size': limit.limit,
}
sd_limit_service.service(limit.id).remove()
return new_limits == old_limits
def update_cluster_limits(self, entity):
new_limits = {}
for cluster in self._module.params.get('clusters'):
new_limits[cluster.get('name', '')] = {
'cpu': cluster.get('cpu'),
'memory': float(cluster.get('memory')),
}
old_limits = {}
cl_limit_service = self._service.service(entity.id).quota_cluster_limits_service()
for limit in cl_limit_service.list():
cluster = get_link_name(self._connection, limit.cluster) if limit.cluster else ''
old_limits[cluster] = {
'cpu': limit.vcpu_limit,
'memory': limit.memory_limit,
}
cl_limit_service.service(limit.id).remove()
return new_limits == old_limits
def update_check(self, entity):
# -- FIXME --
        # Note that we always remove all cluster/storage limits here, because it's
        # not currently possible to update them and then re-create the limits
        # appropriately. This shouldn't have any side effects, but it's not
        # considered a correct approach.
# This feature is tracked here: https://bugzilla.redhat.com/show_bug.cgi?id=1398576
#
return (
self.update_storage_limits(entity) and
self.update_cluster_limits(entity) and
equal(self._module.params.get('description'), entity.description) and
equal(self._module.params.get('storage_grace'), entity.storage_hard_limit_pct) and
equal(self._module.params.get('storage_threshold'), entity.storage_soft_limit_pct) and
equal(self._module.params.get('cluster_grace'), entity.cluster_hard_limit_pct) and
equal(self._module.params.get('cluster_threshold'), entity.cluster_soft_limit_pct)
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
name=dict(required=True),
datacenter=dict(required=True),
description=dict(default=None),
cluster_threshold=dict(default=None, type='int', aliases=['cluster_soft_limit']),
cluster_grace=dict(default=None, type='int', aliases=['cluster_hard_limit']),
storage_threshold=dict(default=None, type='int', aliases=['storage_soft_limit']),
storage_grace=dict(default=None, type='int', aliases=['storage_hard_limit']),
clusters=dict(default=[], type='list'),
storages=dict(default=[], type='list'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
datacenters_service = connection.system_service().data_centers_service()
dc_name = module.params['datacenter']
dc_id = getattr(search_by_name(datacenters_service, dc_name), 'id', None)
if dc_id is None:
raise Exception("Datacenter '%s' was not found." % dc_name)
quotas_service = datacenters_service.service(dc_id).quotas_service()
quotas_module = QuotasModule(
connection=connection,
module=module,
service=quotas_service,
)
state = module.params['state']
if state == 'present':
ret = quotas_module.create()
# Manage cluster limits:
cl_limit_service = quotas_service.service(ret['id']).quota_cluster_limits_service()
for cluster in module.params.get('clusters'):
cl_limit_service.add(
limit=otypes.QuotaClusterLimit(
memory_limit=float(cluster.get('memory')),
vcpu_limit=cluster.get('cpu'),
cluster=search_by_name(
connection.system_service().clusters_service(),
cluster.get('name')
),
),
)
# Manage storage limits:
sd_limit_service = quotas_service.service(ret['id']).quota_storage_limits_service()
for storage in module.params.get('storages'):
sd_limit_service.add(
limit=otypes.QuotaStorageLimit(
limit=storage.get('size'),
storage_domain=search_by_name(
connection.system_service().storage_domains_service(),
storage.get('name')
),
)
)
elif state == 'absent':
ret = quotas_module.remove()
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
|
kou/zulip
|
refs/heads/master
|
zerver/migrations/0125_realm_max_invites.py
|
7
|
# Generated by Django 1.11.6 on 2017-11-30 04:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0124_stream_enable_notifications'),
]
operations = [
migrations.AddField(
model_name='realm',
name='max_invites',
field=models.IntegerField(default=100),
),
]
|
khagler/boto
|
refs/heads/develop
|
tests/integration/cloudsearch2/__init__.py
|
645
|
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
|
everypony/ponyFiction
|
refs/heads/development
|
config/local/python/app.py
|
1
|
# -*- coding: utf-8 -*-
from ponyFiction.settings.base import *
INSTALLED_APPS += ('debug_toolbar',)
JQUERY_URL = ''
# Captcha
RECAPTCHA_PUBLIC_KEY = '6Ld1_QgUAAAAAMh-JiWgux_6CERc4aATQs0iK-J2'
RECAPTCHA_PRIVATE_KEY = '6Ld1_QgUAAAAAAAAmZSDhjvskUNHFsZniIdwkn5S'
# Templates
TEMPLATES[0]['OPTIONS']['loaders'] = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
|
liuyang-li/buck
|
refs/heads/master
|
third-party/py/twitter-commons/src/python/twitter/common/python/pex_builder.py
|
5
|
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from __future__ import absolute_import
import logging
import os
import tempfile
from .compatibility import to_bytes
from .common import chmod_plus_x, open_zip, safe_mkdir, Chroot
from .interpreter import PythonInterpreter
from .marshaller import CodeMarshaller
from .pex_info import PexInfo
from .tracer import TRACER
from .util import CacheHelper, DistributionHelper
from pkg_resources import (
DefaultProvider,
ZipProvider,
get_provider,
)
BOOTSTRAP_ENVIRONMENT = b"""
import os
import sys
__entry_point__ = None
if '__file__' in locals() and __file__ is not None:
__entry_point__ = os.path.dirname(__file__)
elif '__loader__' in locals():
from zipimport import zipimporter
from pkgutil import ImpLoader
if hasattr(__loader__, 'archive'):
__entry_point__ = __loader__.archive
elif isinstance(__loader__, ImpLoader):
__entry_point__ = os.path.dirname(__loader__.get_filename())
if __entry_point__ is None:
sys.stderr.write('Could not launch python executable!\\n')
sys.exit(2)
sys.path[0] = os.path.abspath(sys.path[0])
sys.path.insert(0, os.path.abspath(os.path.join(__entry_point__, '.bootstrap')))
from _twitter_common_python.pex_bootstrapper import bootstrap_pex
bootstrap_pex(__entry_point__)
"""
class PEXBuilder(object):
class InvalidDependency(Exception): pass
class InvalidExecutableSpecification(Exception): pass
BOOTSTRAP_DIR = ".bootstrap"
def __init__(self, path=None, interpreter=None, chroot=None, pex_info=None):
self._chroot = chroot or Chroot(path or tempfile.mkdtemp())
self._pex_info = pex_info or PexInfo.default()
self._frozen = False
self._interpreter = interpreter or PythonInterpreter.get()
self._logger = logging.getLogger(__name__)
@property
def interpreter(self):
return self._interpreter
def chroot(self):
return self._chroot
def clone(self, into=None):
chroot_clone = self._chroot.clone(into=into)
return PEXBuilder(chroot=chroot_clone, interpreter=self._interpreter,
pex_info=self._pex_info.copy())
def path(self):
return self.chroot().path()
@property
def info(self):
return self._pex_info
@info.setter
def info(self, value):
if not isinstance(value, PexInfo):
raise TypeError('PEXBuilder.info must be a PexInfo!')
self._pex_info = value
def add_source(self, filename, env_filename):
self._chroot.link(filename, env_filename, "source")
if filename.endswith('.py'):
env_filename_pyc = os.path.splitext(env_filename)[0] + '.pyc'
with open(filename) as fp:
pyc_object = CodeMarshaller.from_py(fp.read(), env_filename)
self._chroot.write(pyc_object.to_pyc(), env_filename_pyc, 'source')
def add_resource(self, filename, env_filename):
self._chroot.link(filename, env_filename, "resource")
def add_requirement(self, req, dynamic=False, repo=None):
self._pex_info.add_requirement(req, repo=repo, dynamic=dynamic)
def set_entry_point(self, entry_point):
self.info.entry_point = entry_point
def add_dist_location(self, bdist):
dist = DistributionHelper.distribution_from_path(bdist)
self.add_distribution(dist)
self.add_requirement(dist.as_requirement(), dynamic=False, repo=None)
def add_egg(self, egg):
return self.add_dist_location(egg)
def _add_dist_dir(self, path, dist_name):
for root, _, files in os.walk(path):
for f in files:
filename = os.path.join(root, f)
relpath = os.path.relpath(filename, path)
target = os.path.join(self._pex_info.internal_cache, dist_name, relpath)
self._chroot.link(filename, target)
return CacheHelper.dir_hash(path)
def _add_dist_zip(self, path, dist_name):
with open_zip(path) as zf:
for name in zf.namelist():
if name.endswith('/'):
continue
target = os.path.join(self._pex_info.internal_cache, dist_name, name)
self._chroot.write(zf.read(name), target)
return CacheHelper.zip_hash(zf)
def _prepare_code_hash(self):
self._pex_info.code_hash = CacheHelper.pex_hash(self._chroot.path())
def add_distribution(self, dist, dist_name=None):
dist_name = dist_name or os.path.basename(dist.location)
if os.path.isdir(dist.location):
dist_hash = self._add_dist_dir(dist.location, dist_name)
else:
dist_hash = self._add_dist_zip(dist.location, dist_name)
# add dependency key so that it can rapidly be retrieved from cache
self._pex_info.add_distribution(dist_name, dist_hash)
def set_executable(self, filename, env_filename=None):
if env_filename is None:
env_filename = os.path.basename(filename)
if self._chroot.get("executable"):
raise PEXBuilder.InvalidExecutableSpecification(
"Setting executable on a PEXBuilder that already has one!")
self._chroot.link(filename, env_filename, "executable")
entry_point = env_filename
    entry_point = entry_point.replace(os.path.sep, '.')
self._pex_info.entry_point = entry_point.rpartition('.')[0]
# TODO(wickman) Consider changing this behavior to put the onus on the consumer
# of twitter.common.python to write the pex sources correctly.
def _prepare_inits(self):
relative_digest = self._chroot.get("source")
init_digest = set()
for path in relative_digest:
split_path = path.split(os.path.sep)
for k in range(1, len(split_path)):
sub_path = os.path.sep.join(split_path[0:k] + ['__init__.py'])
if sub_path not in relative_digest and sub_path not in init_digest:
self._chroot.write("__import__('pkg_resources').declare_namespace(__name__)",
sub_path)
init_digest.add(sub_path)
def _prepare_manifest(self):
self._chroot.write(self._pex_info.dump().encode('utf-8'), PexInfo.PATH, label='manifest')
def _prepare_main(self):
self._chroot.write(BOOTSTRAP_ENVIRONMENT, '__main__.py', label='main')
# TODO(wickman) Ideally we unqualify our setuptools dependency and inherit whatever is
# bundled into the environment so long as it is compatible (and error out if not.)
#
# As it stands, we're picking and choosing the pieces we think we need, which means
# if there are bits of setuptools imported from elsewhere they may be incompatible with
# this.
def _prepare_bootstrap(self):
"""
Write enough of distribute into the .pex .bootstrap directory so that
we can be fully self-contained.
"""
libraries = (
'twitter.common.python',
'twitter.common.python.http',
)
for name in libraries:
dirname = name.replace('twitter.common.python', '_twitter_common_python').replace('.', '/')
provider = get_provider(name)
if not isinstance(provider, DefaultProvider):
mod = __import__(name, fromlist=['wutttt'])
provider = ZipProvider(mod)
for fn in provider.resource_listdir(''):
if fn.endswith('.py'):
self._chroot.write(provider.get_resource_string(name, fn),
os.path.join(self.BOOTSTRAP_DIR, dirname, fn), 'resource')
def freeze(self):
if self._frozen:
return
self._prepare_inits()
self._prepare_code_hash()
self._prepare_manifest()
self._prepare_bootstrap()
self._prepare_main()
self._frozen = True
def build(self, filename):
self.freeze()
try:
os.unlink(filename + '~')
self._logger.warn('Previous binary unexpectedly exists, cleaning: %s' % (filename + '~'))
except OSError:
# The expectation is that the file does not exist, so continue
pass
if os.path.dirname(filename):
safe_mkdir(os.path.dirname(filename))
with open(filename + '~', 'ab') as pexfile:
assert os.path.getsize(pexfile.name) == 0
pexfile.write(to_bytes('%s\n' % self._interpreter.identity.hashbang()))
self._chroot.zip(filename + '~', mode='a')
if os.path.exists(filename):
os.unlink(filename)
os.rename(filename + '~', filename)
chmod_plus_x(filename)
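
# Hedged usage sketch (not part of the original module): the typical build flow with
# this builder; the source file, entry point and output name below are hypothetical.
def _example_build_pex():
  builder = PEXBuilder()
  builder.add_source('hello.py', 'hello.py')   # copy a module into the chroot
  builder.set_entry_point('hello')             # module to run from __main__
  builder.build('hello.pex')                   # freeze and zip into an executable pex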
|
MQQiang/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/test_file.py
|
83
|
import sys
import os
import unittest
from array import array
from weakref import proxy
import io
import _pyio as pyio
from test.support import TESTFN, run_unittest
from collections import UserList
class AutoFileTests:
# file tests for which a test file is automatically set up
def setUp(self):
self.f = self.open(TESTFN, 'wb')
def tearDown(self):
if self.f:
self.f.close()
os.remove(TESTFN)
def testWeakRefs(self):
# verify weak references
p = proxy(self.f)
p.write(b'teststring')
self.assertEqual(self.f.tell(), p.tell())
self.f.close()
self.f = None
self.assertRaises(ReferenceError, getattr, p, 'tell')
def testAttributes(self):
# verify expected attributes exist
f = self.f
f.name # merely shouldn't blow up
f.mode # ditto
f.closed # ditto
def testReadinto(self):
# verify readinto
self.f.write(b'12')
self.f.close()
a = array('b', b'x'*10)
self.f = self.open(TESTFN, 'rb')
n = self.f.readinto(a)
self.assertEqual(b'12', a.tobytes()[:n])
def testReadinto_text(self):
# verify readinto refuses text files
a = array('b', b'x'*10)
self.f.close()
self.f = self.open(TESTFN, 'r')
if hasattr(self.f, "readinto"):
self.assertRaises(TypeError, self.f.readinto, a)
def testWritelinesUserList(self):
# verify writelines with instance sequence
l = UserList([b'1', b'2'])
self.f.writelines(l)
self.f.close()
self.f = self.open(TESTFN, 'rb')
buf = self.f.read()
self.assertEqual(buf, b'12')
def testWritelinesIntegers(self):
# verify writelines with integers
self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])
def testWritelinesIntegersUserList(self):
# verify writelines with integers in UserList
l = UserList([1,2,3])
self.assertRaises(TypeError, self.f.writelines, l)
def testWritelinesNonString(self):
# verify writelines with non-string object
class NonString:
pass
self.assertRaises(TypeError, self.f.writelines,
[NonString(), NonString()])
def testErrors(self):
f = self.f
self.assertEqual(f.name, TESTFN)
self.assertTrue(not f.isatty())
self.assertTrue(not f.closed)
if hasattr(f, "readinto"):
self.assertRaises((OSError, TypeError), f.readinto, "")
f.close()
self.assertTrue(f.closed)
def testMethods(self):
methods = [('fileno', ()),
('flush', ()),
('isatty', ()),
('__next__', ()),
('read', ()),
('write', (b"",)),
('readline', ()),
('readlines', ()),
('seek', (0,)),
('tell', ()),
('write', (b"",)),
('writelines', ([],)),
('__iter__', ()),
]
methods.append(('truncate', ()))
# __exit__ should close the file
self.f.__exit__(None, None, None)
self.assertTrue(self.f.closed)
for methodname, args in methods:
method = getattr(self.f, methodname)
# should raise on closed file
self.assertRaises(ValueError, method, *args)
# file is closed, __exit__ shouldn't do anything
self.assertEqual(self.f.__exit__(None, None, None), None)
# it must also return None if an exception was given
try:
1/0
except:
self.assertEqual(self.f.__exit__(*sys.exc_info()), None)
def testReadWhenWriting(self):
self.assertRaises(OSError, self.f.read)
class CAutoFileTests(AutoFileTests, unittest.TestCase):
open = io.open
class PyAutoFileTests(AutoFileTests, unittest.TestCase):
open = staticmethod(pyio.open)
class OtherFileTests:
def testModeStrings(self):
# check invalid mode strings
for mode in ("", "aU", "wU+"):
try:
f = self.open(TESTFN, mode)
except ValueError:
pass
else:
f.close()
self.fail('%r is an invalid file mode' % mode)
def testBadModeArgument(self):
# verify that we get a sensible error message for bad mode argument
bad_mode = "qwerty"
try:
f = self.open(TESTFN, bad_mode)
except ValueError as msg:
if msg.args[0] != 0:
s = str(msg)
if TESTFN in s or bad_mode not in s:
self.fail("bad error message for invalid mode: %s" % s)
# if msg.args[0] == 0, we're probably on Windows where there may be
# no obvious way to discover why open() failed.
else:
f.close()
self.fail("no error for invalid mode: %s" % bad_mode)
def testSetBufferSize(self):
# make sure that explicitly setting the buffer size doesn't cause
# misbehaviour especially with repeated close() calls
for s in (-1, 0, 1, 512):
try:
f = self.open(TESTFN, 'wb', s)
f.write(str(s).encode("ascii"))
f.close()
f.close()
f = self.open(TESTFN, 'rb', s)
d = int(f.read().decode("ascii"))
f.close()
f.close()
except OSError as msg:
self.fail('error setting buffer size %d: %s' % (s, str(msg)))
self.assertEqual(d, s)
def testTruncateOnWindows(self):
# SF bug <http://www.python.org/sf/801631>
# "file.truncate fault on windows"
os.unlink(TESTFN)
f = self.open(TESTFN, 'wb')
try:
f.write(b'12345678901') # 11 bytes
f.close()
f = self.open(TESTFN,'rb+')
data = f.read(5)
if data != b'12345':
self.fail("Read on file opened for update failed %r" % data)
if f.tell() != 5:
self.fail("File pos after read wrong %d" % f.tell())
f.truncate()
if f.tell() != 5:
self.fail("File pos after ftruncate wrong %d" % f.tell())
f.close()
size = os.path.getsize(TESTFN)
if size != 5:
self.fail("File size after ftruncate wrong %d" % size)
finally:
f.close()
os.unlink(TESTFN)
def testIteration(self):
# Test the complex interaction when mixing file-iteration and the
# various read* methods.
dataoffset = 16384
filler = b"ham\n"
assert not dataoffset % len(filler), \
"dataoffset must be multiple of len(filler)"
nchunks = dataoffset // len(filler)
testlines = [
b"spam, spam and eggs\n",
b"eggs, spam, ham and spam\n",
b"saussages, spam, spam and eggs\n",
b"spam, ham, spam and eggs\n",
b"spam, spam, spam, spam, spam, ham, spam\n",
b"wonderful spaaaaaam.\n"
]
methods = [("readline", ()), ("read", ()), ("readlines", ()),
("readinto", (array("b", b" "*100),))]
try:
# Prepare the testfile
bag = self.open(TESTFN, "wb")
bag.write(filler * nchunks)
bag.writelines(testlines)
bag.close()
# Test for appropriate errors mixing read* and iteration
for methodname, args in methods:
f = self.open(TESTFN, 'rb')
if next(f) != filler:
self.fail, "Broken testfile"
meth = getattr(f, methodname)
meth(*args) # This simply shouldn't fail
f.close()
# Test to see if harmless (by accident) mixing of read* and
# iteration still works. This depends on the size of the internal
# iteration buffer (currently 8192,) but we can test it in a
# flexible manner. Each line in the bag o' ham is 4 bytes
# ("h", "a", "m", "\n"), so 4096 lines of that should get us
# exactly on the buffer boundary for any power-of-2 buffersize
# between 4 and 16384 (inclusive).
f = self.open(TESTFN, 'rb')
for i in range(nchunks):
next(f)
testline = testlines.pop(0)
try:
line = f.readline()
except ValueError:
self.fail("readline() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("readline() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
buf = array("b", b"\x00" * len(testline))
try:
f.readinto(buf)
except ValueError:
self.fail("readinto() after next() with supposedly empty "
"iteration-buffer failed anyway")
line = buf.tobytes()
if line != testline:
self.fail("readinto() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
try:
line = f.read(len(testline))
except ValueError:
self.fail("read() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("read() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
try:
lines = f.readlines()
except ValueError:
self.fail("readlines() after next() with supposedly empty "
"iteration-buffer failed anyway")
if lines != testlines:
self.fail("readlines() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
f.close()
# Reading after iteration hit EOF shouldn't hurt either
f = self.open(TESTFN, 'rb')
try:
for line in f:
pass
try:
f.readline()
f.readinto(buf)
f.read()
f.readlines()
except ValueError:
self.fail("read* failed after next() consumed file")
finally:
f.close()
finally:
os.unlink(TESTFN)
class COtherFileTests(OtherFileTests, unittest.TestCase):
open = io.open
class PyOtherFileTests(OtherFileTests, unittest.TestCase):
open = staticmethod(pyio.open)
def tearDownModule():
# Historically, these tests have been sloppy about removing TESTFN.
# So get rid of it no matter what.
if os.path.exists(TESTFN):
os.unlink(TESTFN)
if __name__ == '__main__':
unittest.main()
|
gmist/kay-ru
|
refs/heads/master
|
kay/management/shell.py
|
3
|
# -*- coding: utf-8 -*-
"""
Kay remote shell management command.
:Copyright: (c) 2009 Accense Technology, Inc.
Takashi Matsuo <tmatsuo@candit.jp>,
All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import os
import os.path
import sys
import time
import getpass
import logging
import threading
import Queue
import signal
import atexit
try:
import readline
import rlcompleter
except ImportError:
readline = None
from werkzeug.utils import import_string
from google.appengine.ext import db
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore_file_stub
import kay
from kay.conf import settings
from kay.utils.repr import dump
from kay.utils.decorators import retry_on_timeout
from kay.misc import get_appid
from kay.misc import get_datastore_paths
from kay.management.utils import print_status
THREAD_NUM = 20
HISTORY_PATH = os.path.expanduser('~/.kay_shell_history')
def get_all_models_as_dict(only_polymodel_base=False):
ret = {}
apps = []
app = kay.app.get_application()
apps.append(app.app)
for key, submount_app in app.mounts.iteritems():
if not hasattr(submount_app, 'app_settings') or key == "/_kay":
continue
apps.append(submount_app)
for kay_app in apps:
for app in kay_app.app_settings.INSTALLED_APPS:
try:
mod = import_string("%s.models" % app)
except (ImportError, AttributeError), e:
logging.debug("Failed to import model of an app '%s': '%s', skipped."
% (app, e))
continue
for name, c in mod.__dict__.iteritems():
try:
if issubclass(c, db.Model):
if c in ret.values():
continue
if only_polymodel_base and \
issubclass(c, db.polymodel.PolyModel) and \
c.__base__ is not db.polymodel.PolyModel:
continue
while ret.has_key(name):
name = name + '_'
ret[name] = c
except TypeError:
pass
return ret
def auth_func():
return raw_input('Username:'), getpass.getpass('Password:')
class JobManager(object):
def __init__(self, models):
self.queue = Queue.Queue()
self.finished = dict([[model.kind(), False] for model in models])
self.counts = dict([[model.kind(), 0] for model in models])
self.unhandled_counts = dict([[model.kind(), 0] for model in models])
def add(self, model, job):
self.queue.put((model, job))
self.counts[model.kind()] += len(job)
def set_ready(self, model):
self.finished[model.kind()] = True
@property
def finished_collecting(self):
for finished in self.finished.values():
if not finished:
return False
return True
def report_result(self):
for kind, count in self.counts.iteritems():
sys.stderr.write("Collected %d of %s.\n" % (count, kind))
while not self.queue.empty():
try:
unused_item = self.queue.get_nowait()
self.queue.task_done()
model, job = unused_item
self.unhandled_counts[model.kind()] += len(job)
except Queue.Empty:
pass
for kind, count in self.unhandled_counts.iteritems():
if count != 0:
sys.stderr.write("Unhandled %d of %s.\n" % (count, kind))
@retry_on_timeout()
def fetch_from_query(query, size):
return query.fetch(size)
class JobCollector(threading.Thread):
def __init__(self, job_manager, model, batch_size=20,
thread_num=THREAD_NUM):
threading.Thread.__init__(self)
self.job_manager = job_manager
self.model = model
self.batch_size = batch_size
self.thread_num = thread_num
self.exit_flag = False
def run(self):
query = db.Query(self.model, keys_only=True).order("__key__")
entities = fetch_from_query(query, self.batch_size)
while entities and not self.exit_flag:
self.job_manager.add(self.model, entities)
query = db.Query(self.model, keys_only=True) \
.order("__key__") \
.filter("__key__ >", entities[-1])
entities = fetch_from_query(query, self.batch_size)
self.job_manager.set_ready(self.model)
@retry_on_timeout()
def delete_entities(entities):
db.delete(entities)
class DeleteRunner(threading.Thread):
def __init__(self, job_manager):
threading.Thread.__init__(self)
self.job_manager = job_manager
self.exit_flag = False
def run(self):
while not self.exit_flag:
try:
(model, entities) = self.job_manager.queue.get_nowait()
sys.stderr.write("%s is deleting %d of %s entities.\n" %
(self.getName(), len(entities), model.kind()))
sys.stderr.flush()
delete_entities(entities)
self.job_manager.queue.task_done()
except Queue.Empty, e:
if self.job_manager.finished_collecting:
return
else:
time.sleep(1)
def any_thread_alive(threads):
for t in threads:
if t.isAlive():
return True
def delete_all_entities(models=None, batch_size=100):
models_dict = get_all_models_as_dict(only_polymodel_base=True)
if models is None:
models = models_dict.values()
if not isinstance(models, list):
models = [models]
target_models = []
for model in models:
if not (issubclass(model, db.Model) or \
issubclass(model, db.polymodel.PolyModel)):
sys.stderr.write("Invalid model: %s\n" % model)
return
if model is db.polymodel.PolyModel or model is db.Model:
continue
target_models.append(model)
job_manager = JobManager(target_models)
threads = []
for model in target_models:
job_collector = JobCollector(job_manager, model, batch_size)
threads.append(job_collector)
job_collector.start()
for i in range(THREAD_NUM):
t = DeleteRunner(job_manager)
threads.append(t)
t.start()
def handler(signum, frame):
for t in threads:
t.exit_flag = True
signal.signal(signal.SIGINT, handler)
while any_thread_alive(threads):
for t in threads:
if t.isAlive():
t.join(1)
job_manager.report_result()
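# Hedged usage sketch (illustrative only, not part of the original module):
# wiping a handful of kinds from a shell/rshell session. The kind names passed
# in are placeholders; db.class_for_kind() resolves whatever kinds your apps
# actually define.
def _example_wipe_kinds(kind_names, batch_size=50):
  """Resolve kind names (e.g. ['MyModel']) and bulk-delete their entities."""
  models = [db.class_for_kind(kind) for kind in kind_names]
  delete_all_entities(models, batch_size=batch_size)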
def create_useful_locals():
local_d = {'db': db,
'settings': settings,
'dump': dump}
local_d.update(get_all_models_as_dict())
return local_d
def create_useful_locals_for_rshell():
local_d = {'delete_all_entities': delete_all_entities}
local_d.update(create_useful_locals())
return local_d
def shell(datastore_path='', history_path='', useful_imports=True,
use_ipython=True, use_sqlite=False):
""" Start a new interactive python session."""
banner = 'Interactive Kay Shell'
if useful_imports:
namespace = create_useful_locals()
else:
namespace = {}
appid = get_appid()
os.environ['APPLICATION_ID'] = appid
p = get_datastore_paths()
if not datastore_path:
datastore_path = p[0]
if not history_path:
history_path = p[1]
apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
if use_sqlite:
from google.appengine.datastore import datastore_sqlite_stub
stub = datastore_sqlite_stub.DatastoreSqliteStub(appid, datastore_path,
history_path)
else:
stub = datastore_file_stub.DatastoreFileStub(appid, datastore_path,
history_path)
apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', stub)
if use_ipython:
try:
import IPython
except ImportError:
pass
else:
sh = IPython.Shell.IPShellEmbed(argv='', banner=banner)
sh(global_ns={}, local_ns=namespace)
return
sys.ps1 = '%s> ' % appid
if readline is not None:
readline.parse_and_bind('tab: complete')
atexit.register(lambda: readline.write_history_file(HISTORY_PATH))
if os.path.exists(HISTORY_PATH):
readline.read_history_file(HISTORY_PATH)
from code import interact
interact(banner, local=namespace)
# TODO: Need refactoring of following three functions.
def create_user(user_name=('u', ''), password=('P', ''), is_admin=('A', False),
appid=('a', ''), host=('h', ''), path=('p', ''), secure=True):
""" Create new user using remote_api.
"""
from kay.auth import (
create_new_user, DuplicateKeyError,
)
if not user_name:
print_status('user_name required')
sys.exit(1)
if not password:
password = getpass.getpass('Please input a password for new user:')
if not appid:
appid = get_appid()
if not host:
host = "%s.appspot.com" % appid
if not path:
path = '/remote_api'
remote_api_stub.ConfigureRemoteApi(None, path, auth_func,
host, secure=secure, save_cookies=True)
remote_api_stub.MaybeInvokeAuthentication()
try:
create_new_user(user_name, password, is_admin=is_admin)
print_status('A new user: %s successfully created.' % user_name)
sys.exit(0)
except DuplicateKeyError, e:
print_status(e)
sys.exit(1)
def clear_datastore(appid=('a', ''), host=('h', ''), path=('p', ''),
kinds=('k', ''), clear_memcache=('c', False), secure=True):
"""Clear all the data on GAE environment using remote_api.
"""
if not appid:
appid = get_appid()
if not host:
host = "%s.appspot.com" % appid
if not path:
path = '/remote_api'
if not kinds:
models = None
else:
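# Presumably importing every models module here registers their kinds so that
# db.class_for_kind() below can resolve them.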
models_dict = get_all_models_as_dict()
models = []
for kind in kinds.split(','):
models.append(db.class_for_kind(kind))
remote_api_stub.ConfigureRemoteApi(None, path, auth_func,
host, secure=secure, save_cookies=True)
remote_api_stub.MaybeInvokeAuthentication()
delete_all_entities(models)
if clear_memcache:
from google.appengine.api import memcache
memcache.flush_all()
sys.stderr.write("Flushed memcache.\n")
def rshell(appid=('a', ''), host=('h', ''), path=('p', ''),
useful_imports=True, secure=True, use_ipython=True):
"""Start a new interactive python session with RemoteDatastore stub."""
banner = ("Interactive Kay Shell with RemoteDatastore. \n"
"-----------------WARNING--------------------\n"
"\n"
"Please be careful in this console session.\n"
"\n"
"-----------------WARNING--------------------\n")
if useful_imports:
namespace = create_useful_locals_for_rshell()
else:
namespace = {}
if not appid:
appid = get_appid()
if not host:
host = "%s.appspot.com" % appid
if not path:
path = '/remote_api'
remote_api_stub.ConfigureRemoteApi(None, path, auth_func,
host, secure=secure, save_cookies=True)
remote_api_stub.MaybeInvokeAuthentication()
if use_ipython:
try:
import IPython
except ImportError:
pass
else:
sh = IPython.Shell.IPShellEmbed(
argv=['-pi1', '%s[\#]: ' % appid, '-po', '%s[\#]: ' % appid],
banner=banner)
sh(global_ns={}, local_ns=namespace)
return
sys.ps1 = '%s> ' % appid
if readline is not None:
readline.parse_and_bind('tab: complete')
atexit.register(lambda: readline.write_history_file(HISTORY_PATH))
if os.path.exists(HISTORY_PATH):
readline.read_history_file(HISTORY_PATH)
from code import interact
interact(banner, local=namespace)
|
sungkim11/mhargadh
|
refs/heads/master
|
django/contrib/messages/storage/user_messages.py
|
308
|
"""
Storages used to assist in the deprecation of contrib.auth User messages.
"""
from django.contrib.messages import constants
from django.contrib.messages.storage.base import BaseStorage, Message
from django.contrib.auth.models import User
from django.contrib.messages.storage.fallback import FallbackStorage
class UserMessagesStorage(BaseStorage):
"""
Retrieves messages from the User, using the legacy user.message_set API.
This storage is "read-only" insofar as it can only retrieve and delete
messages, not store them.
"""
session_key = '_messages'
def _get_messages_queryset(self):
"""
Returns the QuerySet containing all user messages (or ``None`` if
request.user is not a contrib.auth User).
"""
user = getattr(self.request, 'user', None)
if isinstance(user, User):
return user._message_set.all()
def add(self, *args, **kwargs):
raise NotImplementedError('This message storage is read-only.')
def _get(self, *args, **kwargs):
"""
Retrieves a list of messages assigned to the User. This backend never
stores anything, so all_retrieved is assumed to be False.
"""
queryset = self._get_messages_queryset()
if queryset is None:
# This is a read-only and optional storage, so to ensure other
# storages will also be read if used with FallbackStorage an empty
# list is returned rather than None.
return [], False
messages = []
for user_message in queryset:
messages.append(Message(constants.INFO, user_message.message))
return messages, False
def _store(self, messages, *args, **kwargs):
"""
Removes any messages assigned to the User and returns the list of
messages (since no messages are stored in this read-only storage).
"""
queryset = self._get_messages_queryset()
if queryset is not None:
queryset.delete()
return messages
class LegacyFallbackStorage(FallbackStorage):
"""
Works like ``FallbackStorage`` but also handles retrieving (and clearing)
contrib.auth User messages.
"""
storage_classes = (UserMessagesStorage,) + FallbackStorage.storage_classes
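# Hedged usage note (assumed, not taken from this module): the legacy storage
# is normally selected via the MESSAGE_STORAGE setting, e.g. in settings.py:
#   MESSAGE_STORAGE = 'django.contrib.messages.storage.user_messages.LegacyFallbackStorage'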
|
jillesme/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/test/skip.py
|
174
|
# Copyright (C) 2010 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
_log = logging.getLogger(__name__)
def skip_if(klass, condition, message=None, logger=None):
"""Makes all test_* methods in a given class no-ops if the given condition
is False. Backported from Python 3.1+'s unittest.skipIf decorator."""
if not logger:
logger = _log
if not condition:
return klass
for name in dir(klass):
attr = getattr(klass, name)
if not callable(attr):
continue
if not name.startswith('test_'):
continue
setattr(klass, name, _skipped_method(attr, message, logger))
klass._printed_skipped_message = False
return klass
def _skipped_method(method, message, logger):
def _skip(*args):
if method.im_class._printed_skipped_message:
return
method.im_class._printed_skipped_message = True
logger.info('Skipping %s.%s: %s' % (method.__module__, method.im_class.__name__, message))
return _skip
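# Hedged usage sketch (illustrative only, not part of the original module):
# skip_if is applied to an already-defined TestCase class; the condition and
# message below are placeholders.
def _example_apply_skip(test_class, running_on_ci):
    """Stub out the class's test_* methods unless we are running on CI."""
    return skip_if(test_class, not running_on_ci, 'requires CI-only fixtures')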
|
AlexanderSavelyev/rdkit
|
refs/heads/master
|
Docs/Book/conf.py
|
1
|
# -*- coding: utf-8 -*-
#
# RDKit documentation build configuration file, created by
# sphinx-quickstart on Sun Aug 7 18:51:45 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('exts'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest','extapi']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'The RDKit'
copyright = u'2014, Greg Landrum'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2015.03'
# The full version, including alpha/beta/rc tags.
release = '2015.03.1pre'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '../Images/logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'**':['globaltoc.html','relations.html','sourcelink.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'RDKitdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'RDKit.tex', u'RDKit Documentation',
u'Greg Landrum', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'rdkit', u'RDKit Documentation',
[u'Greg Landrum'], 1)
]
|
pythonprobr/pypratico
|
refs/heads/master
|
web/pizza/entrega/models.py
|
1
|
# coding: utf-8
from django.db import models
DDD_DEFAULT = '11'
class Cliente(models.Model):
ddd = models.CharField(max_length=2, default=DDD_DEFAULT)
fone = models.CharField(max_length=8, db_index=True)
ramal = models.CharField(max_length=4, blank=True, db_index=True)
contato = models.CharField(max_length=64, db_index=True)
outros_contatos = models.TextField(blank=True)
logradouro = models.CharField(max_length=32, db_index=True)
numero = models.PositiveIntegerField('número', db_index=True)
complemento = models.CharField(max_length=32, blank=True)
obs = models.TextField(blank=True)
class Meta:
unique_together = ['ddd', 'fone', 'ramal']
ordering = ['fone', 'ramal']
def __str__(self):
fone = self.fone
if self.ddd != DDD_DEFAULT:
fone = '(%s)%s' % (self.ddd, fone)
if self.ramal:
fone += ' r.' + self.ramal
return '%s - %s' % (fone, self.contato)
def endereco(self):
end = '%s, %s' % (self.logradouro, self.numero)
if self.complemento:
end += ', ' + self.complemento
return end
endereco.short_description = u'endereço'
class Pedido(models.Model):
inclusao = models.DateTimeField(auto_now_add=True)
cliente = models.ForeignKey(Cliente)
entregador = models.ForeignKey('Entregador', null=True, blank=True)
partida = models.TimeField(null=True, blank=True)
class Meta:
ordering = ['-inclusao']
def __str__(self):
return '%s / %s' % (self.entrou(), self.cliente)
def entrou(self):
return self.inclusao.strftime('%H:%M')
def despachado(self):
return (self.entregador is not None) and (self.partida is not None)
despachado.boolean = True
def viagem(self):
if self.partida and self.entregador:
return '%s - %s' % (self.partida.strftime('%H:%M'), self.entregador)
else:
return ''
class Entregador(models.Model):
nome = models.CharField(max_length=64)
def __str__(self):
return '%s (%s)' % (self.nome, self.id)
class Meta:
verbose_name_plural = 'Entregadores'
SABORES = [
('atum', 'Atum'),
('calabresa', 'Calabresa'),
('catupiry', 'Catupiry'),
('marguerita', 'Marguerita'),
('mussarela', 'Mussarela'),
('portuguesa' , 'Portuguesa'),
('quatro queijos', 'Quatro Queijos'),
]
class Pizza(models.Model):
pedido = models.ForeignKey(Pedido)
sabor1 = models.CharField('sabor 1', max_length=32, choices=SABORES)
coberto1 = models.BooleanField('cob.')
sabor2 = models.CharField('sabor 2', max_length=32, choices=SABORES, blank=True)
coberto2 = models.BooleanField('cob.')
def __str__(self):
sabor = self.sabor1
if self.coberto1:
sabor += ' coberta'
if self.sabor2:
sabor2 = self.sabor2
if self.coberto2:
sabor2 += ' coberta'
sabor = '½ %s, ½ %s' % (sabor, sabor2)
return sabor
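# Hedged example (illustrative only): with sabor1='calabresa', coberto1=True,
# sabor2='mussarela' and coberto2=False, __str__ above renders
# '½ calabresa coberta, ½ mussarela'.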
|
Omegaphora/external_chromium_org
|
refs/heads/lp5.1
|
third_party/tlslite/tlslite/integration/tlssocketservermixin.py
|
116
|
# Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""TLS Lite + SocketServer."""
from tlslite.tlsconnection import TLSConnection
class TLSSocketServerMixIn:
"""
This class can be mixed in with any L{SocketServer.TCPServer} to
add TLS support.
To use this class, define a new class that inherits from it and
some L{SocketServer.TCPServer} (with the mix-in first). Then
implement the handshake() method, doing some sort of server
handshake on the connection argument. If the handshake method
returns True, the RequestHandler will be triggered. Below is a
complete example of a threaded HTTPS server::
from SocketServer import *
from BaseHTTPServer import *
from SimpleHTTPServer import *
from tlslite import *
s = open("./serverX509Cert.pem").read()
x509 = X509()
x509.parse(s)
certChain = X509CertChain([x509])
s = open("./serverX509Key.pem").read()
privateKey = parsePEMKey(s, private=True)
sessionCache = SessionCache()
class MyHTTPServer(ThreadingMixIn, TLSSocketServerMixIn,
HTTPServer):
def handshake(self, tlsConnection):
try:
tlsConnection.handshakeServer(certChain=certChain,
privateKey=privateKey,
sessionCache=sessionCache)
tlsConnection.ignoreAbruptClose = True
return True
except TLSError, error:
print "Handshake failure:", str(error)
return False
httpd = MyHTTPServer(('localhost', 443), SimpleHTTPRequestHandler)
httpd.serve_forever()
"""
def finish_request(self, sock, client_address):
tlsConnection = TLSConnection(sock)
if self.handshake(tlsConnection) == True:
self.RequestHandlerClass(tlsConnection, client_address, self)
tlsConnection.close()
#Implement this method to do some form of handshaking. Return True
#if the handshake finishes properly and the request is authorized.
def handshake(self, tlsConnection):
raise NotImplementedError()
|
espadrine/opera
|
refs/heads/master
|
chromium/src/tools/grit/grit/node/io_unittest.py
|
9
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for io.FileNode'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import os
import StringIO
import unittest
from grit.node import misc
from grit.node import io
from grit.node import empty
from grit import grd_reader
from grit import util
class FileNodeUnittest(unittest.TestCase):
def testGetPath(self):
root = misc.GritNode()
root.StartParsing(u'grit', None)
root.HandleAttribute(u'latest_public_release', u'0')
root.HandleAttribute(u'current_release', u'1')
root.HandleAttribute(u'base_dir', ur'..\resource')
translations = empty.TranslationsNode()
translations.StartParsing(u'translations', root)
root.AddChild(translations)
file_node = io.FileNode()
file_node.StartParsing(u'file', translations)
file_node.HandleAttribute(u'path', ur'flugel\kugel.pdf')
translations.AddChild(file_node)
root.EndParsing()
self.failUnless(root.ToRealPath(file_node.GetInputPath()) ==
util.normpath(
os.path.join(ur'../resource', ur'flugel/kugel.pdf')))
def VerifyCliquesContainEnglishAndFrenchAndNothingElse(self, cliques):
for clique in cliques:
self.failUnlessEqual(len(clique[0].clique), 2)
self.failUnless('en' in clique[0].clique)
self.failUnless('fr' in clique[0].clique)
def testLoadTranslations(self):
xml = '''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<translations>
<file path="generated_resources_fr.xtb" lang="fr" />
</translations>
<release seq="3">
<messages>
<message name="ID_HELLO">Hello!</message>
<message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>Joi</ex></ph></message>
</messages>
</release>
</grit>'''
grd = grd_reader.Parse(StringIO.StringIO(xml),
util.PathFromRoot('grit/testdata'))
grd.SetOutputLanguage('en')
grd.RunGatherers()
self.VerifyCliquesContainEnglishAndFrenchAndNothingElse(grd.GetCliques())
def testIffyness(self):
grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<translations>
<if expr="lang == 'fr'">
<file path="generated_resources_fr.xtb" lang="fr" />
</if>
</translations>
<release seq="3">
<messages>
<message name="ID_HELLO">Hello!</message>
<message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>Joi</ex></ph></message>
</messages>
</release>
</grit>'''), util.PathFromRoot('grit/testdata'))
grd.SetOutputLanguage('en')
grd.RunGatherers()
grd.SetOutputLanguage('fr')
grd.RunGatherers()
def testConditionalLoadTranslations(self):
xml = '''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3"
base_dir=".">
<translations>
<if expr="True">
<file path="generated_resources_fr.xtb" lang="fr" />
</if>
<if expr="False">
<file path="no_such_file.xtb" lang="de" />
</if>
</translations>
<release seq="3">
<messages>
<message name="ID_HELLO">Hello!</message>
<message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>
Joi</ex></ph></message>
</messages>
</release>
</grit>'''
grd = grd_reader.Parse(StringIO.StringIO(xml),
util.PathFromRoot('grit/testdata'))
grd.SetOutputLanguage('en')
grd.RunGatherers()
self.VerifyCliquesContainEnglishAndFrenchAndNothingElse(grd.GetCliques())
def testConditionalOutput(self):
xml = '''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3"
base_dir=".">
<outputs>
<output filename="resource.h" type="rc_header" />
<output filename="en/generated_resources.rc" type="rc_all"
lang="en" />
<if expr="pp_if('NOT_TRUE')">
<output filename="de/generated_resources.rc" type="rc_all"
lang="de" />
</if>
</outputs>
<release seq="3">
<messages>
<message name="ID_HELLO">Hello!</message>
</messages>
</release>
</grit>'''
grd = grd_reader.Parse(StringIO.StringIO(xml),
util.PathFromRoot('grit/test/data'),
defines={})
grd.SetOutputLanguage('en')
grd.RunGatherers()
outputs = grd.GetChildrenOfType(io.OutputNode)
active = set(grd.ActiveDescendants())
self.failUnless(outputs[0] in active)
self.failUnless(outputs[0].GetType() == 'rc_header')
self.failUnless(outputs[1] in active)
self.failUnless(outputs[1].GetType() == 'rc_all')
self.failUnless(outputs[2] not in active)
self.failUnless(outputs[2].GetType() == 'rc_all')
if __name__ == '__main__':
unittest.main()
|
gdb/pyseidon
|
refs/heads/master
|
pyseidon/__init__.py
|
1
|
import array
import atexit
import errno
import fcntl
import _multiprocessing
import os
import select
import signal
import socket
import struct
import sys
# Read a line up to a custom delimiter
def _recvline(io, delim=b'\n'):
buf = []
while True:
byte = io.recv(1)
buf.append(byte)
# End of line reached!
if byte == b'' or byte == delim:
return b''.join(buf)
def _recvfds(sock):
msg, anc, flags, addr = sock.recvmsg(1, 4096)
fds = []
for level, type, data in anc:
fda = array.array('I')
fda.frombytes(data)
fds.extend(fda)
return fds
def _recvfd(sock):
fds = _recvfds(sock)
assert len(fds) == 1, 'Expected exactly one FD, but got: {}'.format(fds)
return fds[0]
class Pyseidon(object):
def __init__(self, path='/tmp/pyseidon.sock'):
self.path = path
self.children = {}
self.master_pid = os.getpid()
r, w = os.pipe()
# These are purely informational, so there's no point in
# blocking on them.
fcntl.fcntl(r, fcntl.F_SETFL, fcntl.fcntl(r, fcntl.F_GETFL) | os.O_NONBLOCK)
fcntl.fcntl(w, fcntl.F_SETFL, fcntl.fcntl(w, fcntl.F_GETFL) | os.O_NONBLOCK)
self.loopbreak_reader = os.fdopen(r, 'rb', 0)
self.loopbreak_writer = os.fdopen(w, 'wb', 0)
def _run_event_loop(self):
while True:
conns = {}
for child in self.children.values():
if not child['notified']:
conns[child['conn']] = child
try:
# We want to detect when a client has hung up (so we
# can tell the child about this). See
# http://stefan.buettcher.org/cs/conn_closed.html for
# another way of solving this problem with poll(2).
candidates = [self.loopbreak_reader, self.sock] + list(conns.keys())
readers, _, _ = select.select(candidates, [], [])
except select.error as e:
if e.errno == errno.EINTR:
# Probably just got a SIGCHLD. We'll forfeit this run
# through the loop.
continue
else:
raise
for reader in readers:
if reader == self.loopbreak_reader:
# Drain the loopbreak reader
self.loopbreak_reader.read()
self._reap()
elif reader == self.sock:
argv = self._accept()
# In the master, we'll just hit another cycle through
# the loop.
if not self._is_master():
return argv
elif reader in conns:
child = conns[reader]
data = self._socket_peek(reader)
if len(data) == 0:
self._notify_socket_dead(child)
elif data is None:
raise RuntimeError('Socket unexpectedly showed up in readers list, but has nothing to read: child={}'.format(child['pid']))
else:
raise RuntimeError('Socket unexpectedly had available data: child={} data={}'.format(child['pid'], data))
def _socket_peek(self, sock):
try:
# See what data is available
data = sock.recv(256, socket.MSG_PEEK | socket.MSG_DONTWAIT)
except socket.error as e:
if e.errno == errno.EAGAIN:
# Socket is fine, and there's nothing to read.
return None
# Hm, something's wrong.
raise
else:
return data
def _notify_socket_dead(self, child):
child['notified'] = True
print('[{}] Client disconnected; sending HUP: child={}'.format(os.getpid(), child['pid']), file=sys.stderr)
try:
# HUP is about right for this.
os.kill(child['pid'], signal.SIGHUP)
except OSError as e:
# ESRCH means the process is dead, and it'll get cleaned
# up automatically.
if e.errno != errno.ESRCH:
raise
def _listen(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# Make sure this socket is readable only by the current user,
# since it'll give possibly arbitrary code execution to anyone
# who can connect to it.
umask = os.umask(0o077)
try:
self.sock.bind(self.path)
finally:
os.umask(umask)
atexit.register(self._remove_socket)
self.sock.listen(1)
print('[{}] Pyseidon master booted'.format(os.getpid()), file=sys.stderr)
def _accept(self):
conn, _ = self.sock.accept()
# Note that these will be blocking, so a slow or misbehaving
# client could in theory cause issues. We could solve this by
# adding these to the event loop.
argv = self._read_argv(conn)
env = self._read_env(conn)
cwd = self._read_cwd(conn)
fds = self._read_fds(conn)
pid = os.fork()
if pid:
# Master
print('[{}] Spawned worker: pid={} argv={} cwd={}'.format(os.getpid(), pid, argv, cwd), file=sys.stderr)
# Do not want these FDs
for fd in fds:
fd.close()
self.children[pid] = {'conn': conn, 'pid': pid, 'notified': False}
else:
# Worker
self._setup_env(conn, argv, env, cwd, fds)
return argv
def _setup_env(self, conn, argv, env, cwd, fds):
# Close now-unneeded file descriptors
conn.close()
self.loopbreak_reader.close()
self.loopbreak_writer.close()
self.sock.close()
print('[{}] cwd={} argv={} env_count={}'.format(os.getpid(), cwd, argv, len(env)), file=sys.stderr)
# Python doesn't natively let you set your actual
# procname. TODO: consider importing a library for that.
sys.argv = [a.decode('utf-8') for a in argv[1:]]
env = {k.decode('utf-8'): v.decode('utf-8') for k, v in env.items()}
# This changes the actual underlying environment
os.environ.clear()
os.environ.update(env)
os.chdir(cwd)
# Set up file descriptors
stdin, stdout, stderr = fds
os.dup2(stdin.fileno(), 0)
os.dup2(stdout.fileno(), 1)
os.dup2(stderr.fileno(), 2)
stdin.close()
stdout.close()
stderr.close()
def _is_master(self):
return os.getpid() == self.master_pid
def _remove_socket(self):
# Don't worry about removing the socket if a worker exits
if not self._is_master():
return
try:
os.unlink(self.path)
except OSError:
if os.path.exists(self.path):
raise
def _read_argv(self, conn):
return self._read_array(conn)
def _read_env(self, conn):
env = {}
kv_pairs = self._read_array(conn)
for kv in kv_pairs:
k, v = kv.split(b'=', 1)
env[k] = v
return env
def _read_array(self, conn):
argc_packed = conn.recv(4)
argc, = struct.unpack('I', argc_packed)
argv = []
for i in range(argc):
line = _recvline(conn, b'\0')
if line[-1:] != b'\0':
raise RuntimeError("Corrupted array; not null terminated: {}".format(line))
argv.append(line.rstrip(b'\0'))
return argv
def _read_cwd(self, conn):
line = _recvline(conn, b'\0')
if line[-1:] != b'\0':
raise RuntimeError("Corrupted cwd; not null terminated: {}".format(line))
return line.rstrip(b'\0')
def _read_fds(self, conn):
stdin = os.fdopen(_recvfd(conn))
stdout = os.fdopen(_recvfd(conn), 'w')
stderr = os.fdopen(_recvfd(conn), 'w')
return stdin, stdout, stderr
def _break_loop(self, signum, stack):
try:
self.loopbreak_writer.write(b'a')
except IOError as e:
# The pipe is full. This is surprising, but could happen
# in theory if we're being spammed with dying children.
if e.errno == errno.EAGAIN:
return
else:
raise
def _reap(self):
try:
while True:
pid, exitinfo = os.waitpid(-1, os.WNOHANG)
if pid == 0:
# Just means there's an extra child hanging around
break
sig = exitinfo % 2**8
status = exitinfo >> 8
if sig:
print('[{}] Worker {} exited due to signal {}'.format(os.getpid(), pid, sig), file=sys.stderr)
# In this case, we'll just have the client exit
# with an arbitrary status 100.
client_exit = 100
else:
if pid in self.children:
print('[{}] Worker {} exited with status {}'.format(os.getpid(), pid, status), file=sys.stderr)
else:
print('[{}] Non-worker child process {} exited with status {}'.format(os.getpid(), pid, status), file=sys.stderr)
continue
client_exit = status
conn = self.children[pid]['conn']
try:
# TODO: make this non-blocking
conn.send(struct.pack('I', client_exit))
except socket.error as e:
# Shouldn't care if the client has died in the
# meanwhile. Their loss!
if e.errno == errno.EPIPE:
pass
else:
raise
conn.close()
del self.children[pid]
except OSError as e:
# Keep going until we run out of dead workers
if e.errno == errno.ECHILD:
return
else:
raise
def run(self, callback):
# Install SIGCHLD handler so we know when workers exit
old = signal.signal(signal.SIGCHLD, self._break_loop)
# Start listening on the UNIX socket
self._listen()
# And do the actual workhorse
self._run_event_loop()
# Get rid of that handler
signal.signal(signal.SIGCHLD, old)
# In theory we might add the ability for the master to
# gracefully exit.
if self._is_master():
return
# Guess we're in a worker process.
callback()
sys.exit(0)
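# Hedged client-side sketch (illustrative only, not part of the original
# module): the sending counterpart to _read_array's wire format, i.e. a 4-byte
# unsigned count packed with the same native struct format, followed by
# null-terminated byte strings.
def _example_send_array(sock, items):
    """Send e.g. [b'ls', b'-l'] framed the way Pyseidon._read_array expects."""
    sock.sendall(struct.pack('I', len(items)))
    for item in items:
        sock.sendall(item + b'\0')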
|
nathanbjenx/cairis
|
refs/heads/master
|
cairis/gui/SecurityPatternNotebook.py
|
1
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
from cairis.core.armid import *
from PatternStructureListCtrl import PatternStructureListCtrl
from RequirementListCtrl import RequirementListCtrl
__author__ = 'Shamal Faily'
class MLTextPage(wx.Panel):
def __init__(self,parent,winId):
wx.Panel.__init__(self,parent)
topSizer = wx.BoxSizer(wx.VERTICAL)
narrativeBox = wx.StaticBox(self,-1)
narrativeBoxSizer = wx.StaticBoxSizer(narrativeBox,wx.HORIZONTAL)
topSizer.Add(narrativeBoxSizer,1,wx.EXPAND)
self.narrativeCtrl = wx.TextCtrl(self,winId,'',style=wx.TE_MULTILINE)
narrativeBoxSizer.Add(self.narrativeCtrl,1,wx.EXPAND)
self.SetSizer(topSizer)
class StructurePage(wx.Panel):
def __init__(self,parent):
wx.Panel.__init__(self,parent)
topSizer = wx.BoxSizer(wx.VERTICAL)
asBox = wx.StaticBox(self,-1)
asBoxSizer = wx.StaticBoxSizer(asBox,wx.HORIZONTAL)
topSizer.Add(asBoxSizer,1,wx.EXPAND)
self.associationList = PatternStructureListCtrl(self)
asBoxSizer.Add(self.associationList,1,wx.EXPAND)
self.SetSizer(topSizer)
class RequirementsPage(wx.Panel):
def __init__(self,parent,structPage):
wx.Panel.__init__(self,parent)
topSizer = wx.BoxSizer(wx.VERTICAL)
asBox = wx.StaticBox(self,-1)
asBoxSizer = wx.StaticBoxSizer(asBox,wx.HORIZONTAL)
topSizer.Add(asBoxSizer,1,wx.EXPAND)
self.requirementList = RequirementListCtrl(self,structPage.associationList)
asBoxSizer.Add(self.requirementList,1,wx.EXPAND)
self.SetSizer(topSizer)
class SecurityPatternNotebook(wx.Notebook):
def __init__(self,parent):
wx.Notebook.__init__(self,parent,SECURITYPATTERN_NOTEBOOKPATTERN_ID)
p1 = MLTextPage(self,SECURITYPATTERN_TEXTCONTEXT_ID)
p2 = MLTextPage(self,SECURITYPATTERN_TEXTPROBLEM_ID)
p3 = MLTextPage(self,SECURITYPATTERN_TEXTSOLUTION_ID)
p4 = StructurePage(self)
p5 = RequirementsPage(self,p4)
self.AddPage(p1,'Context')
self.AddPage(p2,'Problem')
self.AddPage(p3,'Solution')
self.AddPage(p4,'Structure')
self.AddPage(p5,'Requirements')
|