| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable ⌀) |
|---|---|---|---|---|
meabsence/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/distutils/tests/test_bdist_wininst.py
|
53
|
"""Tests for distutils.command.bdist_wininst."""
import unittest
from test.support import run_unittest
from distutils.command.bdist_wininst import bdist_wininst
from distutils.tests import support
class BuildWinInstTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_get_exe_bytes(self):
# issue5731: command was broken on non-windows platforms
# this test makes sure it works now for every platform
# let's create a command
pkg_pth, dist = self.create_dist()
cmd = bdist_wininst(dist)
cmd.ensure_finalized()
# let's run the code that finds the right wininst*.exe file
# and make sure it finds it and returns its content
# no matter what platform we have
exe_file = cmd.get_exe_bytes()
self.assertTrue(len(exe_file) > 10)
def test_suite():
return unittest.makeSuite(BuildWinInstTestCase)
if __name__ == '__main__':
run_unittest(test_suite())
|
roxyboy/bokeh
|
refs/heads/master
|
bokeh/charts/builder/line_builder.py
|
43
|
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plot is a simple way.
This is the Line class which lets you build your Line charts just
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from six import string_types
import numpy as np
from ..utils import cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, DataRange1d, GlyphRenderer, Range1d
from ...models.glyphs import Line as LineGlyph
from ...properties import Any
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Line(values, index=None, **kws):
""" Create a line chart using :class:`LineBuilder <bokeh.charts.builder.line_builder.LineBuilder>` to
render the geometry from values and index.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
index (str|1d iterable, optional): can be used to specify a common custom
index for all data series: either a **1d iterable** of any sort that will be
used as the series' common index, or a **string** that corresponds to the key of
the mapping to be used as index (and not as a data series) if
area.values is a mapping (like a dict, an OrderedDict
or a pandas DataFrame)
In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
import numpy as np
from bokeh.charts import Line, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
xyvalues = np.array([[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]])
line = Line(xyvalues, title="line", legend="top_left", ylabel='Languages')
output_file('line.html')
show(line)
"""
return create_and_build(LineBuilder, values, index=index, **kws)
class LineBuilder(Builder):
"""This is the Line class and it is in charge of plotting
Line charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the source.
"""
index = Any(help="""
An index to be used for all data series as follows:
- A 1d iterable of any sort that will be used as
series common index
- A string that corresponds to the key of the
mapping to be used as index (and not as data
series) if area.values is a mapping (like a dict,
an OrderedDict or a pandas DataFrame)
""")
def _process_data(self):
"""Calculate the chart properties accordingly from line.values.
Then build a dict containing references to all the points to be
used by the line glyph inside the ``_yield_renderers`` method.
"""
self._data = dict()
# list to save all the attributes we are going to create
self._attr = []
xs = self._values_index
self.set_and_get("x", "", np.array(xs))
for col, values in self._values.items():
if isinstance(self.index, string_types) and col == self.index:
continue
# save every new group we find
self._groups.append(col)
self.set_and_get("y_", col, values)
def _set_sources(self):
"""
Push the Line data into the ColumnDataSource and calculate the
proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = DataRange1d()
y_names = self._attr[1:]
endy = max(max(self._data[i]) for i in y_names)
starty = min(min(self._data[i]) for i in y_names)
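# Pad the y range by 10% of the data span on each side, e.g. data
# spanning 0..100 yields a Range1d from -10 to 110.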
self.y_range = Range1d(
start=starty - 0.1 * (endy - starty),
end=endy + 0.1 * (endy - starty)
)
def _yield_renderers(self):
"""Use the line glyphs to connect the xy points in the Line.
Takes reference points from the data loaded at the ColumnDataSource.
"""
colors = cycle_colors(self._attr, self.palette)
for i, duplet in enumerate(self._attr[1:], start=1):
glyph = LineGlyph(x='x', y=duplet, line_color=colors[i - 1])
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i-1], [renderer]))
yield renderer
|
teto/i3pystatus
|
refs/heads/master
|
i3pystatus/calendar/__init__.py
|
2
|
import inspect
import re
import threading
from abc import abstractmethod
from datetime import datetime, timedelta
from i3pystatus import IntervalModule, formatp, SettingsBase
from i3pystatus.core.color import ColorRangeModule
from i3pystatus.core.desktop import DesktopNotification
humanize_imported = False
try:
import humanize
humanize_imported = True
except ImportError:
pass
def strip_microseconds(delta):
return delta - timedelta(microseconds=delta.microseconds)
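# e.g. strip_microseconds(timedelta(seconds=90, microseconds=500)) -> timedelta(seconds=90)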
def formatter(func):
""" Decorator to mark a CalendarEvent method as a formatter. """
func.formatter = True
return func
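# A minimal sketch (class and attribute names hypothetical) of a backend
# event exposing an extra formatter; CalendarEvent.formatters() below picks
# up any method marked with @formatter and exposes it under the method name:
#
#     class RoomCalendarEvent(CalendarEvent):
#         @formatter
#         def location(self):
#             return "Room 101"   # would be available to users as {location}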
class CalendarEvent:
"""
Simple class representing an Event. The attributes title, start, end and recurring are required as
these will be used for the formatters. The id attribute is used to uniquely identify the event.
If a backend wishes to provide extra formatters to the user, this can be done by adding additional
methods and decorating them with the @formatter decorator. See the LightningCalendarEvent from the
lightning module for an example of this.
"""
# Unique identifier for this event
id = None
# The title of this event
title = None
# Datetime object representing when this event begins
start = None
# Datetime object representing when this event ends
end = None
# Whether or not this event is a recurring event
recurring = False
def formatters(self):
"""
Build a dictionary containing all those key/value pairs that will be exposed to the user via formatters.
"""
event_dict = dict(
title=self.title,
remaining=self.time_remaining,
humanize_remaining=self.humanize_time_remaining,
)
def is_formatter(x):
return inspect.ismethod(x) and hasattr(x, 'formatter') and getattr(x, 'formatter')
for method_name, method in inspect.getmembers(self, is_formatter):
event_dict[method_name] = method()
return event_dict
@property
def time_remaining(self):
return strip_microseconds(self.start - datetime.now(tz=self.start.tzinfo))
@property
def humanize_time_remaining(self):
if humanize_imported:
return humanize.naturaltime(datetime.now(tz=self.start.tzinfo) - self.start)
def __str__(self):
return "{}(title='{}', start={}, end={}, recurring={})" \
.format(type(self).__name__,
self.title,
repr(self.start),
repr(self.end),
self.recurring)
class CalendarBackend(SettingsBase):
"""
Base class for calendar backends. Subclasses should implement update and populate the events list.
Optionally, subclasses can override on_click to perform actions on the current event when clicked.
"""
def init(self):
self.events = []
@abstractmethod
def update(self):
""" Subclasses should implement this method and populate the events list with CalendarEvents."""
def on_click(self, event):
""" Override this method to do more interesting things with the event. """
DesktopNotification(
title=event.title,
body="{} until {}!".format(event.time_remaining, event.title),
icon='dialog-information',
urgency=1,
timeout=0,
).display()
def __iter__(self):
return iter(self.events)
def __len__(self):
return len(self.events)
class Calendar(IntervalModule, ColorRangeModule):
"""
Generic calendar module. Requires the PyPI package ``colour``.
.. rubric:: Available formatters
* {title} - the title or summary of the event
* {remaining} - how long until this event is due
* {humanize_remaining} - how long until this event is due in human readable format
Additional formatters may be provided by the backend, consult their documentation for details.
.. note:: Optionally requires `humanize` to display time in human readable format.
"""
settings = (
('format', 'Format string to display in the bar'),
('backend', 'Backend to use for collecting calendar events'),
('skip_recurring', 'Whether or not to skip recurring events'),
('skip_all_day', 'Whether or not to skip all day events'),
('skip_regex', 'Skip events with titles that match this regex'),
('update_interval', "How often in seconds to call the backend's update method"),
('urgent_seconds', "When within this many seconds of the event, set the urgent flag"),
('urgent_blink', 'Whether or not to blink when within urgent_seconds of the event'),
('dynamic_color', 'Whether or not to change color as the event approaches'),
)
required = ('backend',)
skip_recurring = False
skip_all_day = False
skip_regex = None
interval = 1
backend = None
update_interval = 600
dynamic_color = True
urgent_seconds = 300
urgent_blink = False
current_event = None
urgent_acknowledged = False
format = "{title} - {remaining}"
on_rightclick = 'handle_click'
on_leftclick = 'acknowledge'
def init(self):
if 'humanize_remaining' in self.format and not humanize_imported:
raise ImportError('Missing humanize module')
self.condition = threading.Condition()
self.thread = threading.Thread(target=self.update_thread, daemon=True)
self.thread.start()
self.colors = self.get_hex_color_range(self.end_color, self.start_color, self.urgent_seconds * 2)
def update_thread(self):
self.refresh_events()
while True:
with self.condition:
self.condition.wait(self.update_interval)
self.refresh_events()
def refresh_events(self):
self.backend.update()
def valid_event(ev):
if self.skip_all_day and not isinstance(ev.start, datetime):
return False
if self.skip_recurring and ev.recurring:
return False
if self.skip_regex and re.search(self.skip_regex, ev.title) is not None:
return False
if ev.time_remaining < timedelta(seconds=0):
return False
return True
for event in self.backend:
if valid_event(event):
if self.current_event and self.current_event.id != event.id:
self.urgent_acknowledged = False
self.current_event = event
return
self.current_event = None
def run(self):
if self.current_event and self.current_event.time_remaining > timedelta(seconds=0):
self.output = {
"full_text": formatp(self.format, **self.current_event.formatters()),
"color": self.get_color() if self.dynamic_color else None,
"urgent": self.is_urgent()
}
else:
self.output = {}
def handle_click(self):
if self.current_event:
self.backend.on_click(self.current_event)
def get_color(self):
if self.current_event.time_remaining.days > 0:
color = self.colors[-1]
else:
p = self.percentage(self.current_event.time_remaining.seconds, self.urgent_seconds)
color = self.get_gradient(p, self.colors)
return color
def is_urgent(self):
"""
Determine whether or not to set the urgent flag. If urgent_blink is set, toggles urgent flag
on and off every second.
"""
if not self.current_event:
return False
now = datetime.now(tz=self.current_event.start.tzinfo)
alert_time = now + timedelta(seconds=self.urgent_seconds)
urgent = alert_time > self.current_event.start
if urgent and self.urgent_blink:
urgent = now.second % 2 == 0 and not self.urgent_acknowledged
return urgent
def acknowledge(self):
self.urgent_acknowledged = not self.urgent_acknowledged
|
amigcamel/taipei.py
|
refs/heads/master
|
templates/pages/gallery.html.py
|
1
|
|
nvlsianpu/mbed
|
refs/heads/master
|
tools/get_config.py
|
23
|
#! /usr/bin/env python2
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from os.path import isdir, abspath, dirname, join
from os import _exit
# Be sure that the tools directory is in the search path
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from tools.utils import args_error
from tools.options import get_default_options_parser
from tools.build_api import get_config
from config import Config
from utils import argparse_filestring_type
try:
import tools.private_settings as ps
except:
ps = object()
if __name__ == '__main__':
# Parse Options
parser = get_default_options_parser(add_clean=False, add_options=False)
parser.add_argument("--source", dest="source_dir", type=argparse_filestring_type, required=True,
default=[], help="The source (input) directory", action="append")
parser.add_argument("--prefix", dest="prefix", action="append",
default=[], help="Restrict listing to parameters that have this prefix")
parser.add_argument("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="Verbose diagnostic output")
options = parser.parse_args()
# Target
if options.mcu is None:
args_error(parser, "argument -m/--mcu is required")
target = options.mcu[0]
# Toolchain
if options.tool is None:
args_error(parser, "argument -t/--toolchain is required")
toolchain = options.tool[0]
options.prefix = options.prefix or [""]
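# An empty prefix matches every parameter name (startswith("") is always
# True), so without --prefix all configuration parameters are listed.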
try:
params, macros, features = get_config(options.source_dir, target, toolchain)
if not params and not macros:
print "No configuration data available."
_exit(0)
if params:
print "Configuration parameters"
print "------------------------"
for p in params:
for s in options.prefix:
if p.startswith(s):
print(str(params[p]) if not options.verbose else params[p].get_verbose_description())
break
print ""
print "Macros"
print "------"
if macros:
print 'Defined with "macros":', Config.config_macros_to_macros(macros)
print "Generated from configuration parameters:", Config.parameters_to_macros(params)
except KeyboardInterrupt, e:
print "\n[CTRL+c] exit"
except Exception,e:
if options.verbose:
import traceback
traceback.print_exc(file=sys.stdout)
else:
print "[ERROR] %s" % str(e)
sys.exit(1)
|
yeukhon/homework
|
refs/heads/master
|
computer-security/elgamal-attack/getstuff.py
|
1
|
#!/usr/bin/python
import numbthy
def primes(n):
# http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188
""" Returns a list of primes < n """
sieve = [True] * n
for i in xrange(3,int(n**0.5)+1,2):
if sieve[i]:
sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)
return [2] + [i for i in xrange(3,n,2) if sieve[i]]
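# e.g. primes(10) -> [2, 3, 5, 7]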
ps = primes(3000000)
for i in xrange(numbthy.powmod
|
chronicle/api-samples-python
|
refs/heads/master
|
detect/v2/run_retrohunt_and_wait.py
|
1
|
#!/usr/bin/env python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Executable and reusable sample for running a retrohunt and waiting for it.
This module demonstrates combining multiple single-purpose modules into a larger
workflow.
"""
import argparse
import datetime
import json
import time
from typing import Any, Mapping, Sequence, Tuple
from google.auth.transport import requests
from common import chronicle_auth
from common import datetime_converter
from . import cancel_retrohunt
from . import get_retrohunt
from . import list_detections
from . import run_retrohunt
CHRONICLE_API_BASE_URL = "https://backstory.googleapis.com"
# Sleep duration used to wait until next retrohunt status check.
DEFAULT_SLEEP_SECONDS = 5
# Timeout used to wait until retrohunt is complete.
DEFAULT_TIMEOUT_MINUTES = 1440.0 # 1 day = 60 * 24 = 1440 minutes.
def get_retrohunt_info(
retrohunt: Mapping[str, Any]) -> Tuple[str, str, str, float]:
"""Helper function to extract versionId, retrohuntId, state, and progressPercentage from retrohunt.
Args:
retrohunt: Retrohunt in a Mapping format.
Returns:
versionId, retrohuntId, state, progressPercentage.
"""
return (retrohunt.get("versionId", ""), retrohunt.get("retrohuntId", ""),
retrohunt.get("state", "STATE_UNSPECIFIED"),
retrohunt.get("progressPercentage", 0.0))
def run_retrohunt_and_wait(
http_session: requests.AuthorizedSession,
version_id: str,
start_time: datetime.datetime,
end_time: datetime.datetime,
sleep_seconds: int = DEFAULT_SLEEP_SECONDS,
timeout_minutes: float = DEFAULT_TIMEOUT_MINUTES,
page_size: int = 0
) -> Tuple[Sequence[Mapping[str, Any]], str]:
"""Runs a retrohunt and wait, and receive detections.
When retrohunt does not complete within the 'timeout_minutes' time period,
it cancels the retrohunt and returns TimeoutError.
Args:
http_session: Authorized session for HTTP requests.
version_id: Unique ID of the detection rule to retrieve errors for
("ru_<UUID>" or "ru_<UUID>@v_<seconds>_<nanoseconds>"). If a version
suffix isn't specified we use the rule's latest version.
start_time: The start time of the time range the retrohunt will process.
end_time: The end time of the time range the retrohunt will process.
sleep_seconds: Optional interval between retrohunt status checks, until it's
DONE or CANCELLED.
timeout_minutes: Optional timeout in minutes. This is used to wait for the
retrohunt to complete.
page_size: Optional maximum number of detections in the response. Must be
non-negative. If not provided, the default value 100 is applied.
Returns:
First page of detections and page token, which is a Base64 token for
getting the detections of the next page (an empty token string means the
currently retrieved page is the last one).
Raises:
requests.exceptions.HTTPError: HTTP request resulted in an error
(response.status_code >= 400).
TimeoutError: When retrohunt does not complete by timeout.
"""
deadline = datetime.datetime.now() + datetime.timedelta(
minutes=timeout_minutes)
# Start the retrohunt by calling RunRetrohunt.
retrohunt_rep = run_retrohunt.run_retrohunt(http_session, version_id,
start_time, end_time)
version_id, retrohunt_id, state, progress_percentage = get_retrohunt_info(
retrohunt_rep)
print(f"Retrohunt started. retrohunt_id: {retrohunt_id}")
now = datetime.datetime.now()
while now < deadline and state not in ("DONE", "CANCELLED"):
print((f"Waiting for retrohunt to complete. Retrohunt is running at "
f"{progress_percentage}% .."))
time.sleep(sleep_seconds)
retrohunt_rep = get_retrohunt.get_retrohunt(http_session,
version_id,
retrohunt_id)
version_id, retrohunt_id, state, progress_percentage = get_retrohunt_info(
retrohunt_rep)
now = datetime.datetime.now()
# We finished waiting for the retrohunt to complete. We cancel the retrohunt
# if it is still running.
if state == "RUNNING":
print((f"Retrohunt did not complete. "
f"Cancelling retrohunt for versionID: {version_id}"))
# When cancel_retrohunt fails, it raises an error and stops the script here.
cancel_retrohunt.cancel_retrohunt(http_session, version_id,
retrohunt_id)
raise TimeoutError(
f"Retrohunt not completed after {timeout_minutes} minutes.")
print("Returning first page of detections.")
return list_detections.list_detections(
http_session,
version_id=version_id,
page_size=page_size,
detection_start_time=start_time,
detection_end_time=end_time)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
chronicle_auth.add_argument_credentials_file(parser)
parser.add_argument(
"-vi",
"--version_id",
type=str,
required=True,
help="version ID ('ru_<UUID>[@v_<seconds>_<nanoseconds>]')")
parser.add_argument(
"-st",
"--start_time",
type=datetime_converter.iso8601_datetime_utc,
required=True,
help="Event start time in UTC ('yyyy-mm-ddThh:mm:ssZ')")
parser.add_argument(
"-et",
"--end_time",
type=datetime_converter.iso8601_datetime_utc,
required=True,
help="Event end time in UTC ('yyyy-mm-ddThh:mm:ssZ')")
parser.add_argument(
"-ss",
"--sleep_seconds",
type=int,
default=DEFAULT_SLEEP_SECONDS,
help="interval between retrohunt status polls in seconds (default = 5)")
parser.add_argument(
"-tm",
"--timeout_minutes",
type=float,
default=DEFAULT_TIMEOUT_MINUTES,
help=("timeout in minutes (default = 1 day) used to wait for retrohunt"
"to complete and return detections"))
parser.add_argument(
"-s",
"--page_size",
type=int,
required=False,
help="maximum number of detections to return")
args = parser.parse_args()
session = chronicle_auth.initialize_http_session(args.credentials_file)
detections, next_page_token = run_retrohunt_and_wait(
session, args.version_id, args.start_time, args.end_time,
args.sleep_seconds, args.timeout_minutes, args.page_size)
print(json.dumps(detections, indent=2))
print(f"Next page token: {next_page_token}")
|
googleapis/googleapis-gen
|
refs/heads/master
|
google/cloud/bigquery/logging/v1/google-cloud-bigquery-logging-v1-py/tests/unit/gapic/__init__.py
|
951
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
|
raycarnes/addons-yelizariev
|
refs/heads/8.0
|
delivery_special/models.py
|
16
|
from openerp.osv import osv, fields
from openerp.tools.translate import _
class delivery_grid(osv.osv):
_inherit = "delivery.grid"
def get_price(self, cr, uid, id, order, dt, context=None):
total = 0
weight = 0
volume = 0
quantity = 0
special_delivery = 0
product_uom_obj = self.pool.get('product.uom')
for line in order.order_line:
if not line.product_id or line.is_delivery:
continue
q = product_uom_obj._compute_qty(cr, uid, line.product_uom.id, line.product_uom_qty, line.product_id.uom_id.id)
weight += (line.product_id.weight or 0.0) * q
volume += (line.product_id.volume or 0.0) * q
special_delivery += (line.product_id.special_delivery or 0.0) * q
quantity += q
total = order.amount_total or 0.0
return self.get_price_from_picking(cr, uid, id, total,weight, volume, quantity, special_delivery, context=context)
def get_price_from_picking(self, cr, uid, id, total, weight, volume, quantity, special_delivery=0, context=None):
grid = self.browse(cr, uid, id, context=context)
price = 0.0
ok = False
price_dict = {'price': total, 'volume':volume, 'weight': weight, 'wv':volume*weight, 'quantity': quantity, 'special_delivery':special_delivery}
for line in grid.line_ids:
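# Each grid line encodes a condition such as "weight<=10.0" from its
# variable, operator and max_value; evaluating it against price_dict
# (e.g. eval("weight<=10.0", {'weight': 4.2, ...})) selects the first
# matching line.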
test = eval(line.type+line.operator+str(line.max_value), price_dict)
if test:
if line.price_type=='variable':
price = line.list_price * price_dict[line.variable_factor]
else:
price = line.list_price
ok = True
break
if not ok:
raise osv.except_osv(_("Unable to fetch delivery method!"), _("Selected product in the delivery method doesn't fulfill any of the delivery grid(s) criteria."))
return price
class delivery_grid_line(osv.osv):
_inherit = "delivery.grid.line"
_columns = {
'type': fields.selection([('weight','Weight'),('volume','Volume'),\
('wv','Weight * Volume'), ('price','Price'), ('quantity','Quantity'), ('special_delivery','Special Delivery')],\
'Variable', required=True),
}
class product_template(osv.osv):
_inherit = 'product.template'
_columns = {
'special_delivery': fields.integer('Special Delivery', help='Allows making special rules in the delivery grid. Can be negative.')
}
_defaults = {
'special_delivery': 0,
}
|
rven/odoo
|
refs/heads/14.0-fix-partner-merge-mail-activity
|
addons/lunch/models/lunch_order.py
|
2
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
class LunchOrder(models.Model):
_name = 'lunch.order'
_description = 'Lunch Order'
_order = 'id desc'
_display_name = 'product_id'
name = fields.Char(related='product_id.name', string="Product Name", readonly=True) # to remove
topping_ids_1 = fields.Many2many('lunch.topping', 'lunch_order_topping', 'order_id', 'topping_id', string='Extras 1', domain=[('topping_category', '=', 1)])
topping_ids_2 = fields.Many2many('lunch.topping', 'lunch_order_topping', 'order_id', 'topping_id', string='Extras 2', domain=[('topping_category', '=', 2)])
topping_ids_3 = fields.Many2many('lunch.topping', 'lunch_order_topping', 'order_id', 'topping_id', string='Extras 3', domain=[('topping_category', '=', 3)])
product_id = fields.Many2one('lunch.product', string="Product", required=True)
category_id = fields.Many2one(
string='Product Category', related='product_id.category_id', store=True)
date = fields.Date('Order Date', required=True, readonly=True,
states={'new': [('readonly', False)]},
default=fields.Date.context_today)
supplier_id = fields.Many2one(
string='Vendor', related='product_id.supplier_id', store=True, index=True)
user_id = fields.Many2one('res.users', 'User', readonly=True,
states={'new': [('readonly', False)]},
default=lambda self: self.env.uid)
note = fields.Text('Notes')
price = fields.Float('Total Price', compute='_compute_total_price', readonly=True, store=True,
digits='Account')
active = fields.Boolean('Active', default=True)
state = fields.Selection([('new', 'To Order'),
('ordered', 'Ordered'),
('confirmed', 'Received'),
('cancelled', 'Cancelled')],
'Status', readonly=True, index=True, default='new')
company_id = fields.Many2one('res.company', default=lambda self: self.env.company.id)
currency_id = fields.Many2one(related='company_id.currency_id', store=True)
quantity = fields.Float('Quantity', required=True, default=1)
display_toppings = fields.Text('Extras', compute='_compute_display_toppings', store=True)
product_description = fields.Text('Description', related='product_id.description')
topping_label_1 = fields.Char(related='product_id.category_id.topping_label_1')
topping_label_2 = fields.Char(related='product_id.category_id.topping_label_2')
topping_label_3 = fields.Char(related='product_id.category_id.topping_label_3')
topping_quantity_1 = fields.Selection(related='product_id.category_id.topping_quantity_1')
topping_quantity_2 = fields.Selection(related='product_id.category_id.topping_quantity_2')
topping_quantity_3 = fields.Selection(related='product_id.category_id.topping_quantity_3')
image_1920 = fields.Image(compute='_compute_product_images')
image_128 = fields.Image(compute='_compute_product_images')
available_toppings_1 = fields.Boolean(help='Are extras available for this product', compute='_compute_available_toppings')
available_toppings_2 = fields.Boolean(help='Are extras available for this product', compute='_compute_available_toppings')
available_toppings_3 = fields.Boolean(help='Are extras available for this product', compute='_compute_available_toppings')
@api.depends('product_id')
def _compute_product_images(self):
for line in self:
line.image_1920 = line.product_id.image_1920 or line.category_id.image_1920
line.image_128 = line.product_id.image_128 or line.category_id.image_128
@api.depends('category_id')
def _compute_available_toppings(self):
for line in self:
line.available_toppings_1 = bool(line.env['lunch.topping'].search_count([('category_id', '=', line.category_id.id), ('topping_category', '=', 1)]))
line.available_toppings_2 = bool(line.env['lunch.topping'].search_count([('category_id', '=', line.category_id.id), ('topping_category', '=', 2)]))
line.available_toppings_3 = bool(line.env['lunch.topping'].search_count([('category_id', '=', line.category_id.id), ('topping_category', '=', 3)]))
def init(self):
self._cr.execute("""CREATE INDEX IF NOT EXISTS lunch_order_user_product_date ON %s (user_id, product_id, date)"""
% self._table)
def _extract_toppings(self, values):
"""
If called in api.multi then it will pop topping_ids_1,2,3 from values
"""
if self.ids:
# TODO This is not taking into account all the toppings for each individual order, this is usually not a problem
# since in the interface you usually don't update more than one order at a time but this is a bug nonetheless
topping_1 = values.pop('topping_ids_1')[0][2] if 'topping_ids_1' in values else self[:1].topping_ids_1.ids
topping_2 = values.pop('topping_ids_2')[0][2] if 'topping_ids_2' in values else self[:1].topping_ids_2.ids
topping_3 = values.pop('topping_ids_3')[0][2] if 'topping_ids_3' in values else self[:1].topping_ids_3.ids
else:
topping_1 = values['topping_ids_1'][0][2] if 'topping_ids_1' in values else []
topping_2 = values['topping_ids_2'][0][2] if 'topping_ids_2' in values else []
topping_3 = values['topping_ids_3'][0][2] if 'topping_ids_3' in values else []
return topping_1 + topping_2 + topping_3
@api.constrains('topping_ids_1', 'topping_ids_2', 'topping_ids_3')
def _check_topping_quantity(self):
errors = {
'1_more': _('You should order at least one %s'),
'1': _('You have to order one and only one %s'),
}
for line in self:
for index in range(1, 4):
availability = line['available_toppings_%s' % index]
quantity = line['topping_quantity_%s' % index]
toppings = line['topping_ids_%s' % index].filtered(lambda x: x.topping_category == index)
label = line['topping_label_%s' % index]
if availability and quantity != '0_more':
check = bool(len(toppings) == 1 if quantity == '1' else toppings)
if not check:
raise ValidationError(errors[quantity] % label)
@api.model
def create(self, values):
lines = self._find_matching_lines({
**values,
'toppings': self._extract_toppings(values),
})
if lines:
# YTI FIXME This will update multiple lines in the case there are multiple
# matching lines which should not happen through the interface
lines.update_quantity(1)
return lines[:1]
return super().create(values)
def write(self, values):
merge_needed = 'note' in values or 'topping_ids_1' in values or 'topping_ids_2' in values or 'topping_ids_3' in values
if merge_needed:
lines_to_deactivate = self.env['lunch.order']
for line in self:
# Only write on topping_ids_1 because they all share the same table
# and we don't want to remove all the records
# _extract_toppings will pop topping_ids_1, topping_ids_2 and topping_ids_3 from values
# This also forces us to invalidate the cache for topping_ids_2 and topping_ids_3 that
# could have changed through topping_ids_1 without the cache knowing about it
toppings = self._extract_toppings(values)
self.invalidate_cache(['topping_ids_2', 'topping_ids_3'])
values['topping_ids_1'] = [(6, 0, toppings)]
matching_lines = self._find_matching_lines({
'user_id': values.get('user_id', line.user_id.id),
'product_id': values.get('product_id', line.product_id.id),
'note': values.get('note', line.note or False),
'toppings': toppings,
})
if matching_lines:
lines_to_deactivate |= line
# YTI TODO Try to batch it, be careful there might be multiple matching
# lines for the same order hence quantity should not always be
# line.quantity, but rather a sum
matching_lines.update_quantity(line.quantity)
lines_to_deactivate.write({'active': False})
return super(LunchOrder, self - lines_to_deactivate).write(values)
return super().write(values)
@api.model
def _find_matching_lines(self, values):
domain = [
('user_id', '=', values.get('user_id', self.default_get(['user_id'])['user_id'])),
('product_id', '=', values.get('product_id', False)),
('date', '=', fields.Date.today()),
('note', '=', values.get('note', False)),
]
toppings = values.get('toppings', [])
return self.search(domain).filtered(lambda line: (line.topping_ids_1 | line.topping_ids_2 | line.topping_ids_3).ids == toppings)
@api.depends('topping_ids_1', 'topping_ids_2', 'topping_ids_3', 'product_id', 'quantity')
def _compute_total_price(self):
for line in self:
line.price = line.quantity * (line.product_id.price + sum((line.topping_ids_1 | line.topping_ids_2 | line.topping_ids_3).mapped('price')))
@api.depends('topping_ids_1', 'topping_ids_2', 'topping_ids_3')
def _compute_display_toppings(self):
for line in self:
toppings = line.topping_ids_1 | line.topping_ids_2 | line.topping_ids_3
line.display_toppings = ' + '.join(toppings.mapped('name'))
def update_quantity(self, increment):
for line in self.filtered(lambda line: line.state != 'confirmed'):
if line.quantity <= -increment:
# TODO: maybe unlink the order?
line.active = False
else:
line.quantity += increment
self._check_wallet()
def add_to_cart(self):
"""
This method currently does nothing; we need it in order to
be able to reuse this model in place of a wizard
"""
# YTI FIXME: Find a way to drop this.
return True
def _check_wallet(self):
self.flush()
for line in self:
if self.env['lunch.cashmove'].get_wallet_balance(line.user_id) < 0:
raise ValidationError(_('Your wallet does not contain enough money to order that. To add some money to your wallet, please contact your lunch manager.'))
def action_order(self):
if self.filtered(lambda line: not line.product_id.active):
raise ValidationError(_('Product is no longer available.'))
self.write({'state': 'ordered'})
self._check_wallet()
def action_confirm(self):
self.write({'state': 'confirmed'})
def action_cancel(self):
self.write({'state': 'cancelled'})
|
adamnovak/client
|
refs/heads/develop
|
shell/bii.py
|
5
|
import sys
import os
import shlex
import traceback
from biicode.client.command.executor import ToolExecutor
from biicode.client.command.tool_catalog import ToolCatalog
from biicode.common.exception import BiiException
from biicode.client.shell.userio import UserIO
from biicode.common.utils.bii_logging import logger
from biicode.client.command.biicommand import BiiCommand
from biicode.client.dev.cpp.cpptoolchain import CPPToolChain
from biicode.client.shell.biistream import BiiOutputStream
from biicode.common.output_stream import OutputStream, INFO
from biicode.client.setups.setup_commands import SetupCommands
from biicode.client.dev.hardware.raspberry_pi.rpitoolchain import RPiToolChain
from biicode.client.dev.hardware.arduino.arduinotoolchain import ArduinoToolChain
from biicode.client.shell.updates_manager import UpdatesStore, UpdatesManager
from biicode.common.model.server_info import ClientVersion
from biicode.client.exception import ObsoleteClient
from biicode.client.conf import BII_RESTURL
from biicode.client.rest.bii_rest_api_client import BiiRestApiClient
from biicode.client.dev.node.nodetoolchain import NodeToolChain
from biicode.client.workspace.bii_paths import BiiPaths
from biicode.client.workspace.hive_disk_image import HiveDiskImage
from biicode.client.workspace.user_cache import UserCache
class Bii(object):
'''Entry point class for bii executable'''
def __init__(self, user_io, current_folder, user_biicode_folder):
self.user_io = user_io
self.bii_paths = BiiPaths(current_folder, user_biicode_folder)
self.user_cache = UserCache(self.bii_paths.user_bii_home)
toolcatalog = ToolCatalog(BiiCommand, tools=[CPPToolChain,
RPiToolChain,
SetupCommands,
ArduinoToolChain,
NodeToolChain])
self.executor = ToolExecutor(self, toolcatalog)
self._biiapi = None
@property
def hive_disk_image(self):
# not kept persistent, as the tests would otherwise hit a "database locked" operational error
return HiveDiskImage(self.bii_paths, self.user_cache, self.user_io.out)
@property
def biiapi(self):
if self._biiapi is None:
from biicode.client.api.biiapi_proxy import BiiAPIProxy
from biicode.client.api.biiapi_auth_manager import BiiApiAuthManager
auth_manager = BiiApiAuthManager(self._restapi, self.user_io, self.user_cache.localdb)
self._biiapi = BiiAPIProxy(self.user_cache.localdb, auth_manager, self.user_io)
return self._biiapi
@property
def _restapi(self):
return BiiRestApiClient(BII_RESTURL)
def execute(self, argv):
'''Executes user provided command. Eg. bii run:cpp'''
errors = False
try:
if isinstance(argv, basestring): # To make tests easier to write
argv = shlex.split(argv)
self.executor.execute(argv) # Executor only raises not expected Exceptions
except (KeyboardInterrupt, SystemExit) as e:
logger.debug('Execution terminated: %s', e)
errors = True
except BiiException as e:
errors = True
self.user_io.out.error(str(e))
except Exception as e:
tb = traceback.format_exc()
logger.error(tb)
errors = True
self.user_io.out.error('Unexpected Exception\n %s' % e)
self.user_io.out.error('Error executing command.\n'
'\tCheck the documentation in http://docs.biicode.com\n'
'\tor ask in the forum http://forum.biicode.com\n')
return errors
def run_main(args, user_io=None, current_folder=None, user_folder=None, biiapi_client=None):
try:
user_folder = user_folder or os.path.expanduser("~")
biicode_folder = os.path.join(user_folder, '.biicode')
current_folder = current_folder or os.getcwd()
user_io = user_io or create_user_io(biicode_folder)
bii = Bii(user_io, current_folder, biicode_folder)
# The updates manager needs neither a proxy nor authentication to call get_server_info
biiapi_client = biiapi_client or bii.biiapi
updates_manager = get_updates_manager(biiapi_client, biicode_folder)
try: # Check for updates
updates_manager.check_for_updates(bii.user_io.out)
except ObsoleteClient as e:
bii.user_io.out.error(e.message)
return int(True)
errors = bii.execute(args)
return int(errors)
except OSError as e:
print str(e)
return 1
def create_user_io(biicode_folder):
"""Creates the bii folder and init user_io with outputstream and logfile"""
try:
os.makedirs(biicode_folder)
except:
pass
log_file = os.path.join(biicode_folder, 'bii.log')
if hasattr(sys.stdout, "isatty") and sys.stdout.isatty():
from colorama import init
init()
OutputStream.color = True
user_io = UserIO(sys.stdin, BiiOutputStream(sys.stdout, log_file, level=INFO))
return user_io
def get_updates_manager(biiapi, biicode_folder):
file_store = os.path.join(biicode_folder, ".remote_version_info")
updates_store = UpdatesStore(file_store)
current_client = ClientVersion(get_current_client_version())
manager = UpdatesManager(updates_store, biiapi, current_client)
return manager
def get_current_client_version():
from biicode.common import __version__ as current_version
return current_version
def main(args):
error = run_main(args)
sys.exit(error)
if __name__ == '__main__':
main(sys.argv[1:])
|
palaniyappanBala/rekall
|
refs/heads/master
|
rekall-core/rekall/plugins/windows/registry/registry.py
|
4
|
# Rekall Memory Forensics
# Copyright (c) 2012 Michael Cohen <scudette@gmail.com>
# Copyright (c) 2008 Brendan Dolan-Gavitt <bdolangavitt@wesleyan.edu>
# Copyright 2013 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""This is the registry parser.
We parse registry structures from files or memory.
"""
__author__ = ("Michael Cohen <scudette@gmail.com> based on original code "
"by Brendan Dolan-Gavitt")
# pylint: disable=protected-access
import ntpath
import re
import struct
from rekall import addrspace
from rekall import obj
from rekall import utils
from rekall.plugins.windows import common
registry_overlays = {
'_CM_KEY_NODE': [None, {
'Parent': [None, ['Pointer32', dict(
target='_CM_KEY_NODE'
)]],
'Flags': [None, ['Flags', dict(bitmap={
"KEY_IS_VOLATILE": 0,
"KEY_HIVE_EXIT": 1,
"KEY_HIVE_ENTRY": 2,
"KEY_NO_DELETE": 3,
"KEY_SYM_LINK": 4,
"KEY_COMP_NAME": 5,
"KEY_PREFEF_HANDLE": 6,
"KEY_VIRT_MIRRORED": 7,
"KEY_VIRT_TARGET": 8,
"KEY_VIRTUAL_STORE": 9,
})]],
'Signature' : [None, ['String', dict(length=2)]],
'LastWriteTime' : [None, ['WinFileTime', {}]],
'Name' : [None, ['String', dict(length=lambda x: x.NameLength)]],
}],
'_CM_KEY_VALUE': [None, {
'Signature' : [None, ['String', dict(length=2)]],
'Name' : [None, ['String', dict(length=lambda x: x.NameLength)]],
'Type': [None, ['Enumeration', dict(choices={
0: "REG_NONE",
1: "REG_SZ",
2: "REG_EXPAND_SZ",
3: "REG_BINARY",
4: "REG_DWORD",
5: "REG_DWORD_BIG_ENDIAN",
6: "REG_LINK",
7: "REG_MULTI_SZ",
8: "REG_RESOURCE_LIST",
9: "REG_FULL_RESOURCE_DESCRIPTOR",
10: "REG_RESOURCE_REQUIREMENTS_LIST",
11: "REG_QWORD",
})]]
}],
'_CM_NAME_CONTROL_BLOCK' : [None, {
'Name' : [None, ['String', dict(length=lambda x: x.NameLength)]],
}],
'_CHILD_LIST' : [None, {
'List' : [None, ['Pointer32', dict(
target="Array",
target_args=dict(
count=lambda x: x.Count,
target="Pointer32",
target_args=dict(
target="_CM_KEY_VALUE"
)
)
)]],
}],
'_CM_KEY_INDEX' : [None, {
'Signature' : [None, ['String', dict(length=2)]],
'List' : [None, ["Array", dict(
count=lambda x: x.Count.v() * 2,
target="Pointer32",
target_args=dict(
target='_CM_KEY_NODE'
)
)]],
}],
}
class _HMAP_ENTRY(obj.Struct):
"""Windows uses this to track registry HBIN cells mapped into memory."""
@property
def BlockAddress(self):
"""Compatibility field for Windows 7 and Windows 10."""
if "BlockAddress" in self.members:
return self.m("BlockAddress")
# Windows 10 uses a different field.
return self.PermanentBinAddress & 0xfffffffffff0
class Pointer32(obj.Pointer):
"""A 32 bit pointer (Even in 64 bit arch).
These kinds of pointers are used most commonly in the Registry code which
always treats the hives as 32 bit address spaces.
"""
def __init__(self, **kwargs):
super(Pointer32, self).__init__(**kwargs)
self._proxy = self._proxy.cast("unsigned int")
class HiveBaseAddressSpace(addrspace.PagedReader):
__abstract = True
BLOCK_SIZE = PAGE_SIZE = 0x1000
class HiveFileAddressSpace(HiveBaseAddressSpace):
"""Translate between hive addresses and a flat file address space.
This is suitable for reading regular registry files. It should be
stacked over the FileAddressSpace.
"""
def __init__(self, **kwargs):
super(HiveFileAddressSpace, self).__init__(**kwargs)
self.as_assert(self.base, "Must stack on top of a file.")
self.as_assert(self.base.read(0, 4) == "regf", "File does not look "
"like a registry file.")
def vtop(self, vaddr):
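# Hive cell offsets are relative to the first HBIN, which starts one
# page (the base block) into the file; the extra 4 bytes skip the
# cell's size field so we land on the cell data itself.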
return vaddr + self.PAGE_SIZE + 4
@property
def Name(self):
return self.base
class HiveAddressSpace(HiveBaseAddressSpace):
CI_TYPE_MASK = 0x80000000
CI_TYPE_SHIFT = 0x1F
CI_TABLE_MASK = 0x7FE00000
CI_TABLE_SHIFT = 0x15
CI_BLOCK_MASK = 0x1FF000
CI_BLOCK_SHIFT = 0x0C
CI_OFF_MASK = 0x0FFF
CI_OFF_SHIFT = 0x0
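# A 32 bit cell index packs (type, table, block, offset) using the masks
# above; e.g. 0x80201234 decodes to type=1 (volatile), table=1, block=1,
# offset=0x234.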
def __init__(self, hive_addr=None, profile=None, **kwargs):
"""Translate between hive addresses and virtual memory addresses.
This must be constructed over the kernel virtual memory.
Args:
hive_addr: The virtual address of the _CMHIVE object.
profile: A profile which holds registry symbols.
"""
super(HiveAddressSpace, self).__init__(**kwargs)
self.as_assert(hive_addr, "Hive offset not provided.")
self.as_assert(self.base, "Must be layered on kernel address space.")
self.profile = RekallRegisteryImplementation(
profile or self.session.profile)
self.hive = self.profile._CMHIVE(offset=hive_addr, vm=self.base)
self.baseblock = self.hive.Hive.BaseBlock.v()
self.flat = self.hive.Hive.Flat.v() > 0
self.storage = self.hive.Hive.Storage
# This is a quick lookup for blocks.
self.block_cache = utils.FastStore(max_size=1000)
self.logging = self.session.logging.getChild("addrspace.hive")
def vtop(self, vaddr):
vaddr = int(vaddr)
# If the hive is listed as "flat", it is all contiguous in memory
# so we can just calculate it relative to the base block.
if self.flat:
return self.baseblock + vaddr + self.BLOCK_SIZE + 4
ci_type = (vaddr & self.CI_TYPE_MASK) >> self.CI_TYPE_SHIFT
ci_table = (vaddr & self.CI_TABLE_MASK) >> self.CI_TABLE_SHIFT
ci_block = (vaddr & self.CI_BLOCK_MASK) >> self.CI_BLOCK_SHIFT
ci_off = (vaddr & self.CI_OFF_MASK) >> self.CI_OFF_SHIFT
try:
block = self.block_cache.Get((ci_type, ci_table, ci_block))
except KeyError:
block = self.storage[ci_type].Map.Directory[ci_table].Table[
ci_block].BlockAddress
self.block_cache.Put((ci_type, ci_table, ci_block), block)
return block + ci_off + 4
def save(self):
"""A generator of registry data in linear form.
This can be used to write a registry file.
Yields:
blocks of data in order.
"""
baseblock = self.base.read(self.baseblock, self.BLOCK_SIZE)
if baseblock:
yield baseblock
else:
yield "\0" * self.BLOCK_SIZE
length = self.hive.Hive.Storage[0].Length.v()
for i in xrange(0, length, self.BLOCK_SIZE):
paddr = self.vtop(i)
if not paddr:
self.logging.warn("No mapping found for index {0:x}, "
"filling with NULLs".format(i))
data = '\0' * self.BLOCK_SIZE
else:
paddr = paddr - 4
data = self.base.read(paddr, self.BLOCK_SIZE)
if not data:
self.logging.warn("Physical layer returned None for index "
"{0:x}, filling with NULL".format(i))
data = '\0' * self.BLOCK_SIZE
yield data
def stats(self, stable=True):
if stable:
stor = 0
ci = lambda x: x
else:
stor = 1
ci = lambda x: x | 0x80000000
length = self.hive.Hive.Storage[stor].Length.v()
total_blocks = length / self.BLOCK_SIZE
bad_blocks_reg = 0
bad_blocks_mem = 0
for i in xrange(0, length, self.BLOCK_SIZE):
i = ci(i)
data = None
paddr = self.vtop(i) - 4
if paddr:
data = self.base.read(paddr, self.BLOCK_SIZE)
else:
bad_blocks_reg += 1
continue
if not data:
bad_blocks_mem += 1
print("{0} bytes in hive.".format(length))
print(("{0} blocks not loaded by CM, {1} blocks "
"paged out, {2} total blocks.".format(
bad_blocks_reg, bad_blocks_mem, total_blocks)))
if total_blocks:
print("Total of {0:.2f}% of hive unreadable.".format(
((bad_blocks_reg + bad_blocks_mem) / float(total_blocks)
) * 100))
return (bad_blocks_reg, bad_blocks_mem, total_blocks)
@property
def Name(self):
return self.hive.Name
class _CMHIVE(obj.Struct):
@property
def Name(self):
name = "[no name]"
try:
name = (self.FileFullPath.v() or self.FileUserName.v() or
self.HiveRootPath.v())
except AttributeError:
pass
object_tree_plugin = self.obj_session.plugins.object_tree()
return u"{0} @ {1:#010x}".format(
object_tree_plugin.FileNameWithDrive(name) or "Unnamed",
self.obj_offset)
class _CM_KEY_NODE(obj.Struct):
"""A registry key."""
NK_SIG = "nk"
VK_SIG = "vk"
def open_subkey(self, subkey_name):
"""Opens our direct child."""
for subkey in self.subkeys():
if unicode(subkey.Name).lower() == subkey_name.lower():
return subkey
return obj.NoneObject("Couldn't find subkey {0} of {1}",
subkey_name, self.Name)
def open_value(self, value_name):
"""Opens our direct child."""
for value in self.values():
if value.Name == value_name:
return value
return obj.NoneObject("Couldn't find subkey {0} of {1}",
value_name, self.Name)
def subkeys(self):
"""Enumeate all subkeys of this key.
How are subkeys stored in each key record?
There are usually two subkey lists - these are pointers to _CM_KEY_INDEX
which are just a list of pointers to other subkeys.
"""
# There are multiple lists of subkeys:
sk_lists = self.SubKeyLists
for list_index, count in enumerate(self.SubKeyCounts):
if count > 0:
sk_offset = sk_lists[list_index]
for subkey in self.obj_profile._CM_KEY_INDEX(
offset=sk_offset, vm=self.obj_vm, parent=self):
yield subkey
def values(self):
"""Enumerate all the values of the key."""
for value_ptr in self.ValueList.List.dereference():
value = value_ptr.dereference()
if value.Signature == self.VK_SIG:
yield value
@property
def Path(self):
"""Traverse our parent objects to print the full path of this key."""
path = []
key = self
while key:
try:
path.append(unicode(key.Name))
except AttributeError:
pass
key = key.obj_parent
return "/".join(reversed(path))
@property
def Name(self):
"""The name of the key is actually a unicode object.
This is encoded either in ascii or utf16 according to the Flags.
"""
if self.Flags.KEY_COMP_NAME:
return self.obj_profile.String(
vm=self.obj_vm, offset=self.obj_profile.get_obj_offset(
self.obj_type, "Name") + self.obj_offset,
length=self.NameLength)
else:
return self.obj_profile.UnicodeString(
vm=self.obj_vm, offset=self.obj_profile.get_obj_offset(
self.obj_type, "Name") + self.obj_offset,
length=self.NameLength, encoding="utf-16")
class _CM_KEY_INDEX(obj.Struct):
"""This is a list of pointers to key nodes.
This works differently depending on the Signature.
"""
LH_SIG = "lh"
LF_SIG = "lf"
RI_SIG = "ri"
LI_SIG = "li"
NK_SIG = "nk"
def __iter__(self):
"""Iterate over all the keys in the index.
Depending on our type (from the Signature) we use different methods."""
if self.Signature == self.LH_SIG or self.Signature == self.LF_SIG:
# The List contains alternating pointers/hash elements here. We do
# not care about the hash at all, so we skip every other entry. See
# http://www.sentinelchicken.com/data/TheWindowsNTRegistryFileFormat.pdf
key_list = self.List
for i in xrange(self.Count * 2):
nk = key_list[i]
if nk.Signature == self.NK_SIG:
yield nk
elif self.Signature == self.RI_SIG:
for i in xrange(self.Count):
# This is a pointer to another _CM_KEY_INDEX
for subkey in self.obj_profile.Object(
"Pointer32", offset=self.List[i].v(),
vm=self.obj_vm, parent=self.obj_parent,
target="_CM_KEY_INDEX"):
if subkey.Signature == self.NK_SIG:
yield subkey
elif self.Signature == self.LI_SIG:
key_list = self.List
for i in xrange(self.Count):
nk = key_list[i]
if nk.Signature == self.NK_SIG:
yield nk
class _CM_KEY_VALUE(obj.Struct):
"""A registry value."""
value_formats = {"REG_DWORD": "<L",
"REG_DWORD_BIG_ENDIAN": ">L",
"REG_QWORD": "<Q"}
@property
def DecodedData(self):
"""Returns the data for this key decoded according to the type."""
# When the data length is 0x80000000, the value is stored in the type
# (as a REG_DWORD).
if self.DataLength == 0x80000000:
return self.Type.v()
# If the high bit is set, the data is stored inline
elif self.DataLength & 0x80000000:
return self._decode_data(self.obj_vm.read(
self.m("Data").obj_offset, self.DataLength & 0x7FFFFFFF))
elif self.DataLength > 0x4000:
return obj.NoneObject("Big data not supported.")
big_data = self.obj_profile._CM_BIG_DATA(
self.Data, vm=self.obj_vm)
return self._decode_data(big_data.Data)
else:
return self._decode_data(self.obj_vm.read(
int(self.m("Data")), self.DataLength))
def _decode_data(self, data):
"""Decode the data according to our type."""
valtype = str(self.Type)
if valtype in ["REG_DWORD", "REG_DWORD_BIG_ENDIAN", "REG_QWORD"]:
if len(data) != struct.calcsize(self.value_formats[valtype]):
return obj.NoneObject(
"Value data did not match the expected data "
"size for a {0}".format(valtype))
if valtype in ["REG_SZ", "REG_EXPAND_SZ", "REG_LINK"]:
data = data.decode('utf-16-le', "ignore")
elif valtype == "REG_MULTI_SZ":
data = data.decode('utf-16-le', "ignore").split('\0')
elif valtype in ["REG_DWORD", "REG_DWORD_BIG_ENDIAN", "REG_QWORD"]:
data = struct.unpack(self.value_formats[valtype], data)[0]
return data
def RekallRegisteryImplementation(profile):
"""The standard rekall registry parsing subsystem."""
profile.add_classes(dict(
_CM_KEY_NODE=_CM_KEY_NODE, _CM_KEY_INDEX=_CM_KEY_INDEX,
_CM_KEY_VALUE=_CM_KEY_VALUE, _CMHIVE=_CMHIVE,
_HMAP_ENTRY=_HMAP_ENTRY,
Pointer32=Pointer32
))
profile.add_overlay(registry_overlays)
return profile
class Registry(object):
"""A High level class to abstract access to the registry hive."""
ROOT_INDEX = 0x20
VK_SIG = "vk"
BIG_DATA_MAGIC = 0x3fd8
def __init__(self, session=None, profile=None, address_space=None,
filename=None, stable=True):
"""Abstract a registry hive.
Args:
session: An optional session object.
profile: A profile to operate on. If not provided we use
session.profile.
address_space: An instance of the HiveBaseAddressSpace.
filename: If the registry exists in a file, specify the filename to
save instantiating the address_space.
stable: Should we try to open the unstable registry area?
"""
self.session = session
self.profile = RekallRegisteryImplementation(
profile or session.profile)
if filename is not None:
base_as = HiveFileAddressSpace.classes['FileAddressSpace'](
filename=filename, session=session)
address_space = HiveFileAddressSpace(base=base_as)
self.address_space = address_space
root_index = self.ROOT_INDEX
if not stable:
root_index = self.ROOT_INDEX | 0x80000000
self.root = self.profile.Object(
"_CM_KEY_NODE", offset=root_index, vm=address_space)
@property
def Name(self):
"""Return the name of the registry."""
return self.address_space.Name
def open_key(self, key=""):
"""Opens a key.
Args:
key: A string path to the key (separated with / or \\) or a list of
path components (useful if the keyname contains /).
"""
if isinstance(key, basestring):
# / can be part of the key name...
key = filter(None, re.split(r"[\\/]", key))
result = self.root
for component in key:
result = result.open_subkey(component)
return result
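# Usage sketch, assuming an instantiated Registry `reg`:
#     reg.open_key("Select")                        # path as a string
#     reg.open_key(["ControlSet001", "Services"])   # path as components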
def open_value(self, path):
key = self.open_key(ntpath.dirname(path))
return key.open_value(ntpath.basename(path))
def CurrentControlSet(self):
"""Return the key for the CurrentControlSet."""
current = self.open_value("Select/Current").DecodedData
if not current:
current = 1
return self.open_key("ControlSet00%s" % current)
class RegistryHive(Registry):
def __init__(self, hive_offset=None, kernel_address_space=None,
profile=None,
session=None, **kwargs):
"""A Registry hive instantiated from the hive offsets.
Args:
hive_offset: The virtual offset of the hive.
kernel_address_space: The kernel address space.
"""
if session:
profile = profile or session.profile
kernel_address_space = (kernel_address_space or
session.kernel_address_space)
hive_address_space = HiveAddressSpace(base=kernel_address_space,
hive_addr=hive_offset,
profile=profile,
session=session)
super(RegistryHive, self).__init__(
session=session, profile=profile, address_space=hive_address_space,
**kwargs)
class RegistryPlugin(common.WindowsCommandPlugin):
"""A generic registry plugin."""
__abstract = True
@classmethod
def args(cls, parser):
super(RegistryPlugin, cls).args(parser)
parser.add_argument(
"--hive-offsets", default=None, type="ArrayIntParser",
help="A list of hive offsets as found by hivelist. "
"If not provided we call hivelist ourselves and list "
"the keys on all hives.")
parser.add_argument("--hive_regex", default=None,
help="A regex to filter hive names."
"If not provided we use all hives.")
def __init__(self, hive_offsets=None, hive_regex=None, **kwargs):
"""Operate on in memory registry hives.
Args:
          hive_offsets: A list of hive offsets as found by hivelist (virtual
            addresses). If not provided we call hivelist ourselves and list
            the keys on all hives.
"""
super(RegistryPlugin, self).__init__(**kwargs)
# Install our specific implementation of registry support.
RekallRegisteryImplementation(self.profile)
self.hive_offsets = hive_offsets
if not self.hive_offsets:
self.hive_offsets = list(self.list_hives())
if hive_regex is not None:
hive_offsets = []
for hive in self.hive_offsets:
m = re.search(hive_regex, utils.SmartUnicode(hive.Name), re.I)
if m:
hive_offsets.append(hive)
self.hive_offsets = hive_offsets
def list_hives(self):
hive_list = self.profile.get_constant_object(
"CmpHiveListHead", "_LIST_ENTRY")
return hive_list.list_of_type("_CMHIVE", "HiveList")
class Hives(RegistryPlugin):
"""List all the registry hives on the system."""
name = "hives"
def render(self, renderer):
renderer.table_header([("Offset", "offset", "[addrpad]"),
("Name", "name", "")])
for hive in self.list_hives():
renderer.table_row(hive, hive.Name)
|
ofayans/freeipa
|
refs/heads/master
|
ipaserver/plugins/certprofile.py
|
1
|
#
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
import re
from ipalib import api, Bool, Str
from ipalib.plugable import Registry
from .baseldap import (
LDAPObject, LDAPSearch, LDAPCreate,
LDAPDelete, LDAPUpdate, LDAPRetrieve)
from ipalib.request import context
from ipalib import ngettext
from ipalib.text import _
from ipapython.dogtag import INCLUDED_PROFILES
from ipapython.version import API_VERSION
from ipalib import errors
__doc__ = _("""
Manage Certificate Profiles
Certificate Profiles are used by the Certificate Authority (CA) when signing
certificates to determine whether a Certificate Signing Request (CSR) is
acceptable, and if so, what features and extensions will be present on the
certificate.
The Certificate Profile format is the property-list format understood by the
Dogtag or Red Hat Certificate System CA.
PROFILE ID SYNTAX:
A Profile ID is a string without spaces or punctuation starting with a letter
and followed by a sequence of letters, digits or underscore ("_").
EXAMPLES:
Import a profile that will not store issued certificates:
ipa certprofile-import ShortLivedUserCert \\
--file UserCert.profile --desc "User Certificates" \\
--store=false
Delete a certificate profile:
ipa certprofile-del ShortLivedUserCert
Show information about a profile:
ipa certprofile-show ShortLivedUserCert
Save profile configuration to a file:
ipa certprofile-show caIPAserviceCert --out caIPAserviceCert.cfg
Search for profiles that do not store certificates:
ipa certprofile-find --store=false
PROFILE CONFIGURATION FORMAT:
The profile configuration format is the raw property-list format
used by Dogtag Certificate System. The XML format is not supported.
The following restrictions apply to profiles managed by FreeIPA:
- When importing a profile the "profileId" field, if present, must
match the ID given on the command line.
- The "classId" field must be set to "caEnrollImpl"
- The "auth.instance_id" field must be set to "raCertAuth"
- The "certReqInputImpl" input class and "certOutputImpl" output
class must be used.
""")
register = Registry()
def ca_enabled_check():
"""Raise NotFound if CA is not enabled.
This function is defined in multiple plugins to avoid circular imports
(cert depends on certprofile, so we cannot import cert here).
"""
if not api.Command.ca_is_enabled()['result']:
raise errors.NotFound(reason=_('CA is not configured'))
profile_id_pattern = re.compile(r'^[a-zA-Z]\w*$')
def validate_profile_id(ugettext, value):
"""Ensure profile ID matches form required by CA."""
if profile_id_pattern.match(value) is None:
return _('invalid Profile ID')
else:
return None
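# For example (illustrative values): "caIPAserviceCert" and "user_cert2"
# satisfy the pattern above, while "2cert" (leading digit) and "my-cert"
# (hyphen) are rejected.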
@register()
class certprofile(LDAPObject):
"""
Certificate Profile object.
"""
container_dn = api.env.container_certprofile
object_name = _('Certificate Profile')
object_name_plural = _('Certificate Profiles')
object_class = ['ipacertprofile']
default_attributes = [
'cn', 'description', 'ipacertprofilestoreissued'
]
search_attributes = [
'cn', 'description', 'ipacertprofilestoreissued'
]
label = _('Certificate Profiles')
label_singular = _('Certificate Profile')
takes_params = (
Str('cn', validate_profile_id,
primary_key=True,
cli_name='id',
label=_('Profile ID'),
doc=_('Profile ID for referring to this profile'),
),
Str('config',
label=_('Profile configuration'),
flags={'virtual_attribute', 'no_create', 'no_update', 'no_search'},
),
Str('description',
required=True,
cli_name='desc',
label=_('Profile description'),
doc=_('Brief description of this profile'),
),
Bool('ipacertprofilestoreissued',
default=True,
cli_name='store',
label=_('Store issued certificates'),
doc=_('Whether to store certs issued using this profile'),
),
)
permission_filter_objectclasses = ['ipacertprofile']
managed_permissions = {
'System: Read Certificate Profiles': {
'replaces_global_anonymous_aci': True,
'ipapermbindruletype': 'all',
'ipapermright': {'read', 'search', 'compare'},
'ipapermdefaultattr': {
'cn',
'description',
'ipacertprofilestoreissued',
'objectclass',
},
},
'System: Import Certificate Profile': {
'ipapermright': {'add'},
'replaces': [
'(target = "ldap:///cn=*,cn=certprofiles,cn=ca,$SUFFIX")(version 3.0;acl "permission:Import Certificate Profile";allow (add) groupdn = "ldap:///cn=Import Certificate Profile,cn=permissions,cn=pbac,$SUFFIX";)',
],
'default_privileges': {'CA Administrator'},
},
'System: Delete Certificate Profile': {
'ipapermright': {'delete'},
'replaces': [
'(target = "ldap:///cn=*,cn=certprofiles,cn=ca,$SUFFIX")(version 3.0;acl "permission:Delete Certificate Profile";allow (delete) groupdn = "ldap:///cn=Delete Certificate Profile,cn=permissions,cn=pbac,$SUFFIX";)',
],
'default_privileges': {'CA Administrator'},
},
'System: Modify Certificate Profile': {
'ipapermright': {'write'},
'ipapermdefaultattr': {
'cn',
'description',
'ipacertprofilestoreissued',
},
'replaces': [
'(targetattr = "cn || description || ipacertprofilestoreissued")(target = "ldap:///cn=*,cn=certprofiles,cn=ca,$SUFFIX")(version 3.0;acl "permission:Modify Certificate Profile";allow (write) groupdn = "ldap:///cn=Modify Certificate Profile,cn=permissions,cn=pbac,$SUFFIX";)',
],
'default_privileges': {'CA Administrator'},
},
}
@register()
class certprofile_find(LDAPSearch):
__doc__ = _("Search for Certificate Profiles.")
msg_summary = ngettext(
'%(count)d profile matched', '%(count)d profiles matched', 0
)
def execute(self, *args, **kwargs):
ca_enabled_check()
return super(certprofile_find, self).execute(*args, **kwargs)
@register()
class certprofile_show(LDAPRetrieve):
__doc__ = _("Display the properties of a Certificate Profile.")
takes_options = LDAPRetrieve.takes_options + (
Str('out?',
doc=_('Write profile configuration to file'),
),
)
def execute(self, *keys, **options):
ca_enabled_check()
result = super(certprofile_show, self).execute(*keys, **options)
if 'out' in options:
with self.api.Backend.ra_certprofile as profile_api:
result['result']['config'] = profile_api.read_profile(keys[0])
return result
@register()
class certprofile_import(LDAPCreate):
__doc__ = _("Import a Certificate Profile.")
msg_summary = _('Imported profile "%(value)s"')
takes_options = (
Str(
'file',
label=_('Filename of a raw profile. The XML format is not supported.'),
cli_name='file',
flags=('virtual_attribute',),
noextrawhitespace=False,
),
)
    PROFILE_ID_PATTERN = re.compile(r'^profileId=([a-zA-Z]\w*)', re.MULTILINE)
def pre_callback(self, ldap, dn, entry, entry_attrs, *keys, **options):
ca_enabled_check()
context.profile = options['file']
match = self.PROFILE_ID_PATTERN.search(options['file'])
if match is None:
# no profileId found, use CLI value as profileId.
context.profile = u'profileId=%s\n%s' % (keys[0], context.profile)
elif keys[0] != match.group(1):
raise errors.ValidationError(name='file',
error=_("Profile ID '%(cli_value)s' does not match profile data '%(file_value)s'")
% {'cli_value': keys[0], 'file_value': match.group(1)}
)
return dn
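    # Illustrative behaviour (hypothetical values): if the file contains a
    # line "profileId=userCert", then `ipa certprofile-import userCert ...`
    # succeeds, while `ipa certprofile-import otherCert ...` raises
    # ValidationError because the two IDs differ.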
def post_callback(self, ldap, dn, entry_attrs, *keys, **options):
"""Import the profile into Dogtag and enable it.
If the operation fails, remove the LDAP entry.
"""
try:
with self.api.Backend.ra_certprofile as profile_api:
profile_api.create_profile(context.profile)
profile_api.enable_profile(keys[0])
except:
            # something went wrong; delete the entry
ldap.delete_entry(dn)
raise
return dn
@register()
class certprofile_del(LDAPDelete):
__doc__ = _("Delete a Certificate Profile.")
msg_summary = _('Deleted profile "%(value)s"')
def pre_callback(self, ldap, dn, *keys, **options):
ca_enabled_check()
if keys[0] in [p.profile_id for p in INCLUDED_PROFILES]:
raise errors.ValidationError(name='profile_id',
error=_("Predefined profile '%(profile_id)s' cannot be deleted")
% {'profile_id': keys[0]}
)
return dn
def post_callback(self, ldap, dn, *keys, **options):
with self.api.Backend.ra_certprofile as profile_api:
profile_api.disable_profile(keys[0])
profile_api.delete_profile(keys[0])
return dn
@register()
class certprofile_mod(LDAPUpdate):
__doc__ = _("Modify Certificate Profile configuration.")
msg_summary = _('Modified Certificate Profile "%(value)s"')
takes_options = LDAPUpdate.takes_options + (
Str(
'file?',
label=_('File containing profile configuration'),
cli_name='file',
flags=('virtual_attribute',),
noextrawhitespace=False,
),
)
def pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys, **options):
ca_enabled_check()
# Once a profile id is set it cannot be changed
if 'cn' in entry_attrs:
raise errors.ProtectedEntryError(label='certprofile', key=keys[0],
reason=_('Certificate profiles cannot be renamed'))
if 'file' in options:
with self.api.Backend.ra_certprofile as profile_api:
profile_api.disable_profile(keys[0])
try:
profile_api.update_profile(keys[0], options['file'])
finally:
profile_api.enable_profile(keys[0])
return dn
def execute(self, *keys, **options):
try:
return super(certprofile_mod, self).execute(*keys, **options)
except errors.EmptyModlist:
if 'file' in options:
# The profile data in Dogtag was updated.
# Do not fail; return result of certprofile-show instead
return self.api.Command.certprofile_show(keys[0],
version=API_VERSION)
else:
# This case is actually an error; re-raise
raise
|
akidhruv/Computational_Cauldron
|
refs/heads/master
|
FORTRAN/INS_PyF/plot.py
|
1
|
import matplotlib.pyplot as plt
import numpy as np
M=20+1
N=20+1
k = 2
d = 2
X=np.zeros((N*d,M*k),dtype=float)
Y=np.zeros((N*d,M*k),dtype=float)
U=np.zeros((N*d,M*k),dtype=float)
V=np.zeros((N*d,M*k),dtype=float)
P=np.zeros((N*d,M*k),dtype=float)
T=np.zeros((N*d,M*k),dtype=float)
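# Stitch the d x k (here 2 x 2) grid of block-decomposed output files into
# single global arrays: block i fills row-block (i // k) and column-block
# (i % k) of the tiled domain.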
for i in range(k*d):
data=np.loadtxt('LidData0%d.dat' % i)
x = data[:,0]
y = data[:,1]
u = data[:,2]
v = data[:,3]
p = data[:,4]
t = data[:,5]
x = np.reshape(x,[N,M])
y = np.reshape(y,[N,M])
u = np.reshape(u,[N,M])
v = np.reshape(v,[N,M])
p = np.reshape(p,[N,M])
t = np.reshape(t,[N,M])
    X[(i//k)*N:(i//k)*N+N,(i%k)*M:(i%k)*M+M]=x
    Y[(i//k)*N:(i//k)*N+N,(i%k)*M:(i%k)*M+M]=y
    U[(i//k)*N:(i//k)*N+N,(i%k)*M:(i%k)*M+M]=u
    V[(i//k)*N:(i//k)*N+N,(i%k)*M:(i%k)*M+M]=v
    P[(i//k)*N:(i//k)*N+N,(i%k)*M:(i%k)*M+M]=p
    T[(i//k)*N:(i//k)*N+N,(i%k)*M:(i%k)*M+M]=t
plt.figure()
plt.title('Resultant Velocity')
plt.contourf(X,Y,np.sqrt(U**2+V**2),density=5)
plt.axis('equal')
plt.figure()
plt.title('Pressure')
plt.contourf(X,Y,P,density=5)
plt.axis('equal')
plt.figure()
plt.title('Temperature')
plt.contourf(X,Y,T,density=5)
plt.axis('equal')
"""
plt.figure()
plt.contourf(X,Y,U,density=5)
plt.axis('equal')
plt.figure()
plt.contourf(X,Y,V,density=5)
plt.axis('equal')
"""
plt.show()
|
jerome-nexedi/dream
|
refs/heads/master
|
dream/simulation/RoutingQueue.py
|
4
|
# ===========================================================================
# Copyright 2013 University of Limerick
#
# This file is part of DREAM.
#
# DREAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DREAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DREAM. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
'''
Created on 11 Jul 2014
@author: Ioannis
'''
'''
Models a queue where entities wait to be routed to the same server to which
other entities of the same parent entity have already been routed.
If the level is reached, the Router object is signalled.
'''
import simpy
from Queue import Queue
# ===========================================================================
# the Queue object
# ===========================================================================
class RoutingQueue(Queue):
# =======================================================================
# checks if the Queue can dispose an entity to the following object
    # it also checks who called it and returns TRUE
    # only to the receiver that will receive the entity.
    # this is kind of slow, I think; needs checking
# TODO: check which route the entities of the same parent entity have picked and route them the same way
# =======================================================================
def haveToDispose(self, callerObject=None):
activeObjectQueue=self.Res.users
#if we have only one possible receiver just check if the Queue holds one or more entities
if(callerObject==None):
return len(activeObjectQueue)>0
thecaller=callerObject
        # local flag to control whether the callerObject can receive any of the entities in the buffer's internal queue
isInRouting=False
# for each entity in the buffer
for entity in activeObjectQueue:
if thecaller==entity.receiver:
isInRouting=True
break
if not isInRouting:
for entity in activeObjectQueue:
if not entity.receiver:
isInRouting=True
break
return len(activeObjectQueue)>0 and thecaller.isInRouteOf(self) and isInRouting
#===========================================================================
# sort the entities of the queue for the receiver
    # TODO: the sortEntitiesForReceiver method should bring to the front the entity that can proceed in that route
#===========================================================================
def sortEntitiesForReceiver(self, receiver=None):
activeObjectQueue=self.getActiveObjectQueue()
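        # Three stable sorts, so the last key dominates: entities already
        # assigned to this receiver come first, then entities with no
        # receiver yet, then entities assigned to other receivers.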
activeObjectQueue.sort(key=lambda x: not(x.receiver is receiver), reverse=False)
        activeObjectQueue.sort(key=lambda x: x.receiver is None, reverse=True)
activeObjectQueue.sort(key=lambda x: (x.receiver is receiver), reverse=True)
# =======================================================================
    # removes an entity from the queue and records the receiving station
    # on every sub-batch of the entity's parent batch
# =======================================================================
def removeEntity(self, entity=None, resetFlags=True, addBlockage=True):
activeEntity=Queue.removeEntity(self, entity)
parentBatch=activeEntity.parentBatch
for subbatch in parentBatch.subBatchList:
subbatch.receiver=activeEntity.currentStation
return activeEntity
# =======================================================================
# sorts the Entities of the Queue according to the scheduling rule
# TODO: sort the entities according to the schedulingRUle and then sort them again
# bringing to the front the entities that can proceed
# =======================================================================
def sortEntities(self):
#if we have sorting according to multiple criteria we have to call the sorter many times
if self.schedulingRule=="MC":
for criterion in reversed(self.multipleCriterionList):
self.activeQSorter(criterion=criterion)
#else we just use the default scheduling rule
else:
self.activeQSorter()
# sort again according to the existence or not of receiver attribute of the entities
activeObjectQueue=self.getActiveObjectQueue()
# if no entity.receiver, then show preference to these entities
        activeObjectQueue.sort(key=lambda x: x.receiver is None, reverse=True)
# if there is entity.receiver then check if it is the same as the self.receiver of the queue (if any)
activeObjectQueue.sort(key=lambda x: x.receiver==self.receiver, reverse=True)
|
silly-wacky-3-town-toon/SOURCE-COD
|
refs/heads/master
|
toontown/racing/KartShopGlobals.py
|
6
|
from direct.showbase import PythonUtil
class KartShopGlobals:
EVENTDICT = {'guiDone': 'guiDone',
'returnKart': 'returnKart',
'buyKart': 'buyAKart',
'buyAccessory': 'buyAccessory'}
KARTCLERK_TIMER = 180
MAX_KART_ACC = 16
class KartGlobals:
ENTER_MOVIE = 1
EXIT_MOVIE = 2
COUNTDOWN_TIME = 30
BOARDING_TIME = 10.0
ENTER_RACE_TIME = 6.0
ERROR_CODE = PythonUtil.Enum('success, eGeneric, eTickets, eBoardOver, eNoKart, eOccupied, eTrackClosed, eTooLate, eUnpaid')
FRONT_LEFT_SPOT = 0
FRONT_RIGHT_SPOT = 1
REAR_LEFT_SPOT = 2
REAR_RIGHT_SPOT = 3
PAD_GROUP_NUM = 4
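    # Pads are grouped in fours; a pad's boarding spot within its group is
    # its id modulo PAD_GROUP_NUM (see getPadLocation below).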
    @staticmethod
    def getPadLocation(padId):
        return padId % KartGlobals.PAD_GROUP_NUM
|
pballand/congress
|
refs/heads/master
|
thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/teststreams.py
|
5
|
from io import StringIO
import os
import unittest
import antlr3
class TestStringStream(unittest.TestCase):
"""Test case for the StringStream class."""
def testSize(self):
"""StringStream.size()"""
stream = antlr3.StringStream('foo')
self.assertEqual(stream.size(), 3)
def testIndex(self):
"""StringStream.index()"""
stream = antlr3.StringStream('foo')
self.assertEqual(stream.index(), 0)
def testConsume(self):
"""StringStream.consume()"""
stream = antlr3.StringStream('foo\nbar')
stream.consume() # f
self.assertEqual(stream.index(), 1)
self.assertEqual(stream.charPositionInLine, 1)
self.assertEqual(stream.line, 1)
stream.consume() # o
self.assertEqual(stream.index(), 2)
self.assertEqual(stream.charPositionInLine, 2)
self.assertEqual(stream.line, 1)
stream.consume() # o
self.assertEqual(stream.index(), 3)
self.assertEqual(stream.charPositionInLine, 3)
self.assertEqual(stream.line, 1)
stream.consume() # \n
self.assertEqual(stream.index(), 4)
self.assertEqual(stream.charPositionInLine, 0)
self.assertEqual(stream.line, 2)
stream.consume() # b
self.assertEqual(stream.index(), 5)
self.assertEqual(stream.charPositionInLine, 1)
self.assertEqual(stream.line, 2)
stream.consume() # a
self.assertEqual(stream.index(), 6)
self.assertEqual(stream.charPositionInLine, 2)
self.assertEqual(stream.line, 2)
stream.consume() # r
self.assertEqual(stream.index(), 7)
self.assertEqual(stream.charPositionInLine, 3)
self.assertEqual(stream.line, 2)
stream.consume() # EOF
self.assertEqual(stream.index(), 7)
self.assertEqual(stream.charPositionInLine, 3)
self.assertEqual(stream.line, 2)
stream.consume() # EOF
self.assertEqual(stream.index(), 7)
self.assertEqual(stream.charPositionInLine, 3)
self.assertEqual(stream.line, 2)
def testReset(self):
"""StringStream.reset()"""
stream = antlr3.StringStream('foo')
stream.consume()
stream.consume()
stream.reset()
self.assertEqual(stream.index(), 0)
self.assertEqual(stream.line, 1)
self.assertEqual(stream.charPositionInLine, 0)
self.assertEqual(stream.LT(1), 'f')
def testLA(self):
"""StringStream.LA()"""
stream = antlr3.StringStream('foo')
self.assertEqual(stream.LT(1), 'f')
self.assertEqual(stream.LT(2), 'o')
self.assertEqual(stream.LT(3), 'o')
stream.consume()
stream.consume()
self.assertEqual(stream.LT(1), 'o')
self.assertEqual(stream.LT(2), antlr3.EOF)
self.assertEqual(stream.LT(3), antlr3.EOF)
def testSubstring(self):
"""StringStream.substring()"""
stream = antlr3.StringStream('foobar')
self.assertEqual(stream.substring(0, 0), 'f')
self.assertEqual(stream.substring(0, 1), 'fo')
self.assertEqual(stream.substring(0, 5), 'foobar')
self.assertEqual(stream.substring(3, 5), 'bar')
def testSeekForward(self):
"""StringStream.seek(): forward"""
stream = antlr3.StringStream('foo\nbar')
stream.seek(4)
self.assertEqual(stream.index(), 4)
self.assertEqual(stream.line, 2)
self.assertEqual(stream.charPositionInLine, 0)
self.assertEqual(stream.LT(1), 'b')
## # not yet implemented
## def testSeekBackward(self):
## """StringStream.seek(): backward"""
## stream = antlr3.StringStream('foo\nbar')
## stream.seek(4)
## stream.seek(1)
## self.assertEqual(stream.index(), 1)
## self.assertEqual(stream.line, 1)
## self.assertEqual(stream.charPositionInLine, 1)
## self.assertEqual(stream.LA(1), 'o')
def testMark(self):
"""StringStream.mark()"""
stream = antlr3.StringStream('foo\nbar')
stream.seek(4)
marker = stream.mark()
self.assertEqual(marker, 1)
self.assertEqual(stream.markDepth, 1)
stream.consume()
marker = stream.mark()
self.assertEqual(marker, 2)
self.assertEqual(stream.markDepth, 2)
def testReleaseLast(self):
"""StringStream.release(): last marker"""
stream = antlr3.StringStream('foo\nbar')
stream.seek(4)
marker1 = stream.mark()
stream.consume()
marker2 = stream.mark()
stream.release()
self.assertEqual(stream.markDepth, 1)
# release same marker again, nothing has changed
stream.release()
self.assertEqual(stream.markDepth, 1)
def testReleaseNested(self):
"""StringStream.release(): nested"""
stream = antlr3.StringStream('foo\nbar')
stream.seek(4)
marker1 = stream.mark()
stream.consume()
marker2 = stream.mark()
stream.consume()
marker3 = stream.mark()
stream.release(marker2)
self.assertEqual(stream.markDepth, 1)
def testRewindLast(self):
"""StringStream.rewind(): last marker"""
stream = antlr3.StringStream('foo\nbar')
stream.seek(4)
marker = stream.mark()
stream.consume()
stream.consume()
stream.rewind()
self.assertEqual(stream.markDepth, 0)
self.assertEqual(stream.index(), 4)
self.assertEqual(stream.line, 2)
self.assertEqual(stream.charPositionInLine, 0)
self.assertEqual(stream.LT(1), 'b')
def testRewindNested(self):
"""StringStream.rewind(): nested"""
stream = antlr3.StringStream('foo\nbar')
stream.seek(4)
marker1 = stream.mark()
stream.consume()
marker2 = stream.mark()
stream.consume()
marker3 = stream.mark()
stream.rewind(marker2)
self.assertEqual(stream.markDepth, 1)
self.assertEqual(stream.index(), 5)
self.assertEqual(stream.line, 2)
self.assertEqual(stream.charPositionInLine, 1)
self.assertEqual(stream.LT(1), 'a')
class TestFileStream(unittest.TestCase):
"""Test case for the FileStream class."""
def testNoEncoding(self):
path = os.path.join(os.path.dirname(__file__), 'teststreams.input1')
stream = antlr3.FileStream(path)
stream.seek(4)
marker1 = stream.mark()
stream.consume()
marker2 = stream.mark()
stream.consume()
marker3 = stream.mark()
stream.rewind(marker2)
self.assertEqual(stream.markDepth, 1)
self.assertEqual(stream.index(), 5)
self.assertEqual(stream.line, 2)
self.assertEqual(stream.charPositionInLine, 1)
self.assertEqual(stream.LT(1), 'a')
self.assertEqual(stream.LA(1), ord('a'))
def testEncoded(self):
path = os.path.join(os.path.dirname(__file__), 'teststreams.input2')
stream = antlr3.FileStream(path)
stream.seek(4)
marker1 = stream.mark()
stream.consume()
marker2 = stream.mark()
stream.consume()
marker3 = stream.mark()
stream.rewind(marker2)
self.assertEqual(stream.markDepth, 1)
self.assertEqual(stream.index(), 5)
self.assertEqual(stream.line, 2)
self.assertEqual(stream.charPositionInLine, 1)
self.assertEqual(stream.LT(1), 'ä')
self.assertEqual(stream.LA(1), ord('ä'))
class TestInputStream(unittest.TestCase):
"""Test case for the InputStream class."""
def testNoEncoding(self):
file = StringIO('foo\nbar')
stream = antlr3.InputStream(file)
stream.seek(4)
marker1 = stream.mark()
stream.consume()
marker2 = stream.mark()
stream.consume()
marker3 = stream.mark()
stream.rewind(marker2)
self.assertEqual(stream.markDepth, 1)
self.assertEqual(stream.index(), 5)
self.assertEqual(stream.line, 2)
self.assertEqual(stream.charPositionInLine, 1)
self.assertEqual(stream.LT(1), 'a')
self.assertEqual(stream.LA(1), ord('a'))
def testEncoded(self):
file = StringIO('foo\nbär')
stream = antlr3.InputStream(file)
stream.seek(4)
marker1 = stream.mark()
stream.consume()
marker2 = stream.mark()
stream.consume()
marker3 = stream.mark()
stream.rewind(marker2)
self.assertEqual(stream.markDepth, 1)
self.assertEqual(stream.index(), 5)
self.assertEqual(stream.line, 2)
self.assertEqual(stream.charPositionInLine, 1)
self.assertEqual(stream.LT(1), 'ä')
self.assertEqual(stream.LA(1), ord('ä'))
class TestCommonTokenStream(unittest.TestCase):
"""Test case for the StringStream class."""
def setUp(self):
"""Setup test fixure
The constructor of CommonTokenStream needs a token source. This
is a simple mock class providing just the nextToken() method.
"""
class MockSource(object):
def __init__(self):
self.tokens = []
def makeEOFToken(self):
return antlr3.CommonToken(type=antlr3.EOF)
def nextToken(self):
if self.tokens:
return self.tokens.pop(0)
return None
self.source = MockSource()
def testInit(self):
"""CommonTokenStream.__init__()"""
stream = antlr3.CommonTokenStream(self.source)
self.assertEqual(stream.index(), -1)
def testSetTokenSource(self):
"""CommonTokenStream.setTokenSource()"""
stream = antlr3.CommonTokenStream(None)
stream.setTokenSource(self.source)
self.assertEqual(stream.index(), -1)
self.assertEqual(stream.channel, antlr3.DEFAULT_CHANNEL)
def testLTEmptySource(self):
"""CommonTokenStream.LT(): EOF (empty source)"""
stream = antlr3.CommonTokenStream(self.source)
lt1 = stream.LT(1)
self.assertEqual(lt1.type, antlr3.EOF)
def testLT1(self):
"""CommonTokenStream.LT(1)"""
self.source.tokens.append(
antlr3.CommonToken(type=12)
)
stream = antlr3.CommonTokenStream(self.source)
lt1 = stream.LT(1)
self.assertEqual(lt1.type, 12)
def testLT1WithHidden(self):
"""CommonTokenStream.LT(1): with hidden tokens"""
self.source.tokens.append(
antlr3.CommonToken(type=12, channel=antlr3.HIDDEN_CHANNEL)
)
self.source.tokens.append(
antlr3.CommonToken(type=13)
)
stream = antlr3.CommonTokenStream(self.source)
lt1 = stream.LT(1)
self.assertEqual(lt1.type, 13)
def testLT2BeyondEnd(self):
"""CommonTokenStream.LT(2): beyond end"""
self.source.tokens.append(
antlr3.CommonToken(type=12)
)
self.source.tokens.append(
antlr3.CommonToken(type=13, channel=antlr3.HIDDEN_CHANNEL)
)
stream = antlr3.CommonTokenStream(self.source)
lt1 = stream.LT(2)
self.assertEqual(lt1.type, antlr3.EOF)
# not yet implemented
def testLTNegative(self):
"""CommonTokenStream.LT(-1): look back"""
self.source.tokens.append(
antlr3.CommonToken(type=12)
)
self.source.tokens.append(
antlr3.CommonToken(type=13)
)
stream = antlr3.CommonTokenStream(self.source)
stream.fillBuffer()
stream.consume()
lt1 = stream.LT(-1)
self.assertEqual(lt1.type, 12)
def testLB1(self):
"""CommonTokenStream.LB(1)"""
self.source.tokens.append(
antlr3.CommonToken(type=12)
)
self.source.tokens.append(
antlr3.CommonToken(type=13)
)
stream = antlr3.CommonTokenStream(self.source)
stream.fillBuffer()
stream.consume()
self.assertEqual(stream.LB(1).type, 12)
def testLTZero(self):
"""CommonTokenStream.LT(0)"""
self.source.tokens.append(
antlr3.CommonToken(type=12)
)
self.source.tokens.append(
antlr3.CommonToken(type=13)
)
stream = antlr3.CommonTokenStream(self.source)
lt1 = stream.LT(0)
self.assertIsNone(lt1)
def testLBBeyondBegin(self):
"""CommonTokenStream.LB(-1): beyond begin"""
self.source.tokens.append(
antlr3.CommonToken(type=12)
)
self.source.tokens.append(
antlr3.CommonToken(type=12, channel=antlr3.HIDDEN_CHANNEL)
)
self.source.tokens.append(
antlr3.CommonToken(type=12, channel=antlr3.HIDDEN_CHANNEL)
)
self.source.tokens.append(
antlr3.CommonToken(type=13)
)
stream = antlr3.CommonTokenStream(self.source)
self.assertIsNone(stream.LB(1))
stream.consume()
stream.consume()
self.assertIsNone(stream.LB(3))
def testFillBuffer(self):
"""CommonTokenStream.fillBuffer()"""
self.source.tokens.append(
antlr3.CommonToken(type=12)
)
self.source.tokens.append(
antlr3.CommonToken(type=13)
)
self.source.tokens.append(
antlr3.CommonToken(type=14)
)
self.source.tokens.append(
antlr3.CommonToken(type=antlr3.EOF)
)
stream = antlr3.CommonTokenStream(self.source)
stream.fillBuffer()
self.assertEqual(len(stream.tokens), 3)
self.assertEqual(stream.tokens[0].type, 12)
self.assertEqual(stream.tokens[1].type, 13)
self.assertEqual(stream.tokens[2].type, 14)
def testConsume(self):
"""CommonTokenStream.consume()"""
self.source.tokens.append(
antlr3.CommonToken(type=12)
)
self.source.tokens.append(
antlr3.CommonToken(type=13)
)
self.source.tokens.append(
antlr3.CommonToken(type=antlr3.EOF)
)
stream = antlr3.CommonTokenStream(self.source)
self.assertEqual(stream.LA(1), 12)
stream.consume()
self.assertEqual(stream.LA(1), 13)
stream.consume()
self.assertEqual(stream.LA(1), antlr3.EOF)
stream.consume()
self.assertEqual(stream.LA(1), antlr3.EOF)
def testSeek(self):
"""CommonTokenStream.seek()"""
self.source.tokens.append(
antlr3.CommonToken(type=12)
)
self.source.tokens.append(
antlr3.CommonToken(type=13)
)
self.source.tokens.append(
antlr3.CommonToken(type=antlr3.EOF)
)
stream = antlr3.CommonTokenStream(self.source)
self.assertEqual(stream.LA(1), 12)
stream.seek(2)
self.assertEqual(stream.LA(1), antlr3.EOF)
stream.seek(0)
self.assertEqual(stream.LA(1), 12)
def testMarkRewind(self):
"""CommonTokenStream.mark()/rewind()"""
self.source.tokens.append(
antlr3.CommonToken(type=12)
)
self.source.tokens.append(
antlr3.CommonToken(type=13)
)
self.source.tokens.append(
antlr3.CommonToken(type=antlr3.EOF)
)
stream = antlr3.CommonTokenStream(self.source)
stream.fillBuffer()
stream.consume()
marker = stream.mark()
stream.consume()
stream.rewind(marker)
self.assertEqual(stream.LA(1), 13)
def testToString(self):
"""CommonTokenStream.toString()"""
self.source.tokens.append(
antlr3.CommonToken(type=12, text="foo")
)
self.source.tokens.append(
antlr3.CommonToken(type=13, text="bar")
)
self.source.tokens.append(
antlr3.CommonToken(type=14, text="gnurz")
)
self.source.tokens.append(
antlr3.CommonToken(type=15, text="blarz")
)
stream = antlr3.CommonTokenStream(self.source)
self.assertEqual(stream.toString(), "foobargnurzblarz")
self.assertEqual(stream.toString(1, 2), "bargnurz")
self.assertEqual(stream.toString(stream.tokens[1], stream.tokens[-2]), "bargnurz")
if __name__ == "__main__":
unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
|
google-code/android-scripting
|
refs/heads/master
|
python/src/Lib/filecmp.py
|
61
|
"""Utilities for comparing files and directories.
Classes:
dircmp
Functions:
cmp(f1, f2, shallow=1) -> int
cmpfiles(a, b, common) -> ([], [], [])
"""
import os
import stat
from itertools import ifilter, ifilterfalse, imap, izip
__all__ = ["cmp","dircmp","cmpfiles"]
_cache = {}
BUFSIZE=8*1024
def cmp(f1, f2, shallow=1):
"""Compare two files.
Arguments:
f1 -- First file name
f2 -- Second file name
shallow -- Just check stat signature (do not read the files).
               Defaults to 1.
Return value:
True if the files are the same, False otherwise.
This function uses a cache for past comparisons and the results,
with a cache invalidation mechanism relying on stale signatures.
"""
s1 = _sig(os.stat(f1))
s2 = _sig(os.stat(f2))
if s1[0] != stat.S_IFREG or s2[0] != stat.S_IFREG:
return False
if shallow and s1 == s2:
return True
if s1[1] != s2[1]:
return False
result = _cache.get((f1, f2))
if result and (s1, s2) == result[:2]:
return result[2]
outcome = _do_cmp(f1, f2)
_cache[f1, f2] = s1, s2, outcome
return outcome
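# Minimal usage sketch (hypothetical paths):
#
#   cmp('a/config.txt', 'b/config.txt')     # shallow: trusts matching stat signatures
#   cmp('a/config.txt', 'b/config.txt', 0)  # deep: compares file contents (cached)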
def _sig(st):
return (stat.S_IFMT(st.st_mode),
st.st_size,
st.st_mtime)
def _do_cmp(f1, f2):
bufsize = BUFSIZE
fp1 = open(f1, 'rb')
fp2 = open(f2, 'rb')
while True:
b1 = fp1.read(bufsize)
b2 = fp2.read(bufsize)
if b1 != b2:
return False
if not b1:
return True
# Directory comparison class.
#
class dircmp:
"""A class that manages the comparison of 2 directories.
dircmp(a,b,ignore=None,hide=None)
A and B are directories.
IGNORE is a list of names to ignore,
defaults to ['RCS', 'CVS', 'tags'].
HIDE is a list of names to hide,
defaults to [os.curdir, os.pardir].
High level usage:
x = dircmp(dir1, dir2)
x.report() -> prints a report on the differences between dir1 and dir2
or
x.report_partial_closure() -> prints report on differences between dir1
and dir2, and reports on common immediate subdirectories.
x.report_full_closure() -> like report_partial_closure,
but fully recursive.
Attributes:
left_list, right_list: The files in dir1 and dir2,
filtered by hide and ignore.
common: a list of names in both dir1 and dir2.
left_only, right_only: names only in dir1, dir2.
common_dirs: subdirectories in both dir1 and dir2.
common_files: files in both dir1 and dir2.
common_funny: names in both dir1 and dir2 where the type differs between
dir1 and dir2, or the name is not stat-able.
same_files: list of identical files.
diff_files: list of filenames which differ.
funny_files: list of files which could not be compared.
subdirs: a dictionary of dircmp objects, keyed by names in common_dirs.
"""
def __init__(self, a, b, ignore=None, hide=None): # Initialize
self.left = a
self.right = b
if hide is None:
self.hide = [os.curdir, os.pardir] # Names never to be shown
else:
self.hide = hide
if ignore is None:
self.ignore = ['RCS', 'CVS', 'tags'] # Names ignored in comparison
else:
self.ignore = ignore
def phase0(self): # Compare everything except common subdirectories
self.left_list = _filter(os.listdir(self.left),
self.hide+self.ignore)
self.right_list = _filter(os.listdir(self.right),
self.hide+self.ignore)
self.left_list.sort()
self.right_list.sort()
def phase1(self): # Compute common names
a = dict(izip(imap(os.path.normcase, self.left_list), self.left_list))
b = dict(izip(imap(os.path.normcase, self.right_list), self.right_list))
self.common = map(a.__getitem__, ifilter(b.__contains__, a))
self.left_only = map(a.__getitem__, ifilterfalse(b.__contains__, a))
self.right_only = map(b.__getitem__, ifilterfalse(a.__contains__, b))
def phase2(self): # Distinguish files, directories, funnies
self.common_dirs = []
self.common_files = []
self.common_funny = []
for x in self.common:
a_path = os.path.join(self.left, x)
b_path = os.path.join(self.right, x)
ok = 1
try:
a_stat = os.stat(a_path)
except os.error, why:
# print 'Can\'t stat', a_path, ':', why[1]
ok = 0
try:
b_stat = os.stat(b_path)
except os.error, why:
# print 'Can\'t stat', b_path, ':', why[1]
ok = 0
if ok:
a_type = stat.S_IFMT(a_stat.st_mode)
b_type = stat.S_IFMT(b_stat.st_mode)
if a_type != b_type:
self.common_funny.append(x)
elif stat.S_ISDIR(a_type):
self.common_dirs.append(x)
elif stat.S_ISREG(a_type):
self.common_files.append(x)
else:
self.common_funny.append(x)
else:
self.common_funny.append(x)
def phase3(self): # Find out differences between common files
xx = cmpfiles(self.left, self.right, self.common_files)
self.same_files, self.diff_files, self.funny_files = xx
def phase4(self): # Find out differences between common subdirectories
# A new dircmp object is created for each common subdirectory,
# these are stored in a dictionary indexed by filename.
# The hide and ignore properties are inherited from the parent
self.subdirs = {}
for x in self.common_dirs:
a_x = os.path.join(self.left, x)
b_x = os.path.join(self.right, x)
self.subdirs[x] = dircmp(a_x, b_x, self.ignore, self.hide)
def phase4_closure(self): # Recursively call phase4() on subdirectories
self.phase4()
for sd in self.subdirs.itervalues():
sd.phase4_closure()
def report(self): # Print a report on the differences between a and b
# Output format is purposely lousy
print 'diff', self.left, self.right
if self.left_only:
self.left_only.sort()
print 'Only in', self.left, ':', self.left_only
if self.right_only:
self.right_only.sort()
print 'Only in', self.right, ':', self.right_only
if self.same_files:
self.same_files.sort()
print 'Identical files :', self.same_files
if self.diff_files:
self.diff_files.sort()
print 'Differing files :', self.diff_files
if self.funny_files:
self.funny_files.sort()
print 'Trouble with common files :', self.funny_files
if self.common_dirs:
self.common_dirs.sort()
print 'Common subdirectories :', self.common_dirs
if self.common_funny:
self.common_funny.sort()
print 'Common funny cases :', self.common_funny
def report_partial_closure(self): # Print reports on self and on subdirs
self.report()
for sd in self.subdirs.itervalues():
print
sd.report()
def report_full_closure(self): # Report on self and subdirs recursively
self.report()
for sd in self.subdirs.itervalues():
print
sd.report_full_closure()
methodmap = dict(subdirs=phase4,
same_files=phase3, diff_files=phase3, funny_files=phase3,
common_dirs = phase2, common_files=phase2, common_funny=phase2,
common=phase1, left_only=phase1, right_only=phase1,
left_list=phase0, right_list=phase0)
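    # Comparison attributes are computed lazily: __getattr__ looks up the
    # phase that produces the requested attribute and runs it on first access.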
def __getattr__(self, attr):
if attr not in self.methodmap:
raise AttributeError, attr
self.methodmap[attr](self)
return getattr(self, attr)
def cmpfiles(a, b, common, shallow=1):
"""Compare common files in two directories.
a, b -- directory names
common -- list of file names found in both directories
shallow -- if true, do comparison based solely on stat() information
Returns a tuple of three lists:
files that compare equal
files that are different
filenames that aren't regular files.
"""
res = ([], [], [])
for x in common:
ax = os.path.join(a, x)
bx = os.path.join(b, x)
res[_cmp(ax, bx, shallow)].append(x)
return res
# Compare two files.
# Return:
# 0 for equal
# 1 for different
# 2 for funny cases (can't stat, etc.)
#
def _cmp(a, b, sh, abs=abs, cmp=cmp):
try:
return not abs(cmp(a, b, sh))
except os.error:
return 2
# Return a copy with items that occur in skip removed.
#
def _filter(flist, skip):
return list(ifilterfalse(skip.__contains__, flist))
# Demonstration and testing.
#
def demo():
import sys
import getopt
options, args = getopt.getopt(sys.argv[1:], 'r')
if len(args) != 2:
raise getopt.GetoptError('need exactly two args', None)
dd = dircmp(args[0], args[1])
if ('-r', '') in options:
dd.report_full_closure()
else:
dd.report()
if __name__ == '__main__':
demo()
|
hill-a/stable-baselines
|
refs/heads/master
|
stable_baselines/sac/sac.py
|
1
|
import time
import warnings
import numpy as np
import tensorflow as tf
from stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter
from stable_baselines.common.vec_env import VecEnv
from stable_baselines.common.math_util import safe_mean, unscale_action, scale_action
from stable_baselines.common.schedules import get_schedule_fn
from stable_baselines.common.buffers import ReplayBuffer
from stable_baselines.sac.policies import SACPolicy
from stable_baselines import logger
class SAC(OffPolicyRLModel):
"""
Soft Actor-Critic (SAC)
    Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor.
    This implementation borrows code from the original implementation (https://github.com/haarnoja/sac),
    from OpenAI Spinning Up (https://github.com/openai/spinningup) and from the Softlearning repo
(https://github.com/rail-berkeley/softlearning/)
Paper: https://arxiv.org/abs/1801.01290
Introduction to SAC: https://spinningup.openai.com/en/latest/algorithms/sac.html
:param policy: (SACPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, LnMlpPolicy, ...)
:param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
:param gamma: (float) the discount factor
:param learning_rate: (float or callable) learning rate for adam optimizer,
        the same learning rate will be used for all networks (Q-Values, Actor and Value function);
        it can be a function of the current progress (from 1 to 0)
:param buffer_size: (int) size of the replay buffer
:param batch_size: (int) Minibatch size for each gradient update
:param tau: (float) the soft update coefficient ("polyak update", between 0 and 1)
:param ent_coef: (str or float) Entropy regularization coefficient. (Equivalent to
        inverse of reward scale in the original SAC paper.) It controls the exploration/exploitation trade-off.
Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value)
:param train_freq: (int) Update the model every `train_freq` steps.
:param learning_starts: (int) how many steps of the model to collect transitions for before learning starts
    :param target_update_interval: (int) update the target network every `target_update_interval` steps.
    :param gradient_steps: (int) How many gradient updates to perform after each step
:param target_entropy: (str or float) target entropy when learning ent_coef (ent_coef = 'auto')
:param action_noise: (ActionNoise) the action noise type (None by default), this can help
        for hard exploration problems. Cf. DDPG for the different action noise types.
:param random_exploration: (float) Probability of taking a random action (as in an epsilon-greedy strategy)
        This is not needed for SAC normally but can help exploration when using HER + SAC.
This hack was present in the original OpenAI Baselines repo (DDPG + HER)
:param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
:param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
:param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
:param full_tensorboard_log: (bool) enable additional logging when using tensorboard
Note: this has no effect on SAC logging for now
:param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).
If None (default), use random seed. Note that if you want completely deterministic
results, you must set `n_cpu_tf_sess` to 1.
:param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations
If None, the number of cpu of the current machine will be used.
"""
def __init__(self, policy, env, gamma=0.99, learning_rate=3e-4, buffer_size=50000,
learning_starts=100, train_freq=1, batch_size=64,
tau=0.005, ent_coef='auto', target_update_interval=1,
gradient_steps=1, target_entropy='auto', action_noise=None,
random_exploration=0.0, verbose=0, tensorboard_log=None,
_init_setup_model=True, policy_kwargs=None, full_tensorboard_log=False,
seed=None, n_cpu_tf_sess=None):
super(SAC, self).__init__(policy=policy, env=env, replay_buffer=None, verbose=verbose,
policy_base=SACPolicy, requires_vec_env=False, policy_kwargs=policy_kwargs,
seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)
self.buffer_size = buffer_size
self.learning_rate = learning_rate
self.learning_starts = learning_starts
self.train_freq = train_freq
self.batch_size = batch_size
self.tau = tau
# In the original paper, same learning rate is used for all networks
# self.policy_lr = learning_rate
# self.qf_lr = learning_rate
# self.vf_lr = learning_rate
# Entropy coefficient / Entropy temperature
# Inverse of the reward scale
self.ent_coef = ent_coef
self.target_update_interval = target_update_interval
self.gradient_steps = gradient_steps
self.gamma = gamma
self.action_noise = action_noise
self.random_exploration = random_exploration
self.value_fn = None
self.graph = None
self.replay_buffer = None
self.sess = None
self.tensorboard_log = tensorboard_log
self.verbose = verbose
self.params = None
self.summary = None
self.policy_tf = None
self.target_entropy = target_entropy
self.full_tensorboard_log = full_tensorboard_log
self.obs_target = None
self.target_policy = None
self.actions_ph = None
self.rewards_ph = None
self.terminals_ph = None
self.observations_ph = None
self.action_target = None
self.next_observations_ph = None
self.value_target = None
self.step_ops = None
self.target_update_op = None
self.infos_names = None
self.entropy = None
self.target_params = None
self.learning_rate_ph = None
self.processed_obs_ph = None
self.processed_next_obs_ph = None
self.log_ent_coef = None
if _init_setup_model:
self.setup_model()
def _get_pretrain_placeholders(self):
policy = self.policy_tf
# Rescale
deterministic_action = unscale_action(self.action_space, self.deterministic_action)
return policy.obs_ph, self.actions_ph, deterministic_action
def setup_model(self):
with SetVerbosity(self.verbose):
self.graph = tf.Graph()
with self.graph.as_default():
self.set_random_seed(self.seed)
self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)
self.replay_buffer = ReplayBuffer(self.buffer_size)
with tf.variable_scope("input", reuse=False):
# Create policy and target TF objects
self.policy_tf = self.policy(self.sess, self.observation_space, self.action_space,
**self.policy_kwargs)
self.target_policy = self.policy(self.sess, self.observation_space, self.action_space,
**self.policy_kwargs)
# Initialize Placeholders
self.observations_ph = self.policy_tf.obs_ph
# Normalized observation for pixels
self.processed_obs_ph = self.policy_tf.processed_obs
self.next_observations_ph = self.target_policy.obs_ph
self.processed_next_obs_ph = self.target_policy.processed_obs
self.action_target = self.target_policy.action_ph
self.terminals_ph = tf.placeholder(tf.float32, shape=(None, 1), name='terminals')
self.rewards_ph = tf.placeholder(tf.float32, shape=(None, 1), name='rewards')
self.actions_ph = tf.placeholder(tf.float32, shape=(None,) + self.action_space.shape,
name='actions')
self.learning_rate_ph = tf.placeholder(tf.float32, [], name="learning_rate_ph")
with tf.variable_scope("model", reuse=False):
# Create the policy
# first return value corresponds to deterministic actions
# policy_out corresponds to stochastic actions, used for training
# logp_pi is the log probability of actions taken by the policy
self.deterministic_action, policy_out, logp_pi = self.policy_tf.make_actor(self.processed_obs_ph)
# Monitor the entropy of the policy,
# this is not used for training
self.entropy = tf.reduce_mean(self.policy_tf.entropy)
# Use two Q-functions to improve performance by reducing overestimation bias.
qf1, qf2, value_fn = self.policy_tf.make_critics(self.processed_obs_ph, self.actions_ph,
create_qf=True, create_vf=True)
qf1_pi, qf2_pi, _ = self.policy_tf.make_critics(self.processed_obs_ph,
policy_out, create_qf=True, create_vf=False,
reuse=True)
# Target entropy is used when learning the entropy coefficient
if self.target_entropy == 'auto':
# automatically set target entropy if needed
self.target_entropy = -np.prod(self.action_space.shape).astype(np.float32)
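                        # e.g. a 3-dimensional action space gives target_entropy = -3.0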
else:
# Force conversion
# this will also throw an error for unexpected string
self.target_entropy = float(self.target_entropy)
# The entropy coefficient or entropy can be learned automatically
# see Automating Entropy Adjustment for Maximum Entropy RL section
# of https://arxiv.org/abs/1812.05905
if isinstance(self.ent_coef, str) and self.ent_coef.startswith('auto'):
# Default initial value of ent_coef when learned
init_value = 1.0
if '_' in self.ent_coef:
init_value = float(self.ent_coef.split('_')[1])
assert init_value > 0., "The initial value of ent_coef must be greater than 0"
self.log_ent_coef = tf.get_variable('log_ent_coef', dtype=tf.float32,
initializer=np.log(init_value).astype(np.float32))
self.ent_coef = tf.exp(self.log_ent_coef)
else:
# Force conversion to float
# this will throw an error if a malformed string (different from 'auto')
# is passed
self.ent_coef = float(self.ent_coef)
with tf.variable_scope("target", reuse=False):
# Create the value network
_, _, value_target = self.target_policy.make_critics(self.processed_next_obs_ph,
create_qf=False, create_vf=True)
self.value_target = value_target
with tf.variable_scope("loss", reuse=False):
# Take the min of the two Q-Values (Double-Q Learning)
min_qf_pi = tf.minimum(qf1_pi, qf2_pi)
# Target for Q value regression
q_backup = tf.stop_gradient(
self.rewards_ph +
(1 - self.terminals_ph) * self.gamma * self.value_target
)
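                    # i.e. y = r + gamma * (1 - done) * V_target(s')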
# Compute Q-Function loss
# TODO: test with huber loss (it would avoid too high values)
qf1_loss = 0.5 * tf.reduce_mean((q_backup - qf1) ** 2)
qf2_loss = 0.5 * tf.reduce_mean((q_backup - qf2) ** 2)
# Compute the entropy temperature loss
# it is used when the entropy coefficient is learned
ent_coef_loss, entropy_optimizer = None, None
if not isinstance(self.ent_coef, float):
ent_coef_loss = -tf.reduce_mean(
self.log_ent_coef * tf.stop_gradient(logp_pi + self.target_entropy))
entropy_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph)
# Compute the policy loss
# Alternative: policy_kl_loss = tf.reduce_mean(logp_pi - min_qf_pi)
policy_kl_loss = tf.reduce_mean(self.ent_coef * logp_pi - qf1_pi)
# NOTE: in the original implementation, they have an additional
# regularization loss for the Gaussian parameters
# this is not used for now
# policy_loss = (policy_kl_loss + policy_regularization_loss)
policy_loss = policy_kl_loss
# Target for value fn regression
# We update the vf towards the min of two Q-functions in order to
# reduce overestimation bias from function approximation error.
v_backup = tf.stop_gradient(min_qf_pi - self.ent_coef * logp_pi)
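                    # i.e. v_target = min(Q1, Q2) - ent_coef * log pi(a|s)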
value_loss = 0.5 * tf.reduce_mean((value_fn - v_backup) ** 2)
values_losses = qf1_loss + qf2_loss + value_loss
# Policy train op
# (has to be separate from value train op, because min_qf_pi appears in policy_loss)
policy_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph)
policy_train_op = policy_optimizer.minimize(policy_loss, var_list=tf_util.get_trainable_vars('model/pi'))
# Value train op
value_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph)
values_params = tf_util.get_trainable_vars('model/values_fn')
source_params = tf_util.get_trainable_vars("model/values_fn")
target_params = tf_util.get_trainable_vars("target/values_fn")
# Polyak averaging for target variables
self.target_update_op = [
tf.assign(target, (1 - self.tau) * target + self.tau * source)
for target, source in zip(target_params, source_params)
]
# Initializing target to match source variables
target_init_op = [
tf.assign(target, source)
for target, source in zip(target_params, source_params)
]
# Control flow is used because sess.run otherwise evaluates in nondeterministic order
# and we first need to compute the policy action before computing q values losses
with tf.control_dependencies([policy_train_op]):
train_values_op = value_optimizer.minimize(values_losses, var_list=values_params)
self.infos_names = ['policy_loss', 'qf1_loss', 'qf2_loss', 'value_loss', 'entropy']
# All ops to call during one training step
self.step_ops = [policy_loss, qf1_loss, qf2_loss,
value_loss, qf1, qf2, value_fn, logp_pi,
self.entropy, policy_train_op, train_values_op]
# Add entropy coefficient optimization operation if needed
if ent_coef_loss is not None:
with tf.control_dependencies([train_values_op]):
ent_coef_op = entropy_optimizer.minimize(ent_coef_loss, var_list=self.log_ent_coef)
self.infos_names += ['ent_coef_loss', 'ent_coef']
self.step_ops += [ent_coef_op, ent_coef_loss, self.ent_coef]
# Monitor losses and entropy in tensorboard
tf.summary.scalar('policy_loss', policy_loss)
tf.summary.scalar('qf1_loss', qf1_loss)
tf.summary.scalar('qf2_loss', qf2_loss)
tf.summary.scalar('value_loss', value_loss)
tf.summary.scalar('entropy', self.entropy)
if ent_coef_loss is not None:
tf.summary.scalar('ent_coef_loss', ent_coef_loss)
tf.summary.scalar('ent_coef', self.ent_coef)
tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate_ph))
# Retrieve parameters that must be saved
self.params = tf_util.get_trainable_vars("model")
self.target_params = tf_util.get_trainable_vars("target/values_fn")
# Initialize Variables and target network
with self.sess.as_default():
self.sess.run(tf.global_variables_initializer())
self.sess.run(target_init_op)
self.summary = tf.summary.merge_all()
def _train_step(self, step, writer, learning_rate):
# Sample a batch from the replay buffer
batch = self.replay_buffer.sample(self.batch_size, env=self._vec_normalize_env)
batch_obs, batch_actions, batch_rewards, batch_next_obs, batch_dones = batch
feed_dict = {
self.observations_ph: batch_obs,
self.actions_ph: batch_actions,
self.next_observations_ph: batch_next_obs,
self.rewards_ph: batch_rewards.reshape(self.batch_size, -1),
self.terminals_ph: batch_dones.reshape(self.batch_size, -1),
self.learning_rate_ph: learning_rate
}
# out = [policy_loss, qf1_loss, qf2_loss,
# value_loss, qf1, qf2, value_fn, logp_pi,
# self.entropy, policy_train_op, train_values_op]
# Do one gradient step
# and optionally compute log for tensorboard
if writer is not None:
out = self.sess.run([self.summary] + self.step_ops, feed_dict)
summary = out.pop(0)
writer.add_summary(summary, step)
else:
out = self.sess.run(self.step_ops, feed_dict)
# Unpack to monitor losses and entropy
policy_loss, qf1_loss, qf2_loss, value_loss, *values = out
# qf1, qf2, value_fn, logp_pi, entropy, *_ = values
entropy = values[4]
if self.log_ent_coef is not None:
ent_coef_loss, ent_coef = values[-2:]
return policy_loss, qf1_loss, qf2_loss, value_loss, entropy, ent_coef_loss, ent_coef
return policy_loss, qf1_loss, qf2_loss, value_loss, entropy
def learn(self, total_timesteps, callback=None,
log_interval=4, tb_log_name="SAC", reset_num_timesteps=True, replay_wrapper=None):
new_tb_log = self._init_num_timesteps(reset_num_timesteps)
callback = self._init_callback(callback)
if replay_wrapper is not None:
self.replay_buffer = replay_wrapper(self.replay_buffer)
with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
as writer:
self._setup_learn()
# Transform to callable if needed
self.learning_rate = get_schedule_fn(self.learning_rate)
# Initial learning rate
current_lr = self.learning_rate(1)
start_time = time.time()
episode_rewards = [0.0]
episode_successes = []
if self.action_noise is not None:
self.action_noise.reset()
obs = self.env.reset()
# Retrieve unnormalized observation for saving into the buffer
if self._vec_normalize_env is not None:
obs_ = self._vec_normalize_env.get_original_obs().squeeze()
n_updates = 0
infos_values = []
callback.on_training_start(locals(), globals())
callback.on_rollout_start()
for step in range(total_timesteps):
# Before training starts, randomly sample actions
# from a uniform distribution for better exploration.
# Afterwards, use the learned policy
# if random_exploration is set to 0 (normal setting)
if self.num_timesteps < self.learning_starts or np.random.rand() < self.random_exploration:
# actions sampled from action space are from range specific to the environment
# but algorithm operates on tanh-squashed actions therefore simple scaling is used
unscaled_action = self.env.action_space.sample()
action = scale_action(self.action_space, unscaled_action)
else:
action = self.policy_tf.step(obs[None], deterministic=False).flatten()
# Add noise to the action (improve exploration,
# not needed in general)
if self.action_noise is not None:
action = np.clip(action + self.action_noise(), -1, 1)
# inferred actions need to be transformed to environment action_space before stepping
unscaled_action = unscale_action(self.action_space, action)
assert action.shape == self.env.action_space.shape
new_obs, reward, done, info = self.env.step(unscaled_action)
self.num_timesteps += 1
# Only stop training if return value is False, not when it is None. This is for backwards
# compatibility with callbacks that have no return statement.
callback.update_locals(locals())
if callback.on_step() is False:
break
# Store only the unnormalized version
if self._vec_normalize_env is not None:
new_obs_ = self._vec_normalize_env.get_original_obs().squeeze()
reward_ = self._vec_normalize_env.get_original_reward().squeeze()
else:
# Avoid changing the original ones
obs_, new_obs_, reward_ = obs, new_obs, reward
# Store transition in the replay buffer.
self.replay_buffer_add(obs_, action, reward_, new_obs_, done, info)
obs = new_obs
# Save the unnormalized observation
if self._vec_normalize_env is not None:
obs_ = new_obs_
# Retrieve reward and episode length if using Monitor wrapper
maybe_ep_info = info.get('episode')
if maybe_ep_info is not None:
self.ep_info_buf.extend([maybe_ep_info])
if writer is not None:
# Write reward per episode to tensorboard
ep_reward = np.array([reward_]).reshape((1, -1))
ep_done = np.array([done]).reshape((1, -1))
tf_util.total_episode_reward_logger(self.episode_reward, ep_reward,
ep_done, writer, self.num_timesteps)
if self.num_timesteps % self.train_freq == 0:
callback.on_rollout_end()
mb_infos_vals = []
# Update policy, critics and target networks
for grad_step in range(self.gradient_steps):
# Break if the warmup phase is not over
# or if there are not enough samples in the replay buffer
if not self.replay_buffer.can_sample(self.batch_size) \
or self.num_timesteps < self.learning_starts:
break
n_updates += 1
# Compute current learning_rate
frac = 1.0 - step / total_timesteps
current_lr = self.learning_rate(frac)
# Update policy and critics (q functions)
mb_infos_vals.append(self._train_step(step, writer, current_lr))
# Update target network
if (step + grad_step) % self.target_update_interval == 0:
# Update target network
self.sess.run(self.target_update_op)
# Log losses and entropy, useful to monitor training
if len(mb_infos_vals) > 0:
infos_values = np.mean(mb_infos_vals, axis=0)
callback.on_rollout_start()
episode_rewards[-1] += reward_
if done:
if self.action_noise is not None:
self.action_noise.reset()
if not isinstance(self.env, VecEnv):
obs = self.env.reset()
episode_rewards.append(0.0)
maybe_is_success = info.get('is_success')
if maybe_is_success is not None:
episode_successes.append(float(maybe_is_success))
if len(episode_rewards[-101:-1]) == 0:
mean_reward = -np.inf
else:
mean_reward = round(float(np.mean(episode_rewards[-101:-1])), 1)
# subtract 1 as we appended a new term just now
num_episodes = len(episode_rewards) - 1
# Display training infos
if self.verbose >= 1 and done and log_interval is not None and num_episodes % log_interval == 0:
fps = int(step / (time.time() - start_time))
logger.logkv("episodes", num_episodes)
logger.logkv("mean 100 episode reward", mean_reward)
if len(self.ep_info_buf) > 0 and len(self.ep_info_buf[0]) > 0:
logger.logkv('ep_rewmean', safe_mean([ep_info['r'] for ep_info in self.ep_info_buf]))
logger.logkv('eplenmean', safe_mean([ep_info['l'] for ep_info in self.ep_info_buf]))
logger.logkv("n_updates", n_updates)
logger.logkv("current_lr", current_lr)
logger.logkv("fps", fps)
logger.logkv('time_elapsed', int(time.time() - start_time))
if len(episode_successes) > 0:
logger.logkv("success rate", np.mean(episode_successes[-100:]))
if len(infos_values) > 0:
for (name, val) in zip(self.infos_names, infos_values):
logger.logkv(name, val)
logger.logkv("total timesteps", self.num_timesteps)
logger.dumpkvs()
# Reset infos:
infos_values = []
callback.on_training_end()
return self
def action_probability(self, observation, state=None, mask=None, actions=None, logp=False):
if actions is not None:
raise ValueError("Error: SAC does not have action probabilities.")
warnings.warn("Even though SAC has a Gaussian policy, it cannot return a distribution as it "
"is squashed by a tanh before being scaled and output.")
return None
def predict(self, observation, state=None, mask=None, deterministic=True):
observation = np.array(observation)
vectorized_env = self._is_vectorized_observation(observation, self.observation_space)
observation = observation.reshape((-1,) + self.observation_space.shape)
actions = self.policy_tf.step(observation, deterministic=deterministic)
actions = actions.reshape((-1,) + self.action_space.shape) # reshape to the correct action shape
actions = unscale_action(self.action_space, actions) # scale the output for the prediction
if not vectorized_env:
actions = actions[0]
return actions, None
def get_parameter_list(self):
return (self.params +
self.target_params)
def save(self, save_path, cloudpickle=False):
data = {
"learning_rate": self.learning_rate,
"buffer_size": self.buffer_size,
"learning_starts": self.learning_starts,
"train_freq": self.train_freq,
"batch_size": self.batch_size,
"tau": self.tau,
"ent_coef": self.ent_coef if isinstance(self.ent_coef, float) else 'auto',
"target_entropy": self.target_entropy,
# Should we also store the replay buffer?
# this may lead to high memory usage
# with all transition inside
# "replay_buffer": self.replay_buffer
"gamma": self.gamma,
"verbose": self.verbose,
"observation_space": self.observation_space,
"action_space": self.action_space,
"policy": self.policy,
"n_envs": self.n_envs,
"n_cpu_tf_sess": self.n_cpu_tf_sess,
"seed": self.seed,
"action_noise": self.action_noise,
"random_exploration": self.random_exploration,
"_vectorize_action": self._vectorize_action,
"policy_kwargs": self.policy_kwargs
}
params_to_save = self.get_parameters()
self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)
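# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): a minimal, hedged example
# of driving the off-policy learner defined above. The class name `SAC`, the
# env id "Pendulum-v0" and the hyperparameters are illustrative assumptions;
# only learn()/save()/predict() are taken from the code above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import gym  # assumption: gym is installed
    env = gym.make("Pendulum-v0")  # assumption: any continuous-action env works
    model = SAC("MlpPolicy", env, verbose=1)  # `SAC` assumed defined above
    model.learn(total_timesteps=10000, log_interval=10)
    model.save("sac_pendulum")  # stores hyperparameters and network parameters
    obs = env.reset()
    action, _states = model.predict(obs, deterministic=True)  # state is None for SAC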
|
g19-hs/personfinder
|
refs/heads/master
|
app/pytz/zoneinfo/Europe/Andorra.py
|
9
|
'''tzinfo timezone information for Europe/Andorra.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Andorra(DstTzInfo):
'''Europe/Andorra timezone definition. See datetime.tzinfo for details'''
zone = 'Europe/Andorra'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1946,9,30,0,0,0),
d(1985,3,31,1,0,0),
d(1985,9,29,1,0,0),
d(1986,3,30,1,0,0),
d(1986,9,28,1,0,0),
d(1987,3,29,1,0,0),
d(1987,9,27,1,0,0),
d(1988,3,27,1,0,0),
d(1988,9,25,1,0,0),
d(1989,3,26,1,0,0),
d(1989,9,24,1,0,0),
d(1990,3,25,1,0,0),
d(1990,9,30,1,0,0),
d(1991,3,31,1,0,0),
d(1991,9,29,1,0,0),
d(1992,3,29,1,0,0),
d(1992,9,27,1,0,0),
d(1993,3,28,1,0,0),
d(1993,9,26,1,0,0),
d(1994,3,27,1,0,0),
d(1994,9,25,1,0,0),
d(1995,3,26,1,0,0),
d(1995,9,24,1,0,0),
d(1996,3,31,1,0,0),
d(1996,10,27,1,0,0),
d(1997,3,30,1,0,0),
d(1997,10,26,1,0,0),
d(1998,3,29,1,0,0),
d(1998,10,25,1,0,0),
d(1999,3,28,1,0,0),
d(1999,10,31,1,0,0),
d(2000,3,26,1,0,0),
d(2000,10,29,1,0,0),
d(2001,3,25,1,0,0),
d(2001,10,28,1,0,0),
d(2002,3,31,1,0,0),
d(2002,10,27,1,0,0),
d(2003,3,30,1,0,0),
d(2003,10,26,1,0,0),
d(2004,3,28,1,0,0),
d(2004,10,31,1,0,0),
d(2005,3,27,1,0,0),
d(2005,10,30,1,0,0),
d(2006,3,26,1,0,0),
d(2006,10,29,1,0,0),
d(2007,3,25,1,0,0),
d(2007,10,28,1,0,0),
d(2008,3,30,1,0,0),
d(2008,10,26,1,0,0),
d(2009,3,29,1,0,0),
d(2009,10,25,1,0,0),
d(2010,3,28,1,0,0),
d(2010,10,31,1,0,0),
d(2011,3,27,1,0,0),
d(2011,10,30,1,0,0),
d(2012,3,25,1,0,0),
d(2012,10,28,1,0,0),
d(2013,3,31,1,0,0),
d(2013,10,27,1,0,0),
d(2014,3,30,1,0,0),
d(2014,10,26,1,0,0),
d(2015,3,29,1,0,0),
d(2015,10,25,1,0,0),
d(2016,3,27,1,0,0),
d(2016,10,30,1,0,0),
d(2017,3,26,1,0,0),
d(2017,10,29,1,0,0),
d(2018,3,25,1,0,0),
d(2018,10,28,1,0,0),
d(2019,3,31,1,0,0),
d(2019,10,27,1,0,0),
d(2020,3,29,1,0,0),
d(2020,10,25,1,0,0),
d(2021,3,28,1,0,0),
d(2021,10,31,1,0,0),
d(2022,3,27,1,0,0),
d(2022,10,30,1,0,0),
d(2023,3,26,1,0,0),
d(2023,10,29,1,0,0),
d(2024,3,31,1,0,0),
d(2024,10,27,1,0,0),
d(2025,3,30,1,0,0),
d(2025,10,26,1,0,0),
d(2026,3,29,1,0,0),
d(2026,10,25,1,0,0),
d(2027,3,28,1,0,0),
d(2027,10,31,1,0,0),
d(2028,3,26,1,0,0),
d(2028,10,29,1,0,0),
d(2029,3,25,1,0,0),
d(2029,10,28,1,0,0),
d(2030,3,31,1,0,0),
d(2030,10,27,1,0,0),
d(2031,3,30,1,0,0),
d(2031,10,26,1,0,0),
d(2032,3,28,1,0,0),
d(2032,10,31,1,0,0),
d(2033,3,27,1,0,0),
d(2033,10,30,1,0,0),
d(2034,3,26,1,0,0),
d(2034,10,29,1,0,0),
d(2035,3,25,1,0,0),
d(2035,10,28,1,0,0),
d(2036,3,30,1,0,0),
d(2036,10,26,1,0,0),
d(2037,3,29,1,0,0),
d(2037,10,25,1,0,0),
]
_transition_info = [
i(0,0,'WET'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
]
Andorra = Andorra()
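# ---------------------------------------------------------------------------
# Editor's sketch (illustrative, not part of the original file): attaching
# this zone to a naive datetime via pytz's standard `localize` API. The date
# chosen is an assumption; any date covered by the transition table works.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from datetime import datetime
    dt = Andorra.localize(datetime(2020, 7, 1, 12, 0))
    print(dt.isoformat(), dt.tzname())  # 2020-07-01T12:00:00+02:00 CEST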
|
dlorenc/runtimes-common
|
refs/heads/master
|
ftl/common/ftl_error.py
|
3
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import logging
import hashlib
from ftl.common import constants
class FTLErrors():
@classmethod
def USER(cls):
return "USER"
@classmethod
def INTERNAL(cls):
return "INTERNAL"
class UserError(Exception):
def __init__(self, message):
super(UserError, self).__init__(message)
class InternalError(Exception):
def __init__(self, message):
super(InternalError, self).__init__(message)
def genErrorId(s):
# Encode to bytes so hashing also works under Python 3
return hashlib.sha256(s.encode('utf-8')).hexdigest().upper()[:8]
def UserErrorHandler(err, path, fail_on_error):
logging.error(err)
if path:
resp = {
"error": {
"errorType": constants.FTL_ERROR_TYPE,
"canonicalCode": constants.FTL_USER_ERROR,
"errorId": genErrorId(str(err)),
"errorMessage": str(err)
}
}
with open(os.path.join(path, constants.BUILDER_OUTPUT_FILE), "w") as f:
f.write(json.dumps(resp))
if fail_on_error:
exit(1)
else:
exit(0)
def InternalErrorHandler(err, path, fail_on_error):
logging.error(err)
if path:
resp = {
"error": {
"errorType": constants.FTL_ERROR_TYPE,
"canonicalCode": constants.FTL_INTERNAL_ERROR,
"errorId": genErrorId(str(err)),
"errorMessage": str(err)
}
}
with open(os.path.join(path, constants.BUILDER_OUTPUT_FILE), "w") as f:
f.write(json.dumps(resp))
if fail_on_error:
exit(1)
else:
exit(0)
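# ---------------------------------------------------------------------------
# Editor's sketch (illustrative, not part of the original file): routing a
# caught exception through the handlers above. The message is an assumption;
# with a real directory for `path`, a JSON error payload is also written to
# <path>/constants.BUILDER_OUTPUT_FILE before exiting.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    try:
        raise UserError("requirements.txt could not be parsed")
    except UserError as err:
        UserErrorHandler(err, path=None, fail_on_error=True)  # exits with code 1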
|
plotly/plotly.py
|
refs/heads/master
|
packages/python/plotly/plotly/graph_objs/scattergeo/unselected/_textfont.py
|
2
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scattergeo.unselected"
_path_str = "scattergeo.unselected.textfont"
_valid_props = {"color"}
# color
# -----
@property
def color(self):
"""
Sets the text font color of unselected points, applied only
when a selection exists.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the text font color of unselected points, applied
only when a selection exists.
"""
def __init__(self, arg=None, color=None, **kwargs):
"""
Construct a new Textfont object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scattergeo.uns
elected.Textfont`
color
Sets the text font color of unselected points, applied
only when a selection exists.
Returns
-------
Textfont
"""
super(Textfont, self).__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.unselected.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergeo.unselected.Textfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
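# ---------------------------------------------------------------------------
# Editor's sketch (illustrative, not part of the original file): the two
# equivalent construction forms accepted by the validation logic above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    tf_kw = Textfont(color="#636efa")                   # keyword form
    tf_dict = Textfont({"color": "rgb(99, 110, 250)"})  # dict form
    assert tf_kw.color == "#636efa"
    assert tf_dict.color == "rgb(99, 110, 250)"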
|
rajadhva/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/html5lib/html5lib/html5parser.py
|
423
|
from __future__ import absolute_import, division, unicode_literals
from six import with_metaclass
import types
from . import inputstream
from . import tokenizer
from . import treebuilders
from .treebuilders._base import Marker
from . import utils
from . import constants
from .constants import spaceCharacters, asciiUpper2Lower
from .constants import specialElements
from .constants import headingElements
from .constants import cdataElements, rcdataElements
from .constants import tokenTypes, ReparseException, namespaces
from .constants import htmlIntegrationPointElements, mathmlTextIntegrationPointElements
from .constants import adjustForeignAttributes as adjustForeignAttributesMap
def parse(doc, treebuilder="etree", encoding=None,
namespaceHTMLElements=True):
"""Parse a string or file-like object into a tree"""
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parse(doc, encoding=encoding)
def parseFragment(doc, container="div", treebuilder="etree", encoding=None,
namespaceHTMLElements=True):
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parseFragment(doc, container=container, encoding=encoding)
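# Editor's sketch (kept as comments so nothing executes before HTMLParser is
# defined below): typical use of the two module-level helpers above. The
# markup strings are assumptions; "etree" is the default tree builder.
#
#     tree = parse("<p>Hello <b>world</b>!</p>")
#     frag = parseFragment("<b>bold</b> tail text", container="div")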
def method_decorator_metaclass(function):
class Decorated(type):
def __new__(meta, classname, bases, classDict):
for attributeName, attribute in classDict.items():
if isinstance(attribute, types.FunctionType):
attribute = function(attribute)
classDict[attributeName] = attribute
return type.__new__(meta, classname, bases, classDict)
return Decorated
class HTMLParser(object):
"""HTML parser. Generates a tree structure from a stream of (possibly
malformed) HTML"""
def __init__(self, tree=None, tokenizer=tokenizer.HTMLTokenizer,
strict=False, namespaceHTMLElements=True, debug=False):
"""
strict - raise an exception when a parse error is encountered
tree - a treebuilder class controlling the type of tree that will be
returned. Built in treebuilders can be accessed through
html5lib.treebuilders.getTreeBuilder(treeType)
tokenizer - a class that provides a stream of tokens to the treebuilder.
This may be replaced, for example, by a sanitizer which converts some tags to
text
"""
# Raise an exception on the first error encountered
self.strict = strict
if tree is None:
tree = treebuilders.getTreeBuilder("etree")
self.tree = tree(namespaceHTMLElements)
self.tokenizer_class = tokenizer
self.errors = []
self.phases = dict([(name, cls(self, self.tree)) for name, cls in
getPhases(debug).items()])
def _parse(self, stream, innerHTML=False, container="div",
encoding=None, parseMeta=True, useChardet=True, **kwargs):
self.innerHTMLMode = innerHTML
self.container = container
self.tokenizer = self.tokenizer_class(stream, encoding=encoding,
parseMeta=parseMeta,
useChardet=useChardet,
parser=self, **kwargs)
self.reset()
while True:
try:
self.mainLoop()
break
except ReparseException:
self.reset()
def reset(self):
self.tree.reset()
self.firstStartTag = False
self.errors = []
self.log = [] # only used with debug mode
# "quirks" / "limited quirks" / "no quirks"
self.compatMode = "no quirks"
if self.innerHTMLMode:
self.innerHTML = self.container.lower()
if self.innerHTML in cdataElements:
self.tokenizer.state = self.tokenizer.rcdataState
elif self.innerHTML in rcdataElements:
self.tokenizer.state = self.tokenizer.rawtextState
elif self.innerHTML == 'plaintext':
self.tokenizer.state = self.tokenizer.plaintextState
else:
# state already is data state
# self.tokenizer.state = self.tokenizer.dataState
pass
self.phase = self.phases["beforeHtml"]
self.phase.insertHtmlElement()
self.resetInsertionMode()
else:
self.innerHTML = False
self.phase = self.phases["initial"]
self.lastPhase = None
self.beforeRCDataPhase = None
self.framesetOK = True
@property
def documentEncoding(self):
"""The name of the character encoding
that was used to decode the input stream,
or :obj:`None` if that is not determined yet.
"""
if not hasattr(self, 'tokenizer'):
return None
return self.tokenizer.stream.charEncoding[0]
def isHTMLIntegrationPoint(self, element):
if (element.name == "annotation-xml" and
element.namespace == namespaces["mathml"]):
return ("encoding" in element.attributes and
element.attributes["encoding"].translate(
asciiUpper2Lower) in
("text/html", "application/xhtml+xml"))
else:
return (element.namespace, element.name) in htmlIntegrationPointElements
def isMathMLTextIntegrationPoint(self, element):
return (element.namespace, element.name) in mathmlTextIntegrationPointElements
def mainLoop(self):
CharactersToken = tokenTypes["Characters"]
SpaceCharactersToken = tokenTypes["SpaceCharacters"]
StartTagToken = tokenTypes["StartTag"]
EndTagToken = tokenTypes["EndTag"]
CommentToken = tokenTypes["Comment"]
DoctypeToken = tokenTypes["Doctype"]
ParseErrorToken = tokenTypes["ParseError"]
for token in self.normalizedTokens():
new_token = token
while new_token is not None:
currentNode = self.tree.openElements[-1] if self.tree.openElements else None
currentNodeNamespace = currentNode.namespace if currentNode else None
currentNodeName = currentNode.name if currentNode else None
type = new_token["type"]
if type == ParseErrorToken:
self.parseError(new_token["data"], new_token.get("datavars", {}))
new_token = None
else:
if (len(self.tree.openElements) == 0 or
currentNodeNamespace == self.tree.defaultNamespace or
(self.isMathMLTextIntegrationPoint(currentNode) and
((type == StartTagToken and
token["name"] not in frozenset(["mglyph", "malignmark"])) or
type in (CharactersToken, SpaceCharactersToken))) or
(currentNodeNamespace == namespaces["mathml"] and
currentNodeName == "annotation-xml" and
token["name"] == "svg") or
(self.isHTMLIntegrationPoint(currentNode) and
type in (StartTagToken, CharactersToken, SpaceCharactersToken))):
phase = self.phase
else:
phase = self.phases["inForeignContent"]
if type == CharactersToken:
new_token = phase.processCharacters(new_token)
elif type == SpaceCharactersToken:
new_token = phase.processSpaceCharacters(new_token)
elif type == StartTagToken:
new_token = phase.processStartTag(new_token)
elif type == EndTagToken:
new_token = phase.processEndTag(new_token)
elif type == CommentToken:
new_token = phase.processComment(new_token)
elif type == DoctypeToken:
new_token = phase.processDoctype(new_token)
if (type == StartTagToken and token["selfClosing"]
and not token["selfClosingAcknowledged"]):
self.parseError("non-void-element-with-trailing-solidus",
{"name": token["name"]})
# When the loop finishes it's EOF
reprocess = True
phases = []
while reprocess:
phases.append(self.phase)
reprocess = self.phase.processEOF()
if reprocess:
assert self.phase not in phases
def normalizedTokens(self):
for token in self.tokenizer:
yield self.normalizeToken(token)
def parse(self, stream, encoding=None, parseMeta=True, useChardet=True):
"""Parse a HTML document into a well-formed tree
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, innerHTML=False, encoding=encoding,
parseMeta=parseMeta, useChardet=useChardet)
return self.tree.getDocument()
def parseFragment(self, stream, container="div", encoding=None,
parseMeta=False, useChardet=True):
"""Parse a HTML fragment into a well-formed tree fragment
container - name of the element we're setting the innerHTML property
if set to None, default to 'div'
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, True, container=container, encoding=encoding)
return self.tree.getFragment()
def parseError(self, errorcode="XXX-undefined-error", datavars=None):
# XXX The idea is to make errorcode mandatory.
datavars = {} if datavars is None else datavars  # avoid a shared mutable default
self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
if self.strict:
raise ParseError
def normalizeToken(self, token):
""" HTML5 specific normalizations to the token stream """
if token["type"] == tokenTypes["StartTag"]:
token["data"] = dict(token["data"][::-1])
return token
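# Editor's note (added): reversing the attribute list before dict() keeps the
# *first* of any duplicated attributes, as the spec requires - e.g. a token
# with data [("id", "a"), ("id", "b")] normalizes to {"id": "a"}.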
def adjustMathMLAttributes(self, token):
replacements = {"definitionurl": "definitionURL"}
for k, v in replacements.items():
if k in token["data"]:
token["data"][v] = token["data"][k]
del token["data"][k]
def adjustSVGAttributes(self, token):
replacements = {
"attributename": "attributeName",
"attributetype": "attributeType",
"basefrequency": "baseFrequency",
"baseprofile": "baseProfile",
"calcmode": "calcMode",
"clippathunits": "clipPathUnits",
"contentscripttype": "contentScriptType",
"contentstyletype": "contentStyleType",
"diffuseconstant": "diffuseConstant",
"edgemode": "edgeMode",
"externalresourcesrequired": "externalResourcesRequired",
"filterres": "filterRes",
"filterunits": "filterUnits",
"glyphref": "glyphRef",
"gradienttransform": "gradientTransform",
"gradientunits": "gradientUnits",
"kernelmatrix": "kernelMatrix",
"kernelunitlength": "kernelUnitLength",
"keypoints": "keyPoints",
"keysplines": "keySplines",
"keytimes": "keyTimes",
"lengthadjust": "lengthAdjust",
"limitingconeangle": "limitingConeAngle",
"markerheight": "markerHeight",
"markerunits": "markerUnits",
"markerwidth": "markerWidth",
"maskcontentunits": "maskContentUnits",
"maskunits": "maskUnits",
"numoctaves": "numOctaves",
"pathlength": "pathLength",
"patterncontentunits": "patternContentUnits",
"patterntransform": "patternTransform",
"patternunits": "patternUnits",
"pointsatx": "pointsAtX",
"pointsaty": "pointsAtY",
"pointsatz": "pointsAtZ",
"preservealpha": "preserveAlpha",
"preserveaspectratio": "preserveAspectRatio",
"primitiveunits": "primitiveUnits",
"refx": "refX",
"refy": "refY",
"repeatcount": "repeatCount",
"repeatdur": "repeatDur",
"requiredextensions": "requiredExtensions",
"requiredfeatures": "requiredFeatures",
"specularconstant": "specularConstant",
"specularexponent": "specularExponent",
"spreadmethod": "spreadMethod",
"startoffset": "startOffset",
"stddeviation": "stdDeviation",
"stitchtiles": "stitchTiles",
"surfacescale": "surfaceScale",
"systemlanguage": "systemLanguage",
"tablevalues": "tableValues",
"targetx": "targetX",
"targety": "targetY",
"textlength": "textLength",
"viewbox": "viewBox",
"viewtarget": "viewTarget",
"xchannelselector": "xChannelSelector",
"ychannelselector": "yChannelSelector",
"zoomandpan": "zoomAndPan"
}
for originalName in list(token["data"].keys()):
if originalName in replacements:
svgName = replacements[originalName]
token["data"][svgName] = token["data"][originalName]
del token["data"][originalName]
def adjustForeignAttributes(self, token):
replacements = adjustForeignAttributesMap
for originalName in token["data"].keys():
if originalName in replacements:
foreignName = replacements[originalName]
token["data"][foreignName] = token["data"][originalName]
del token["data"][originalName]
def reparseTokenNormal(self, token):
self.parser.phase()
def resetInsertionMode(self):
# The name of this method is mostly historical. (It's also used in the
# specification.)
last = False
newModes = {
"select": "inSelect",
"td": "inCell",
"th": "inCell",
"tr": "inRow",
"tbody": "inTableBody",
"thead": "inTableBody",
"tfoot": "inTableBody",
"caption": "inCaption",
"colgroup": "inColumnGroup",
"table": "inTable",
"head": "inBody",
"body": "inBody",
"frameset": "inFrameset",
"html": "beforeHead"
}
for node in self.tree.openElements[::-1]:
nodeName = node.name
new_phase = None
if node == self.tree.openElements[0]:
assert self.innerHTML
last = True
nodeName = self.innerHTML
# Check for conditions that should only happen in the innerHTML
# case
if nodeName in ("select", "colgroup", "head", "html"):
assert self.innerHTML
if not last and node.namespace != self.tree.defaultNamespace:
continue
if nodeName in newModes:
new_phase = self.phases[newModes[nodeName]]
break
elif last:
new_phase = self.phases["inBody"]
break
self.phase = new_phase
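# Editor's note (added): worked example - with open elements
# <html> <body> <table> <tbody> <tr>, the reverse scan above stops at "tr"
# and selects the "inRow" phase; in innerHTML mode the outermost node falls
# back to the container name instead.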
def parseRCDataRawtext(self, token, contentType):
"""Generic RCDATA/RAWTEXT Parsing algorithm
contentType - RCDATA or RAWTEXT
"""
assert contentType in ("RAWTEXT", "RCDATA")
self.tree.insertElement(token)
if contentType == "RAWTEXT":
self.tokenizer.state = self.tokenizer.rawtextState
else:
self.tokenizer.state = self.tokenizer.rcdataState
self.originalPhase = self.phase
self.phase = self.phases["text"]
def getPhases(debug):
def log(function):
"""Logger that records which phase processes each token"""
type_names = dict((value, key) for key, value in
constants.tokenTypes.items())
def wrapped(self, *args, **kwargs):
if function.__name__.startswith("process") and len(args) > 0:
token = args[0]
info = {"type": type_names[token['type']]}
if token['type'] in constants.tagTokenTypes:
info["name"] = token['name']
self.parser.log.append((self.parser.tokenizer.state.__name__,
self.parser.phase.__class__.__name__,
self.__class__.__name__,
function.__name__,
info))
return function(self, *args, **kwargs)
else:
return function(self, *args, **kwargs)
return wrapped
def getMetaclass(use_metaclass, metaclass_func):
if use_metaclass:
return method_decorator_metaclass(metaclass_func)
else:
return type
class Phase(with_metaclass(getMetaclass(debug, log))):
"""Base class for helper object that implements each phase of processing
"""
def __init__(self, parser, tree):
self.parser = parser
self.tree = tree
def processEOF(self):
raise NotImplementedError
def processComment(self, token):
# For most phases the following is correct. Where it's not it will be
# overridden.
self.tree.insertComment(token, self.tree.openElements[-1])
def processDoctype(self, token):
self.parser.parseError("unexpected-doctype")
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processSpaceCharacters(self, token):
self.tree.insertText(token["data"])
def processStartTag(self, token):
return self.startTagHandler[token["name"]](token)
def startTagHtml(self, token):
if not self.parser.firstStartTag and token["name"] == "html":
self.parser.parseError("non-html-root")
# XXX Need a check here to see if the first start tag token emitted is
# this token... If it's not, invoke self.parser.parseError().
for attr, value in token["data"].items():
if attr not in self.tree.openElements[0].attributes:
self.tree.openElements[0].attributes[attr] = value
self.parser.firstStartTag = False
def processEndTag(self, token):
return self.endTagHandler[token["name"]](token)
class InitialPhase(Phase):
def processSpaceCharacters(self, token):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
correct = token["correct"]
if (name != "html" or publicId is not None or
systemId is not None and systemId != "about:legacy-compat"):
self.parser.parseError("unknown-doctype")
if publicId is None:
publicId = ""
self.tree.insertDoctype(token)
if publicId != "":
publicId = publicId.translate(asciiUpper2Lower)
if (not correct or token["name"] != "html"
or publicId.startswith(
("+//silmaril//dtd html pro v0r11 19970101//",
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
"-//as//dtd html 3.0 aswedit + extensions//",
"-//ietf//dtd html 2.0 level 1//",
"-//ietf//dtd html 2.0 level 2//",
"-//ietf//dtd html 2.0 strict level 1//",
"-//ietf//dtd html 2.0 strict level 2//",
"-//ietf//dtd html 2.0 strict//",
"-//ietf//dtd html 2.0//",
"-//ietf//dtd html 2.1e//",
"-//ietf//dtd html 3.0//",
"-//ietf//dtd html 3.2 final//",
"-//ietf//dtd html 3.2//",
"-//ietf//dtd html 3//",
"-//ietf//dtd html level 0//",
"-//ietf//dtd html level 1//",
"-//ietf//dtd html level 2//",
"-//ietf//dtd html level 3//",
"-//ietf//dtd html strict level 0//",
"-//ietf//dtd html strict level 1//",
"-//ietf//dtd html strict level 2//",
"-//ietf//dtd html strict level 3//",
"-//ietf//dtd html strict//",
"-//ietf//dtd html//",
"-//metrius//dtd metrius presentational//",
"-//microsoft//dtd internet explorer 2.0 html strict//",
"-//microsoft//dtd internet explorer 2.0 html//",
"-//microsoft//dtd internet explorer 2.0 tables//",
"-//microsoft//dtd internet explorer 3.0 html strict//",
"-//microsoft//dtd internet explorer 3.0 html//",
"-//microsoft//dtd internet explorer 3.0 tables//",
"-//netscape comm. corp.//dtd html//",
"-//netscape comm. corp.//dtd strict html//",
"-//o'reilly and associates//dtd html 2.0//",
"-//o'reilly and associates//dtd html extended 1.0//",
"-//o'reilly and associates//dtd html extended relaxed 1.0//",
"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
"-//spyglass//dtd html 2.0 extended//",
"-//sq//dtd html 2.0 hotmetal + extensions//",
"-//sun microsystems corp.//dtd hotjava html//",
"-//sun microsystems corp.//dtd hotjava strict html//",
"-//w3c//dtd html 3 1995-03-24//",
"-//w3c//dtd html 3.2 draft//",
"-//w3c//dtd html 3.2 final//",
"-//w3c//dtd html 3.2//",
"-//w3c//dtd html 3.2s draft//",
"-//w3c//dtd html 4.0 frameset//",
"-//w3c//dtd html 4.0 transitional//",
"-//w3c//dtd html experimental 19960712//",
"-//w3c//dtd html experimental 970421//",
"-//w3c//dtd w3 html//",
"-//w3o//dtd w3 html 3.0//",
"-//webtechs//dtd mozilla html 2.0//",
"-//webtechs//dtd mozilla html//"))
or publicId in
("-//w3o//dtd w3 html strict 3.0//en//",
"-/w3c/dtd html 4.0 transitional/en",
"html")
or publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is None
or systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
self.parser.compatMode = "quirks"
elif (publicId.startswith(
("-//w3c//dtd xhtml 1.0 frameset//",
"-//w3c//dtd xhtml 1.0 transitional//"))
or publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is not None):
self.parser.compatMode = "limited quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
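# Editor's note (added): e.g. '<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01
# Transitional//EN">' with no system id selects full "quirks" mode above,
# while the same public id plus a system id yields "limited quirks".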
def anythingElse(self):
self.parser.compatMode = "quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def processCharacters(self, token):
self.parser.parseError("expected-doctype-but-got-chars")
self.anythingElse()
return token
def processStartTag(self, token):
self.parser.parseError("expected-doctype-but-got-start-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEndTag(self, token):
self.parser.parseError("expected-doctype-but-got-end-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEOF(self):
self.parser.parseError("expected-doctype-but-got-eof")
self.anythingElse()
return True
class BeforeHtmlPhase(Phase):
# helper methods
def insertHtmlElement(self):
self.tree.insertRoot(impliedTagToken("html", "StartTag"))
self.parser.phase = self.parser.phases["beforeHead"]
# other
def processEOF(self):
self.insertHtmlElement()
return True
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.insertHtmlElement()
return token
def processStartTag(self, token):
if token["name"] == "html":
self.parser.firstStartTag = True
self.insertHtmlElement()
return token
def processEndTag(self, token):
if token["name"] not in ("head", "body", "html", "br"):
self.parser.parseError("unexpected-end-tag-before-html",
{"name": token["name"]})
else:
self.insertHtmlElement()
return token
class BeforeHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("head", "body", "html", "br"), self.endTagImplyHead)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.startTagHead(impliedTagToken("head", "StartTag"))
return True
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.tree.insertElement(token)
self.tree.headPointer = self.tree.openElements[-1]
self.parser.phase = self.parser.phases["inHead"]
def startTagOther(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagImplyHead(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagOther(self, token):
self.parser.parseError("end-tag-after-implied-root",
{"name": token["name"]})
class InHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("title", self.startTagTitle),
(("noscript", "noframes", "style"), self.startTagNoScriptNoFramesStyle),
("script", self.startTagScript),
(("base", "basefont", "bgsound", "command", "link"),
self.startTagBaseLinkCommand),
("meta", self.startTagMeta),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("head", self.endTagHead),
(("br", "html", "body"), self.endTagHtmlBodyBr)
])
self.endTagHandler.default = self.endTagOther
# the real thing
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.parser.parseError("two-heads-are-not-better-than-one")
def startTagBaseLinkCommand(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMeta(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
attributes = token["data"]
if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
if "charset" in attributes:
self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
elif ("content" in attributes and
"http-equiv" in attributes and
attributes["http-equiv"].lower() == "content-type"):
# Encoding it as UTF-8 here is a hack, as really we should pass
# the abstract Unicode string, and just use the
# ContentAttrParser on that, but using UTF-8 allows all chars
# to be encoded and, as an ASCII superset, works.
data = inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
parser = inputstream.ContentAttrParser(data)
codec = parser.parse()
self.parser.tokenizer.stream.changeEncoding(codec)
def startTagTitle(self, token):
self.parser.parseRCDataRawtext(token, "RCDATA")
def startTagNoScriptNoFramesStyle(self, token):
# Need to decide whether to implement the scripting-disabled case
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagScript(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
self.parser.originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["text"]
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHead(self, token):
node = self.parser.tree.openElements.pop()
assert node.name == "head", "Expected head got %s" % node.name
self.parser.phase = self.parser.phases["afterHead"]
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.endTagHead(impliedTagToken("head"))
# XXX If we implement a parser for which scripting is disabled we need to
# implement this phase.
#
# class InHeadNoScriptPhase(Phase):
class AfterHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
"style", "title"),
self.startTagFromHead),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([(("body", "html", "br"),
self.endTagHtmlBodyBr)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagBody(self, token):
self.parser.framesetOK = False
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inBody"]
def startTagFrameset(self, token):
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagFromHead(self, token):
self.parser.parseError("unexpected-start-tag-out-of-my-head",
{"name": token["name"]})
self.tree.openElements.append(self.tree.headPointer)
self.parser.phases["inHead"].processStartTag(token)
for node in self.tree.openElements[::-1]:
if node.name == "head":
self.tree.openElements.remove(node)
break
def startTagHead(self, token):
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.tree.insertElement(impliedTagToken("body", "StartTag"))
self.parser.phase = self.parser.phases["inBody"]
self.parser.framesetOK = True
class InBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
# the really-really-really-very crazy mode
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
# Keep a ref to this for special handling of whitespace in <pre>
self.processSpaceCharactersNonPre = self.processSpaceCharacters
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("base", "basefont", "bgsound", "command", "link", "meta",
"noframes", "script", "style", "title"),
self.startTagProcessInHead),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("address", "article", "aside", "blockquote", "center", "details",
"details", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
"section", "summary", "ul"),
self.startTagCloseP),
(headingElements, self.startTagHeading),
(("pre", "listing"), self.startTagPreListing),
("form", self.startTagForm),
(("li", "dd", "dt"), self.startTagListItem),
("plaintext", self.startTagPlaintext),
("a", self.startTagA),
(("b", "big", "code", "em", "font", "i", "s", "small", "strike",
"strong", "tt", "u"), self.startTagFormatting),
("nobr", self.startTagNobr),
("button", self.startTagButton),
(("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
("xmp", self.startTagXmp),
("table", self.startTagTable),
(("area", "br", "embed", "img", "keygen", "wbr"),
self.startTagVoidFormatting),
(("param", "source", "track"), self.startTagParamSource),
("input", self.startTagInput),
("hr", self.startTagHr),
("image", self.startTagImage),
("isindex", self.startTagIsIndex),
("textarea", self.startTagTextarea),
("iframe", self.startTagIFrame),
(("noembed", "noframes", "noscript"), self.startTagRawtext),
("select", self.startTagSelect),
(("rp", "rt"), self.startTagRpRt),
(("option", "optgroup"), self.startTagOpt),
(("math"), self.startTagMath),
(("svg"), self.startTagSvg),
(("caption", "col", "colgroup", "frame", "head",
"tbody", "td", "tfoot", "th", "thead",
"tr"), self.startTagMisplaced)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("body", self.endTagBody),
("html", self.endTagHtml),
(("address", "article", "aside", "blockquote", "button", "center",
"details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
"section", "summary", "ul"), self.endTagBlock),
("form", self.endTagForm),
("p", self.endTagP),
(("dd", "dt", "li"), self.endTagListItem),
(headingElements, self.endTagHeading),
(("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
"strike", "strong", "tt", "u"), self.endTagFormatting),
(("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
("br", self.endTagBr),
])
self.endTagHandler.default = self.endTagOther
def isMatchingFormattingElement(self, node1, node2):
if node1.name != node2.name or node1.namespace != node2.namespace:
return False
elif len(node1.attributes) != len(node2.attributes):
return False
else:
attributes1 = sorted(node1.attributes.items())
attributes2 = sorted(node2.attributes.items())
for attr1, attr2 in zip(attributes1, attributes2):
if attr1 != attr2:
return False
return True
# helper
def addFormattingElement(self, token):
self.tree.insertElement(token)
element = self.tree.openElements[-1]
matchingElements = []
for node in self.tree.activeFormattingElements[::-1]:
if node is Marker:
break
elif self.isMatchingFormattingElement(node, element):
matchingElements.append(node)
assert len(matchingElements) <= 3
if len(matchingElements) == 3:
self.tree.activeFormattingElements.remove(matchingElements[-1])
self.tree.activeFormattingElements.append(element)
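# Editor's note (added): the cap of three above implements the spec's
# "Noah's Ark" clause - a fourth identical formatting element (e.g. a fourth
# <b> with the same attributes) evicts the oldest matching entry first.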
# the real deal
def processEOF(self):
allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
"tfoot", "th", "thead", "tr", "body",
"html"))
for node in self.tree.openElements[::-1]:
if node.name not in allowed_elements:
self.parser.parseError("expected-closing-tag-but-got-eof")
break
# Stop parsing
def processSpaceCharactersDropNewline(self, token):
# Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
# want to drop leading newlines
data = token["data"]
self.processSpaceCharacters = self.processSpaceCharactersNonPre
if (data.startswith("\n") and
self.tree.openElements[-1].name in ("pre", "listing", "textarea")
and not self.tree.openElements[-1].hasContent()):
data = data[1:]
if data:
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(data)
def processCharacters(self, token):
if token["data"] == "\u0000":
# The tokenizer should always emit null on its own
return
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
# This must be bad for performance
if (self.parser.framesetOK and
any([char not in spaceCharacters
for char in token["data"]])):
self.parser.framesetOK = False
def processSpaceCharacters(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
def startTagProcessInHead(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagBody(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "body"})
if (len(self.tree.openElements) == 1
or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
else:
self.parser.framesetOK = False
for attr, value in token["data"].items():
if attr not in self.tree.openElements[1].attributes:
self.tree.openElements[1].attributes[attr] = value
def startTagFrameset(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
elif not self.parser.framesetOK:
pass
else:
if self.tree.openElements[1].parent:
self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
while self.tree.openElements[-1].name != "html":
self.tree.openElements.pop()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagCloseP(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
def startTagPreListing(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
def startTagForm(self, token):
if self.tree.formPointer:
self.parser.parseError("unexpected-start-tag", {"name": "form"})
else:
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
def startTagListItem(self, token):
self.parser.framesetOK = False
stopNamesMap = {"li": ["li"],
"dt": ["dt", "dd"],
"dd": ["dt", "dd"]}
stopNames = stopNamesMap[token["name"]]
for node in reversed(self.tree.openElements):
if node.name in stopNames:
self.parser.phase.processEndTag(
impliedTagToken(node.name, "EndTag"))
break
if (node.nameTuple in specialElements and
node.name not in ("address", "div", "p")):
break
if self.tree.elementInScope("p", variant="button"):
self.parser.phase.processEndTag(
impliedTagToken("p", "EndTag"))
self.tree.insertElement(token)
def startTagPlaintext(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
def startTagHeading(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
if self.tree.openElements[-1].name in headingElements:
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagA(self, token):
afeAElement = self.tree.elementInActiveFormattingElements("a")
if afeAElement:
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "a", "endName": "a"})
self.endTagFormatting(impliedTagToken("a"))
if afeAElement in self.tree.openElements:
self.tree.openElements.remove(afeAElement)
if afeAElement in self.tree.activeFormattingElements:
self.tree.activeFormattingElements.remove(afeAElement)
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagNobr(self, token):
self.tree.reconstructActiveFormattingElements()
if self.tree.elementInScope("nobr"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "nobr", "endName": "nobr"})
self.processEndTag(impliedTagToken("nobr"))
# XXX Need tests that trigger the following
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagButton(self, token):
if self.tree.elementInScope("button"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "button", "endName": "button"})
self.processEndTag(impliedTagToken("button"))
return token
else:
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
def startTagAppletMarqueeObject(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.activeFormattingElements.append(Marker)
self.parser.framesetOK = False
def startTagXmp(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.reconstructActiveFormattingElements()
self.parser.framesetOK = False
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagTable(self, token):
if self.parser.compatMode != "quirks":
if self.tree.elementInScope("p", variant="button"):
self.processEndTag(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.parser.phase = self.parser.phases["inTable"]
def startTagVoidFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagInput(self, token):
framesetOK = self.parser.framesetOK
self.startTagVoidFormatting(token)
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
# input type=hidden doesn't change framesetOK
self.parser.framesetOK = framesetOK
def startTagParamSource(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagHr(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagImage(self, token):
# No really...
self.parser.parseError("unexpected-start-tag-treated-as",
{"originalName": "image", "newName": "img"})
self.processStartTag(impliedTagToken("img", "StartTag",
attributes=token["data"],
selfClosing=token["selfClosing"]))
def startTagIsIndex(self, token):
self.parser.parseError("deprecated-tag", {"name": "isindex"})
if self.tree.formPointer:
return
form_attrs = {}
if "action" in token["data"]:
form_attrs["action"] = token["data"]["action"]
self.processStartTag(impliedTagToken("form", "StartTag",
attributes=form_attrs))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processStartTag(impliedTagToken("label", "StartTag"))
# XXX Localization ...
if "prompt" in token["data"]:
prompt = token["data"]["prompt"]
else:
prompt = "This is a searchable index. Enter search keywords: "
self.processCharacters(
{"type": tokenTypes["Characters"], "data": prompt})
attributes = token["data"].copy()
if "action" in attributes:
del attributes["action"]
if "prompt" in attributes:
del attributes["prompt"]
attributes["name"] = "isindex"
self.processStartTag(impliedTagToken("input", "StartTag",
attributes=attributes,
selfClosing=token["selfClosing"]))
self.processEndTag(impliedTagToken("label"))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processEndTag(impliedTagToken("form"))
def startTagTextarea(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.rcdataState
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
self.parser.framesetOK = False
def startTagIFrame(self, token):
self.parser.framesetOK = False
self.startTagRawtext(token)
def startTagRawtext(self, token):
"""iframe, noembed noframes, noscript(if scripting enabled)"""
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagOpt(self, token):
if self.tree.openElements[-1].name == "option":
self.parser.phase.processEndTag(impliedTagToken("option"))
self.tree.reconstructActiveFormattingElements()
self.parser.tree.insertElement(token)
def startTagSelect(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
if self.parser.phase in (self.parser.phases["inTable"],
self.parser.phases["inCaption"],
self.parser.phases["inColumnGroup"],
self.parser.phases["inTableBody"],
self.parser.phases["inRow"],
self.parser.phases["inCell"]):
self.parser.phase = self.parser.phases["inSelectInTable"]
else:
self.parser.phase = self.parser.phases["inSelect"]
def startTagRpRt(self, token):
if self.tree.elementInScope("ruby"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "ruby":
self.parser.parseError()
self.tree.insertElement(token)
def startTagMath(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustMathMLAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["mathml"]
self.tree.insertElement(token)
# Need to get the parse error right for the case where the token
# has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagSvg(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["svg"]
self.tree.insertElement(token)
# Need to get the parse error right for the case where the token
# has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMisplaced(self, token):
""" Elements that should be children of other elements that have a
different insertion mode; here they are ignored
"caption", "col", "colgroup", "frame", "frameset", "head",
"option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
"tr", "noscript"
"""
self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]})
def startTagOther(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
def endTagP(self, token):
if not self.tree.elementInScope("p", variant="button"):
self.startTagCloseP(impliedTagToken("p", "StartTag"))
self.parser.parseError("unexpected-end-tag", {"name": "p"})
self.endTagP(impliedTagToken("p", "EndTag"))
else:
self.tree.generateImpliedEndTags("p")
if self.tree.openElements[-1].name != "p":
self.parser.parseError("unexpected-end-tag", {"name": "p"})
node = self.tree.openElements.pop()
while node.name != "p":
node = self.tree.openElements.pop()
def endTagBody(self, token):
if not self.tree.elementInScope("body"):
self.parser.parseError()
return
elif self.tree.openElements[-1].name != "body":
for node in self.tree.openElements[2:]:
if node.name not in frozenset(("dd", "dt", "li", "optgroup",
"option", "p", "rp", "rt",
"tbody", "td", "tfoot",
"th", "thead", "tr", "body",
"html")):
# Not sure this is the correct name for the parse error
self.parser.parseError(
"expected-one-end-tag-but-got-another",
{"expectedName": "body", "gotName": node.name})
break
self.parser.phase = self.parser.phases["afterBody"]
def endTagHtml(self, token):
# We repeat the test for the body end tag token being ignored here
if self.tree.elementInScope("body"):
self.endTagBody(impliedTagToken("body"))
return token
def endTagBlock(self, token):
# Put us back in the right whitespace handling mode
if token["name"] == "pre":
self.processSpaceCharacters = self.processSpaceCharactersNonPre
inScope = self.tree.elementInScope(token["name"])
if inScope:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if inScope:
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagForm(self, token):
node = self.tree.formPointer
self.tree.formPointer = None
if node is None or not self.tree.elementInScope(node):
self.parser.parseError("unexpected-end-tag",
{"name": "form"})
else:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1] != node:
self.parser.parseError("end-tag-too-early-ignored",
{"name": "form"})
self.tree.openElements.remove(node)
def endTagListItem(self, token):
if token["name"] == "li":
variant = "list"
else:
variant = None
if not self.tree.elementInScope(token["name"], variant=variant):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
else:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError(
"end-tag-too-early",
{"name": token["name"]})
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagHeading(self, token):
for item in headingElements:
if self.tree.elementInScope(item):
self.tree.generateImpliedEndTags()
break
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
for item in headingElements:
if self.tree.elementInScope(item):
item = self.tree.openElements.pop()
while item.name not in headingElements:
item = self.tree.openElements.pop()
break
def endTagFormatting(self, token):
"""The much-feared adoption agency algorithm"""
# http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867
# XXX Better parseError messages appreciated.
# Step 1
outerLoopCounter = 0
# Step 2
while outerLoopCounter < 8:
# Step 3
outerLoopCounter += 1
# Step 4:
# Let the formatting element be the last element in
# the list of active formatting elements that:
# - is between the end of the list and the last scope
# marker in the list, if any, or the start of the list
# otherwise, and
# - has the same tag name as the token.
formattingElement = self.tree.elementInActiveFormattingElements(
token["name"])
if (not formattingElement or
(formattingElement in self.tree.openElements and
not self.tree.elementInScope(formattingElement.name))):
# If there is no such node, then abort these steps
# and instead act as described in the "any other
# end tag" entry below.
self.endTagOther(token)
return
# Otherwise, if there is such a node, but that node is
# not in the stack of open elements, then this is a
# parse error; remove the element from the list, and
# abort these steps.
elif formattingElement not in self.tree.openElements:
self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
self.tree.activeFormattingElements.remove(formattingElement)
return
# Otherwise, if there is such a node, and that node is
# also in the stack of open elements, but the element
# is not in scope, then this is a parse error; ignore
# the token, and abort these steps.
elif not self.tree.elementInScope(formattingElement.name):
self.parser.parseError("adoption-agency-4.4", {"name": token["name"]})
return
# Otherwise, there is a formatting element and that
# element is in the stack and is in scope. If the
# element is not the current node, this is a parse
# error. In any case, proceed with the algorithm as
# written in the following steps.
else:
if formattingElement != self.tree.openElements[-1]:
self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})
# Step 5:
# Let the furthest block be the topmost node in the
# stack of open elements that is lower in the stack
# than the formatting element, and is an element in
# the special category. There might not be one.
afeIndex = self.tree.openElements.index(formattingElement)
furthestBlock = None
for element in self.tree.openElements[afeIndex:]:
if element.nameTuple in specialElements:
furthestBlock = element
break
# Step 6:
# If there is no furthest block, then the UA must
# first pop all the nodes from the bottom of the stack
# of open elements, from the current node up to and
# including the formatting element, then remove the
# formatting element from the list of active
# formatting elements, and finally abort these steps.
if furthestBlock is None:
element = self.tree.openElements.pop()
while element != formattingElement:
element = self.tree.openElements.pop()
self.tree.activeFormattingElements.remove(element)
return
# Step 7
commonAncestor = self.tree.openElements[afeIndex - 1]
# Step 8:
# The bookmark is supposed to help us identify where to reinsert
# nodes in step 15. We have to ensure that we reinsert nodes after
# the node before the active formatting element. Note the bookmark
# can move in step 9.7
bookmark = self.tree.activeFormattingElements.index(formattingElement)
# Step 9
lastNode = node = furthestBlock
innerLoopCounter = 0
index = self.tree.openElements.index(node)
while innerLoopCounter < 3:
innerLoopCounter += 1
                    # Step 9.5: let node be the element immediately above
                    # node in the stack of open elements
index -= 1
node = self.tree.openElements[index]
if node not in self.tree.activeFormattingElements:
self.tree.openElements.remove(node)
continue
# Step 9.6
if node == formattingElement:
break
# Step 9.7
if lastNode == furthestBlock:
bookmark = self.tree.activeFormattingElements.index(node) + 1
# Step 9.8
clone = node.cloneNode()
# Replace node with clone
self.tree.activeFormattingElements[
self.tree.activeFormattingElements.index(node)] = clone
self.tree.openElements[
self.tree.openElements.index(node)] = clone
node = clone
# Step 9.9
# Remove lastNode from its parents, if any
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
node.appendChild(lastNode)
# Step 9.10
lastNode = node
                # Step 10
                # If commonAncestor is a table, tbody, tfoot, thead or tr
                # element then lastNode must be foster parented; otherwise
                # it is appended to commonAncestor directly.
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")):
parent, insertBefore = self.tree.getTableMisnestedNodePosition()
parent.insertBefore(lastNode, insertBefore)
else:
commonAncestor.appendChild(lastNode)
# Step 11
clone = formattingElement.cloneNode()
# Step 12
furthestBlock.reparentChildren(clone)
# Step 13
furthestBlock.appendChild(clone)
# Step 14
self.tree.activeFormattingElements.remove(formattingElement)
self.tree.activeFormattingElements.insert(bookmark, clone)
# Step 15
self.tree.openElements.remove(formattingElement)
self.tree.openElements.insert(
self.tree.openElements.index(furthestBlock) + 1, clone)
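        # Illustrative note (not part of the original source): for misnested
        # markup such as "<p>1<b>2<i>3</b>4</i>5</p>" the algorithm above
        # pops and clones the formatting elements so the output tree matches
        # what browsers render:
        #   <p>1<b>2<i>3</i></b><i>4</i>5</p>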
def endTagAppletMarqueeObject(self, token):
if self.tree.elementInScope(token["name"]):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if self.tree.elementInScope(token["name"]):
element = self.tree.openElements.pop()
while element.name != token["name"]:
element = self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
def endTagBr(self, token):
self.parser.parseError("unexpected-end-tag-treated-as",
{"originalName": "br", "newName": "br element"})
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(impliedTagToken("br", "StartTag"))
self.tree.openElements.pop()
def endTagOther(self, token):
for node in self.tree.openElements[::-1]:
if node.name == token["name"]:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while self.tree.openElements.pop() != node:
pass
break
else:
if node.nameTuple in specialElements:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
break
class TextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("script", self.endTagScript)])
self.endTagHandler.default = self.endTagOther
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processEOF(self):
self.parser.parseError("expected-named-closing-tag-but-got-eof",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
return True
def startTagOther(self, token):
assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name']
def endTagScript(self, token):
node = self.tree.openElements.pop()
assert node.name == "script"
self.parser.phase = self.parser.originalPhase
# The rest of this method is all stuff that only happens if
# document.write works
def endTagOther(self, token):
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
class InTablePhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("caption", self.startTagCaption),
("colgroup", self.startTagColgroup),
("col", self.startTagCol),
(("tbody", "tfoot", "thead"), self.startTagRowGroup),
(("td", "th", "tr"), self.startTagImplyTbody),
("table", self.startTagTable),
(("style", "script"), self.startTagStyleScript),
("input", self.startTagInput),
("form", self.startTagForm)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "tbody", "td",
"tfoot", "th", "thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableContext(self):
# "clear the stack back to a table context"
while self.tree.openElements[-1].name not in ("table", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
# When the current node is <html> it's an innerHTML case
# processing methods
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-table")
else:
assert self.parser.innerHTML
# Stop parsing
def processSpaceCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processSpaceCharacters(token)
def processCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processCharacters(token)
def insertText(self, token):
# If we get here there must be at least one non-whitespace character
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processCharacters(token)
self.tree.insertFromTable = False
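        # Illustrative note (not part of the original source): this is the
        # "foster parenting" rule. Parsing "<table>x</table>" moves the
        # character "x" out of the table, giving a tree equivalent to
        # "<body>x<table></table></body>".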
def startTagCaption(self, token):
self.clearStackToTableContext()
self.tree.activeFormattingElements.append(Marker)
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCaption"]
def startTagColgroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inColumnGroup"]
def startTagCol(self, token):
self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
return token
def startTagRowGroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inTableBody"]
def startTagImplyTbody(self, token):
self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
return token
def startTagTable(self, token):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "table", "endName": "table"})
self.parser.phase.processEndTag(impliedTagToken("table"))
if not self.parser.innerHTML:
return token
def startTagStyleScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagInput(self, token):
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
self.parser.parseError("unexpected-hidden-input-in-table")
self.tree.insertElement(token)
# XXX associate with form
self.tree.openElements.pop()
else:
self.startTagOther(token)
def startTagForm(self, token):
self.parser.parseError("unexpected-form-in-table")
if self.tree.formPointer is None:
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
self.tree.openElements.pop()
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processStartTag(token)
self.tree.insertFromTable = False
def endTagTable(self, token):
if self.tree.elementInScope("table", variant="table"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "table":
self.parser.parseError("end-tag-too-early-named",
{"gotName": "table",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "table":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processEndTag(token)
self.tree.insertFromTable = False
class InTableTextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.originalPhase = None
self.characterTokens = []
def flushCharacters(self):
data = "".join([item["data"] for item in self.characterTokens])
if any([item not in spaceCharacters for item in data]):
token = {"type": tokenTypes["Characters"], "data": data}
self.parser.phases["inTable"].insertText(token)
elif data:
self.tree.insertText(data)
self.characterTokens = []
def processComment(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEOF(self):
self.flushCharacters()
self.parser.phase = self.originalPhase
return True
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.characterTokens.append(token)
def processSpaceCharacters(self, token):
# pretty sure we should never reach here
self.characterTokens.append(token)
# assert False
def processStartTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEndTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
class InCaptionPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-caption
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableElement)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("caption", self.endTagCaption),
("table", self.endTagTable),
(("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagCaption(self):
return not self.tree.elementInScope("caption", variant="table")
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableElement(self, token):
self.parser.parseError()
# XXX Have to duplicate logic here to find out if the tag is ignored
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagCaption(self, token):
if not self.ignoreEndTagCaption():
# AT this code is quite similar to endTagTable in "InTable"
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "caption":
self.parser.parseError("expected-one-end-tag-but-got-another",
{"gotName": "caption",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "caption":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inTable"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
self.parser.parseError()
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InColumnGroupPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-column
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("col", self.startTagCol)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("colgroup", self.endTagColgroup),
("col", self.endTagCol)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagColgroup(self):
return self.tree.openElements[-1].name == "html"
def processEOF(self):
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
return
else:
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return True
def processCharacters(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def startTagCol(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def endTagColgroup(self, token):
if self.ignoreEndTagColgroup():
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
else:
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
def endTagCol(self, token):
self.parser.parseError("no-end-tag", {"name": "col"})
def endTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
class InTableBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table0
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("tr", self.startTagTr),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "td", "th",
"tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableBodyContext(self):
while self.tree.openElements[-1].name not in ("tbody", "tfoot",
"thead", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTr(self, token):
self.clearStackToTableBodyContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inRow"]
def startTagTableCell(self, token):
self.parser.parseError("unexpected-cell-in-table-body",
{"name": token["name"]})
self.startTagTr(impliedTagToken("tr", "StartTag"))
return token
def startTagTableOther(self, token):
# XXX AT Any ideas on how to share this with endTagTable?
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.clearStackToTableBodyContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
else:
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagTable(self, token):
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InRowPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-row
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead",
"tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("tr", self.endTagTr),
("table", self.endTagTable),
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
(("body", "caption", "col", "colgroup", "html", "td", "th"),
self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods (XXX unify this with other table helper methods)
def clearStackToTableRowContext(self):
while self.tree.openElements[-1].name not in ("tr", "html"):
self.parser.parseError("unexpected-implied-end-tag-in-table-row",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
def ignoreEndTagTr(self):
return not self.tree.elementInScope("tr", variant="table")
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTableCell(self, token):
self.clearStackToTableRowContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCell"]
self.tree.activeFormattingElements.append(Marker)
def startTagTableOther(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTr(self, token):
if not self.ignoreEndTagTr():
self.clearStackToTableRowContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTableBody"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# Reprocess the current tag if the tr end tag was not ignored
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagTr(impliedTagToken("tr"))
return token
else:
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-row",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InCellPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-cell
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("td", "th"), self.endTagTableCell),
(("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
(("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
])
self.endTagHandler.default = self.endTagOther
# helper
def closeCell(self):
if self.tree.elementInScope("td", variant="table"):
self.endTagTableCell(impliedTagToken("td"))
elif self.tree.elementInScope("th", variant="table"):
self.endTagTableCell(impliedTagToken("th"))
# the rest
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableOther(self, token):
if (self.tree.elementInScope("td", variant="table") or
self.tree.elementInScope("th", variant="table")):
self.closeCell()
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagTableCell(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.tree.generateImpliedEndTags(token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-cell-end-tag",
{"name": token["name"]})
while True:
node = self.tree.openElements.pop()
if node.name == token["name"]:
break
else:
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inRow"]
else:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagImply(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.closeCell()
return token
else:
# sometimes innerHTML case
self.parser.parseError()
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InSelectPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("option", self.startTagOption),
("optgroup", self.startTagOptgroup),
("select", self.startTagSelect),
(("input", "keygen", "textarea"), self.startTagInput),
("script", self.startTagScript)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("option", self.endTagOption),
("optgroup", self.endTagOptgroup),
("select", self.endTagSelect)
])
self.endTagHandler.default = self.endTagOther
# http://www.whatwg.org/specs/web-apps/current-work/#in-select
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-select")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.tree.insertText(token["data"])
def startTagOption(self, token):
# We need to imply </option> if <option> is the current node.
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagOptgroup(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagSelect(self, token):
self.parser.parseError("unexpected-select-in-select")
self.endTagSelect(impliedTagToken("select"))
def startTagInput(self, token):
self.parser.parseError("unexpected-input-in-select")
if self.tree.elementInScope("select", variant="select"):
self.endTagSelect(impliedTagToken("select"))
return token
else:
assert self.parser.innerHTML
def startTagScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-select",
{"name": token["name"]})
def endTagOption(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "option"})
def endTagOptgroup(self, token):
# </optgroup> implicitly closes <option>
if (self.tree.openElements[-1].name == "option" and
self.tree.openElements[-2].name == "optgroup"):
self.tree.openElements.pop()
            # It also closes the <optgroup> element itself
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
# But nothing else
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "optgroup"})
def endTagSelect(self, token):
if self.tree.elementInScope("select", variant="select"):
node = self.tree.openElements.pop()
while node.name != "select":
node = self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-select",
{"name": token["name"]})
class InSelectInTablePhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.startTagTable)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.endTagTable)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.parser.phases["inSelect"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inSelect"].processCharacters(token)
def startTagTable(self, token):
self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
self.endTagOther(impliedTagToken("select"))
return token
def startTagOther(self, token):
return self.parser.phases["inSelect"].processStartTag(token)
def endTagTable(self, token):
self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagOther(impliedTagToken("select"))
return token
def endTagOther(self, token):
return self.parser.phases["inSelect"].processEndTag(token)
class InForeignContentPhase(Phase):
breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
"center", "code", "dd", "div", "dl", "dt",
"em", "embed", "h1", "h2", "h3",
"h4", "h5", "h6", "head", "hr", "i", "img",
"li", "listing", "menu", "meta", "nobr",
"ol", "p", "pre", "ruby", "s", "small",
"span", "strong", "strike", "sub", "sup",
"table", "tt", "u", "ul", "var"])
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
def adjustSVGTagNames(self, token):
replacements = {"altglyph": "altGlyph",
"altglyphdef": "altGlyphDef",
"altglyphitem": "altGlyphItem",
"animatecolor": "animateColor",
"animatemotion": "animateMotion",
"animatetransform": "animateTransform",
"clippath": "clipPath",
"feblend": "feBlend",
"fecolormatrix": "feColorMatrix",
"fecomponenttransfer": "feComponentTransfer",
"fecomposite": "feComposite",
"feconvolvematrix": "feConvolveMatrix",
"fediffuselighting": "feDiffuseLighting",
"fedisplacementmap": "feDisplacementMap",
"fedistantlight": "feDistantLight",
"feflood": "feFlood",
"fefunca": "feFuncA",
"fefuncb": "feFuncB",
"fefuncg": "feFuncG",
"fefuncr": "feFuncR",
"fegaussianblur": "feGaussianBlur",
"feimage": "feImage",
"femerge": "feMerge",
"femergenode": "feMergeNode",
"femorphology": "feMorphology",
"feoffset": "feOffset",
"fepointlight": "fePointLight",
"fespecularlighting": "feSpecularLighting",
"fespotlight": "feSpotLight",
"fetile": "feTile",
"feturbulence": "feTurbulence",
"foreignobject": "foreignObject",
"glyphref": "glyphRef",
"lineargradient": "linearGradient",
"radialgradient": "radialGradient",
"textpath": "textPath"}
if token["name"] in replacements:
token["name"] = replacements[token["name"]]
def processCharacters(self, token):
if token["data"] == "\u0000":
token["data"] = "\uFFFD"
elif (self.parser.framesetOK and
any(char not in spaceCharacters for char in token["data"])):
self.parser.framesetOK = False
Phase.processCharacters(self, token)
def processStartTag(self, token):
currentNode = self.tree.openElements[-1]
if (token["name"] in self.breakoutElements or
(token["name"] == "font" and
set(token["data"].keys()) & set(["color", "face", "size"]))):
self.parser.parseError("unexpected-html-element-in-foreign-content",
{"name": token["name"]})
while (self.tree.openElements[-1].namespace !=
self.tree.defaultNamespace and
not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and
not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])):
self.tree.openElements.pop()
return token
else:
if currentNode.namespace == namespaces["mathml"]:
self.parser.adjustMathMLAttributes(token)
elif currentNode.namespace == namespaces["svg"]:
self.adjustSVGTagNames(token)
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = currentNode.namespace
self.tree.insertElement(token)
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def processEndTag(self, token):
nodeIndex = len(self.tree.openElements) - 1
node = self.tree.openElements[-1]
if node.name != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while True:
if node.name.translate(asciiUpper2Lower) == token["name"]:
# XXX this isn't in the spec but it seems necessary
if self.parser.phase == self.parser.phases["inTableText"]:
self.parser.phase.flushCharacters()
self.parser.phase = self.parser.phase.originalPhase
while self.tree.openElements.pop() != node:
assert self.tree.openElements
new_token = None
break
nodeIndex -= 1
node = self.tree.openElements[nodeIndex]
if node.namespace != self.tree.defaultNamespace:
continue
else:
new_token = self.parser.phase.processEndTag(token)
break
return new_token
class AfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([("html", self.endTagHtml)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
# Stop parsing
pass
def processComment(self, token):
# This is needed because data is to be appended to the <html> element
# here and not to whatever is currently open.
self.tree.insertComment(token, self.tree.openElements[0])
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-body")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
        def endTagHtml(self, token):
if self.parser.innerHTML:
self.parser.parseError("unexpected-end-tag-after-body-innerhtml")
else:
self.parser.phase = self.parser.phases["afterAfterBody"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class InFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("frameset", self.startTagFrameset),
("frame", self.startTagFrame),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("frameset", self.endTagFrameset)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-frameset")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
self.parser.parseError("unexpected-char-in-frameset")
def startTagFrameset(self, token):
self.tree.insertElement(token)
def startTagFrame(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagNoframes(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-frameset",
{"name": token["name"]})
def endTagFrameset(self, token):
if self.tree.openElements[-1].name == "html":
# innerHTML case
self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
else:
self.tree.openElements.pop()
if (not self.parser.innerHTML and
self.tree.openElements[-1].name != "frameset"):
                # If we're not in innerHTML mode and the current node is not a
# "frameset" element (anymore) then switch.
self.parser.phase = self.parser.phases["afterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-frameset",
{"name": token["name"]})
class AfterFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#after3
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("html", self.endTagHtml)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
# Stop parsing
pass
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-frameset")
def startTagNoframes(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-frameset",
{"name": token["name"]})
def endTagHtml(self, token):
self.parser.phase = self.parser.phases["afterAfterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-frameset",
{"name": token["name"]})
class AfterAfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class AfterAfterFramesetPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoFrames)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagNoFrames(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
return {
"initial": InitialPhase,
"beforeHtml": BeforeHtmlPhase,
"beforeHead": BeforeHeadPhase,
"inHead": InHeadPhase,
# XXX "inHeadNoscript": InHeadNoScriptPhase,
"afterHead": AfterHeadPhase,
"inBody": InBodyPhase,
"text": TextPhase,
"inTable": InTablePhase,
"inTableText": InTableTextPhase,
"inCaption": InCaptionPhase,
"inColumnGroup": InColumnGroupPhase,
"inTableBody": InTableBodyPhase,
"inRow": InRowPhase,
"inCell": InCellPhase,
"inSelect": InSelectPhase,
"inSelectInTable": InSelectInTablePhase,
"inForeignContent": InForeignContentPhase,
"afterBody": AfterBodyPhase,
"inFrameset": InFramesetPhase,
"afterFrameset": AfterFramesetPhase,
"afterAfterBody": AfterAfterBodyPhase,
"afterAfterFrameset": AfterAfterFramesetPhase,
}
def impliedTagToken(name, type="EndTag", attributes=None,
selfClosing=False):
if attributes is None:
attributes = {}
return {"type": tokenTypes[type], "name": name, "data": attributes,
"selfClosing": selfClosing}
class ParseError(Exception):
"""Error in parsed document"""
pass
|
detiber/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/panos/panos_commit.py
|
78
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_commit
short_description: commit firewall's candidate configuration
description:
    - PanOS module that commits the firewall's candidate configuration on
      the device. The new configuration becomes active immediately.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
password:
description:
- password for authentication
required: true
username:
description:
- username for authentication
required: false
default: "admin"
interval:
description:
- interval for checking commit job
required: false
default: 0.5
timeout:
description:
- timeout for commit job
required: false
default: None
sync:
description:
- if commit should be synchronous
required: false
default: true
'''
EXAMPLES = '''
# Commit candidate config on 192.168.1.1 in sync mode
- panos_commit:
ip_address: "192.168.1.1"
username: "admin"
password: "admin"
'''
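# A further illustrative invocation (assumed values, not from the original
# docs): a synchronous commit that polls the commit job every 2 seconds and
# gives up after 120 seconds, using the documented interval/timeout options:
#
# - panos_commit:
#     ip_address: "192.168.1.1"
#     username: "admin"
#     password: "admin"
#     interval: 2
#     timeout: 120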
RETURN = '''
status:
description: success status
returned: success
type: string
sample: "okey dokey"
'''
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
try:
import pan.xapi
HAS_LIB = True
except ImportError:
HAS_LIB = False
def main():
argument_spec = dict(
ip_address=dict(),
password=dict(no_log=True),
username=dict(default='admin'),
interval=dict(default=0.5),
timeout=dict(),
sync=dict(type='bool', default=True)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_LIB:
module.fail_json(msg='pan-python required for this module')
ip_address = module.params["ip_address"]
if not ip_address:
module.fail_json(msg="ip_address should be specified")
password = module.params["password"]
if not password:
module.fail_json(msg="password is required")
username = module.params['username']
interval = module.params['interval']
timeout = module.params['timeout']
sync = module.params['sync']
xapi = pan.xapi.PanXapi(
hostname=ip_address,
api_username=username,
api_password=password
)
xapi.commit(
cmd="<commit></commit>",
sync=sync,
interval=interval,
timeout=timeout
)
module.exit_json(changed=True, msg="okey dokey")
if __name__ == '__main__':
main()
|
lyft/heroku-buildpack-python-data
|
refs/heads/master
|
vendor/pip-1.3.1/pip/basecommand.py
|
63
|
"""Base Command class, and related routines"""
import os
import socket
import sys
import tempfile
import traceback
import time
import optparse
from pip.log import logger
from pip.download import urlopen
from pip.exceptions import (BadCommand, InstallationError, UninstallationError,
CommandError)
from pip.backwardcompat import StringIO, ssl
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.status_codes import SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND
from pip.util import get_prog
__all__ = ['Command']
# for backwards compatibility
get_proxy = urlopen.get_proxy
class Command(object):
name = None
usage = None
hidden = False
def __init__(self, main_parser):
parser_kw = {
'usage': self.usage,
'prog': '%s %s' % (get_prog(), self.name),
'formatter': UpdatingDefaultsHelpFormatter(),
'add_help_option': False,
'name': self.name,
'description': self.__doc__,
}
self.main_parser = main_parser
self.parser = ConfigOptionParser(**parser_kw)
# Commands should add options to this option group
optgroup_name = '%s Options' % self.name.capitalize()
self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
# Re-add all options and option groups.
for group in main_parser.option_groups:
self._copy_option_group(self.parser, group)
# Copies all general options from the main parser.
self._copy_options(self.parser, main_parser.option_list)
def _copy_options(self, parser, options):
"""Populate an option parser or group with options."""
for option in options:
if not option.dest:
continue
parser.add_option(option)
def _copy_option_group(self, parser, group):
"""Copy option group (including options) to another parser."""
new_group = optparse.OptionGroup(parser, group.title)
self._copy_options(new_group, group.option_list)
parser.add_option_group(new_group)
def merge_options(self, initial_options, options):
# Make sure we have all global options carried over
attrs = ['log', 'proxy', 'require_venv',
'log_explicit_levels', 'log_file',
'timeout', 'default_vcs',
'skip_requirements_regex',
'no_input', 'exists_action',
'cert']
if not ssl:
attrs.append('insecure')
for attr in attrs:
setattr(options, attr, getattr(initial_options, attr) or getattr(options, attr))
options.quiet += initial_options.quiet
options.verbose += initial_options.verbose
def setup_logging(self):
pass
def main(self, args, initial_options):
options, args = self.parser.parse_args(args)
self.merge_options(initial_options, options)
level = 1 # Notify
level += options.verbose
level -= options.quiet
level = logger.level_for_integer(4 - level)
complete_log = []
logger.consumers.extend(
[(level, sys.stdout),
(logger.DEBUG, complete_log.append)])
if options.log_explicit_levels:
logger.explicit_levels = True
self.setup_logging()
        # TODO: try to get these passed down from the command
        # without resorting to os.environ to hold them.
if options.no_input:
os.environ['PIP_NO_INPUT'] = '1'
if options.exists_action:
os.environ['PIP_EXISTS_ACTION'] = ''.join(options.exists_action)
if not ssl and options.insecure:
os.environ['PIP_INSECURE'] = '1'
if options.cert:
os.environ['PIP_CERT'] = options.cert
if options.require_venv:
# If a venv is required check if it can really be found
if not os.environ.get('VIRTUAL_ENV'):
logger.fatal('Could not find an activated virtualenv (required).')
sys.exit(VIRTUALENV_NOT_FOUND)
if options.log:
log_fp = open_logfile(options.log, 'a')
logger.consumers.append((logger.DEBUG, log_fp))
else:
log_fp = None
socket.setdefaulttimeout(options.timeout or None)
urlopen.setup(proxystr=options.proxy, prompting=not options.no_input)
exit = SUCCESS
store_log = False
try:
status = self.run(options, args)
# FIXME: all commands should return an exit status
# and when it is done, isinstance is not needed anymore
if isinstance(status, int):
exit = status
except (InstallationError, UninstallationError):
e = sys.exc_info()[1]
logger.fatal(str(e))
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = ERROR
except BadCommand:
e = sys.exc_info()[1]
logger.fatal(str(e))
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = ERROR
except CommandError:
e = sys.exc_info()[1]
logger.fatal('ERROR: %s' % e)
logger.info('Exception information:\n%s' % format_exc())
exit = ERROR
except KeyboardInterrupt:
logger.fatal('Operation cancelled by user')
logger.info('Exception information:\n%s' % format_exc())
store_log = True
exit = ERROR
except:
logger.fatal('Exception:\n%s' % format_exc())
store_log = True
exit = UNKNOWN_ERROR
if log_fp is not None:
log_fp.close()
if store_log:
log_fn = options.log_file
text = '\n'.join(complete_log)
try:
log_fp = open_logfile(log_fn, 'w')
except IOError:
temp = tempfile.NamedTemporaryFile(delete=False)
log_fn = temp.name
log_fp = open_logfile(log_fn, 'w')
logger.fatal('Storing complete log in %s' % log_fn)
log_fp.write(text)
log_fp.close()
return exit
def format_exc(exc_info=None):
if exc_info is None:
exc_info = sys.exc_info()
out = StringIO()
traceback.print_exception(*exc_info, **dict(file=out))
return out.getvalue()
def open_logfile(filename, mode='a'):
"""Open the named log file in append mode.
If the file already exists, a separator will also be printed to
the file to separate past activity from current activity.
"""
filename = os.path.expanduser(filename)
filename = os.path.abspath(filename)
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
exists = os.path.exists(filename)
log_fp = open(filename, mode)
if exists:
log_fp.write('%s\n' % ('-' * 60))
log_fp.write('%s run on %s\n' % (sys.argv[0], time.strftime('%c')))
return log_fp
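# Illustrative usage (not part of the original source): open_logfile expands
# "~", creates any missing parent directories, and writes a dashed separator
# line when appending to a file that already exists:
#   fp = open_logfile('~/.pip/pip.log')   # append mode by default
#   fp.write('resolved dependencies\n')
#   fp.close()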
|
hayd/contracts
|
refs/heads/master
|
src/contracts/testing/library/separate_context_tc.py
|
4
|
from . import good, fail
# dictionary of string -> tuple, where each tuple has two elements of
# different types. Because the context is shared across the dictionary,
# every value must use the same two types.
good('dict(str:tuple(type(x),type(y))),x!=y', {'a': (2, 1.1)})
fail('dict(str:tuple(type(x),type(y))),x!=y', {'a': (2, 1)})
# This fails because we have x=int,y=float followed by float,int
fail('dict(str:tuple(type(x),type(y))),x!=y', {'a': (2, 1.1), 'b': (1.1, 2)})
# Here we force the context to not match using $(...)
good('dict(str:$(tuple(type(x),type(y)),x!=y))', {'a': (2, 1.1),
'b': (1.1, 2)})
fail('dict(str:$(tuple(type(x),type(y)),x!=y))', {'a': (2, 1)})
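# An illustrative extra case (assumed, not in the original test set): with
# $(...) each value is matched in a fresh context, so repeating the same
# type pair across values is accepted:
#   good('dict(str:$(tuple(type(x),type(y)),x!=y))', {'a': (2, 1.1), 'b': (3, 2.5)})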
|
modulus-sa/pybbix
|
refs/heads/master
|
pybbix/api/__init__.py
|
1
|
"""Abstract API classes."""
import json
JSON_RPC_VERSION = '2.0'
class AbstractAPI:
"""Abstract interface of all children APIs.
This is repsonsible for setting the right attributes
that ``Request`` uses to build the request dict."""
def __init__(self):
with open('params.json') as f:
self._methods = json.load(f)
def __getattr__(self, name):
if name in self._methods:
return self._make_method(name)
else:
msg = '{} is not an available method on the {} API.'
api_name = self.__class__.__name__
raise AttributeError(msg.format(name, api_name))
def _make_method(self, name):
cls_name = self.__class__.__name__.lower()
method_name = cls_name + '.' + name
def method(**kwargs):
self.params = kwargs
self.method = method_name
return self()
# interactive/debugging
method.__qualname__ = method_name
setattr(self, name, method)
return method
class Request:
"""Object representing the request dict.
    This does not provide any API-related functionality;
    it must be used together with ``AbstractAPI``."""
jsonrpc = JSON_RPC_VERSION
method = None
params = None
id = 0
auth = None
def __new__(cls, *args, **kwargs):
req = super().__new__(cls)
# each request has a unique id
req.id = Request.id = Request.id + 1
return req
def __call__(self):
return {
'jsonrpc': self.jsonrpc,
'method': self.method,
'params': self.params,
'id': self.id,
'auth': self.auth}
class API(AbstractAPI, Request):
"""Base class of all Zabbix APIs."""
|
mastizada/kuma
|
refs/heads/master
|
vendor/lib/python/debug_toolbar/panels/templates/panel.py
|
9
|
from __future__ import absolute_import, unicode_literals
try:
from collections import OrderedDict
except ImportError:
from django.utils.datastructures import SortedDict as OrderedDict
from os.path import normpath
from pprint import pformat
import django
from django import http
from django.conf import settings
from django.conf.urls import patterns, url
from django.db.models.query import QuerySet, RawQuerySet
from django.template import Context, RequestContext, Template
from django.template.context import get_standard_processors
from django.test.signals import template_rendered
from django.test.utils import instrumented_test_render
from django.utils.encoding import force_text
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from debug_toolbar.panels import Panel
from debug_toolbar.panels.sql.tracking import recording, SQLQueryTriggered
# Monkey-patch to enable the template_rendered signal. The receiver returns
# immediately when the panel is disabled to keep the overhead small.
# Code taken and adapted from Simon Willison and Django Snippets:
# http://www.djangosnippets.org/snippets/766/
if Template._render != instrumented_test_render:
Template.original_render = Template._render
Template._render = instrumented_test_render
# Monkey-patch to store items added by template context processors. The
# overhead is sufficiently small to justify enabling it unconditionally.
def _request_context__init__(
self, request, dict_=None, processors=None, current_app=None,
use_l10n=None, use_tz=None):
Context.__init__(
self, dict_, current_app=current_app,
use_l10n=use_l10n, use_tz=use_tz)
if processors is None:
processors = ()
else:
processors = tuple(processors)
self.context_processors = OrderedDict()
updates = dict()
for processor in get_standard_processors() + processors:
name = '%s.%s' % (processor.__module__, processor.__name__)
context = processor(request)
self.context_processors[name] = context
updates.update(context)
self.update(updates)
RequestContext.__init__ = _request_context__init__
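# Illustrative effect (assumed, not part of the original source): after this
# patch a RequestContext records what each context processor contributed,
# keyed by the processor's dotted path:
#   ctx = RequestContext(request)
#   ctx.context_processors
#   # {'django.core.context_processors.request': {'request': <HttpRequest>},
#   #  ...}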
# Monkey-patch versions of Django where Template doesn't store origin.
# See https://code.djangoproject.com/ticket/16096.
if django.VERSION[:2] < (1, 7):
old_template_init = Template.__init__
def new_template_init(self, template_string, origin=None, name='<Unknown Template>'):
old_template_init(self, template_string, origin, name)
self.origin = origin
Template.__init__ = new_template_init
class TemplatesPanel(Panel):
"""
A panel that lists all templates used during processing of a response.
"""
def __init__(self, *args, **kwargs):
super(TemplatesPanel, self).__init__(*args, **kwargs)
self.templates = []
def _store_template_info(self, sender, **kwargs):
template, context = kwargs['template'], kwargs['context']
# Skip templates that we are generating through the debug toolbar.
if (isinstance(template.name, six.string_types) and
template.name.startswith('debug_toolbar/')):
return
context_list = []
for context_layer in context.dicts:
temp_layer = {}
if hasattr(context_layer, 'items'):
for key, value in context_layer.items():
# Replace any request elements - they have a large
# unicode representation and the request data is
# already made available from the Request panel.
if isinstance(value, http.HttpRequest):
temp_layer[key] = '<<request>>'
# Replace the debugging sql_queries element. The SQL
# data is already made available from the SQL panel.
elif key == 'sql_queries' and isinstance(value, list):
temp_layer[key] = '<<sql_queries>>'
# Replace LANGUAGES, which is available in i18n context processor
elif key == 'LANGUAGES' and isinstance(value, tuple):
temp_layer[key] = '<<languages>>'
# QuerySet would trigger the database: user can run the query from SQL Panel
elif isinstance(value, (QuerySet, RawQuerySet)):
model_name = "%s.%s" % (
value.model._meta.app_label, value.model.__name__)
temp_layer[key] = '<<%s of %s>>' % (
value.__class__.__name__.lower(), model_name)
else:
try:
recording(False)
pformat(value) # this MAY trigger a db query
except SQLQueryTriggered:
temp_layer[key] = '<<triggers database query>>'
except UnicodeEncodeError:
temp_layer[key] = '<<unicode encode error>>'
except Exception:
temp_layer[key] = '<<unhandled exception>>'
else:
temp_layer[key] = value
finally:
recording(True)
try:
context_list.append(pformat(temp_layer))
except UnicodeEncodeError:
pass
kwargs['context'] = [force_text(item) for item in context_list]
kwargs['context_processors'] = getattr(context, 'context_processors', None)
self.templates.append(kwargs)
# Implement the Panel API
nav_title = _("Templates")
@property
def title(self):
num_templates = len(self.templates)
return _("Templates (%(num_templates)s rendered)") % {'num_templates': num_templates}
template = 'debug_toolbar/panels/templates.html'
@classmethod
def get_urls(cls):
return patterns('debug_toolbar.panels.templates.views', # noqa
url(r'^template_source/$', 'template_source', name='template_source'),
)
def enable_instrumentation(self):
template_rendered.connect(self._store_template_info)
def disable_instrumentation(self):
template_rendered.disconnect(self._store_template_info)
def process_response(self, request, response):
template_context = []
for template_data in self.templates:
info = {}
# Clean up some info about templates
template = template_data.get('template', None)
if not hasattr(template, 'origin'):
continue
if template.origin and template.origin.name:
template.origin_name = template.origin.name
else:
template.origin_name = 'No origin'
info['template'] = template
# Clean up context for better readability
if self.toolbar.config['SHOW_TEMPLATE_CONTEXT']:
context_list = template_data.get('context', [])
info['context'] = '\n'.join(context_list)
template_context.append(info)
# Fetch context_processors from any template
if self.templates:
context_processors = self.templates[0]['context_processors']
else:
context_processors = None
self.record_stats({
'templates': template_context,
'template_dirs': [normpath(x) for x in settings.TEMPLATE_DIRS],
'context_processors': context_processors,
})
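# --- Editor's hedged sketch (added; not part of the original module) ---
# Minimal illustration of consuming the template_rendered signal enabled by
# the monkey-patch at the top of this file; the receiver name is hypothetical.
def _example_template_receiver(sender, template=None, context=None, **kwargs):
    # Fired once per Template._render() call thanks to instrumented_test_render.
    print('rendered: %s' % getattr(template, 'name', '<unnamed>'))
# template_rendered.connect(_example_template_receiver)  # opt in explicitly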
|
jaymiejones86/jaymiejones.com
|
refs/heads/master
|
node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/formatters/other.py
|
363
|
# -*- coding: utf-8 -*-
"""
pygments.formatters.other
~~~~~~~~~~~~~~~~~~~~~~~~~
Other formatters: NullFormatter, RawTokenFormatter.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import OptionError, get_choice_opt, b
from pygments.token import Token
from pygments.console import colorize
__all__ = ['NullFormatter', 'RawTokenFormatter']
class NullFormatter(Formatter):
"""
Output the text unchanged without any formatting.
"""
name = 'Text only'
aliases = ['text', 'null']
filenames = ['*.txt']
def format(self, tokensource, outfile):
enc = self.encoding
for ttype, value in tokensource:
if enc:
outfile.write(value.encode(enc))
else:
outfile.write(value)
class RawTokenFormatter(Formatter):
r"""
Format tokens as a raw representation for storing token streams.
The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
be converted to a token stream with the `RawTokenLexer`, described in the
`lexer list <lexers.txt>`_.
Only two options are accepted:
`compress`
If set to ``'gz'`` or ``'bz2'``, compress the output with the given
compression algorithm after encoding (default: ``''``).
`error_color`
If set to a color name, highlight error tokens using that color. If
set but with no value, defaults to ``'red'``.
*New in Pygments 0.11.*
"""
name = 'Raw tokens'
aliases = ['raw', 'tokens']
filenames = ['*.raw']
unicodeoutput = False
def __init__(self, **options):
Formatter.__init__(self, **options)
if self.encoding:
raise OptionError('the raw formatter does not support the '
'encoding option')
self.encoding = 'ascii' # let pygments.format() do the right thing
self.compress = get_choice_opt(options, 'compress',
['', 'none', 'gz', 'bz2'], '')
self.error_color = options.get('error_color', None)
if self.error_color is True:
self.error_color = 'red'
if self.error_color is not None:
try:
colorize(self.error_color, '')
except KeyError:
raise ValueError("Invalid color %r specified" %
self.error_color)
def format(self, tokensource, outfile):
try:
outfile.write(b(''))
except TypeError:
raise TypeError('The raw tokens formatter needs a binary '
'output file')
if self.compress == 'gz':
import gzip
outfile = gzip.GzipFile('', 'wb', 9, outfile)
def write(text):
outfile.write(text.encode())
flush = outfile.flush
elif self.compress == 'bz2':
import bz2
compressor = bz2.BZ2Compressor(9)
def write(text):
outfile.write(compressor.compress(text.encode()))
def flush():
outfile.write(compressor.flush())
outfile.flush()
else:
def write(text):
outfile.write(text.encode())
flush = outfile.flush
if self.error_color:
for ttype, value in tokensource:
line = "%s\t%r\n" % (ttype, value)
if ttype is Token.Error:
write(colorize(self.error_color, line))
else:
write(line)
else:
for ttype, value in tokensource:
write("%s\t%r\n" % (ttype, value))
flush()
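# --- Editor's hedged sketch (added; not part of the original module) ---
# Writing a raw token stream with the formatter above; assumes pygments is
# importable and the output file name ('tokens.raw') is hypothetical.
# Note the binary mode: RawTokenFormatter encodes to bytes itself.
def _example_dump_tokens():
    from pygments import highlight
    from pygments.lexers import PythonLexer
    with open('tokens.raw', 'wb') as outfile:
        highlight('print("hi")\n', PythonLexer(), RawTokenFormatter(), outfile)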
|
NicolasDichtel/linux
|
refs/heads/master
|
tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py
|
91
|
'''
run the command under test, under valgrind and collect memory leak info
as a separate test.
'''
import os
import re
import signal
from string import Template
import subprocess
import time
from TdcPlugin import TdcPlugin
from tdc_config import *
def vp_extract_num_from_string(num_as_string_maybe_with_commas):
return int(num_as_string_maybe_with_commas.replace(',',''))
class SubPlugin(TdcPlugin):
def __init__(self):
self.sub_class = 'valgrind/SubPlugin'
self.tap = ''
super().__init__()
def pre_suite(self, testcount, testidlist):
'''run commands before test_runner goes into a test loop'''
super().pre_suite(testcount, testidlist)
if self.args.verbose > 1:
print('{}.pre_suite'.format(self.sub_class))
if self.args.valgrind:
self._add_to_tap('1..{}\n'.format(self.testcount))
def post_suite(self, index):
        '''run commands after test_runner finishes the test loop'''
super().post_suite(index)
self._add_to_tap('\n|---\n')
if self.args.verbose > 1:
print('{}.post_suite'.format(self.sub_class))
print('{}'.format(self.tap))
if self.args.verbose < 4:
subprocess.check_output('rm -f vgnd-*.log', shell=True)
def add_args(self, parser):
super().add_args(parser)
self.argparser_group = self.argparser.add_argument_group(
'valgrind',
'options for valgrindPlugin (run command under test under Valgrind)')
self.argparser_group.add_argument(
'-V', '--valgrind', action='store_true',
help='Run commands under valgrind')
return self.argparser
def adjust_command(self, stage, command):
super().adjust_command(stage, command)
cmdform = 'list'
cmdlist = list()
if not self.args.valgrind:
return command
if self.args.verbose > 1:
print('{}.adjust_command'.format(self.sub_class))
if not isinstance(command, list):
cmdform = 'str'
cmdlist = command.split()
else:
cmdlist = command
if stage == 'execute':
if self.args.verbose > 1:
print('adjust_command: stage is {}; inserting valgrind stuff in command [{}] list [{}]'.
format(stage, command, cmdlist))
cmdlist.insert(0, '--track-origins=yes')
cmdlist.insert(0, '--show-leak-kinds=definite,indirect')
cmdlist.insert(0, '--leak-check=full')
cmdlist.insert(0, '--log-file=vgnd-{}.log'.format(self.args.testid))
cmdlist.insert(0, '-v') # ask for summary of non-leak errors
cmdlist.insert(0, ENVIR['VALGRIND_BIN'])
else:
pass
if cmdform == 'str':
command = ' '.join(cmdlist)
else:
command = cmdlist
if self.args.verbose > 1:
print('adjust_command: return command [{}]'.format(command))
return command
def post_execute(self):
if not self.args.valgrind:
return
self.definitely_lost_re = re.compile(
r'definitely lost:\s+([,0-9]+)\s+bytes in\s+([,0-9]+)\sblocks', re.MULTILINE | re.DOTALL)
self.indirectly_lost_re = re.compile(
r'indirectly lost:\s+([,0-9]+)\s+bytes in\s+([,0-9]+)\s+blocks', re.MULTILINE | re.DOTALL)
        self.possibly_lost_re = re.compile(
            r'possibly lost:\s+([,0-9]+)\s+bytes in\s+([,0-9]+)\s+blocks', re.MULTILINE | re.DOTALL)
self.non_leak_error_re = re.compile(
r'ERROR SUMMARY:\s+([,0-9]+) errors from\s+([,0-9]+)\s+contexts', re.MULTILINE | re.DOTALL)
def_num = 0
ind_num = 0
pos_num = 0
nle_num = 0
# what about concurrent test runs? Maybe force them to be in different directories?
with open('vgnd-{}.log'.format(self.args.testid)) as vfd:
content = vfd.read()
def_mo = self.definitely_lost_re.search(content)
ind_mo = self.indirectly_lost_re.search(content)
pos_mo = self.possibly_lost_re.search(content)
nle_mo = self.non_leak_error_re.search(content)
        # Use the comma-aware helper: valgrind groups digits with commas.
        if def_mo:
            def_num = vp_extract_num_from_string(def_mo.group(2))
        if ind_mo:
            ind_num = vp_extract_num_from_string(ind_mo.group(2))
        if pos_mo:
            pos_num = vp_extract_num_from_string(pos_mo.group(2))
        if nle_mo:
            nle_num = vp_extract_num_from_string(nle_mo.group(1))
mem_results = ''
if (def_num > 0) or (ind_num > 0) or (pos_num > 0) or (nle_num > 0):
mem_results += 'not '
mem_results += 'ok {} - {}-mem # {}\n'.format(
self.args.test_ordinal, self.args.testid, 'memory leak check')
self._add_to_tap(mem_results)
if mem_results.startswith('not '):
print('{}'.format(content))
self._add_to_tap(content)
def _add_to_tap(self, more_tap_output):
self.tap += more_tap_output
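# --- Editor's hedged sketch (added; not part of the original plugin) ---
# Shows what the leak regexes above are meant to match; the sample line is a
# typical valgrind LEAK SUMMARY entry, reproduced here as an assumption.
def _example_parse_leak_line():
    sample = '   definitely lost: 1,024 bytes in 16 blocks'
    mo = re.search(r'definitely lost:\s+([,0-9]+)\s+bytes in\s+([,0-9]+)\sblocks', sample)
    assert mo is not None
    assert vp_extract_num_from_string(mo.group(1)) == 1024
    assert vp_extract_num_from_string(mo.group(2)) == 16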
|
DavidMikeSimon/ansible
|
refs/heads/devel
|
lib/ansible/plugins/vars/__init__.py
|
7690
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
ernstbaslerpartner/send2geoadmin
|
refs/heads/master
|
env/Lib/site-packages/pip-1.2.1-py2.7.egg/pip/commands/help.py
|
80
|
from pip.basecommand import (Command, command_dict,
load_all_commands, SUCCESS,
ERROR)
from pip.exceptions import CommandError
from pip.baseparser import parser
class HelpCommand(Command):
name = 'help'
usage = '%prog'
summary = 'Show available commands'
def run(self, options, args):
load_all_commands()
if args:
## FIXME: handle errors better here
command = args[0]
if command not in command_dict:
raise CommandError('No command with the name: %s' % command)
command = command_dict[command]
command.parser.print_help()
return SUCCESS
parser.print_help()
print('\nCommands available:')
commands = list(set(command_dict.values()))
commands.sort(key=lambda x: x.name)
for command in commands:
if command.hidden:
continue
print(' %s: %s' % (command.name, command.summary))
return SUCCESS
HelpCommand()
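# --- Editor's hedged note (added; not part of the original module) ---
# The bare HelpCommand() call above registers the command in pip's
# command_dict at import time, so both `pip help` and `pip help <command>`
# resolve through the run() method defined here.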
|
jspraul/bite-project
|
refs/heads/master
|
deps/gdata-python-client/src/gdata/apps/groups/data.py
|
102
|
#!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data model classes for the Groups Provisioning API."""
__author__ = 'Shraddha gupta <shraddhag@google.com>'
import atom.data
import gdata.apps
import gdata.apps.apps_property_entry
import gdata.apps_property
import gdata.data
# This is required to work around a naming conflict between the Google
# Spreadsheets API and Python's built-in property function
pyproperty = property
# The apps:property groupId of a group entry
GROUP_ID = 'groupId'
# The apps:property groupName of a group entry
GROUP_NAME = 'groupName'
# The apps:property description of a group entry
DESCRIPTION = 'description'
# The apps:property emailPermission of a group entry
EMAIL_PERMISSION = 'emailPermission'
# The apps:property memberId of a group member entry
MEMBER_ID = 'memberId'
# The apps:property memberType of a group member entry
MEMBER_TYPE = 'memberType'
# The apps:property directMember of a group member entry
DIRECT_MEMBER = 'directMember'
class GroupEntry(gdata.apps.apps_property_entry.AppsPropertyEntry):
"""Represents a group entry in object form."""
def GetGroupId(self):
"""Get groupId of the GroupEntry object.
Returns:
      The groupId of this GroupEntry object as a string or None.
"""
return self._GetProperty(GROUP_ID)
def SetGroupId(self, value):
"""Set the groupId of this GroupEntry object.
Args:
value: string The new groupId to give this object.
"""
self._SetProperty(GROUP_ID, value)
group_id = pyproperty(GetGroupId, SetGroupId)
def GetGroupName(self):
"""Get the groupName of the GroupEntry object.
Returns:
The groupName of this GroupEntry object as a string or None.
"""
return self._GetProperty(GROUP_NAME)
def SetGroupName(self, value):
"""Set the groupName of this GroupEntry object.
Args:
value: string The new groupName to give this object.
"""
self._SetProperty(GROUP_NAME, value)
group_name = pyproperty(GetGroupName, SetGroupName)
def GetDescription(self):
"""Get the description of the GroupEntry object.
Returns:
The description of this GroupEntry object as a string or None.
"""
return self._GetProperty(DESCRIPTION)
def SetDescription(self, value):
"""Set the description of this GroupEntry object.
Args:
value: string The new description to give this object.
"""
self._SetProperty(DESCRIPTION, value)
description = pyproperty(GetDescription, SetDescription)
def GetEmailPermission(self):
"""Get the emailPermission of the GroupEntry object.
Returns:
The emailPermission of this GroupEntry object as a string or None.
"""
return self._GetProperty(EMAIL_PERMISSION)
def SetEmailPermission(self, value):
"""Set the emailPermission of this GroupEntry object.
Args:
value: string The new emailPermission to give this object.
"""
self._SetProperty(EMAIL_PERMISSION, value)
email_permission = pyproperty(GetEmailPermission, SetEmailPermission)
def __init__(self, group_id=None, group_name=None, description=None,
email_permission=None, *args, **kwargs):
"""Constructs a new GroupEntry object with the given arguments.
Args:
group_id: string identifier of the group.
group_name: string name of the group.
description: string (optional) the group description.
      email_permission: string (optional) permission level of the group.
"""
super(GroupEntry, self).__init__(*args, **kwargs)
if group_id:
self.group_id = group_id
if group_name:
self.group_name = group_name
if description:
self.description = description
if email_permission:
self.email_permission = email_permission
class GroupFeed(gdata.data.GDFeed):
"""Represents a feed of GroupEntry objects."""
# Override entry so that this feed knows how to type its list of entries.
entry = [GroupEntry]
class GroupMemberEntry(gdata.apps.apps_property_entry.AppsPropertyEntry):
"""Represents a group member in object form."""
def GetMemberId(self):
"""Get the memberId of the GroupMember object.
Returns:
The memberId of this GroupMember object as a string.
"""
return self._GetProperty(MEMBER_ID)
def SetMemberId(self, value):
"""Set the memberId of this GroupMember object.
Args:
value: string The new memberId to give this object.
"""
self._SetProperty(MEMBER_ID, value)
member_id = pyproperty(GetMemberId, SetMemberId)
def GetMemberType(self):
"""Get the memberType(User, Group) of the GroupMember object.
Returns:
The memberType of this GroupMember object as a string or None.
"""
return self._GetProperty(MEMBER_TYPE)
def SetMemberType(self, value):
"""Set the memberType of this GroupMember object.
Args:
value: string The new memberType to give this object.
"""
self._SetProperty(MEMBER_TYPE, value)
member_type = pyproperty(GetMemberType, SetMemberType)
def GetDirectMember(self):
"""Get the directMember of the GroupMember object.
Returns:
The directMember of this GroupMember object as a bool or None.
"""
return self._GetProperty(DIRECT_MEMBER)
def SetDirectMember(self, value):
"""Set the memberType of this GroupMember object.
Args:
value: string The new memberType to give this object.
"""
self._SetProperty(DIRECT_MEMBER, value)
direct_member = pyproperty(GetDirectMember, SetDirectMember)
def __init__(self, member_id=None, member_type=None,
direct_member=None, *args, **kwargs):
"""Constructs a new GroupMemberEntry object with the given arguments.
Args:
member_id: string identifier of group member object.
member_type: string (optional) member type of group member object.
direct_member: bool (optional) if group member object is direct member.
args: The other parameters to pass to gdata.entry.GDEntry constructor.
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
"""
super(GroupMemberEntry, self).__init__(*args, **kwargs)
if member_id:
self.member_id = member_id
if member_type:
self.member_type = member_type
if direct_member:
self.direct_member = direct_member
class GroupMemberFeed(gdata.data.GDFeed):
"""Represents a feed of GroupMemberEntry objects."""
# Override entry so that this feed knows how to type its list of entries.
entry = [GroupMemberEntry]
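# --- Editor's hedged sketch (added; not part of the original module) ---
# Constructing the entry classes defined above with hypothetical values;
# no Provisioning API call is made, only the apps:property plumbing is used.
def _example_group_entry():
    entry = GroupEntry(group_id='staff@example.com', group_name='Staff',
                       description='All staff members',
                       email_permission='Domain')
    assert entry.group_id == 'staff@example.com'
    assert entry.description == 'All staff members'
    return entry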
|
klkuhlm/mpmath
|
refs/heads/master
|
mpmath/tests/test_compatibility.py
|
15
|
from mpmath import *
from random import seed, randint, random
import math
# Test compatibility with Python floats, which are
# IEEE doubles (53-bit)
N = 5000
seed(1)
# Choosing exponents between roughly -140, 140 ensures that
# the Python floats don't overflow or underflow
xs = [(random()-1) * 10**randint(-140, 140) for x in range(N)]
ys = [(random()-1) * 10**randint(-140, 140) for x in range(N)]
# include some equal values
ys[int(N*0.8):] = xs[int(N*0.8):]
# Detect whether Python is compiled to use 80-bit floating-point
# instructions, in which case the double compatibility test breaks
uses_x87 = -4.1974624032366689e+117 / -8.4657370748010221e-47 \
== 4.9581771393902231e+163
def test_double_compatibility():
mp.prec = 53
for x, y in zip(xs, ys):
mpx = mpf(x)
mpy = mpf(y)
assert mpf(x) == x
assert (mpx < mpy) == (x < y)
assert (mpx > mpy) == (x > y)
assert (mpx == mpy) == (x == y)
assert (mpx != mpy) == (x != y)
assert (mpx <= mpy) == (x <= y)
assert (mpx >= mpy) == (x >= y)
assert mpx == mpx
if uses_x87:
mp.prec = 64
a = mpx + mpy
b = mpx * mpy
c = mpx / mpy
d = mpx % mpy
mp.prec = 53
assert +a == x + y
assert +b == x * y
assert +c == x / y
assert +d == x % y
else:
assert mpx + mpy == x + y
assert mpx * mpy == x * y
assert mpx / mpy == x / y
assert mpx % mpy == x % y
assert abs(mpx) == abs(x)
assert mpf(repr(x)) == x
assert ceil(mpx) == math.ceil(x)
assert floor(mpx) == math.floor(x)
def test_sqrt():
    # this fails quite often. it appears to be float
# that rounds the wrong way, not mpf
fail = 0
mp.prec = 53
for x in xs:
x = abs(x)
mp.prec = 100
mp_high = mpf(x)**0.5
mp.prec = 53
mp_low = mpf(x)**0.5
fp = x**0.5
assert abs(mp_low-mp_high) <= abs(fp-mp_high)
fail += mp_low != fp
assert fail < N/10
def test_bugs():
# particular bugs
assert mpf(4.4408920985006262E-16) < mpf(1.7763568394002505E-15)
assert mpf(-4.4408920985006262E-16) > mpf(-1.7763568394002505E-15)
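# --- Editor's hedged sketch (added; not part of the original test file) ---
# The core claim of this file in miniature: at mp.prec = 53 an mpf behaves
# like an IEEE double, so arithmetic agrees with Python floats bit-for-bit
# (barring the x87 case handled above).
def example_double_roundtrip():
    mp.prec = 53
    x = 0.1 + 0.2                    # == 0.30000000000000004 as a double
    assert mpf(x) == x               # exact: same 53-bit significand
    assert mpf(0.1) + mpf(0.2) == x  # correctly-rounded sum matches hardware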
|
saumishr/django
|
refs/heads/master
|
tests/regressiontests/urlpatterns_reverse/no_urls.py
|
155
|
#from django.conf.urls import patterns, url, include
|
nin042/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/cmake_unittest.py
|
121
|
# Copyright (C) 2012 Intel Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for cmake.py."""
import unittest2 as unittest
from cmake import CMakeChecker
class CMakeCheckerTest(unittest.TestCase):
"""Tests CMakeChecker class."""
def test_init(self):
"""Test __init__() method."""
def _mock_handle_style_error(self):
pass
checker = CMakeChecker("foo.cmake", _mock_handle_style_error)
self.assertEqual(checker._handle_style_error, _mock_handle_style_error)
def test_check(self):
"""Test check() method."""
errors = []
def _mock_handle_style_error(line_number, category, confidence,
message):
error = (line_number, category, confidence, message)
errors.append(error)
checker = CMakeChecker("foo.cmake", _mock_handle_style_error)
lines = [
'# This file is sample input for cmake_unittest.py and includes below problems:\n',
'IF ()',
'\tmessage("Error line with Tab")\n',
' message("Error line with endding spaces") \n',
' message( "Error line with space after (")\n',
' message("Error line with space before (" )\n',
' MESSAGE("Error line with upper case non-condtional command")\n',
' MESSage("Error line with upper case non-condtional command")\n',
' message("correct message line")\n',
'ENDif ()\n',
'\n',
'if()\n',
'endif ()\n',
'\n',
'macro ()\n',
'ENDMacro()\n',
'\n',
'function ()\n',
'endfunction()\n',
]
checker.check(lines)
self.maxDiff = None
self.assertEqual(errors, [
(3, 'whitespace/tab', 5, 'Line contains tab character.'),
(2, 'command/lowercase', 5, 'Use lowercase command "if"'),
(4, 'whitespace/trailing', 5, 'No trailing spaces'),
(5, 'whitespace/parentheses', 5, 'No space after "("'),
(6, 'whitespace/parentheses', 5, 'No space before ")"'),
(7, 'command/lowercase', 5, 'Use lowercase command "message"'),
(8, 'command/lowercase', 5, 'Use lowercase command "message"'),
(10, 'command/lowercase', 5, 'Use lowercase command "endif"'),
(12, 'whitespace/parentheses', 5, 'One space between command "if" and its parentheses, should be "if ("'),
(15, 'whitespace/parentheses', 5, 'No space between command "macro" and its parentheses, should be "macro("'),
(16, 'command/lowercase', 5, 'Use lowercase command "endmacro"'),
(18, 'whitespace/parentheses', 5, 'No space between command "function" and its parentheses, should be "function("'),
])
|
yannrouillard/weboob
|
refs/heads/master
|
weboob/capabilities/torrent.py
|
5
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2012 Romain Bignon, Laurent Bachelier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .base import IBaseCap, CapBaseObject, Field, StringField, FloatField, \
DateField, IntField, UserError
__all__ = ['MagnetOnly', 'Torrent', 'ICapTorrent']
class MagnetOnly(UserError):
"""
Raised when trying to get URL to torrent but only magnet is available.
"""
def __init__(self, magnet):
self.magnet = magnet
UserError.__init__(self, 'Only magnet URL is available')
class Torrent(CapBaseObject):
"""
Torrent object.
"""
name = StringField('Name of torrent')
size = FloatField('Size of torrent')
date = DateField('Date when torrent has been published')
url = StringField('Direct url to .torrent file')
magnet = StringField('URI of magnet')
seeders = IntField('Number of seeders')
leechers = IntField('Number of leechers')
files = Field('Files in torrent', list)
description = StringField('Description of torrent')
filename = StringField('Name of .torrent file')
def __init__(self, id, name):
CapBaseObject.__init__(self, id)
self.name = name
class ICapTorrent(IBaseCap):
"""
Torrent trackers.
"""
def iter_torrents(self, pattern):
"""
Search torrents and iterate on results.
:param pattern: pattern to search
:type pattern: str
:rtype: iter[:class:`Torrent`]
"""
raise NotImplementedError()
def get_torrent(self, _id):
"""
Get a torrent object from an ID.
:param _id: ID of torrent
:type _id: str
:rtype: :class:`Torrent`
"""
raise NotImplementedError()
def get_torrent_file(self, _id):
"""
Get the content of the .torrent file.
:param _id: ID of torrent
:type _id: str
:rtype: str
"""
raise NotImplementedError()
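# --- Editor's hedged sketch (added; not part of the original module) ---
# A minimal in-memory backend satisfying ICapTorrent, for illustration only;
# real backends would query a tracker. Shows the intended use of MagnetOnly.
class _ExampleTorrentBackend(ICapTorrent):
    def __init__(self):
        torrent = Torrent('1', 'example.iso')
        torrent.magnet = 'magnet:?xt=urn:btih:0000000000000000000000000000000000000000'
        self._torrents = {'1': torrent}
    def iter_torrents(self, pattern):
        return (t for t in self._torrents.values() if pattern in t.name)
    def get_torrent(self, _id):
        return self._torrents[_id]
    def get_torrent_file(self, _id):
        # No .torrent file available, only a magnet URI.
        raise MagnetOnly(self._torrents[_id].magnet)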
|
anthonynguyen/UrTSB
|
refs/heads/master
|
urtsb_src/ui/recenttab.py
|
3
|
#
# Copyright (C) 2010 Sorcerer
#
# This file is part of UrTSB.
#
# UrTSB is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# UrTSB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with UrTSB. If not, see <http://www.gnu.org/licenses/>.
#
from basetab import BaseTab
from playerlist import PlayerList
from recentserverfilter import RecentSeversFilter
from recentserverslist import RecentServersList
from serverdetailbox import ServerDetailBox
from statusbar import StatusBar
from urtsb_src.guicontroller import GuiController
import gtk
class RecentTab(BaseTab):
"""
Content of the Recent Servers tab.
- serverlist treeview,
- detailarea with playerlist, servervars, serverinfo and buttons
"""
def __init__(self):
"""
Constructor
"""
gtk.VBox.__init__(self)
self.filter = RecentSeversFilter()
self.filter.show()
self.pack_start(self.filter, False, False)
# top pane area
paned = gtk.VPaned()
paned.show()
self.pack_start(paned)
# bottom add a statusbar
self.statusbar = StatusBar(self)
self.pack_start(self.statusbar, False, False)
# serverlist window
self.serverlist = RecentServersList(self)
paned.pack1(self.serverlist, True, False)
#paned.add1(self.serverlist)
        # bottom pane area
bottompane = gtk.HPaned()
paned.pack2(bottompane, True, False)
#paned.add2(bottompane)
#left box
self.playerlist = PlayerList()
bottompane.pack1(self.playerlist, False, False)
#right box
self.detailsbox = ServerDetailBox()
vbox = gtk.VBox()
bottompane.pack2(vbox, True, False)
buttonbox = gtk.HBox()
vbox.pack_start(buttonbox, False, False)
vbox.pack_start(self.detailsbox)
refresh_button = gtk.Button('Refresh')
refreshimage = gtk.Image()
refreshimage.set_from_stock(gtk.STOCK_REFRESH, gtk.ICON_SIZE_BUTTON)
refresh_button.set_image(refreshimage)
connect_button = gtk.Button('Connect')
connectimage = gtk.Image()
connectimage.set_from_stock(gtk.STOCK_CONNECT, gtk.ICON_SIZE_BUTTON)
connect_button.set_image(connectimage)
addfav_button = gtk.Button('Add to Favorites')
favimage = gtk.Image()
favimage.set_from_stock(gtk.STOCK_ADD, gtk.ICON_SIZE_BUTTON)
addfav_button.set_image(favimage)
removerecent_button = gtk.Button('Remove Server from List')
removeimage = gtk.Image()
removeimage.set_from_stock(gtk.STOCK_DELETE, gtk.ICON_SIZE_BUTTON)
removerecent_button.set_image(removeimage)
buttonbox.pack_start(refresh_button, True, True)
buttonbox.pack_start(connect_button, True, True)
buttonbox.pack_start(addfav_button, True, True)
buttonbox.pack_start(removerecent_button, True, True)
refresh_button.connect("clicked", self.onRefreshButtonClicked)
connect_button.connect("clicked", self.connect_button_clicked)
removerecent_button.connect("clicked", self.onRemoveRecentClicked)
addfav_button.connect("clicked", self.onAddFavButtonClicked)
self.show_all()
def onRemoveRecentClicked(self, widget):
"""
Callback method for the remove button. Triggers the removal of
the recent server entry by calling the gui controller which then
removes the recent server (from list in memory and also from file)
Also removes the recent server directly from the liststore.
@param widget - the widget that emitted the clicked signal - the button
"""
#remove row from liststore and also the server from the recent list
selection = self.serverlist.serverlistview.get_selection()
result = selection.get_selected()
if result:
model, iter = result
server = self.serverlist.liststore.get_value(iter, 8)
#remove it from the favoriteslist
gui = GuiController()
gui.removeRecent(server)
model.remove(iter)
def serverlist_loading_finished(self):
"""
Callback method executed when the search has finished
"""
#reactivate the search button
self.filter.refresh_button.set_sensitive(True)
self.statusbar.lock()
self.qm = None
|
Tomcuzz/OctaHomeAutomation
|
refs/heads/master
|
OctaHomeCore/management/commands/SetupWeather.py
|
1
|
from django.core.management.base import BaseCommand, CommandError
from OctaHomeCore.models.weather import *
from django.conf import settings
class Command(BaseCommand):
help = "Set Up Database Weather Locations"
def handle(self, *args, **options):
if settings.MET_OFFICE_API_KEY != "":
print "Updating Weather Locations (This May Take A While Depending On Your Internet Connection)"
WeatherLocation.objects.updateLocations()
print "Weather Location Update Complete!"
else:
print "ERROR: No API Key Found, Please Get A API Key From The MET Office"
|
dennisguse/pjsip
|
refs/heads/master
|
tests/pjsua/scripts-sendto/157_err_sdp_bad_addr_type.py
|
42
|
# $Id$
import inc_sip as sip
import inc_sdp as sdp
sdp = \
"""
v=0
o=- 0 0 IN IP4 127.0.0.1
s=pjmedia
c=IN IP7 127.0.0.1
t=0 0
m=audio 4000 RTP/AVP 0 101
a=rtpmap:0 PCMU/8000
a=sendrecv
a=rtpmap:101 telephone-event/8000
a=fmtp:101 0-15
"""
pjsua_args = "--null-audio --auto-answer 200"
extra_headers = ""
include = [ "Warning: " ] # better have Warning header
exclude = []
sendto_cfg = sip.SendtoCfg("Bad SDP address type", pjsua_args, sdp, 400,
extra_headers=extra_headers,
resp_inc=include, resp_exc=exclude)
|
wunderlins/learning
|
refs/heads/master
|
python/zodb/lib/osx/ZODB/tests/speed.py
|
2
|
from __future__ import print_function
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
usage="""Test speed of a ZODB storage
Options:
-d file The data file to use as input.
The default is this script.
    -n n The number of repetitions
-s module A module that defines a 'Storage'
attribute, which is an open storage.
         If not specified, a FileStorage will be
used.
-z Test compressing data
-D Run in debug mode
-L Test loads as well as stores by minimizing
         the cache after each run
-M Output means only
"""
import sys, os, getopt, string, time
sys.path.insert(0, os.getcwd())
import ZODB, ZODB.FileStorage
import persistent
import transaction
class P(persistent.Persistent): pass
def main(args):
opts, args = getopt.getopt(args, 'zd:n:Ds:LM')
z=s=None
data=sys.argv[0]
nrep=5
minimize=0
detailed=1
for o, v in opts:
if o=='-n': nrep=string.atoi(v)
elif o=='-d': data=v
elif o=='-s': s=v
elif o=='-z':
global zlib
import zlib
z=compress
elif o=='-L':
minimize=1
elif o=='-M':
detailed=0
elif o=='-D':
global debug
os.environ['STUPID_LOG_FILE']=''
os.environ['STUPID_LOG_SEVERITY']='-999'
if s:
s=__import__(s, globals(), globals(), ('__doc__',))
s=s.Storage
else:
s=ZODB.FileStorage.FileStorage('zeo_speed.fs', create=1)
with open(data) as fp:
data = fp.read()
db=ZODB.DB(s,
# disable cache deactivation
cache_size=4000,
cache_deactivate_after=6000,)
results={1:0, 10:0, 100:0, 1000:0}
for j in range(nrep):
for r in 1, 10, 100, 1000:
t=time.time()
jar=db.open()
transaction.begin()
rt=jar.root()
key='s%s' % r
if key in rt: p=rt[key]
else: rt[key]=p=P()
for i in range(r):
if z is not None: d=z(data)
else: d=data
v=getattr(p, str(i), P())
v.d=d
setattr(p,str(i),v)
transaction.commit()
jar.close()
t=time.time()-t
if detailed:
sys.stderr.write("%s\t%s\t%.4f\n" % (j, r, t))
sys.stdout.flush()
results[r]=results[r]+t
rt=d=p=v=None # release all references
if minimize:
time.sleep(3)
jar.cacheMinimize(3)
if detailed: print('-'*24)
for r in 1, 10, 100, 1000:
t=results[r]/nrep
sys.stderr.write("mean:\t%s\t%.4f\t%.4f (s/o)\n" % (r, t, t/r))
db.close()
def compress(s):
c=zlib.compressobj()
o=c.compress(s)
return o+c.flush()
if __name__=='__main__': main(sys.argv[1:])
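# --- Editor's hedged usage note (added; not part of the original script) ---
# Example invocations (assuming a writable CWD for zeo_speed.fs):
#   python speed.py -n 3 -M      # three repetitions, print means only
#   python speed.py -z -L        # compress data and exercise loads too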
|
jewettaij/moltemplate
|
refs/heads/master
|
examples/coarse_grained/DNA_models/dsDNA_only/2strands/3bp_2particles/simple_dna_example/measure_persistence_length/merge_lines_periodic.py
|
4
|
#!/usr/bin/env python
import sys, math
g_filename = __file__.split('/')[-1]
g_module_name = g_filename
g_program_name = g_filename
if g_filename.rfind('.py') != -1:
g_module_name = g_filename[:g_filename.rfind('.py')]
g_date_str = '2020-1-19'
g_version_str = '0.3.0'
usage_example = g_program_name + """
merge_lines_periodic.py i1 i2 i3... \
[-p natoms_per_monomer] \
[-s nskip] [-d delim_atom] [-D delim_monomer] \
< crds_input.raw \
> multiple_atom_crds_per_line.dat
Explanation: This script splits a text file into equally sized "blocks" (aka "monomers")
             and pastes the text from different lines in each block onto the
             same line (with optional delimiters).
The i1 i2 i3,... indices select the lines in each block (of atom
coordinates in each monomer) that you want to merge together.
Indexing begins at 0, not 1. (The first line in a block has i=0)
-Negative numbers correspond to atoms in the previous block(monomer).
-Numbers larger than natoms_per_monomer lie in the next block(monomer).
If any of these indices lie out of range, then the entire list
of lines in this block is ignored.
-The -p argument indicates the number of lines in each block (aka "monomer")
If the -p argument is skipped, then it is assumed to be infinity. (In other
words, it is equal to the number of lines in the polymer conformation.)
-The -s nskip argument allows you to skip over lines at the beginning
of the file. (NOTE: Comments and lines beginning with comments
are ignored already, so don't include them in the nskip argument.)
-The -d and -D delimiters allow you to change the string which
 separates text belonging to different atoms (lines) and different
 monomers (blocks). By default, they are " " and "\\n", respectively.
-Blank lines (if present) in the input file are interpreted as delimiters
 separating different "polymer conformations". When encountered, each
 "polymer conformation" is processed separately, with the output for
 different polymer conformations delimited by blank lines.
"""
class InputError(Exception):
""" A generic exception object containing a string for error reporting.
(Raising this exception implies that the caller has provided
a faulty input file or argument.)
"""
def __init__(self, err_msg):
self.err_msg = err_msg
def __str__(self):
return self.err_msg
def __repr__(self):
return str(self)
def EscCharStrToChar(s_in, escape='\\'):
"""
EscCharStrToChar() replaces any escape sequences
in a string with their 1-character equivalents.
"""
assert(len(escape) > 0)
out_lstr = []
escaped_state = False
for c in s_in:
if escaped_state:
if (c == 'n'):
out_lstr.append('\n')
elif (c == 't'):
out_lstr.append('\t')
elif (c == 'r'):
out_lstr.append('\r')
elif (c == 'f'):
out_lstr.append('\f')
elif (c == '\''):
out_lstr.append('\'')
elif (c == '\"'):
out_lstr.append('\"')
elif c in escape:
out_lstr.append(c)
else:
out_lstr.append(escape+c) # <- keep both characters
escaped_state = False
else:
if c in escape:
escaped_state = True
else:
out_lstr.append(c)
return ''.join(out_lstr)
def SafelyEncodeString(in_str,
quotes='\'\"',
delimiters=' \t\r\f\n',
escape='\\',
comment_char='#'):
"""
SafelyEncodeString(in_str) scans through the input string (in_str),
and returns a new string in which problematic characters
(like newlines, tabs, quotes, etc), are replaced by their two-character
backslashed equivalents (like '\n', '\t', '\'', '\"', etc).
The escape character is the backslash by default, but it too can be
overridden to create custom escape sequences
(but this does not effect the encoding for characters like '\n', '\t').
"""
assert(len(escape) > 0)
out_lstr = []
use_outer_quotes = False
for c in in_str:
if (c == '\n'):
c = '\\n'
elif (c == '\t'):
c = '\\t'
elif (c == '\r'):
c = '\\r'
elif (c == '\f'):
c = '\\f'
elif c in quotes:
c = escape[0]+c
elif c in escape:
c = c+c
elif c in delimiters:
use_outer_quotes = True
# hmm... that's all that comes to mind. Did I leave anything out?
out_lstr.append(c)
if use_outer_quotes:
out_lstr = ['\"'] + out_lstr + ['\"']
return ''.join(out_lstr)
def ProcessSnapshot(lines,
out_file,
offsets,
period,
nskip,
delimeter_atom,
delimeter_monomer):
offsets_min = min(offsets)
offsets_max = max(offsets)
if period == 0:
num_monomers = 1
else:
num_monomers = math.floor((len(lines)-nskip)/period)
for I in range(0, num_monomers):
# If any of the entries will be missing, then ignore the whole list
# of atoms (lines) for this monomer (block).
if (I*period + offsets_min < nskip):
continue
if (I*period + offsets_max >= len(lines)):
continue
for J in range(0, len(offsets)):
j = offsets[J]
i = (I*period + nskip) + j
if (nskip <= i) and (i < len(lines)):
out_file.write(lines[i])
if J+1 < len(offsets):
out_file.write(delimeter_atom)
else:
out_file.write(delimeter_monomer)
g_period = 0
g_nskip = 0
g_delimeter_atom = ' '
g_delimeter_monomer = '\n'
g_delimeter_snapshot = '\n'
g_offsets = []
####### Main Code Below: #######
sys.stderr.write(g_program_name+' v'+g_version_str+' '+g_date_str+' ')
if sys.version < '3':
sys.stderr.write(' (python version < 3)\n')
else:
sys.stderr.write('\n')
try:
argv = [arg for arg in sys.argv]
# Loop over the remaining arguments not processed yet.
# These arguments are specific to the lttree.py program
# and are not understood by ttree.py:
i = 1
while i < len(argv):
#sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
        if ((argv[i].lower() == '-?') or
            (argv[i].lower() == '--?') or
            (argv[i].lower() == '-help') or
            (argv[i].lower() == '--help')):
            # A help flag always prints the usage message and exits.
            sys.stdout.write("\n Usage:\n\n"+usage_example+'\n')
            sys.exit(0)
elif argv[i].lower() == '-p':
if i+1 >= len(argv):
raise InputError('Error: '+argv[i]+' flag should be followed by a number.\n')
g_period = int(argv[i+1])
sys.stderr.write(' period = '+str(g_period)+'\n')
del(argv[i:i+2])
elif argv[i].lower() == '-s':
if i+1 >= len(argv):
raise InputError('Error: '+argv[i]+' flag should be followed by a number.\n')
            g_nskip = int(argv[i+1])
sys.stderr.write(' skip first '+str(g_nskip)+' non-comment lines\n')
del(argv[i:i+2])
        elif argv[i] == '-d':
if i+1 >= len(argv):
raise InputError('Error: '+argv[i]+' flag should be followed by a string.\n')
g_delimeter_atom = EscCharStrToChar(argv[i+1])
sys.stderr.write(' delimeter_atom = \"'+SafelyEncodeString(g_delimeter_atom)+'\"\n')
del(argv[i:i+2])
        elif argv[i] == '-D':
            if i+1 >= len(argv):
                raise InputError('Error: '+argv[i]+' flag should be followed by a string.\n')
            g_delimeter_monomer = EscCharStrToChar(argv[i+1])
sys.stderr.write(' delimeter_monomer = \"'+SafelyEncodeString(g_delimeter_monomer)+'\"\n')
del(argv[i:i+2])
elif argv[i][0] == '-':
# Note: It could be a negative integer, so check for
# that before printing an error message
if not argv[i][1:].isdigit():
raise InputError('Error('+g_program_name+'):\n'
                                 'Unrecognized command line argument \"'+argv[i]+'\"\n')
i += 1
else:
i += 1
if len(argv) == 1:
raise InputError("Error: Expected a list of integers.\n\n"+
"Usage: \n\n"+
" "+usage_example+"\n")
g_offsets = [int(arg) for arg in argv[1:]]
# --- Now (finally) read the lines in the standard input ----
n_snapshots = 0
lines = []
in_file = sys.stdin
for line_orig in in_file:
ic = line_orig.find('#')
if ic != -1:
line = line_orig[:ic]
else:
line = line_orig.rstrip('\n')
# Blank lines in a trajectory file usually signal the end of the
# coordinates for that snapshot in the trajectory, and the beginning
# of the next snapshot.
if len(line_orig.strip()) == 0:
if n_snapshots > 0:
sys.stdout.write(g_delimeter_snapshot)
if len(lines) > 0:
ProcessSnapshot(lines,
sys.stdout,
g_offsets,
g_period,
g_nskip,
g_delimeter_atom,
g_delimeter_monomer)
n_snapshots += 1
# Clear the lines buffer to begin reading the new snapshot
del lines[:]
else:
if len(line.strip()) > 0:
lines.append(line)
if len(lines) > 0:
if n_snapshots > 0:
sys.stdout.write(g_delimeter_snapshot)
# After reading all of the lines in the file, deal with any lines
# left over since reading the last frame
ProcessSnapshot(lines,
sys.stdout,
g_offsets,
g_period,
g_nskip,
g_delimeter_atom,
g_delimeter_monomer)
except (ValueError, InputError) as err:
sys.stderr.write('\n\n'+str(err)+'\n')
sys.exit(-1)
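# --- Editor's hedged usage note (added; not part of the original script) ---
# Example invocation (file names hypothetical): merge the two atoms on lines
# 0 and 1 of every 2-line monomer onto a single output line:
#   merge_lines_periodic.py 0 1 -p 2 < crds_input.raw > merged.dat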
|
zhangda89/python-oauth2
|
refs/heads/master
|
oauth2/__init__.py
|
458
|
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import base64
import urllib
import time
import random
import urlparse
import hmac
import binascii
import httplib2
try:
from urlparse import parse_qs
parse_qs # placate pyflakes
except ImportError:
# fall back for Python 2.5
from cgi import parse_qs
try:
from hashlib import sha1
sha = sha1
except ImportError:
# hashlib was added in Python 2.5
import sha
import _version
__version__ = _version.__version__
OAUTH_VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occurred.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class MissingSignature(Error):
pass
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def build_xoauth_string(url, consumer, token=None):
"""Build an XOAUTH string for use in SMTP/IMPA authentication."""
request = Request.from_consumer_and_token(consumer, token,
"GET", url)
signing_method = SignatureMethod_HMAC_SHA1()
request.sign_request(signing_method, consumer, token)
params = []
for k, v in sorted(request.iteritems()):
if v is not None:
params.append('%s="%s"' % (k, escape(v)))
return "%s %s %s" % ("GET", url, ','.join(params))
def to_unicode(s):
""" Convert to unicode, raise exception with instructive error
message if s is not unicode, ascii, or utf-8. """
if not isinstance(s, unicode):
if not isinstance(s, str):
raise TypeError('You are required to pass either unicode or string here, not: %r (%s)' % (type(s), s))
try:
s = s.decode('utf-8')
except UnicodeDecodeError, le:
raise TypeError('You are required to pass either a unicode object or a utf-8 string here. You passed a Python string object which contained non-utf-8: %r. The UnicodeDecodeError that resulted from attempting to interpret it as utf-8 was: %s' % (s, le,))
return s
def to_utf8(s):
return to_unicode(s).encode('utf-8')
def to_unicode_if_string(s):
if isinstance(s, basestring):
return to_unicode(s)
else:
return s
def to_utf8_if_string(s):
if isinstance(s, basestring):
return to_utf8(s)
else:
return s
def to_unicode_optional_iterator(x):
"""
Raise TypeError if x is a str containing non-utf8 bytes or if x is
an iterable which contains such a str.
"""
if isinstance(x, basestring):
return to_unicode(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_unicode(e) for e in l ]
def to_utf8_optional_iterator(x):
"""
Raise TypeError if x is a str or if x is an iterable which
contains a str.
"""
if isinstance(x, basestring):
return to_utf8(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_utf8_if_string(e) for e in l ]
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s.encode('utf-8'), safe='~')
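# Editor's illustrative note (added): escape(u'a b/c') == 'a%20b%2Fc';
# '/' is percent-encoded because only '~' is passed as safe.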
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class Consumer(object):
"""A consumer of OAuth-protected services.
The OAuth consumer is a "third-party" service that wants to access
protected resources from an OAuth service provider on behalf of an end
user. It's kind of the OAuth client.
Usually a consumer must be registered with the service provider by the
developer of the consumer software. As part of that process, the service
provider gives the consumer a *key* and a *secret* with which the consumer
software can identify itself to the service. The consumer will include its
key in each request to identify itself, but will use its secret only when
signing requests, to prove that the request is from that particular
registered consumer.
Once registered, the consumer can then use its consumer credentials to ask
the service provider for a request token, kicking off the OAuth
authorization process.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def __str__(self):
data = {'oauth_consumer_key': self.key,
'oauth_consumer_secret': self.secret}
return urllib.urlencode(data)
class Token(object):
"""An OAuth credential used to request authorization or a protected
resource.
Tokens in OAuth comprise a *key* and a *secret*. The key is included in
requests to identify the token being used, but the secret is used only in
the signature, to prove that the requester is who the server gave the
token to.
When first negotiating the authorization, the consumer asks for a *request
token* that the live user authorizes with the service provider. The
consumer then exchanges the request token for an *access token* that can
be used to access protected resources.
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
"""Returns this token as a plain string, suitable for storage.
The resulting string includes the token's secret, so you should never
send or store this string where a third party can read it.
"""
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
@staticmethod
def from_string(s):
"""Deserializes a token from a string like one returned by
`to_string()`."""
if not len(s):
raise ValueError("Invalid parameter string.")
params = parse_qs(s, keep_blank_values=False)
if not len(params):
raise ValueError("Invalid parameter string.")
try:
key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
token = Token(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
def __str__(self):
return self.to_string()
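# --- Editor's hedged sketch (added; not part of the original module) ---
# Typical signing flow with the classes in this module; key/secret and URL
# are hypothetical. SignatureMethod_HMAC_SHA1 and Request are defined later
# in this file and resolve at call time.
def _example_signed_header():
    consumer = Consumer(key='my-key', secret='my-secret')
    req = Request.from_consumer_and_token(consumer, http_method='GET',
                                          http_url='http://example.com/api')
    req.sign_request(SignatureMethod_HMAC_SHA1(), consumer, None)
    return req.to_header()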
def setter(attr):
name = attr.__name__
def getter(self):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
def deleter(self):
del self.__dict__[name]
return property(getter, attr, deleter)
class Request(dict):
"""The parameters and information for an HTTP request, suitable for
authorizing with OAuth credentials.
When a consumer wants to access a service's protected resources, it does
so using a signed HTTP request identifying itself (the consumer) with its
key, and providing an access token authorized by the end user to access
those resources.
"""
version = OAUTH_VERSION
def __init__(self, method=HTTP_METHOD, url=None, parameters=None,
body='', is_form_encoded=False):
if url is not None:
self.url = to_unicode(url)
self.method = method
if parameters is not None:
for k, v in parameters.iteritems():
k = to_unicode(k)
v = to_unicode_optional_iterator(v)
self[k] = v
self.body = body
self.is_form_encoded = is_form_encoded
@setter
def url(self, value):
self.__dict__['url'] = value
if value is not None:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(value)
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
if scheme not in ('http', 'https'):
raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
# Normalized URL excludes params, query, and fragment.
self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None))
else:
self.normalized_url = None
self.__dict__['url'] = None
@setter
def method(self, value):
self.__dict__['method'] = value.upper()
def _get_timestamp_nonce(self):
return self['oauth_timestamp'], self['oauth_nonce']
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
return dict([(k, v) for k, v in self.iteritems()
if not k.startswith('oauth_')])
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
oauth_params = ((k, v) for k, v in self.items()
if k.startswith('oauth_'))
stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
params_header = ', '.join(header_params)
auth_header = 'OAuth realm="%s"' % realm
if params_header:
auth_header = "%s, %s" % (auth_header, params_header)
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
d = {}
for k, v in self.iteritems():
d[k.encode('utf-8')] = to_utf8_optional_iterator(v)
# tell urlencode to deal with sequence values and map them correctly
# to resulting querystring. for example self["k"] = ["v1", "v2"] will
# result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
return urllib.urlencode(d, True).replace('+', '%20')
def to_url(self):
"""Serialize as a URL for a GET request."""
base_url = urlparse.urlparse(self.url)
try:
query = base_url.query
except AttributeError:
# must be python <2.5
query = base_url[4]
query = parse_qs(query)
for k, v in self.items():
query.setdefault(k, []).append(v)
try:
scheme = base_url.scheme
netloc = base_url.netloc
path = base_url.path
params = base_url.params
fragment = base_url.fragment
except AttributeError:
# must be python <2.5
scheme = base_url[0]
netloc = base_url[1]
path = base_url[2]
params = base_url[3]
fragment = base_url[5]
url = (scheme, netloc, path, params,
urllib.urlencode(query, True), fragment)
return urlparse.urlunparse(url)
def get_parameter(self, parameter):
ret = self.get(parameter)
if ret is None:
raise Error('Parameter not found: %s' % parameter)
return ret
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
items = []
for key, value in self.iteritems():
if key == 'oauth_signature':
continue
# 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
# so we unpack sequence values into multiple items for sorting.
if isinstance(value, basestring):
items.append((to_utf8_if_string(key), to_utf8(value)))
else:
try:
value = list(value)
except TypeError, e:
assert 'is not iterable' in str(e)
items.append((to_utf8_if_string(key), to_utf8_if_string(value)))
else:
items.extend((to_utf8_if_string(key), to_utf8_if_string(item)) for item in value)
# Include any query string parameters from the provided URL
query = urlparse.urlparse(self.url)[4]
url_items = self._split_url_string(query).items()
url_items = [(to_utf8(k), to_utf8(v)) for k, v in url_items if k != 'oauth_signature' ]
items.extend(url_items)
items.sort()
encoded_str = urllib.urlencode(items)
        # Encode signature parameters per OAuth Core 1.0 protocol
# spec draft 7, section 3.6
# (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
# Spaces must be encoded with "%20" instead of "+"
return encoded_str.replace('+', '%20').replace('%7E', '~')
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of sign."""
if not self.is_form_encoded:
# according to
# http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
# section 4.1.1 "OAuth Consumers MUST NOT include an
# oauth_body_hash parameter on requests with form-encoded
# request bodies."
self['oauth_body_hash'] = base64.b64encode(sha(self.body).digest())
if 'oauth_consumer_key' not in self:
self['oauth_consumer_key'] = consumer.key
if token and 'oauth_token' not in self:
self['oauth_token'] = token.key
self['oauth_signature_method'] = signature_method.name
self['oauth_signature'] = signature_method.sign(self, consumer, token)
@classmethod
def make_timestamp(cls):
"""Get seconds since epoch (UTC)."""
return str(int(time.time()))
@classmethod
def make_nonce(cls):
"""Generate pseudorandom number."""
return str(random.randint(0, 100000000))
@classmethod
def from_request(cls, http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = cls._split_header(auth_header)
parameters.update(header_params)
except:
raise Error('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = cls._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = cls._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return cls(http_method, http_url, parameters)
return None
@classmethod
def from_consumer_and_token(cls, consumer, token=None,
http_method=HTTP_METHOD, http_url=None, parameters=None,
body='', is_form_encoded=False):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': consumer.key,
'oauth_timestamp': cls.make_timestamp(),
'oauth_nonce': cls.make_nonce(),
'oauth_version': cls.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.verifier:
parameters['oauth_verifier'] = token.verifier
return Request(http_method, http_url, parameters, body=body,
is_form_encoded=is_form_encoded)
@classmethod
def from_token_and_callback(cls, token, callback=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return cls(http_method, http_url, parameters)
@staticmethod
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
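# Illustrative example (not from the original module): after from_request()
# strips the leading 'OAuth ' prefix, a header value such as
#   realm="http://example.com/", oauth_consumer_key="key", oauth_nonce="42"
# parses to {'oauth_consumer_key': 'key', 'oauth_nonce': '42'} -- the realm
# parameter is ignored and the surrounding quotes are removed.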
@staticmethod
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = parse_qs(param_str.encode('utf-8'), keep_blank_values=True)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
class Client(httplib2.Http):
"""OAuthClient is a worker to attempt to execute a request."""
def __init__(self, consumer, token=None, cache=None, timeout=None,
proxy_info=None):
if consumer is not None and not isinstance(consumer, Consumer):
raise ValueError("Invalid consumer.")
if token is not None and not isinstance(token, Token):
raise ValueError("Invalid token.")
self.consumer = consumer
self.token = token
self.method = SignatureMethod_HMAC_SHA1()
httplib2.Http.__init__(self, cache=cache, timeout=timeout, proxy_info=proxy_info)
def set_signature_method(self, method):
if not isinstance(method, SignatureMethod):
raise ValueError("Invalid signature method.")
self.method = method
def request(self, uri, method="GET", body='', headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
DEFAULT_POST_CONTENT_TYPE = 'application/x-www-form-urlencoded'
if not isinstance(headers, dict):
headers = {}
if method == "POST":
headers['Content-Type'] = headers.get('Content-Type',
DEFAULT_POST_CONTENT_TYPE)
is_form_encoded = \
headers.get('Content-Type') == 'application/x-www-form-urlencoded'
if is_form_encoded and body:
parameters = parse_qs(body)
else:
parameters = None
req = Request.from_consumer_and_token(self.consumer,
token=self.token, http_method=method, http_url=uri,
parameters=parameters, body=body, is_form_encoded=is_form_encoded)
req.sign_request(self.method, self.consumer, self.token)
schema, rest = urllib.splittype(uri)
if rest.startswith('//'):
hierpart = '//'
else:
hierpart = ''
host, rest = urllib.splithost(rest)
realm = schema + ':' + hierpart + host
if is_form_encoded:
body = req.to_postdata()
elif method == "GET":
uri = req.to_url()
else:
headers.update(req.to_header(realm=realm))
return httplib2.Http.request(self, uri, method=method, body=body,
headers=headers, redirections=redirections,
connection_type=connection_type)
class Server(object):
"""A skeletal implementation of a service provider, providing protected
resources to requests from authorized consumers.
This class implements the logic to check requests for authorization. You
can use it with your web server or web framework to protect certain
resources with OAuth.
"""
timestamp_threshold = 300 # In seconds, five minutes.
version = OAUTH_VERSION
signature_methods = None
def __init__(self, signature_methods=None):
self.signature_methods = signature_methods or {}
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.name] = signature_method
return self.signature_methods
def verify_request(self, request, consumer, token):
"""Verifies an api call and checks all the parameters."""
self._check_version(request)
self._check_signature(request, consumer, token)
parameters = request.get_nonoauth_parameters()
return parameters
def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def _check_version(self, request):
"""Verify the correct version of the request for this server."""
version = self._get_version(request)
if version and version != self.version:
raise Error('OAuth version %s not supported.' % str(version))
def _get_version(self, request):
"""Return the version of the request for this server."""
try:
version = request.get_parameter('oauth_version')
except:
version = OAUTH_VERSION
return version
def _get_signature_method(self, request):
"""Figure out the signature with some defaults."""
try:
signature_method = request.get_parameter('oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# Get the signature method object.
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
raise Error('Signature method %s not supported, try one of the '
'following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_verifier(self, request):
return request.get_parameter('oauth_verifier')
def _check_signature(self, request, consumer, token):
timestamp, nonce = request._get_timestamp_nonce()
self._check_timestamp(timestamp)
signature_method = self._get_signature_method(request)
try:
signature = request.get_parameter('oauth_signature')
except:
raise MissingSignature('Missing oauth_signature.')
# Validate the signature.
valid = signature_method.check(request, consumer, token, signature)
if not valid:
key, base = signature_method.signing_base(request, consumer, token)
raise Error('Invalid signature. Expected signature base '
'string: %s' % base)
def _check_timestamp(self, timestamp):
"""Verify that timestamp is recentish."""
timestamp = int(timestamp)
now = int(time.time())
lapsed = now - timestamp
if lapsed > self.timestamp_threshold:
raise Error('Expired timestamp: given %d and now %s has a '
'greater difference than threshold %d' % (timestamp, now,
self.timestamp_threshold))
class SignatureMethod(object):
"""A way of signing requests.
The OAuth protocol lets consumers and service providers pick a way to sign
requests. This interface shows the methods expected by the other `oauth`
modules for signing requests. Subclass it and implement its methods to
provide a new way to sign requests.
"""
def signing_base(self, request, consumer, token):
"""Calculates the string that needs to be signed.
This method returns a 2-tuple containing the starting key for the
signing and the message to be signed. The latter may be used in error
messages to help clients debug their software.
"""
raise NotImplementedError
def sign(self, request, consumer, token):
"""Returns the signature for the given request, based on the consumer
and token also provided.
You should use your implementation of `signing_base()` to build the
message to sign. Otherwise it may be less useful for debugging.
"""
raise NotImplementedError
def check(self, request, consumer, token, signature):
"""Returns whether the given signature is the correct signature for
the given consumer and token signing the given request."""
built = self.sign(request, consumer, token)
return built == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
name = 'HMAC-SHA1'
def signing_base(self, request, consumer, token):
if not hasattr(request, 'normalized_url') or request.normalized_url is None:
raise ValueError("Base URL for request is not set.")
sig = (
escape(request.method),
escape(request.normalized_url),
escape(request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def sign(self, request, consumer, token):
"""Builds the base signature string."""
key, raw = self.signing_base(request, consumer, token)
hashed = hmac.new(key, raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
name = 'PLAINTEXT'
def signing_base(self, request, consumer, token):
"""Concatenates the consumer key and secret with the token's
secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def sign(self, request, consumer, token):
key, raw = self.signing_base(request, consumer, token)
return raw
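# A minimal signing sketch (illustrative, not part of the original module),
# assuming the Consumer and Token classes defined earlier in this file take
# (key, secret) constructor arguments:
#
#   consumer = Consumer('example-key', 'example-secret')
#   token = Token('token-key', 'token-secret')
#   req = Request.from_consumer_and_token(
#       consumer, token=token, http_method='GET',
#       http_url='http://example.com/resource?a=1')
#   req.sign_request(SignatureMethod_HMAC_SHA1(), consumer, token)
#   print req.to_url()  # signed URL carrying the oauth_* query parameters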
|
Automatiqa/data-entry-clerk
|
refs/heads/master
|
dec/config/__init__.py
|
1
|
"""
Configuration package.
The Data Entry Clerk's behavior can be customized through the configuration.
For each environment, a separate configuration file can be created to override
settings just for that environment. This makes it easy to test changes before
deploying them to production.
"""
|
alexandermerritt/tools
|
refs/heads/master
|
src/memory/likwid-tlb.py
|
1
|
#! /usr/bin/env python
import os
import sys
import subprocess as sp
if len(sys.argv) < 2:
print("Error: specify command to monitor")
sys.exit(1)
# westmere
#counters = 'DTLB_MISSES_WALK_CYCLES:PMC0' + \
# ',ITLB_MISSES_WALK_CYCLES:PMC1' + \
# ',UNCORE_CLOCKTICKS:UPMCFIX'
# ivb-e
counters = 'DTLB_LOAD_MISSES_WALK_DURATION:PMC0,' + \
'DTLB_STORE_MISSES_WALK_DURATION:PMC1,' + \
'ITLB_MISSES_WALK_DURATION:PMC2'
cmd = [ 'likwid-perfctr', '-C', '0', '-O']
cmd.extend(['-g', counters])
cmd.extend(sys.argv[1:])
text = sp.check_output(cmd, universal_newlines=True)
lines = text.split('\n')
n = len(lines)
gups = 'n/a'  # default in case the likwid output contains no 'Gups:' line
i = 0
while i < n:
if 'Gups:' in lines[i]:
gups = lines[i].split()[-1]
if 'Event,' not in lines[i]:
i += 1
continue
break
if i >= n or 'Event,' not in lines[i]:
print('bad output')
print(text)
sys.exit(1)
colnames = lines[i].split(',')
i += 1
cycles = 0.0
ticks = 0.0
while i < n:
if 'WALK_DURATION' in lines[i]:
fields = lines[i].split(',')
for f in fields[2:]:
if f != '':
cycles += float(f)
elif 'CPU_CLK_UNHALTED_REF' in lines[i]:
fields = lines[i].split(',')
for f in fields[2:]:
if f != '':
ticks += float(f)
i += 1
print('cycles ticks ratio gups')
print(str(cycles) + ' ' + str(ticks)
+ ' ' + str(cycles/ticks)
+ ' ' + str(gups))
|
Perferom/android_external_chromium_org
|
refs/heads/android-4.4
|
third_party/protobuf/python/google/protobuf/service.py
|
590
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""DEPRECATED: Declares the RPC service interfaces.
This module declares the abstract interfaces underlying proto2 RPC
services. These are intended to be independent of any particular RPC
implementation, so that proto2 services can be used on top of a variety
of implementations. Starting with version 2.3.0, RPC implementations should
not try to build on these, but should instead provide code generator plugins
which generate code specific to the particular RPC implementation. This way
the generated code can be more appropriate for the implementation in use
and can avoid unnecessary layers of indirection.
"""
__author__ = 'petar@google.com (Petar Petrov)'
class RpcException(Exception):
"""Exception raised on failed blocking RPC method call."""
pass
class Service(object):
"""Abstract base interface for protocol-buffer-based RPC services.
Services themselves are abstract classes (implemented either by servers or as
stubs), but they subclass this base interface. The methods of this
interface can be used to call the methods of the service without knowing
its exact type at compile time (analogous to the Message interface).
"""
def GetDescriptor():
"""Retrieves this service's descriptor."""
raise NotImplementedError
def CallMethod(self, method_descriptor, rpc_controller,
request, done):
"""Calls a method of the service specified by method_descriptor.
If "done" is None then the call is blocking and the response
message will be returned directly. Otherwise the call is asynchronous
and "done" will later be called with the response value.
In the blocking case, RpcException will be raised on error.
Preconditions:
* method_descriptor.service == GetDescriptor
* request is of the exact same class as returned by
GetRequestClass(method).
* After the call has started, the request must not be modified.
* "rpc_controller" is of the correct type for the RPC implementation being
used by this Service. For stubs, the "correct type" depends on the
RpcChannel which the stub is using.
Postconditions:
* "done" will be called when the method is complete. This may be
before CallMethod() returns or it may be at some point in the future.
* If the RPC failed, the response value passed to "done" will be None.
Further details about the failure can be found by querying the
RpcController.
"""
raise NotImplementedError
def GetRequestClass(self, method_descriptor):
"""Returns the class of the request message for the specified method.
CallMethod() requires that the request is of a particular subclass of
Message. GetRequestClass() gets the default instance of this required
type.
Example:
method = service.GetDescriptor().FindMethodByName("Foo")
request = stub.GetRequestClass(method)()
request.ParseFromString(input)
service.CallMethod(method, request, callback)
"""
raise NotImplementedError
def GetResponseClass(self, method_descriptor):
"""Returns the class of the response message for the specified method.
This method isn't really needed, as the RpcChannel's CallMethod constructs
the response protocol message. It's provided anyway in case it is useful
for the caller to know the response type in advance.
"""
raise NotImplementedError
class RpcController(object):
"""An RpcController mediates a single method call.
The primary purpose of the controller is to provide a way to manipulate
settings specific to the RPC implementation and to find out about RPC-level
errors. The methods provided by the RpcController interface are intended
to be a "least common denominator" set of features which we expect all
implementations to support. Specific implementations may provide more
advanced features (e.g. deadline propagation).
"""
# Client-side methods below
def Reset(self):
"""Resets the RpcController to its initial state.
After the RpcController has been reset, it may be reused in
a new call. Must not be called while an RPC is in progress.
"""
raise NotImplementedError
def Failed(self):
"""Returns true if the call failed.
After a call has finished, returns true if the call failed. The possible
reasons for failure depend on the RPC implementation. Failed() must not
be called before a call has finished. If Failed() returns true, the
contents of the response message are undefined.
"""
raise NotImplementedError
def ErrorText(self):
"""If Failed is true, returns a human-readable description of the error."""
raise NotImplementedError
def StartCancel(self):
"""Initiate cancellation.
Advises the RPC system that the caller desires that the RPC call be
canceled. The RPC system may cancel it immediately, may wait awhile and
then cancel it, or may not even cancel the call at all. If the call is
canceled, the "done" callback will still be called and the RpcController
will indicate that the call failed at that time.
"""
raise NotImplementedError
# Server-side methods below
def SetFailed(self, reason):
"""Sets a failure reason.
Causes Failed() to return true on the client side. "reason" will be
incorporated into the message returned by ErrorText(). If you find
you need to return machine-readable information about failures, you
should incorporate it into your response protocol buffer and should
NOT call SetFailed().
"""
raise NotImplementedError
def IsCanceled(self):
"""Checks if the client cancelled the RPC.
If true, indicates that the client canceled the RPC, so the server may
as well give up on replying to it. The server should still call the
final "done" callback.
"""
raise NotImplementedError
def NotifyOnCancel(self, callback):
"""Sets a callback to invoke on cancel.
Asks that the given callback be called when the RPC is canceled. The
callback will always be called exactly once. If the RPC completes without
being canceled, the callback will be called after completion. If the RPC
has already been canceled when NotifyOnCancel() is called, the callback
will be called immediately.
NotifyOnCancel() must be called no more than once per request.
"""
raise NotImplementedError
class RpcChannel(object):
"""Abstract interface for an RPC channel.
An RpcChannel represents a communication line to a service which can be used
to call that service's methods. The service may be running on another
machine. Normally, you should not use an RpcChannel directly, but instead
construct a stub Service wrapping it. Example:
RpcChannel channel = rpcImpl.Channel("remotehost.example.com:1234")
RpcController controller = rpcImpl.Controller()
MyService service = MyService_Stub(channel)
service.MyMethod(controller, request, callback)
"""
def CallMethod(self, method_descriptor, rpc_controller,
request, response_class, done):
"""Calls the method identified by the descriptor.
Call the given method of the remote service. The signature of this
procedure looks the same as Service.CallMethod(), but the requirements
are less strict in one important way: the request object doesn't have to
be of any specific class as long as its descriptor is method.input_type.
"""
raise NotImplementedError
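# Hedged illustration (not part of the protobuf distribution) of the blocking
# vs. asynchronous calling conventions described in CallMethod() above; the
# service, controller, and handler names are hypothetical:
#
#   method = my_service.GetDescriptor().FindMethodByName('Search')
#   # Blocking: done=None, the response message is returned directly and
#   # RpcException is raised on error.
#   response = my_service.CallMethod(method, controller, request, None)
#   # Asynchronous: "done" is invoked later with the response, or with None
#   # if the RPC failed (query the RpcController for details).
#   my_service.CallMethod(method, controller, request, handle_response)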
|
uzh/msregistry
|
refs/heads/master
|
app/api_1_0/admin.py
|
1
|
# Copyright (C) 2018 University of Zurich. All rights reserved.
#
"""
Implementation of the 'admin' API for the MS-Registry backend.
The 'admin' API allows limited modification of entities in the
database.
"""
#
# This file is part of MSRegistry Backend.
#
# MSRegistry Backend is free software: you can redistribute it and/or
# modify it under the terms of the version 3 of the GNU Affero General
# Public License as published by the Free Software Foundation, or any
# other later version.
#
# MSRegistry Backend is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the version
# 3 of the GNU Affero General Public License for more details.
#
# You should have received a copy of the version 3 of the GNU Affero
# General Public License along with MSRegistry Backend. If not, see
# <http://www.gnu.org/licenses/>.
__author__ = "Sergio Maffioletti <sergio.maffioletti@uzh.ch>"
__copyright__ = "Copyright (c) 2018 University of Zurich"
from datetime import datetime
from functools import wraps
import json
from syslog import *
from flask import abort, current_app, jsonify, request
from flask_httpauth import HTTPBasicAuth
from mongoalchemy.exceptions import BadValueException
from app.exceptions import (
InvalidAuthentication,
SurveyNotFound,
MethodNotAllowed,
UserNotFound
)
from app.models.survey import Survey
from app import utils
from . import api
httpbasicauth = HTTPBasicAuth() # pylint: disable=invalid-name
# Simple username/password authentication.
@httpbasicauth.get_password
def get_cleartext_password(username): # pylint: disable=missing-docstring
if username == current_app.config['AUTH_USER']:
return current_app.config['ACCESS_KEY']
# 401 ("Unauthorized") seems the correct status code here: a
# different user *may* be authenticated here, so retrying makes
# sense
raise InvalidAuthentication()
# pylint: disable=missing-docstring,invalid-name
def only_authorized_ip_addresses(fn):
@wraps(fn)
def with_auth_ip(*args, **kwargs):
if request:
if request.remote_addr not in current_app.config['AUTH_IP']:
current_app.logger.warning(
"IP address {0} not allowed to access admin API.",
request.remote_addr)
# 403 ("Forbidden") is the correct status code here
# according to the HTTP spec ("Authorization will not
# help and the request SHOULD NOT be repeated.")
# Also, do not bother to return a properly formatted
# JSON message for REST API consumption -- if somebody
# is attempting unauthorized access, the last thing we
# want to do is give 'em hints at how to properly do
# their requests...
abort(403)
else:
current_app.logger.warning(
"No request context, cannot check IP authorization!")
return fn(*args, **kwargs)
return with_auth_ip
class _AuditLog(object):
_facility = {
'KERN': LOG_KERN,
'USER': LOG_USER,
'MAIL': LOG_MAIL,
'DAEMON': LOG_DAEMON,
'AUTH': LOG_AUTH,
'LPR': LOG_LPR,
'NEWS': LOG_NEWS,
'UUCP': LOG_UUCP,
'CRON': LOG_CRON,
'SYSLOG': LOG_SYSLOG,
'AUTHPRIV': LOG_AUTH,
'LOCAL0': LOG_LOCAL0,
'LOCAL1': LOG_LOCAL1,
'LOCAL2': LOG_LOCAL2,
'LOCAL3': LOG_LOCAL3,
'LOCAL4': LOG_LOCAL4,
'LOCAL5': LOG_LOCAL5,
'LOCAL6': LOG_LOCAL6,
'LOCAL7': LOG_LOCAL7,
}
def __init__(self):
self._syslog = None
def __call__(self, msg):
if self._syslog is None:
openlog(
ident=current_app.config.get(
'MONGOALCHEMY_DATABASE', 'msregistry-api'),
logoption=(LOG_PID|LOG_CONS|LOG_NDELAY),
facility=self._facility[
current_app.config.get(
'AUDIT_LOG_FACILITY', 'authpriv').upper()]
)
syslog(msg)
_audit_log = _AuditLog()
def add_to_audit_log(action, **data):
data['action'] = action
data['timestamp'] = datetime.now().isoformat()
data['from'] = request.remote_addr
_audit_log(json.dumps(data))
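# For illustration only (field values below are made up): a call such as
#   add_to_audit_log('delete_survey_by_id', survey_id='abc123')
# emits a syslog record whose payload is JSON of roughly this shape:
#   {"action": "delete_survey_by_id", "survey_id": "abc123",
#    "timestamp": "2018-01-01T12:00:00", "from": "192.0.2.1"}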
# Admin endpoints for Survey management
## GET operations
@api.route('/admin/survey', methods=['GET'])
@only_authorized_ip_addresses
@httpbasicauth.login_required
def get_all_surveys():
"""
Get all surveys for all users.
"""
survey = Survey()
try:
return jsonify(surveys=[ob.serialize() for ob in survey.getAll()])
except ValueError as error:
raise MethodNotAllowed(error.message)
except BadValueException as error:
raise MethodNotAllowed(error.message)
finally:
add_to_audit_log('get_all_surveys')
@api.route('/admin/survey/user/<string:_uid>', methods=['GET'])
@only_authorized_ip_addresses
@httpbasicauth.login_required
def get_all_surveys_by_user(_uid):
"""
Get all surveys for a given user
"""
survey = Survey()
try:
return jsonify(
surveys=[
ob.serialize()
for ob in survey.getAllByUniqueID(
_uid,
utils.Time.Iso8601ToDatetime(request.args.get('from', None)),
utils.Time.Iso8601ToDatetime(request.args.get('until', None)),
(request.args.get('tags').split(',')
if request.args.get('tags', None) is not None
else None),
utils.json.Json._getJSONBool(request.args.get('ongoing', None)))])
except ValueError as error:
raise MethodNotAllowed(error.message)
except BadValueException as error:
raise MethodNotAllowed(error.message)
except UserNotFound as error:
raise UserNotFound(_uid)
finally:
add_to_audit_log('get_all_surveys_by_user', user_id=_uid)
## POST operations
@api.route('/admin/survey/<string:_id>', methods=['POST'])
@only_authorized_ip_addresses
@httpbasicauth.login_required
def update_user_survey_by_id(_id):
"""
Update/replace existing survey by _id
"""
survey = Survey()
content = request.get_json(silent=True, force=True)
old = 'NO OLD SURVEY FOUND!'
try:
old = survey.getByID(_id).serialize()
return jsonify(
success=bool(
survey.updateByUniqueID(
_id,
content['survey'],
content['tags'],
content['ongoing'])))
except ValueError as error:
raise MethodNotAllowed(error.message)
except BadValueException as error:
raise MethodNotAllowed(error.message)
except SurveyNotFound as error:
raise SurveyNotFound(_id)
finally:
add_to_audit_log(
'update_user_survey_by_id',
survey_id=_id,
new=content,
old=old,
)
## DELETE operations
@api.route('/admin/survey/<string:_id>', methods=['DELETE'])
@only_authorized_ip_addresses
@httpbasicauth.login_required
def delete_survey_by_id(_id):
"""
Delete existing survey by _id
"""
survey = Survey()
old = 'NO OLD SURVEY FOUND!'
try:
old = survey.getByID(_id).serialize()
return jsonify(success=bool(survey.deleteByUniqueID(_id)))
except ValueError as error:
raise MethodNotAllowed(error.message)
except BadValueException as error:
raise MethodNotAllowed(error.message)
except SurveyNotFound as error:
raise SurveyNotFound(_id)
finally:
add_to_audit_log(
'delete_survey_by_id',
survey_id=_id,
old=old,
)
|
jorsea/odoomrp-wip
|
refs/heads/8.0
|
delivery_multi_expense/models/delivery_carrier.py
|
25
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields
class DeliveryCarrier(models.Model):
_inherit = 'delivery.carrier'
transport = fields.Boolean(
'Is transporter', default=True,
help="Determines whether the carrier method is a transporter.")
|
minhphung171093/GreenERP_V9
|
refs/heads/master
|
openerp/addons/event/report/__init__.py
|
69
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import report_event_registration
|
jonashaag/jedi
|
refs/heads/master
|
jedi/debug.py
|
34
|
from jedi._compatibility import encoding, is_py3, u
import inspect
import os
import time
try:
if os.name == 'nt':
# does not work on Windows, as pyreadline and colorama interfere
raise ImportError
else:
# Use colorama for nicer console output.
from colorama import Fore, init
from colorama import initialise
# pytest resets the stream at the end - causes troubles. Since after
# every output the stream is reset automatically we don't need this.
initialise.atexit_done = True
init()
except ImportError:
class Fore(object):
RED = ''
GREEN = ''
YELLOW = ''
RESET = ''
NOTICE = object()
WARNING = object()
SPEED = object()
enable_speed = False
enable_warning = False
enable_notice = False
# callback, interface: level, str
debug_function = None
ignored_modules = ['jedi.evaluate.builtin', 'jedi.parser']
_debug_indent = -1
_start_time = time.time()
def reset_time():
global _start_time, _debug_indent
_start_time = time.time()
_debug_indent = -1
def increase_indent(func):
"""Decorator for makin """
def wrapper(*args, **kwargs):
global _debug_indent
_debug_indent += 1
try:
result = func(*args, **kwargs)
finally:
_debug_indent -= 1
return result
return wrapper
def dbg(message, *args):
""" Looks at the stack, to see if a debug message should be printed. """
if debug_function and enable_notice:
frm = inspect.stack()[1]
mod = inspect.getmodule(frm[0])
if not (mod.__name__ in ignored_modules):
i = ' ' * _debug_indent
debug_function(NOTICE, i + 'dbg: ' + message % tuple(u(repr(a)) for a in args))
def warning(message, *args):
if debug_function and enable_warning:
i = ' ' * _debug_indent
debug_function(WARNING, i + 'warning: ' + message % tuple(u(repr(a)) for a in args))
def speed(name):
if debug_function and enable_speed:
now = time.time()
i = ' ' * _debug_indent
debug_function(SPEED, i + 'speed: ' + '%s %s' % (name, now - _start_time))
def print_to_stdout(level, str_out):
""" The default debug function """
if level == NOTICE:
col = Fore.GREEN
elif level == WARNING:
col = Fore.RED
else:
col = Fore.YELLOW
if not is_py3:
str_out = str_out.encode(encoding, 'replace')
print(col + str_out + Fore.RESET)
# debug_function = print_to_stdout
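# Hedged usage sketch: client code can enable debug output by assigning a
# callback with the (level, str) interface noted above and switching on the
# desired levels, e.g.:
#
#   import jedi.debug
#   jedi.debug.debug_function = jedi.debug.print_to_stdout
#   jedi.debug.enable_notice = True
#   jedi.debug.enable_warning = True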
|
nttks/jenkins-test
|
refs/heads/gacco/birch
|
cms/djangoapps/contentstore/features/component.py
|
1
|
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
# Lettuce formats proposed definitions for unimplemented steps with the
# argument name "step" instead of "_step" and pylint does not like that.
# pylint: disable=unused-argument
from lettuce import world, step
from nose.tools import assert_true, assert_in, assert_equal # pylint: disable=no-name-in-module
DISPLAY_NAME = "Display Name"
@step(u'I add this type of single step component:$')
def add_a_single_step_component(step):
for step_hash in step.hashes:
component = step_hash['Component']
assert_in(component, ['Discussion', 'Video'])
world.create_component_instance(
step=step,
category='{}'.format(component.lower()),
)
@step(u'I see this type of single step component:$')
def see_a_single_step_component(step):
for step_hash in step.hashes:
component = step_hash['Component']
assert_in(component, ['Discussion', 'Video'])
component_css = 'div.xmodule_{}Module'.format(component)
assert_true(world.is_css_present(component_css),
"{} couldn't be found".format(component))
@step(u'I add this type of( Advanced)? (HTML|Problem) component:$')
def add_a_multi_step_component(step, is_advanced, category):
for step_hash in step.hashes:
world.create_component_instance(
step=step,
category='{}'.format(category.lower()),
component_type=step_hash['Component'],
is_advanced=bool(is_advanced),
)
@step(u'I see (HTML|Problem) components in this order:')
def see_a_multi_step_component(step, category):
# Wait for all components to finish rendering
selector = 'li.studio-xblock-wrapper div.xblock-student_view'
world.wait_for(lambda _: len(world.css_find(selector)) == len(step.hashes))
for idx, step_hash in enumerate(step.hashes):
if category == 'HTML':
html_matcher = {
'Text': '\n \n',
'Announcement': '<p> Words of encouragement! This is a short note that most students will read. </p>',
'Zooming Image': '<h2>ZOOMING DIAGRAMS</h2>',
'E-text Written in LaTeX': '<h2>Example: E-text page</h2>',
'Raw HTML': '<p>This template is similar to the Text template. The only difference is',
}
actual_html = world.css_html(selector, index=idx)
assert_in(html_matcher[step_hash['Component']], actual_html)
else:
actual_text = world.css_text(selector, index=idx)
assert_in(step_hash['Component'].upper(), actual_text)
@step(u'I see a "([^"]*)" Problem component$')
def see_a_problem_component(step, category):
component_css = 'div.xmodule_CapaModule'
assert_true(world.is_css_present(component_css),
'No problem was added to the unit.')
problem_css = 'li.studio-xblock-wrapper div.xblock-student_view'
assert_true(world.css_contains_text(problem_css, category.upper()))
@step(u'I add a "([^"]*)" "([^"]*)" component$')
def add_component_category(step, component, category):
assert category in ('single step', 'HTML', 'Problem', 'Advanced Problem')
given_string = 'I add this type of {} component:'.format(category)
step.given('{}\n{}\n{}'.format(given_string, '|Component|', '|{}|'.format(component)))
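# Illustrative example: add_component_category(step, 'Text', 'HTML') feeds
# lettuce the multi-line step
#   I add this type of HTML component:
#   |Component|
#   |Text|
# which dispatches to add_a_multi_step_component() above.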
@step(u'I delete all components$')
def delete_all_components(step):
count = len(world.css_find('ol.reorderable-container li.studio-xblock-wrapper'))
step.given('I delete "' + str(count) + '" component')
@step(u'I delete "([^"]*)" component$')
def delete_components(step, number):
world.wait_for_xmodule()
delete_btn_css = 'a.delete-button'
prompt_css = 'div#prompt-warning'
btn_css = '{} a.button.action-primary'.format(prompt_css)
saving_mini_css = 'div#page-notification .wrapper-notification-mini'
for _ in range(int(number)):
world.css_click(delete_btn_css)
assert_true(
world.is_css_present('{}.is-shown'.format(prompt_css)),
msg='Waiting for the confirmation prompt to be shown')
# Pressing the button via css was not working reliably for the last component
# when run in Chrome.
if world.browser.driver_name == 'Chrome':
world.browser.execute_script("$('{}').click()".format(btn_css))
else:
world.css_click(btn_css)
# Wait for the saving notification to pop up then disappear
if world.is_css_present('{}.is-shown'.format(saving_mini_css)):
world.css_find('{}.is-hiding'.format(saving_mini_css))
@step(u'I see no components')
def see_no_components(steps):
assert world.is_css_not_present('li.studio-xblock-wrapper')
@step(u'I delete a component')
def delete_one_component(step):
world.css_click('a.delete-button')
@step(u'I edit and save a component')
def edit_and_save_component(step):
world.css_click('.edit-button')
world.css_click('.save-button')
@step(u'I duplicate the (first|second|third) component$')
def duplicated_component(step, ordinal):
ord_map = {
"first": 0,
"second": 1,
"third": 2,
}
index = ord_map[ordinal]
duplicate_btn_css = 'a.duplicate-button'
world.css_click(duplicate_btn_css, int(index))
@step(u'I see a Problem component with display name "([^"]*)" in position "([^"]*)"$')
def see_component_in_position(step, display_name, index):
component_css = 'div.xmodule_CapaModule'
def find_problem(_driver):
return world.css_text(component_css, int(index)).startswith(display_name.upper())
world.wait_for(find_problem, timeout_msg='Did not find the duplicated problem')
@step(u'I see the display name is "([^"]*)"')
def check_component_display_name(step, display_name):
# The display name for the unit uses the same structure, must differentiate by level-element.
label = world.css_html("section.level-element>header>div>div>span.xblock-display-name")
assert_equal(display_name, label)
@step(u'I change the display name to "([^"]*)"')
def change_display_name(step, display_name):
world.edit_component_and_select_settings()
index = world.get_setting_entry_index(DISPLAY_NAME)
world.set_field_value(index, display_name)
world.save_component()
@step(u'I unset the display name')
def unset_display_name(step):
world.edit_component_and_select_settings()
world.revert_setting_entry(DISPLAY_NAME)
world.save_component()
|
britram/qof
|
refs/heads/master
|
pytools/qof_obsloss.py
|
1
|
import argparse
import bz2
from sys import stdin, stdout, stderr
import ipfix
import qof
import pandas as pd
import numpy as np
args = None
def parse_args():
global args
parser = argparse.ArgumentParser(description="Report on observation loss in a QoF-produced IPFIX file")
parser.add_argument('--spec', '-s', metavar="specfile", action="append",
help="file to load additional IESpecs from")
parser.add_argument('--file', '-f', metavar="file",
help="IPFIX file to read (default stdin)")
parser.add_argument('--bin', '-b', metavar="sec", type=int, default=None,
help="Output binned flow and octet loss rates")
parser.add_argument('--bzip2', '-j', action="store_const", const=True,
help="Decompress bz2-compressed IPFIX file")
args = parser.parse_args()
def init_ipfix(specfiles = None):
ipfix.types.use_integer_ipv4()
ipfix.ie.use_iana_default()
ipfix.ie.use_5103_default()
if specfiles:
for sf in specfiles:
ipfix.ie.use_specfile(sf)
def obsloss_report_stream_biflow(ipfix_stream, timeslice = 0):
df = qof.dataframe_from_ipfix_stream(ipfix_stream, (
"flowStartMilliseconds", "flowEndMilliseconds",
"packetDeltaCount", "reversePacketDeltaCount",
"transportPacketDeltaCount", "reverseTransportPacketDeltaCount",
"octetDeltaCount", "reverseOctetDeltaCount",
"transportOctetDeltaCount", "reverseTransportOctetDeltaCount",
"tcpSequenceCount", "reverseTcpSequenceCount",
"tcpSequenceLossCount", "reverseTcpSequenceLossCount"))
allcount = len(df)
df["lossy"] = (df["tcpSequenceLossCount"] > 0) | (df["reverseTcpSequenceLossCount"] > 0)
lossycount = len(df[df["lossy"]])
print ("Total flows: %u " % (allcount))
print (" of which lossy: %u (%.2f%%)" % (lossycount, lossycount * 100 / allcount))
if timeslice:
lossy_flows = pd.Series(np.where(df["lossy"], 1, 0),
index=df["flowEndMilliseconds"])
total_flows = pd.Series(1, index=lossy_flows.index)
lossy_flows = lossy_flows.resample(str(timeslice)+"S", how='sum')
total_flows = total_flows.resample(str(timeslice)+"S", how='sum')
lossy_flow_rate = lossy_flows / total_flows
dfout = pd.DataFrame({'total': total_flows,
'lossy': lossy_flows,
'rate': lossy_flow_rate})
dfout.to_csv(stdout)
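# Illustrative output shape (values made up): with --bin set, the per-slice
# DataFrame is written as CSV indexed by time, e.g.
#   ,lossy,rate,total
#   2014-01-01 00:00:00,3,0.03,100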
## begin main ##
parse_args()
init_ipfix(args.spec)
if args.file:
if args.bzip2:
with bz2.open (args.file, mode="rb") as f:
obsloss_report_stream_biflow(f, args.bin)
else:
with open (args.file, mode="rb") as f:
obsloss_report_stream_biflow(f, args.bin)
else:
stdin = stdin.detach()
obsloss_report_stream_biflow(stdin, args.bin)
|
jlegendary/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/playvid.py
|
115
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
)
from ..utils import (
clean_html,
ExtractorError,
)
class PlayvidIE(InfoExtractor):
_VALID_URL = r'https?://www\.playvid\.com/watch(\?v=|/)(?P<id>.+?)(?:#|$)'
_TEST = {
'url': 'http://www.playvid.com/watch/RnmBNgtrrJu',
'md5': 'ffa2f6b2119af359f544388d8c01eb6c',
'info_dict': {
'id': 'RnmBNgtrrJu',
'ext': 'mp4',
'title': 'md5:9256d01c6317e3f703848b5906880dc8',
'duration': 82,
'age_limit': 18,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
m_error = re.search(
r'<div class="block-error">\s*<div class="heading">\s*<div>(?P<msg>.+?)</div>\s*</div>', webpage)
if m_error:
raise ExtractorError(clean_html(m_error.group('msg')), expected=True)
video_title = None
duration = None
video_thumbnail = None
formats = []
# most of the information is stored in the flashvars
flashvars = self._html_search_regex(
r'flashvars="(.+?)"', webpage, 'flashvars')
infos = compat_urllib_parse_unquote(flashvars).split(r'&')
for info in infos:
videovars_match = re.match(r'^video_vars\[(.+?)\]=(.+?)$', info)
if videovars_match:
key = videovars_match.group(1)
val = videovars_match.group(2)
if key == 'title':
video_title = compat_urllib_parse_unquote_plus(val)
if key == 'duration':
try:
duration = int(val)
except ValueError:
pass
if key == 'big_thumb':
video_thumbnail = val
videourl_match = re.match(
r'^video_urls\]\[(?P<resolution>[0-9]+)p', key)
if videourl_match:
height = int(videourl_match.group('resolution'))
formats.append({
'height': height,
'url': val,
})
self._sort_formats(formats)
# Extract title - should be in the flashvars; if not, look elsewhere
if video_title is None:
video_title = self._html_search_regex(
r'<title>(.*?)</title', webpage, 'title')
return {
'id': video_id,
'formats': formats,
'title': video_title,
'thumbnail': video_thumbnail,
'duration': duration,
'description': None,
'age_limit': 18
}
|
zyantific/remodel
|
refs/heads/master
|
testing/gtest-1.7.0/test/gtest_test_utils.py
|
1100
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except ImportError:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# The environment variable for specifying the path to the premature-exit file.
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets/unsets an environment variable to a given value."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable-msg=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
"""Returns a directory for temporary files."""
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary. Please make sure to provide path\n'
'to the binary via the --build_dir flag or the BUILD_DIR\n'
'environment variable.')
print >> sys.stderr, message
sys.exit(1)
return path
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
Returns:
An object that represents outcome of the executed process. It has the
following attributes:
terminated_by_signal True iff the child process has been terminated
by a signal.
signal Signal that terminated the child process.
exited True iff the child process exited normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
# The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
# communicate returns a tuple with the file object for the child's
# output.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
def _ReplaceEnvDict(dest, src):
# Changes made by os.environ.clear are not inheritable by child
# processes until Python 2.6. To produce inheritable changes we have
# to delete environment items with the del statement.
for key in dest.keys():
del dest[key]
dest.update(src)
# When 'env' is not None, backup the environment variables and replace
# them with the passed 'env'. When 'env' is None, we simply use the
# current 'os.environ' for compatibility with the subprocess.Popen
# semantics used above.
if env is not None:
old_environ = os.environ.copy()
_ReplaceEnvDict(os.environ, env)
try:
if working_dir is not None:
os.chdir(working_dir)
if capture_stderr:
p = popen2.Popen4(command)
else:
p = popen2.Popen3(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Restore the old environment variables
# if they were replaced.
if env is not None:
_ReplaceEnvDict(os.environ, old_environ)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
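# Hedged usage sketch (the command below is illustrative): run a program and
# inspect the outcome attributes documented in __init__ above.
#
#   p = Subprocess(['/bin/echo', 'hello'])
#   if p.exited and p.exit_code == 0:
#     print 'output:', p.output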
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
# TODO(vladl@google.com): Move this into Subprocess when we implement
# passing environment into it as a parameter.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
|
SakuradaJun/django-allauth
|
refs/heads/master
|
allauth/socialaccount/providers/foursquare/views.py
|
71
|
import requests
from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
OAuth2LoginView,
OAuth2CallbackView)
from .provider import FoursquareProvider
class FoursquareOAuth2Adapter(OAuth2Adapter):
provider_id = FoursquareProvider.id
access_token_url = 'https://foursquare.com/oauth2/access_token'
# Issue ?? -- this one authenticates over and over again...
# authorize_url = 'https://foursquare.com/oauth2/authorize'
authorize_url = 'https://foursquare.com/oauth2/authenticate'
profile_url = 'https://api.foursquare.com/v2/users/self'
def complete_login(self, request, app, token, **kwargs):
# Foursquare needs a version number for their API requests, as
# documented at https://developer.foursquare.com/overview/versioning
resp = requests.get(self.profile_url,
params={'oauth_token': token.token, 'v': '20140116'})
extra_data = resp.json()['response']['user']
return self.get_provider().sociallogin_from_response(request,
extra_data)
oauth2_login = OAuth2LoginView.adapter_view(FoursquareOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(FoursquareOAuth2Adapter)
|
mjudsp/Tsallis
|
refs/heads/master
|
sklearn/linear_model/tests/test_huber.py
|
25
|
# Authors: Manoj Kumar mks542@nyu.edu
# License: BSD 3 clause
import numpy as np
from scipy import optimize, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.datasets import make_regression
from sklearn.linear_model import (
HuberRegressor, LinearRegression, SGDRegressor, Ridge)
from sklearn.linear_model.huber import _huber_loss_and_gradient
def make_regression_with_outliers(n_samples=50, n_features=20):
rng = np.random.RandomState(0)
# Generate data with outliers by replacing 10% of the samples with noise.
X, y = make_regression(
n_samples=n_samples, n_features=n_features,
random_state=0, noise=0.05)
# Replace 10% of the samples with noise.
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
return X, y
def test_huber_equals_lr_for_high_epsilon():
# Test that HuberRegressor matches LinearRegression for large epsilon
X, y = make_regression_with_outliers()
lr = LinearRegression(fit_intercept=True)
lr.fit(X, y)
huber = HuberRegressor(fit_intercept=True, epsilon=1e3, alpha=0.0)
huber.fit(X, y)
assert_almost_equal(huber.coef_, lr.coef_, 3)
assert_almost_equal(huber.intercept_, lr.intercept_, 2)
def test_huber_gradient():
# Test that the gradient calculated by _huber_loss_and_gradient is correct
rng = np.random.RandomState(1)
X, y = make_regression_with_outliers()
sample_weight = rng.randint(1, 3, (y.shape[0]))
loss_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[0]
grad_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[1]
# Check using optimize.check_grad that the gradients are equal.
for _ in range(5):
# Check for both fit_intercept and otherwise.
for n_features in [X.shape[1] + 1, X.shape[1] + 2]:
w = rng.randn(n_features)
w[-1] = np.abs(w[-1])
grad_same = optimize.check_grad(
loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight)
assert_almost_equal(grad_same, 1e-6, 4)
def test_huber_sample_weights():
# Test sample_weights implementation in HuberRegressor.
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.1)
huber.fit(X, y)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
huber.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(huber.coef_, huber_coef)
assert_array_almost_equal(huber.intercept_, huber_intercept)
X, y = make_regression_with_outliers(n_samples=5, n_features=20)
X_new = np.vstack((X, np.vstack((X[1], X[1], X[3]))))
y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]]))
huber.fit(X_new, y_new)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
huber.fit(X, y, sample_weight=[1, 3, 1, 2, 1])
assert_array_almost_equal(huber.coef_, huber_coef, 3)
assert_array_almost_equal(huber.intercept_, huber_intercept, 3)
# Test sparse implementation with sample weights.
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True, alpha=0.1)
huber_sparse.fit(X_csr, y, sample_weight=[1, 3, 1, 2, 1])
assert_array_almost_equal(huber_sparse.coef_, huber_coef, 3)
def test_huber_sparse():
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.1)
huber.fit(X, y)
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True, alpha=0.1)
huber_sparse.fit(X_csr, y)
assert_array_almost_equal(huber_sparse.coef_, huber.coef_)
def test_huber_scaling_invariant():
"""Test that outliers filtering is scaling independent."""
rng = np.random.RandomState(0)
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100,
epsilon=1.35)
huber.fit(X, y)
n_outliers_mask_1 = huber.outliers_
huber.fit(X, 2. * y)
n_outliers_mask_2 = huber.outliers_
huber.fit(2. * X, 2. * y)
n_outliers_mask_3 = huber.outliers_
assert_array_equal(n_outliers_mask_2, n_outliers_mask_1)
assert_array_equal(n_outliers_mask_3, n_outliers_mask_1)
def test_huber_and_sgd_same_results():
"""Test they should converge to same coefficients for same parameters"""
X, y = make_regression_with_outliers(n_samples=5, n_features=2)
# Fit once to find out the scale parameter. Scale down X and y by scale
# so that the scale parameter is optimized to 1.0
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100,
epsilon=1.35)
huber.fit(X, y)
X_scale = X / huber.scale_
y_scale = y / huber.scale_
huber.fit(X_scale, y_scale)
assert_almost_equal(huber.scale_, 1.0, 3)
sgdreg = SGDRegressor(
alpha=0.0, loss="huber", shuffle=True, random_state=0, n_iter=1000000,
fit_intercept=False, epsilon=1.35)
sgdreg.fit(X_scale, y_scale)
assert_array_almost_equal(huber.coef_, sgdreg.coef_, 1)
def test_huber_warm_start():
X, y = make_regression_with_outliers()
huber_warm = HuberRegressor(
fit_intercept=True, alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1)
huber_warm.fit(X, y)
huber_warm_coef = huber_warm.coef_.copy()
huber_warm.fit(X, y)
# SciPy performs the tol check after doing the coef updates, so
# these would be almost the same but not equal.
assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1)
# No n_iter_ in old SciPy (<=0.9)
# And as said above, the first iteration seems to be run anyway.
if huber_warm.n_iter_ is not None:
assert_equal(1, huber_warm.n_iter_)
def test_huber_better_r2_score():
# Test that Huber gives a better r2 score than Ridge on non-outliers.
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.01, max_iter=100)
huber.fit(X, y)
linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y
mask = np.abs(linear_loss) < huber.epsilon * huber.scale_
huber_score = huber.score(X[mask], y[mask])
huber_outlier_score = huber.score(X[~mask], y[~mask])
# The Ridge regressor should be influenced by the outliers and hence
# give a worse score on the non-outliers as compared to the huber regressor.
ridge = Ridge(fit_intercept=True, alpha=0.01)
ridge.fit(X, y)
ridge_score = ridge.score(X[mask], y[mask])
ridge_outlier_score = ridge.score(X[~mask], y[~mask])
assert_greater(huber_score, ridge_score)
# The huber model should also fit poorly on the outliers.
assert_greater(ridge_outlier_score, huber_outlier_score)
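# Illustrative sketch (not part of the original test suite): the same
# robustness effect on hand-made data. Assumes the module-level imports
# used by the tests above (np, HuberRegressor, Ridge); the helper name
# is hypothetical.
def _huber_vs_ridge_example():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 2)
    y = X.dot(np.array([1.0, 2.0]))
    y[:5] += 100.0  # corrupt a few targets so they act as outliers
    huber = HuberRegressor(alpha=0.0).fit(X, y)
    ridge = Ridge(alpha=0.0).fit(X, y)
    # Huber's coefficients should stay close to [1, 2]; Ridge's get
    # pulled toward the corrupted targets.
    return huber.coef_, ridge.coef_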
|
lean-poker/poker-player-python
|
refs/heads/master
|
player.py
|
3
|
class Player:
VERSION = "Default Python folding player"
def betRequest(self, game_state):
return 0
def showdown(self, game_state):
pass
|
muntasirsyed/intellij-community
|
refs/heads/master
|
python/testData/resolve/ListAssignment.py
|
83
|
[foo, bar] = (1, 2)
print(foo)
# <ref>
|
abartlet/samba-old
|
refs/heads/master
|
buildtools/wafsamba/samba_third_party.py
|
26
|
# functions to support third party libraries
from Configure import conf
import sys, Logs, os
from samba_bundled import *
@conf
def CHECK_FOR_THIRD_PARTY(conf):
return os.path.exists(os.path.join(Utils.g_module.srcdir, 'third_party'))
Build.BuildContext.CHECK_FOR_THIRD_PARTY = CHECK_FOR_THIRD_PARTY
@conf
def CHECK_ZLIB(conf):
version_check='''
#if (ZLIB_VERNUM >= 0x1230)
#else
#error "ZLIB_VERNUM < 0x1230"
#endif
z_stream *z;
inflateInit2(z, -15);
'''
return conf.CHECK_BUNDLED_SYSTEM('z', minversion='1.2.3', pkg='zlib',
checkfunctions='zlibVersion',
headers='zlib.h',
checkcode=version_check,
implied_deps='replace')
Build.BuildContext.CHECK_ZLIB = CHECK_ZLIB
@conf
def CHECK_POPT(conf):
return conf.CHECK_BUNDLED_SYSTEM('popt', checkfunctions='poptGetContext', headers='popt.h')
Build.BuildContext.CHECK_POPT = CHECK_POPT
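# Hedged pattern sketch (hypothetical, mirroring CHECK_POPT above; the
# 'lzma' library and its symbols are assumptions for illustration only.
# Build and Utils are expected to be provided by the samba_bundled
# wildcard import):
@conf
def CHECK_LZMA(conf):
    return conf.CHECK_BUNDLED_SYSTEM('lzma', checkfunctions='lzma_code', headers='lzma.h')
Build.BuildContext.CHECK_LZMA = CHECK_LZMA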
|
KIT-XXI/babel
|
refs/heads/master
|
tests/messages/test_mofile.py
|
41
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
import os
import unittest
from babel.messages import mofile, Catalog
from babel._compat import BytesIO, text_type
from babel.support import Translations
class ReadMoTestCase(unittest.TestCase):
def setUp(self):
self.datadir = os.path.join(os.path.dirname(__file__), 'data')
def test_basics(self):
mo_path = os.path.join(self.datadir, 'project', 'i18n', 'de',
'LC_MESSAGES', 'messages.mo')
mo_file = open(mo_path, 'rb')
try:
catalog = mofile.read_mo(mo_file)
self.assertEqual(2, len(catalog))
self.assertEqual('TestProject', catalog.project)
self.assertEqual('0.1', catalog.version)
self.assertEqual('Stange', catalog['bar'].string)
self.assertEqual(['Fuhstange', 'Fuhstangen'],
catalog['foobar'].string)
finally:
mo_file.close()
class WriteMoTestCase(unittest.TestCase):
def test_sorting(self):
# Ensure the header is sorted to the first entry so that its charset
# can be applied to all subsequent messages by GNUTranslations
# (ensuring all messages are safely converted to unicode)
catalog = Catalog(locale='en_US')
catalog.add(u'', '''\
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n''')
catalog.add(u'foo', 'Voh')
catalog.add((u'There is', u'There are'), (u'Es gibt', u'Es gibt'))
catalog.add(u'Fizz', '')
catalog.add(('Fuzz', 'Fuzzes'), ('', ''))
buf = BytesIO()
mofile.write_mo(buf, catalog)
buf.seek(0)
translations = Translations(fp=buf)
self.assertEqual(u'Voh', translations.ugettext('foo'))
assert isinstance(translations.ugettext('foo'), text_type)
self.assertEqual(u'Es gibt', translations.ungettext('There is', 'There are', 1))
assert isinstance(translations.ungettext('There is', 'There are', 1), text_type)
self.assertEqual(u'Fizz', translations.ugettext('Fizz'))
assert isinstance(translations.ugettext('Fizz'), text_type)
self.assertEqual(u'Fuzz', translations.ugettext('Fuzz'))
assert isinstance(translations.ugettext('Fuzz'), text_type)
self.assertEqual(u'Fuzzes', translations.ugettext('Fuzzes'))
assert isinstance(translations.ugettext('Fuzzes'), text_type)
def test_more_plural_forms(self):
catalog2 = Catalog(locale='ru_RU')
catalog2.add(('Fuzz', 'Fuzzes'), ('', '', ''))
buf = BytesIO()
mofile.write_mo(buf, catalog2)
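# Illustrative round-trip sketch (not part of the original tests):
# writing a catalog to an in-memory buffer with the public write_mo
# API and reading it back with read_mo should preserve the strings.
def _roundtrip_example():
    catalog = Catalog(locale='de_DE')
    catalog.add(u'foo', u'Voh')
    buf = BytesIO()
    mofile.write_mo(buf, catalog)
    buf.seek(0)
    restored = mofile.read_mo(buf)
    assert restored[u'foo'].string == u'Voh'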
|
Brunni/conan-g3log
|
refs/heads/master
|
test_package/conanfile.py
|
1
|
from conans import ConanFile, CMake
import os
channel = os.getenv("CONAN_CHANNEL", "testing")
username = os.getenv("CONAN_USERNAME", "Brunni")
class G3logTestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
requires = "g3log/master@%s/%s" % (username, channel)
generators = "cmake"
def configure(self):
print("Testing shared library: %s" % self.options["g3log"].shared)
def build(self):
cmake = CMake(self.settings)
self.run('cmake "%s" %s' % (self.conanfile_directory, cmake.command_line))
self.run("cmake --build . %s" % cmake.build_config)
def imports(self):
self.copy("*.dll", "bin", "bin")
self.copy("*.dylib", "bin", "bin")
def test(self):
os.chdir("bin")
self.run(".%sexample" % os.sep)
|
nikste/tensorflow
|
refs/heads/master
|
tensorflow/contrib/rnn/python/kernel_tests/fused_rnn_cell_test.py
|
18
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.rnn.python.ops.fused_rnn_cell."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.rnn.python.ops import core_rnn
from tensorflow.contrib.rnn.python.ops import core_rnn_cell_impl
from tensorflow.contrib.rnn.python.ops import fused_rnn_cell
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class FusedRnnCellTest(test.TestCase):
def testBasicRNNFusedWrapper(self):
"""This test checks that using a wrapper for BasicRNN works as expected."""
with self.test_session() as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=19890212)
cell = core_rnn_cell_impl.BasicRNNCell(10)
batch_size = 5
input_size = 20
timelen = 15
inputs = constant_op.constant(
np.random.randn(timelen, batch_size, input_size))
with variable_scope.variable_scope("basic", initializer=initializer):
unpacked_inputs = array_ops.unstack(inputs)
outputs, state = core_rnn.static_rnn(
cell, unpacked_inputs, dtype=dtypes.float64)
packed_outputs = array_ops.stack(outputs)
basic_vars = [
v for v in variables.trainable_variables()
if v.name.startswith("basic/")
]
sess.run([variables.global_variables_initializer()])
basic_outputs, basic_state = sess.run([packed_outputs, state])
basic_grads = sess.run(gradients_impl.gradients(packed_outputs, inputs))
basic_wgrads = sess.run(
gradients_impl.gradients(packed_outputs, basic_vars))
with variable_scope.variable_scope(
"fused_static", initializer=initializer):
fused_cell = fused_rnn_cell.FusedRNNCellAdaptor(cell)
outputs, state = fused_cell(inputs, dtype=dtypes.float64)
fused_static_vars = [
v for v in variables.trainable_variables()
if v.name.startswith("fused_static/")
]
sess.run([variables.global_variables_initializer()])
fused_static_outputs, fused_static_state = sess.run([outputs, state])
fused_static_grads = sess.run(gradients_impl.gradients(outputs, inputs))
fused_static_wgrads = sess.run(
gradients_impl.gradients(outputs, fused_static_vars))
self.assertAllClose(basic_outputs, fused_static_outputs)
self.assertAllClose(basic_state, fused_static_state)
self.assertAllClose(basic_grads, fused_static_grads)
for basic, fused in zip(basic_wgrads, fused_static_wgrads):
self.assertAllClose(basic, fused, rtol=1e-2, atol=1e-2)
with variable_scope.variable_scope(
"fused_dynamic", initializer=initializer):
fused_cell = fused_rnn_cell.FusedRNNCellAdaptor(
cell, use_dynamic_rnn=True)
outputs, state = fused_cell(inputs, dtype=dtypes.float64)
fused_dynamic_vars = [
v for v in variables.trainable_variables()
if v.name.startswith("fused_dynamic/")
]
sess.run([variables.global_variables_initializer()])
fused_dynamic_outputs, fused_dynamic_state = sess.run([outputs, state])
fused_dynamic_grads = sess.run(
gradients_impl.gradients(outputs, inputs))
fused_dynamic_wgrads = sess.run(
gradients_impl.gradients(outputs, fused_dynamic_vars))
self.assertAllClose(basic_outputs, fused_dynamic_outputs)
self.assertAllClose(basic_state, fused_dynamic_state)
self.assertAllClose(basic_grads, fused_dynamic_grads)
for basic, fused in zip(basic_wgrads, fused_dynamic_wgrads):
self.assertAllClose(basic, fused, rtol=1e-2, atol=1e-2)
def testTimeReversedFusedRNN(self):
with self.test_session() as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=19890213)
cell = core_rnn_cell_impl.BasicRNNCell(10)
batch_size = 5
input_size = 20
timelen = 15
inputs = constant_op.constant(
np.random.randn(timelen, batch_size, input_size))
# test bi-directional rnn
with variable_scope.variable_scope("basic", initializer=initializer):
unpacked_inputs = array_ops.unstack(inputs)
outputs, fw_state, bw_state = core_rnn.static_bidirectional_rnn(
cell, cell, unpacked_inputs, dtype=dtypes.float64)
packed_outputs = array_ops.stack(outputs)
basic_vars = [
v for v in variables.trainable_variables()
if v.name.startswith("basic/")
]
sess.run([variables.global_variables_initializer()])
basic_outputs, basic_fw_state, basic_bw_state = sess.run(
[packed_outputs, fw_state, bw_state])
basic_grads = sess.run(gradients_impl.gradients(packed_outputs, inputs))
basic_wgrads = sess.run(
gradients_impl.gradients(packed_outputs, basic_vars))
with variable_scope.variable_scope("fused", initializer=initializer):
fused_cell = fused_rnn_cell.FusedRNNCellAdaptor(cell)
fused_bw_cell = fused_rnn_cell.TimeReversedFusedRNN(fused_cell)
fw_outputs, fw_state = fused_cell(
inputs, dtype=dtypes.float64, scope="fw")
bw_outputs, bw_state = fused_bw_cell(
inputs, dtype=dtypes.float64, scope="bw")
outputs = array_ops.concat([fw_outputs, bw_outputs], 2)
fused_vars = [
v for v in variables.trainable_variables()
if v.name.startswith("fused/")
]
sess.run([variables.global_variables_initializer()])
fused_outputs, fused_fw_state, fused_bw_state = sess.run(
[outputs, fw_state, bw_state])
fused_grads = sess.run(gradients_impl.gradients(outputs, inputs))
fused_wgrads = sess.run(gradients_impl.gradients(outputs, fused_vars))
self.assertAllClose(basic_outputs, fused_outputs)
self.assertAllClose(basic_fw_state, fused_fw_state)
self.assertAllClose(basic_bw_state, fused_bw_state)
self.assertAllClose(basic_grads, fused_grads)
for basic, fused in zip(basic_wgrads, fused_wgrads):
self.assertAllClose(basic, fused, rtol=1e-2, atol=1e-2)
if __name__ == "__main__":
test.main()
|
bmazin/SDR
|
refs/heads/master
|
Projects/Dither/shard_many_telnet.py
|
1
|
#!/bin/python
import subprocess
import time
def move(ra,dec):
commandTemplate = 'TELMOVE INSTRUMENT %.2f %.2f SEC_ARC NOWAIT'
command = commandTemplate%(ra,dec)
bashTemplate = '(sleep 1; echo \"%s\") | telnet shard.ucolick.org 2345'# 2> /dev/null > /dev/null'
bashCommand = bashTemplate%command
timestamp = time.strftime("%Y%m%d-%H%M%S",time.gmtime())
print timestamp, command
subprocess.Popen(bashCommand,shell=True)
def raster(step=1,numX=5,numY=5,integrateTime=20):
numXSteps = numX-1
numYSteps = numY-1
moveTime = 1
firstX = -numXSteps/2.0*step
firstY = -numYSteps/2.0*step
move(firstX,firstY)
time.sleep(integrateTime)
for iY in range(numY):
for iX in range(numXSteps):
move(step,0)
time.sleep(integrateTime)
if iY < numY - 1:
move(-numXSteps*step,step)
time.sleep(integrateTime)
move(firstX,firstY)
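# Hedged sketch (illustrative, not part of the original script; it does
# not open a telnet connection): collect the same relative (ra, dec)
# offsets that raster() above would send, to sanity-check a pattern
# before pointing the telescope.
def raster_offsets(step=1, numX=5, numY=5):
    numXSteps = numX - 1
    numYSteps = numY - 1
    firstX = -numXSteps / 2.0 * step
    firstY = -numYSteps / 2.0 * step
    offsets = [(firstX, firstY)]
    for iY in range(numY):
        for iX in range(numXSteps):
            offsets.append((step, 0))
        if iY < numY - 1:
            offsets.append((-numXSteps * step, step))
    offsets.append((firstX, firstY))  # mirrors raster()'s final move
    return offsets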
|
shenmidelin/pylibwyl
|
refs/heads/master
|
wanntb/tran.py
|
1
|
#!/usr/bin/env python
import numpy as np
def tran_op(op, tmat):
"""
transform a quantum operator from representation A to
representation B
Args:
op: the matrix form of operator in representation A
tmat: the unitary transform matrix
"""
return np.dot(np.dot(np.conj(np.transpose(tmat)), op), tmat)
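# Hedged usage sketch (illustrative, not part of the original module):
# rotate a diagonal operator from the complex-harmonics basis into the
# real-harmonics basis using the t_c2r matrix from tmat_c2r() below
# (defined later in this file, available by call time).
def _demo_tran_op():
    lz = np.diag([-1.0, 0.0, 1.0]).astype(np.complex128)  # Lz for l=1, m=-1,0,1
    return tran_op(lz, tmat_c2r('p'))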
def tmat_c2r(case, ispin=False):
"""
the transform matrix from complex spherical harmonics to
real spherical harmonics
Args:
case: label for different systems
ispin: whether to include spin or not
"""
sqrt2 = np.sqrt(2.0)
ci = np.complex128(0.0+1.0j)
cone = np.complex128(1.0+0.0j)
if case.strip() == 'p':
nband = 3
t_c2r = np.zeros((nband, nband), dtype=np.complex128)
# px=1/sqrt(2)( |1,-1> - |1,1> )
t_c2r[0,0] = cone/sqrt2
t_c2r[2,0] = -cone/sqrt2
# py=i/sqrt(2)( |1,-1> + |1,1> )
t_c2r[0,1] = ci/sqrt2
t_c2r[2,1] = ci/sqrt2
# pz=|1,0>
t_c2r[1,2] = cone
elif case.strip() == 't2g':
nband = 3
t_c2r = np.zeros((nband, nband), dtype=np.complex128)
# dzx --> py=i/sqrt(2)( |1,-1> + |1,1> )
t_c2r[0,0] = ci/sqrt2
t_c2r[2,0] = ci/sqrt2
# dzy --> px=1/sqrt(2)( |1,-1> - |1,1> )
t_c2r[0,1] = cone/sqrt2
t_c2r[2,1] = -cone/sqrt2
# dxy --> pz=|1,0>
t_c2r[1,2] = cone
elif case.strip() == 'd':
nband = 5
t_c2r = np.zeros((nband, nband), dtype=np.complex128)
# dz2=|2,0>
t_c2r[2,0] = cone
# dzx=1/sqrt(2)( |2,-1> - |2,1> )
t_c2r[1,1] = cone/sqrt2
t_c2r[3,1] = -cone/sqrt2
# dzy=i/sqrt(2)( |2,-1> + |2,1> )
t_c2r[1,2] = ci/sqrt2
t_c2r[3,2] = ci/sqrt2
# dx2-y2=1/sqrt(2)( |2,-2> + |2,2> )
t_c2r[0,3] = cone/sqrt2
t_c2r[4,3] = cone/sqrt2
# dxy=i/sqrt(2)( |2,-2> - |2,2> )
t_c2r[0,4] = ci/sqrt2
t_c2r[4,4] = -ci/sqrt2
elif case.strip() == 'f':
nband = 7
t_c2r = np.zeros((nband, nband), dtype=np.complex128)
# fz3 = |3,0>
t_c2r[3, 0] = cone
# fxz2 = 1/sqrt(2)( |3,-1> - |3,1> )
t_c2r[2, 1] = cone/sqrt2
t_c2r[4, 1] = -cone/sqrt2
# fyz2 = i/sqrt(2)( |3,-1> + |3,1> )
t_c2r[2, 2] = ci/sqrt2
t_c2r[4, 2] = ci/sqrt2
# fz(x2-y2) = 1/sqrt(2)( |3,-2> + |3,2> )
t_c2r[1, 3] = cone/sqrt2
t_c2r[5, 3] = cone/sqrt2
# fxyz = i/sqrt(2)( |3,-2> - |3,2> )
t_c2r[1, 4] = ci/sqrt2
t_c2r[5, 4] = -ci/sqrt2
# fx(x2-3y2) = 1/sqrt(2) ( |3,-3> - |3,3> )
t_c2r[0, 5] = cone/sqrt2
t_c2r[6, 5] = -cone/sqrt2
# fy(3x2-y2) = i/sqrt(2) ( |3,-3> + |3,3> )
t_c2r[0, 6] = ci/sqrt2
t_c2r[6, 6] = ci/sqrt2
else:
print "don't support t_c2r for this case: ", case
return
if ispin:
norbs=2*nband
t_c2r_spin = np.zeros((norbs,norbs), dtype=np.complex128)
t_c2r_spin[0:norbs:2,0:norbs:2] = t_c2r
t_c2r_spin[1:norbs:2,1:norbs:2] = t_c2r
return t_c2r_spin
else:
return t_c2r
def tmat_r2c(case, ispin=False):
"""
the transform matrix from real spherical harmonics to
complex spherical harmonics
Args:
case: label for different systems
ispin: whether to include spin or not
"""
return np.conj(np.transpose(tmat_c2r(case, ispin)))
def tmat_r2cub(ispin=False):
"""
the transform matrix from real spherical harmonics to the cubic
spherical harmonics, just for f system
Args:
ispin: whether to include spin or not
"""
a = np.sqrt(10.0) / 4.0 + 0.0j
b = np.sqrt(6.0) / 4.0 + 0.0j
c = 1.0 + 0.0j
nband = 7
t_r2cub = np.zeros((nband,nband), dtype=np.complex128)
# fx3 = -sqrt(6)/4 fxz2 + sqrt(10)/4 fx(x2-3y2)
t_r2cub[1, 0] = -b
t_r2cub[5, 0] = a
# fy3 = -sqrt(6)/4 fyz2 - sqrt(10)/4 fy(3x2-y2)
t_r2cub[2, 1] = -b
t_r2cub[6, 1] = -a
# fz3 = fz3
t_r2cub[0, 2] = c
# fx(y2-z2) = -sqrt(10)/4 fxz2 - sqrt(6)/4 fx(x2-3y2)
t_r2cub[1, 3] = -a
t_r2cub[5, 3] = -b
# fy(z2-x2) = sqrt(10)/4 fyz2 - sqrt(6)/4 fy(3x2-y2)
t_r2cub[2, 4] = a
t_r2cub[6, 4] = -b
# fz(x2-y2) = fz(x2-y2)
t_r2cub[3, 5] = c
# fxyz = fxyz
t_r2cub[4, 6] = c
if ispin:
norbs = 2 * nband
t_r2cub_spin = np.zeros((norbs, norbs), dtype=np.complex128)
t_r2cub_spin[0:norbs:2,0:norbs:2] = t_r2cub
t_r2cub_spin[1:norbs:2,1:norbs:2] = t_r2cub
return t_r2cub_spin
else:
return t_r2cub
def tmat_cub2r(ispin=False):
"""
the transform matrix from cubic spherical harmonics to
real spherical harmonics, just for f system
Args:
ispin: whether to include spin or not
"""
return np.conj( np.transpose( tmat_r2cub(ispin) ) )
def tmat_c2j(l):
"""
the transform matrix from complex spherical harmonics to
the j2,jz coupled basis
Args:
l: orbital angular momentum quantum number (1 for p, 2 for d, 3 for f)
"""
if l == 1:
t_c2j = np.zeros((6, 6), dtype=np.complex128)
t_c2j[0,0] = -np.sqrt(2.0/3.0)
t_c2j[3,0] = np.sqrt(1.0/3.0)
t_c2j[2,1] = -np.sqrt(1.0/3.0)
t_c2j[5,1] = np.sqrt(2.0/3.0)
t_c2j[1,2] = 1.0
t_c2j[0,3] = np.sqrt(1.0/3.0)
t_c2j[3,3] = np.sqrt(2.0/3.0)
t_c2j[2,4] = np.sqrt(2.0/3.0)
t_c2j[5,4] = np.sqrt(1.0/3.0)
t_c2j[4,5] = 1.0
return t_c2j
elif l == 2:
t_c2j = np.zeros((10, 10), dtype=np.complex128)
t_c2j[0,0] = -np.sqrt(4.0/5.0)
t_c2j[3,0] = np.sqrt(1.0/5.0)
t_c2j[2,1] = -np.sqrt(3.0/5.0)
t_c2j[5,1] = np.sqrt(2.0/5.0)
t_c2j[4,2] = -np.sqrt(2.0/5.0)
t_c2j[7,2] = np.sqrt(3.0/5.0)
t_c2j[6,3] = -np.sqrt(1.0/5.0)
t_c2j[9,3] = np.sqrt(4.0/5.0)
t_c2j[1,4] = 1.0
t_c2j[0,5] = np.sqrt(1.0/5.0)
t_c2j[3,5] = np.sqrt(4.0/5.0)
t_c2j[2,6] = np.sqrt(2.0/5.0)
t_c2j[5,6] = np.sqrt(3.0/5.0)
t_c2j[4,7] = np.sqrt(3.0/5.0)
t_c2j[7,7] = np.sqrt(2.0/5.0)
t_c2j[6,8] = np.sqrt(4.0/5.0)
t_c2j[9,8] = np.sqrt(1.0/5.0)
t_c2j[8,9] = 1.0
return t_c2j
elif l == 3:
t_c2j = np.zeros((14,14), dtype=np.complex128)
t_c2j[0,0] = -np.sqrt(6.0/7.0)
t_c2j[3,0] = np.sqrt(1.0/7.0)
t_c2j[2,1] = -np.sqrt(5.0/7.0)
t_c2j[5,1] = np.sqrt(2.0/7.0)
t_c2j[4,2] = -np.sqrt(4.0/7.0)
t_c2j[7,2] = np.sqrt(3.0/7.0)
t_c2j[6,3] = -np.sqrt(3.0/7.0)
t_c2j[9,3] = np.sqrt(4.0/7.0)
t_c2j[8,4] = -np.sqrt(2.0/7.0)
t_c2j[11,4] = np.sqrt(5.0/7.0)
t_c2j[10,5] = -np.sqrt(1.0/7.0)
t_c2j[13,5] = np.sqrt(6.0/7.0)
t_c2j[1,6] = 1.0
t_c2j[0,7] = np.sqrt(1.0/7.0)
t_c2j[3,7] = np.sqrt(6.0/7.0)
t_c2j[2,8] = np.sqrt(2.0/7.0)
t_c2j[5,8] = np.sqrt(5.0/7.0)
t_c2j[4,9] = np.sqrt(3.0/7.0)
t_c2j[7,9] = np.sqrt(4.0/7.0)
t_c2j[6,10] = np.sqrt(4.0/7.0)
t_c2j[9,10] = np.sqrt(3.0/7.0)
t_c2j[8,11] = np.sqrt(5.0/7.0)
t_c2j[11,11] = np.sqrt(2.0/7.0)
t_c2j[10,12] = np.sqrt(6.0/7.0)
t_c2j[13,12] = np.sqrt(1.0/7.0)
t_c2j[12,13] = 1.0
return t_c2j
else:
print "NOT Implemented !!!"
def fourier_hr2hk(norbs, nkpt, kvec, nrpt, rvec, deg_rpt, hr):
"""
Fourier transform from R-space to K-space
Args:
norbs: number of orbitals
nkpt: number of K-points
kvec: fractional coordinate for K-points
nrpt: number of R-points
rvec: fractional coordinate for R-points
deg_rpt: the degeneracy of each R-point
hr: Hamiltonian in R-space
Return:
hk: Hamiltonian in K-space
"""
hk = np.zeros((nkpt, norbs, norbs), dtype=np.complex128)
for i in range(nkpt):
for j in range(nrpt):
coef = -2*np.pi*np.dot(kvec[i,:], rvec[j,:])
ratio = (np.cos(coef) + np.sin(coef) * 1j) / float(deg_rpt[j])
hk[i,:,:] = hk[i,:,:] + ratio * hr[j,:,:]
return hk
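# Hedged demo (illustrative, not part of the original module): a 1D
# nearest-neighbor tight-binding chain, where the Fourier-transformed
# Hamiltonian should reduce to -2*t*cos(2*pi*k).
def _demo_fourier_chain():
    t = 1.0
    rvec = np.array([[-1.0], [0.0], [1.0]])
    deg_rpt = np.array([1, 1, 1])
    hr = np.zeros((3, 1, 1), dtype=np.complex128)
    hr[0, 0, 0] = -t  # hopping to R = -1
    hr[2, 0, 0] = -t  # hopping to R = +1
    kvec = np.linspace(0.0, 1.0, 5).reshape(-1, 1)
    hk = fourier_hr2hk(1, 5, kvec, 3, rvec, deg_rpt, hr)
    return hk[:, 0, 0]  # should match -2*t*np.cos(2*np.pi*kvec[:, 0])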
|
soxfmr/Mint-X-icons-Convert
|
refs/heads/master
|
utils.py
|
1
|
#!/usr/bin/python2
def composepath(*dirs):
return "/".join(dirs)
|
senadmd/coinmarketwatch
|
refs/heads/master
|
test/functional/proxy_test.py
|
19
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoind with different proxy configuration.
Test plan:
- Start bitcoinds with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on bitcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side:
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create bitcoinds that connect to them
- Manipulate the bitcoinds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.setup_clean_chain = False
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
self.log.warning("Testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
# Note: proxies are not used to connect to local nodes;
# this is because the proxy to use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost
args = [
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir, extra_args=args)
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("bitcoinostk4e4re.onion:8333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
|
SELO77/seloPython
|
refs/heads/master
|
MMTseminar/threading0.py
|
1
|
import threading
import time
import random
def worker(number):
sleep = random.randrange(1, 10)
time.sleep(sleep)
print("I am Worker {}, I slept for {} seconds".format(number, sleep))
for i in range(5):
t = threading.Thread(target=worker, args=(i,))
t.start()
print("All Threads are queued, let's see when they finish!")
|
daajoe/trellis
|
refs/heads/master
|
smac/genericWrapper.py
|
1
|
#!/usr/bin/env python2.7
# encoding: utf-8
'''
genericWrapper -- template for an AClib target algorithm wrapper
abstract methods for generation of callstring and parsing of solver output
@author: Marius Lindauer, Chris Fawcett, Alex Fréchette, Frank Hutter
@copyright: 2014 AClib. All rights reserved.
@license: GPL
@contact: lindauer@informatik.uni-freiburg.de, fawcettc@cs.ubc.ca, afrechet@cs.ubc.ca, fh@informatik.uni-freiburg.de
@note: example call: python src/generic_wrapper/spearWrapper.py --runsolver ./target_algorithms/runsolver/runsolver-3.3.4/src/runsolver -- <instance> <instance specific> <cutoff> <runlength> <seed>
@warning: use "--" after the last additional argument of the wrapper to deactivate prefix matching!
'''
import sys
import os
import signal
import time
import re
import random
import traceback
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
__all__ = []
__version__ = 0.1
__authors__ = 'Marius Lindauer, Chris Fawcett, Alex Fréchette, Frank Hutter'
__date__ = '2014-03-06'
__updated__ = '2014-03-21'
def signalHandler(signum, frame):
sys.exit(2)
class AbstractWrapper(object):
'''
abstract solver wrapper
'''
def __init__(self):
'''
Constructor
'''
#program_name = os.path.basename(sys.argv[0])
program_version = "v%s" % __version__
program_build_date = str(__updated__)
program_version_message = "%%(prog)s %s (%s)" % (program_version, program_build_date)
program_shortdesc = __import__("__main__").__doc__.split("\n")[1]
program_license = '''%s
Created by %s on %s.
Copyright 2014 - AClib. All rights reserved.
Licensed under the GPLv2
http://www.gnu.org/licenses/gpl-2.0.html
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc, str(__authors__), str(__date__))
self.parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter, add_help=False)
self.args = None
self.RESULT_MAPPING = {'SUCCESS': "SAT"}
self._watcher_file = None
self._solver_file = None
self._instance = ""
self._specifics = ""
self._cutoff = 0.0
self._runlength = 0
self._seed = 0
self._exit_code = None
self._runsolver = None
self._mem_limit = 8192
self._tmp_dir = None
self._crashed_if_non_zero_status = True
self._subprocesses = []
self._DEBUG = True
self._DELAY2KILL = 2
self._ta_status = "EXTERNALKILL"
self._ta_runtime = 999999999.0
self._ta_runlength = -1
self._ta_quality = -1
self._ta_exit_code = None
self._ta_misc = ""
def print_d(self, str_):
if self._DEBUG:
print(str_)
def main(self, argv=None):
''' parse command line'''
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
try:
signal.signal(signal.SIGTERM, signalHandler)
signal.signal(signal.SIGQUIT, signalHandler)
signal.signal(signal.SIGINT, signalHandler)
# Setup argument parser
run_group = self.parser.add_argument_group("Run")
run_group.add_argument("--runsolver-path", dest="runsolver", default=os.path.join(os.path.join(os.path.dirname(__file__),"runsolver"), "runsolver"), help="path to runsolver binary (if None, the runsolver is deactivated)")
run_group.add_argument("--temp-file-dir", dest="tmp_dir", default=".", help="directory for temporary files (relative to -exec-dir in SMAC scenario)")
run_group.add_argument("--mem-limit", dest="mem_limit", default=self._mem_limit, type=int, help="memory limit in MB")
run_group.add_argument("--internal", dest="internal", default=False, action="store_true", help="skip calling an external target algorithm")
run_group = self.parser.add_argument_group("External Callstring Generation and Output Parsing")
run_group.add_argument("--ext-callstring", dest="ext_callstring", default=None, help="Command to get call string via external program;" +
"your programm gets a file with"+
"first line: instance name,"+
"second line: seed"+
"further lines: paramter name, paramater value;"+
"output: one line with callstring for target algorithm")
run_group.add_argument("--ext-parsing", dest="ext_parsing", default=None, help="Command to use an external program to parse the output of your target algorihm;" +
"only paramter: name of output file;"+
"output of your progam:"+
"status: SAT|UNSAT|TIMEOUT|CRASHED\n"+
"quality: <integer>\n"+
"misc: <string>")
help_group = self.parser.add_argument_group("Help")
help_group.add_argument("--help", dest="show_help", action="store_true", help="show this help message")
# Process arguments
self.args, target_args = self.parser.parse_known_args()
args = self.args
if args.show_help:
self.parser.print_help()
self._ta_status = "ABORT"
self._ta_misc = "help was requested..."
self._exit_code = 1
sys.exit(1)
if args.runsolver != "None" and not os.path.isfile(args.runsolver) and not args.internal:
self._ta_status = "ABORT"
self._ta_misc = "runsolver is missing - should have been at %s." % (args.runsolver)
self._exit_code = 1
sys.exit(1)
else:
self._runsolver = args.runsolver
self._mem_limit = args.mem_limit
if not os.path.isdir(args.tmp_dir):
self._ta_status = "ABORT"
self._ta_misc = "temp directory is missing - should have been at %s." % (args.tmp_dir)
self._exit_code = 1
sys.exit(1)
else:
self._tmp_dir = args.tmp_dir
if len(target_args) < 5:
self._ta_status = "ABORT"
self._ta_misc = "some required TA parameters (instance, specifics, cutoff, runlength, seed) missing - was [%s]." % (" ".join(target_args))
self._exit_code = 1
sys.exit(1)
config_dict = self.build_parameter_dict(target_args)
runargs = {
"instance": self._instance,
"specifics" : self._specifics,
"cutoff" : self._cutoff,
"runlength" : self._runlength,
"seed" : self._seed
}
if args.ext_callstring:
target_cmd = self.get_command_line_args_ext(runargs=runargs, config=config_dict, ext_call=args.ext_callstring).split(" ")
else:
target_cmd = self.get_command_line_args(runargs=runargs, config=config_dict).split(" ")
if not args.internal:
self.call_target(target_cmd)
self.read_runsolver_output()
if args.ext_parsing:
resultMap = self.process_results_ext(self._solver_file, {"exit_code" : self._ta_exit_code}, ext_call=args.ext_parsing)
else:
resultMap = self.process_results(self._solver_file, {"exit_code" : self._ta_exit_code})
if ('status' in resultMap):
self._ta_status = self.RESULT_MAPPING.get(resultMap['status'],resultMap['status'])
if ('runtime' in resultMap):
self._ta_runtime = resultMap['runtime']
if ('quality' in resultMap):
self._ta_quality = resultMap['quality']
if ('misc' in resultMap):
self._ta_misc = resultMap['misc']
# if still no status was determined, something went wrong and output files should be kept
if self._ta_status is "EXTERNALKILL":
self._ta_status = "CRASHED"
sys.exit()
except (KeyboardInterrupt, SystemExit):
self.cleanup()
self.print_result_string()
if self._ta_exit_code:
sys.exit(self._ta_exit_code)
elif self._exit_code:
sys.exit(self._exit_code)
else:
sys.exit(0)
def build_parameter_dict(self, arg_list):
'''
Reads all arguments which were not parsed by ArgumentParser,
extracts all meta information
and builds a mapping: parameter name -> parameter value
Format Assumption: <instance> <specifics> <runtime cutoff> <runlength> <seed> <solver parameters>
Args:
list of all options not parsed by ArgumentParser
'''
self._instance = arg_list[1]
self._specifics = arg_list[2]
self._cutoff = int(float(arg_list[3]) + 1) # runsolver only rounds down to integer
self._runlength = int(arg_list[4])
self._seed = int(arg_list[5])
params = arg_list[6:]
if len(params) % 2 != 0:
self._ta_status = "ABORT"
self._ta_misc = "target algorithm parameter list MUST have even length - found %d arguments." % (len(params))
self.print_d(" ".join(params))
self._exit_code = 1
sys.exit(1)
return dict((name, value) for name, value in zip(params[::2], params[1::2]))
def call_target(self, target_cmd):
'''
extends the target algorithm command line call with the runsolver
and executes it
Args:
list of target cmd (from getCommandLineArgs)
'''
random_id = random.randint(0,1000000)
self._watcher_file = NamedTemporaryFile(suffix=".log", prefix="watcher-%d-" %(random_id), dir=self._tmp_dir, delete=False)
self._solver_file = NamedTemporaryFile(suffix=".log", prefix="solver-%d-" %(random_id), dir=self._tmp_dir, delete=False)
runsolver_cmd = []
if self._runsolver != "None":
runsolver_cmd = [self._runsolver, "-M", self._mem_limit, "-C", self._cutoff,
"-w", self._watcher_file.name,
"-o", self._solver_file.name]
runsolver_cmd.extend(target_cmd)
#for debugging
self.print_d("Calling runsolver. Command-line:")
self.print_d(" ".join(map(str,runsolver_cmd)))
# run
try:
if self._runsolver != "None":
io = Popen(map(str, runsolver_cmd), shell=False, preexec_fn=os.setpgrp)
else:
io = Popen(map(str, runsolver_cmd), stdout=self._solver_file, shell=False, preexec_fn=os.setpgrp)
self._subprocesses.append(io)
io.wait()
self._subprocesses.remove(io)
if io.stdout:
io.stdout.flush()
except OSError:
self._ta_status = "ABORT"
self._ta_misc = "execution failed: %s" % (" ".join(map(str,runsolver_cmd)))
self._exit_code = 1
sys.exit(1)
self._solver_file.seek(0)
def float_regex(self):
return r'[+-]?\d+(?:\.\d+)?(?:[eE][+-]?\d+)?'
def read_runsolver_output(self):
'''
reads self._watcher_file,
extracts runtime
and returns if memout or timeout found
'''
if self._runsolver == "None":
self._ta_exit_code = 0
return
self.print_d("Reading runsolver output from %s" % (self._watcher_file.name))
data = self._watcher_file.read()
if (re.search('runsolver_max_cpu_time_exceeded', data) or re.search('Maximum CPU time exceeded', data)):
self._ta_status = "TIMEOUT"
if (re.search('runsolver_max_memory_limit_exceeded', data)):
self._ta_status = "TIMEOUT"
self._ta_misc = "memory limit was exceeded"
cpu_pattern1 = re.compile('runsolver_cputime: (%s)' % (self.float_regex()))
cpu_match1 = re.search(cpu_pattern1, data)
cpu_pattern2 = re.compile('CPU time \\(s\\): (%s)' % (self.float_regex()))
cpu_match2 = re.search(cpu_pattern2, data)
if (cpu_match1):
self._ta_runtime = float(cpu_match1.group(1))
if (cpu_match2):
self._ta_runtime = float(cpu_match2.group(1))
exitcode_pattern = re.compile('Child status: ([0-9]+)')
exitcode_match = re.search(exitcode_pattern, data)
if (exitcode_match):
self._ta_exit_code = int(exitcode_match.group(1))
def print_result_string(self):
sys.stdout.write("Result for ParamILS: %s, %s, %s, %s, %s" % (self._ta_status, str(self._ta_runtime), str(self._ta_runlength), str(self._ta_quality), str(self._seed)))
if (len(self._ta_misc) > 0):
sys.stdout.write(", %s" % (self._ta_misc))
print('')
def cleanup(self):
'''
cleanup if error occurred or external signal handled
'''
if (len(self._subprocesses) > 0):
print("killing the target run!")
try:
for sub in self._subprocesses:
#sub.terminate()
Popen(["pkill","-TERM", "-P",str(sub.pid)])
self.print_d("Wait %d seconds ..." % (self._DELAY2KILL))
time.sleep(self._DELAY2KILL)
if sub.returncode is None: # still running
sub.kill()
self.print_d("done... If anything in the subprocess tree fork'd a new process group, we may not have caught everything...")
self._ta_misc = "forced to exit by signal or keyboard interrupt."
self._ta_runtime = self._cutoff
except (OSError, KeyboardInterrupt, SystemExit):
self._ta_misc = "forced to exit by multiple signals/interrupts."
self._ta_runtime = self._cutoff
if (self._ta_status is "ABORT" or self._ta_status is "CRASHED"):
if (len(self._ta_misc) == 0):
self._ta_misc = 'Problem with run. Exit code was %d.' % (self._ta_exit_code)
if (self._watcher_file and self._solver_file):
self._ta_misc = self._ta_misc + '; Preserving runsolver output at %s - preserving target algorithm output at %s' % (self._watcher_file.name or "<none>", self._solver_file.name or "<none>")
try:
if (self._watcher_file):
self._watcher_file.close()
if (self._solver_file):
self._solver_file.close()
if (self._ta_status is not "ABORT" and self._ta_status is not "CRASHED"):
os.remove(self._watcher_file.name)
os.remove(self._solver_file.name)
except (OSError, KeyboardInterrupt, SystemExit):
self._ta_misc = "problems removing temporary files during cleanup."
except AttributeError:
pass #in internal mode, these files are not generated
if self._ta_status is "EXTERNALKILL":
self._ta_status = "CRASHED"
self._exit_code = 3
def get_command_line_args(self, runargs, config):
'''
Returns the command line call string used to execute the implementing subclass' solver;
main() splits it on spaces. This default implementation is abstract: subclasses must
override it (or supply --ext-callstring), otherwise a NotImplementedError is raised.
Args:
runargs: a map of any non-configuration arguments required for the execution of the solver.
config: a mapping from parameter name (with prefix) to parameter value.
Returns:
A command line call string to execute the target algorithm.
'''
raise NotImplementedError()
def get_command_line_args_ext(self, runargs, config, ext_call):
'''
When production of the target algorithm is done from a source other than python,
override this method to return a command call list to execute whatever you need to produce the command line.
Args:
runargs: a map of any non-configuration arguments required for the execution of the solver.
config: a mapping from parameter name (with prefix) to parameter value.
ext_call: string to call external program to get callstring of target algorithm
Returns:
A command line call string for the target algorithm (the single line of output produced by the external program).
'''
callstring_in = NamedTemporaryFile(suffix=".csv", prefix="callstring", dir=self._tmp_dir, delete=False)
callstring_in.write("%s\n" %(runargs["instance"]))
callstring_in.write("%d\n" %(runargs["seed"]))
for name,value in config.items():
callstring_in.write("%s,%s\n" %(name,value))
callstring_in.flush()
cmd = ext_call.split(" ")
cmd.append(callstring_in.name)
self.print_d(" ".join(cmd))
try:
io = Popen(cmd, shell=False, preexec_fn=os.setpgrp, stdout=PIPE)
self._subprocesses.append(io)
out_, _ = io.communicate()
self._subprocesses.remove(io)
except OSError:
self._ta_misc = "failed to run external program for output parsing : %s" %(" ".join(cmd))
self._ta_runtime = self._cutoff
self._exit_code = 2
sys.exit(2)
if not out_:
self._ta_misc = "external program for output parsing yielded empty output: %s" %(" ".join(cmd))
self._ta_runtime = self._cutoff
self._exit_code = 2
sys.exit(2)
callstring_in.close()
os.remove(callstring_in.name)
return out_.strip("\n")
def process_results(self, filepointer, out_args):
'''
Parse a results file to extract the run's status (SUCCESS/CRASHED/etc) and other optional results.
Args:
filepointer: a pointer to the file containing the solver execution standard out.
exit_code : exit code of target algorithm
Returns:
A map containing the standard AClib run results. The current standard result map as of AClib 2.06 is:
{
"status" : <"SAT"/"UNSAT"/"TIMEOUT"/"CRASHED"/"ABORT">,
"runtime" : <runtime of target algrithm>,
"quality" : <a domain specific measure of the quality of the solution [optional]>,
"misc" : <a (comma-less) string that will be associated with the run [optional]>
}
ATTENTION: The return values will overwrite the measured results of the runsolver (if runsolver was used).
'''
raise NotImplementedError()
def process_results_ext(self, filepointer, out_args, ext_call):
'''
Args:
filepointer: a pointer to the file containing the solver execution standard out.
exit_code : exit code of target algorithm
Returns:
A map containing the standard AClib run results. The current standard result map as of AClib 2.06 is:
{
"status" : <"SAT"/"UNSAT"/"TIMEOUT"/"CRASHED"/"ABORT">,
"quality" : <a domain specific measure of the quality of the solution [optional]>,
"misc" : <a (comma-less) string that will be associated with the run [optional]>
}
'''
cmd = ext_call.split(" ")
cmd.append(filepointer.name)
self.print_d(" ".join(cmd))
try:
io = Popen(cmd, shell=False, preexec_fn=os.setpgrp, stdout=PIPE)
self._subprocesses.append(io)
out_, _ = io.communicate()
self._subprocesses.remove(io)
except OSError:
self._ta_misc = "failed to run external program for output parsing"
self._ta_runtime = self._cutoff
self._exit_code = 2
sys.exit(2)
result_map = {}
for line in out_.split("\n"):
if line.startswith("status:"):
result_map["status"] = line.split(":")[1].strip(" ")
elif line.startswith("quality:"):
result_map["quality"] = line.split(":")[1].strip(" ")
elif line.startswith("misc:"):
result_map["misc"] = line.split(":")[1]
return result_map
#===============================================================================
# if __name__ == "__main__":
# sys.exit(main())
#===============================================================================
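# Hedged minimal subclass sketch (illustrative, not part of the original
# wrapper): shows how the two abstract hooks are meant to be filled in.
# "MySolverWrapper", the "mysolver" binary and the "s SATISFIABLE" output
# line are assumptions for the example only.
class MySolverWrapper(AbstractWrapper):
    def get_command_line_args(self, runargs, config):
        # build one command line string; main() splits it on spaces
        params = " ".join("%s %s" % (name, value) for name, value in config.items())
        return "mysolver %s %d %s" % (runargs["instance"], runargs["seed"], params)
    def process_results(self, filepointer, out_args):
        # scan the solver's stdout for a status line; "SUCCESS" is mapped
        # to "SAT" via RESULT_MAPPING in AbstractWrapper.__init__
        data = filepointer.read()
        status = "SUCCESS" if "s SATISFIABLE" in data else "TIMEOUT"
        return {"status": status}
# if __name__ == "__main__":
#     MySolverWrapper().main()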
|
Edraak/edraak-platform
|
refs/heads/master
|
common/djangoapps/util/migrations/0002_data__default_rate_limit_config.py
|
24
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
# Converted from the original South migration 0002_default_rate_limit_config.py
def forwards(apps, schema_editor):
"""Ensure that rate limiting is enabled by default. """
RateLimitConfiguration = apps.get_model("util", "RateLimitConfiguration")
objects = RateLimitConfiguration.objects
if not objects.exists():
objects.create(enabled=True)
class Migration(migrations.Migration):
dependencies = [
('util', '0001_initial'),
]
operations = [
migrations.RunPython(forwards)
]
|
Nitaco/ansible
|
refs/heads/devel
|
test/runner/lib/sanity/shellcheck.py
|
28
|
"""Sanity test using shellcheck."""
from __future__ import absolute_import, print_function
import os
from xml.etree.ElementTree import (
fromstring,
Element,
)
from lib.sanity import (
SanitySingleVersion,
SanityMessage,
SanityFailure,
SanitySuccess,
SanitySkipped,
)
from lib.util import (
SubprocessError,
run_command,
)
from lib.config import (
SanityConfig,
)
class ShellcheckTest(SanitySingleVersion):
"""Sanity test using shellcheck."""
def test(self, args, targets):
"""
:type args: SanityConfig
:type targets: SanityTargets
:rtype: TestResult
"""
with open('test/sanity/shellcheck/skip.txt', 'r') as skip_fd:
skip_paths = set(skip_fd.read().splitlines())
with open('test/sanity/shellcheck/exclude.txt', 'r') as exclude_fd:
exclude = set(exclude_fd.read().splitlines())
paths = sorted(i.path for i in targets.include if os.path.splitext(i.path)[1] == '.sh' and i.path not in skip_paths)
if not paths:
return SanitySkipped(self.name)
cmd = [
'shellcheck',
'-e', ','.join(sorted(exclude)),
'--format', 'checkstyle',
] + paths
try:
stdout, stderr = run_command(args, cmd, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout
stderr = ex.stderr
status = ex.status
if stderr or status > 1:
raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
if args.explain:
return SanitySuccess(self.name)
# json output is missing file paths in older versions of shellcheck, so we'll use xml instead
root = fromstring(stdout) # type: Element
results = []
for item in root: # type: Element
for entry in item: # type: Element
results.append(SanityMessage(
message=entry.attrib['message'],
path=item.attrib['name'],
line=int(entry.attrib['line']),
column=int(entry.attrib['column']),
level=entry.attrib['severity'],
code=entry.attrib['source'].replace('ShellCheck.', ''),
))
if results:
return SanityFailure(self.name, messages=results)
return SanitySuccess(self.name)
|
40223102/w17b_test
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/unittest/test/testmock/testhelpers.py
|
737
|
import unittest
from unittest.mock import (
call, _Call, create_autospec, MagicMock,
Mock, ANY, _CallList, patch, PropertyMock
)
from datetime import datetime
class SomeClass(object):
def one(self, a, b):
pass
def two(self):
pass
def three(self, a=None):
pass
class AnyTest(unittest.TestCase):
def test_any(self):
self.assertEqual(ANY, object())
mock = Mock()
mock(ANY)
mock.assert_called_with(ANY)
mock = Mock()
mock(foo=ANY)
mock.assert_called_with(foo=ANY)
def test_repr(self):
self.assertEqual(repr(ANY), '<ANY>')
self.assertEqual(str(ANY), '<ANY>')
def test_any_and_datetime(self):
mock = Mock()
mock(datetime.now(), foo=datetime.now())
mock.assert_called_with(ANY, foo=ANY)
def test_any_mock_calls_comparison_order(self):
mock = Mock()
d = datetime.now()
class Foo(object):
def __eq__(self, other):
return False
def __ne__(self, other):
return True
for d in datetime.now(), Foo():
mock.reset_mock()
mock(d, foo=d, bar=d)
mock.method(d, zinga=d, alpha=d)
mock().method(a1=d, z99=d)
expected = [
call(ANY, foo=ANY, bar=ANY),
call.method(ANY, zinga=ANY, alpha=ANY),
call(), call().method(a1=ANY, z99=ANY)
]
self.assertEqual(expected, mock.mock_calls)
self.assertEqual(mock.mock_calls, expected)
class CallTest(unittest.TestCase):
def test_call_with_call(self):
kall = _Call()
self.assertEqual(kall, _Call())
self.assertEqual(kall, _Call(('',)))
self.assertEqual(kall, _Call(((),)))
self.assertEqual(kall, _Call(({},)))
self.assertEqual(kall, _Call(('', ())))
self.assertEqual(kall, _Call(('', {})))
self.assertEqual(kall, _Call(('', (), {})))
self.assertEqual(kall, _Call(('foo',)))
self.assertEqual(kall, _Call(('bar', ())))
self.assertEqual(kall, _Call(('baz', {})))
self.assertEqual(kall, _Call(('spam', (), {})))
kall = _Call(((1, 2, 3),))
self.assertEqual(kall, _Call(((1, 2, 3),)))
self.assertEqual(kall, _Call(('', (1, 2, 3))))
self.assertEqual(kall, _Call(((1, 2, 3), {})))
self.assertEqual(kall, _Call(('', (1, 2, 3), {})))
kall = _Call(((1, 2, 4),))
self.assertNotEqual(kall, _Call(('', (1, 2, 3))))
self.assertNotEqual(kall, _Call(('', (1, 2, 3), {})))
kall = _Call(('foo', (1, 2, 4),))
self.assertNotEqual(kall, _Call(('', (1, 2, 4))))
self.assertNotEqual(kall, _Call(('', (1, 2, 4), {})))
self.assertNotEqual(kall, _Call(('bar', (1, 2, 4))))
self.assertNotEqual(kall, _Call(('bar', (1, 2, 4), {})))
kall = _Call(({'a': 3},))
self.assertEqual(kall, _Call(('', (), {'a': 3})))
self.assertEqual(kall, _Call(('', {'a': 3})))
self.assertEqual(kall, _Call(((), {'a': 3})))
self.assertEqual(kall, _Call(({'a': 3},)))
def test_empty__Call(self):
args = _Call()
self.assertEqual(args, ())
self.assertEqual(args, ('foo',))
self.assertEqual(args, ((),))
self.assertEqual(args, ('foo', ()))
self.assertEqual(args, ('foo',(), {}))
self.assertEqual(args, ('foo', {}))
self.assertEqual(args, ({},))
def test_named_empty_call(self):
args = _Call(('foo', (), {}))
self.assertEqual(args, ('foo',))
self.assertEqual(args, ('foo', ()))
self.assertEqual(args, ('foo',(), {}))
self.assertEqual(args, ('foo', {}))
self.assertNotEqual(args, ((),))
self.assertNotEqual(args, ())
self.assertNotEqual(args, ({},))
self.assertNotEqual(args, ('bar',))
self.assertNotEqual(args, ('bar', ()))
self.assertNotEqual(args, ('bar', {}))
def test_call_with_args(self):
args = _Call(((1, 2, 3), {}))
self.assertEqual(args, ((1, 2, 3),))
self.assertEqual(args, ('foo', (1, 2, 3)))
self.assertEqual(args, ('foo', (1, 2, 3), {}))
self.assertEqual(args, ((1, 2, 3), {}))
def test_named_call_with_args(self):
args = _Call(('foo', (1, 2, 3), {}))
self.assertEqual(args, ('foo', (1, 2, 3)))
self.assertEqual(args, ('foo', (1, 2, 3), {}))
self.assertNotEqual(args, ((1, 2, 3),))
self.assertNotEqual(args, ((1, 2, 3), {}))
def test_call_with_kwargs(self):
args = _Call(((), dict(a=3, b=4)))
self.assertEqual(args, (dict(a=3, b=4),))
self.assertEqual(args, ('foo', dict(a=3, b=4)))
self.assertEqual(args, ('foo', (), dict(a=3, b=4)))
self.assertEqual(args, ((), dict(a=3, b=4)))
def test_named_call_with_kwargs(self):
args = _Call(('foo', (), dict(a=3, b=4)))
self.assertEqual(args, ('foo', dict(a=3, b=4)))
self.assertEqual(args, ('foo', (), dict(a=3, b=4)))
self.assertNotEqual(args, (dict(a=3, b=4),))
self.assertNotEqual(args, ((), dict(a=3, b=4)))
def test_call_with_args_call_empty_name(self):
args = _Call(((1, 2, 3), {}))
self.assertEqual(args, call(1, 2, 3))
self.assertEqual(call(1, 2, 3), args)
self.assertTrue(call(1, 2, 3) in [args])
def test_call_ne(self):
self.assertNotEqual(_Call(((1, 2, 3),)), call(1, 2))
self.assertFalse(_Call(((1, 2, 3),)) != call(1, 2, 3))
self.assertTrue(_Call(((1, 2), {})) != call(1, 2, 3))
def test_call_non_tuples(self):
kall = _Call(((1, 2, 3),))
for value in 1, None, self, int:
self.assertNotEqual(kall, value)
self.assertFalse(kall == value)
def test_repr(self):
self.assertEqual(repr(_Call()), 'call()')
self.assertEqual(repr(_Call(('foo',))), 'call.foo()')
self.assertEqual(repr(_Call(((1, 2, 3), {'a': 'b'}))),
"call(1, 2, 3, a='b')")
self.assertEqual(repr(_Call(('bar', (1, 2, 3), {'a': 'b'}))),
"call.bar(1, 2, 3, a='b')")
self.assertEqual(repr(call), 'call')
self.assertEqual(str(call), 'call')
self.assertEqual(repr(call()), 'call()')
self.assertEqual(repr(call(1)), 'call(1)')
self.assertEqual(repr(call(zz='thing')), "call(zz='thing')")
self.assertEqual(repr(call().foo), 'call().foo')
self.assertEqual(repr(call(1).foo.bar(a=3).bing),
'call().foo.bar().bing')
self.assertEqual(
repr(call().foo(1, 2, a=3)),
"call().foo(1, 2, a=3)"
)
self.assertEqual(repr(call()()), "call()()")
self.assertEqual(repr(call(1)(2)), "call()(2)")
self.assertEqual(
repr(call()().bar().baz.beep(1)),
"call()().bar().baz.beep(1)"
)
def test_call(self):
self.assertEqual(call(), ('', (), {}))
self.assertEqual(call('foo', 'bar', one=3, two=4),
('', ('foo', 'bar'), {'one': 3, 'two': 4}))
mock = Mock()
mock(1, 2, 3)
mock(a=3, b=6)
self.assertEqual(mock.call_args_list,
[call(1, 2, 3), call(a=3, b=6)])
def test_attribute_call(self):
self.assertEqual(call.foo(1), ('foo', (1,), {}))
self.assertEqual(call.bar.baz(fish='eggs'),
('bar.baz', (), {'fish': 'eggs'}))
mock = Mock()
mock.foo(1, 2 ,3)
mock.bar.baz(a=3, b=6)
self.assertEqual(mock.method_calls,
[call.foo(1, 2, 3), call.bar.baz(a=3, b=6)])
def test_extended_call(self):
result = call(1).foo(2).bar(3, a=4)
self.assertEqual(result, ('().foo().bar', (3,), dict(a=4)))
mock = MagicMock()
mock(1, 2, a=3, b=4)
self.assertEqual(mock.call_args, call(1, 2, a=3, b=4))
self.assertNotEqual(mock.call_args, call(1, 2, 3))
self.assertEqual(mock.call_args_list, [call(1, 2, a=3, b=4)])
self.assertEqual(mock.mock_calls, [call(1, 2, a=3, b=4)])
mock = MagicMock()
mock.foo(1).bar()().baz.beep(a=6)
last_call = call.foo(1).bar()().baz.beep(a=6)
self.assertEqual(mock.mock_calls[-1], last_call)
self.assertEqual(mock.mock_calls, last_call.call_list())
def test_call_list(self):
mock = MagicMock()
mock(1)
self.assertEqual(call(1).call_list(), mock.mock_calls)
mock = MagicMock()
mock(1).method(2)
self.assertEqual(call(1).method(2).call_list(),
mock.mock_calls)
mock = MagicMock()
mock(1).method(2)(3)
self.assertEqual(call(1).method(2)(3).call_list(),
mock.mock_calls)
mock = MagicMock()
int(mock(1).method(2)(3).foo.bar.baz(4)(5))
kall = call(1).method(2)(3).foo.bar.baz(4)(5).__int__()
self.assertEqual(kall.call_list(), mock.mock_calls)
def test_call_any(self):
self.assertEqual(call, ANY)
m = MagicMock()
int(m)
self.assertEqual(m.mock_calls, [ANY])
self.assertEqual([ANY], m.mock_calls)
def test_two_args_call(self):
args = _Call(((1, 2), {'a': 3}), two=True)
self.assertEqual(len(args), 2)
self.assertEqual(args[0], (1, 2))
self.assertEqual(args[1], {'a': 3})
other_args = _Call(((1, 2), {'a': 3}))
self.assertEqual(args, other_args)
class SpecSignatureTest(unittest.TestCase):
def _check_someclass_mock(self, mock):
self.assertRaises(AttributeError, getattr, mock, 'foo')
mock.one(1, 2)
mock.one.assert_called_with(1, 2)
self.assertRaises(AssertionError,
mock.one.assert_called_with, 3, 4)
self.assertRaises(TypeError, mock.one, 1)
mock.two()
mock.two.assert_called_with()
self.assertRaises(AssertionError,
mock.two.assert_called_with, 3)
self.assertRaises(TypeError, mock.two, 1)
mock.three()
mock.three.assert_called_with()
self.assertRaises(AssertionError,
mock.three.assert_called_with, 3)
self.assertRaises(TypeError, mock.three, 3, 2)
mock.three(1)
mock.three.assert_called_with(1)
mock.three(a=1)
mock.three.assert_called_with(a=1)
def test_basic(self):
for spec in (SomeClass, SomeClass()):
mock = create_autospec(spec)
self._check_someclass_mock(mock)
def test_create_autospec_return_value(self):
def f():
pass
mock = create_autospec(f, return_value='foo')
self.assertEqual(mock(), 'foo')
class Foo(object):
pass
mock = create_autospec(Foo, return_value='foo')
self.assertEqual(mock(), 'foo')
def test_autospec_reset_mock(self):
m = create_autospec(int)
int(m)
m.reset_mock()
self.assertEqual(m.__int__.call_count, 0)
def test_mocking_unbound_methods(self):
class Foo(object):
def foo(self, foo):
pass
p = patch.object(Foo, 'foo')
mock_foo = p.start()
Foo().foo(1)
mock_foo.assert_called_with(1)
def test_create_autospec_unbound_methods(self):
# see mock issue 128
# this is expected to fail until the issue is fixed
return
class Foo(object):
def foo(self):
pass
klass = create_autospec(Foo)
instance = klass()
self.assertRaises(TypeError, instance.foo, 1)
# Note: no type checking on the "self" parameter
klass.foo(1)
klass.foo.assert_called_with(1)
self.assertRaises(TypeError, klass.foo)
def test_create_autospec_keyword_arguments(self):
class Foo(object):
a = 3
m = create_autospec(Foo, a='3')
self.assertEqual(m.a, '3')
def test_create_autospec_keyword_only_arguments(self):
def foo(a, *, b=None):
pass
m = create_autospec(foo)
m(1)
m.assert_called_with(1)
self.assertRaises(TypeError, m, 1, 2)
m(2, b=3)
m.assert_called_with(2, b=3)
def test_function_as_instance_attribute(self):
obj = SomeClass()
def f(a):
pass
obj.f = f
mock = create_autospec(obj)
mock.f('bing')
mock.f.assert_called_with('bing')
def test_spec_as_list(self):
# because spec as a list of strings in the mock constructor means
# something very different, we treat a list instance as the type.
mock = create_autospec([])
mock.append('foo')
mock.append.assert_called_with('foo')
self.assertRaises(AttributeError, getattr, mock, 'foo')
class Foo(object):
foo = []
mock = create_autospec(Foo)
mock.foo.append(3)
mock.foo.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.foo, 'foo')
def test_attributes(self):
class Sub(SomeClass):
attr = SomeClass()
sub_mock = create_autospec(Sub)
for mock in (sub_mock, sub_mock.attr):
self._check_someclass_mock(mock)
def test_builtin_functions_types(self):
# we could replace builtin functions / methods with a function
# with *args / **kwargs signature. Using the builtin method type
# as a spec seems to work fairly well though.
class BuiltinSubclass(list):
def bar(self, arg):
pass
sorted = sorted
attr = {}
mock = create_autospec(BuiltinSubclass)
mock.append(3)
mock.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.append, 'foo')
mock.bar('foo')
mock.bar.assert_called_with('foo')
self.assertRaises(TypeError, mock.bar, 'foo', 'bar')
self.assertRaises(AttributeError, getattr, mock.bar, 'foo')
mock.sorted([1, 2])
mock.sorted.assert_called_with([1, 2])
self.assertRaises(AttributeError, getattr, mock.sorted, 'foo')
mock.attr.pop(3)
mock.attr.pop.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.attr, 'foo')
def test_method_calls(self):
class Sub(SomeClass):
attr = SomeClass()
mock = create_autospec(Sub)
mock.one(1, 2)
mock.two()
mock.three(3)
expected = [call.one(1, 2), call.two(), call.three(3)]
self.assertEqual(mock.method_calls, expected)
mock.attr.one(1, 2)
mock.attr.two()
mock.attr.three(3)
expected.extend(
[call.attr.one(1, 2), call.attr.two(), call.attr.three(3)]
)
self.assertEqual(mock.method_calls, expected)
def test_magic_methods(self):
class BuiltinSubclass(list):
attr = {}
mock = create_autospec(BuiltinSubclass)
self.assertEqual(list(mock), [])
self.assertRaises(TypeError, int, mock)
self.assertRaises(TypeError, int, mock.attr)
self.assertEqual(list(mock), [])
self.assertIsInstance(mock['foo'], MagicMock)
self.assertIsInstance(mock.attr['foo'], MagicMock)
def test_spec_set(self):
class Sub(SomeClass):
attr = SomeClass()
for spec in (Sub, Sub()):
mock = create_autospec(spec, spec_set=True)
self._check_someclass_mock(mock)
self.assertRaises(AttributeError, setattr, mock, 'foo', 'bar')
self.assertRaises(AttributeError, setattr, mock.attr, 'foo', 'bar')
def test_descriptors(self):
class Foo(object):
@classmethod
def f(cls, a, b):
pass
@staticmethod
def g(a, b):
pass
class Bar(Foo):
pass
class Baz(SomeClass, Bar):
pass
for spec in (Foo, Foo(), Bar, Bar(), Baz, Baz()):
mock = create_autospec(spec)
mock.f(1, 2)
mock.f.assert_called_once_with(1, 2)
mock.g(3, 4)
mock.g.assert_called_once_with(3, 4)
def test_recursive(self):
class A(object):
def a(self):
pass
foo = 'foo bar baz'
bar = foo
A.B = A
mock = create_autospec(A)
mock()
self.assertFalse(mock.B.called)
mock.a()
mock.B.a()
self.assertEqual(mock.method_calls, [call.a(), call.B.a()])
self.assertIs(A.foo, A.bar)
self.assertIsNot(mock.foo, mock.bar)
mock.foo.lower()
self.assertRaises(AssertionError, mock.bar.lower.assert_called_with)
def test_spec_inheritance_for_classes(self):
class Foo(object):
def a(self):
pass
class Bar(object):
def f(self):
pass
class_mock = create_autospec(Foo)
self.assertIsNot(class_mock, class_mock())
for this_mock in class_mock, class_mock():
this_mock.a()
this_mock.a.assert_called_with()
self.assertRaises(TypeError, this_mock.a, 'foo')
self.assertRaises(AttributeError, getattr, this_mock, 'b')
instance_mock = create_autospec(Foo())
instance_mock.a()
instance_mock.a.assert_called_with()
self.assertRaises(TypeError, instance_mock.a, 'foo')
self.assertRaises(AttributeError, getattr, instance_mock, 'b')
        # The return value isn't callable
self.assertRaises(TypeError, instance_mock)
instance_mock.Bar.f()
instance_mock.Bar.f.assert_called_with()
self.assertRaises(AttributeError, getattr, instance_mock.Bar, 'g')
instance_mock.Bar().f()
instance_mock.Bar().f.assert_called_with()
self.assertRaises(AttributeError, getattr, instance_mock.Bar(), 'g')
def test_inherit(self):
class Foo(object):
a = 3
Foo.Foo = Foo
# class
mock = create_autospec(Foo)
instance = mock()
self.assertRaises(AttributeError, getattr, instance, 'b')
attr_instance = mock.Foo()
self.assertRaises(AttributeError, getattr, attr_instance, 'b')
# instance
mock = create_autospec(Foo())
self.assertRaises(AttributeError, getattr, mock, 'b')
self.assertRaises(TypeError, mock)
# attribute instance
call_result = mock.Foo()
self.assertRaises(AttributeError, getattr, call_result, 'b')
def test_builtins(self):
# used to fail with infinite recursion
create_autospec(1)
create_autospec(int)
create_autospec('foo')
create_autospec(str)
create_autospec({})
create_autospec(dict)
create_autospec([])
create_autospec(list)
create_autospec(set())
create_autospec(set)
create_autospec(1.0)
create_autospec(float)
create_autospec(1j)
create_autospec(complex)
create_autospec(False)
create_autospec(True)
def test_function(self):
def f(a, b):
pass
mock = create_autospec(f)
self.assertRaises(TypeError, mock)
mock(1, 2)
mock.assert_called_with(1, 2)
f.f = f
mock = create_autospec(f)
self.assertRaises(TypeError, mock.f)
mock.f(3, 4)
mock.f.assert_called_with(3, 4)
def test_skip_attributeerrors(self):
class Raiser(object):
def __get__(self, obj, type=None):
if obj is None:
raise AttributeError('Can only be accessed via an instance')
class RaiserClass(object):
raiser = Raiser()
@staticmethod
def existing(a, b):
return a + b
s = create_autospec(RaiserClass)
        self.assertRaises(TypeError, lambda: s.existing(1, 2, 3))
s.existing(1, 2)
self.assertRaises(AttributeError, lambda: s.nonexisting)
# check we can fetch the raiser attribute and it has no spec
obj = s.raiser
obj.foo, obj.bar
def test_signature_class(self):
class Foo(object):
def __init__(self, a, b=3):
pass
mock = create_autospec(Foo)
self.assertRaises(TypeError, mock)
mock(1)
mock.assert_called_once_with(1)
mock(4, 5)
mock.assert_called_with(4, 5)
def test_class_with_no_init(self):
# this used to raise an exception
# due to trying to get a signature from object.__init__
class Foo(object):
pass
create_autospec(Foo)
def test_signature_callable(self):
class Callable(object):
def __init__(self):
pass
def __call__(self, a):
pass
mock = create_autospec(Callable)
mock()
mock.assert_called_once_with()
self.assertRaises(TypeError, mock, 'a')
instance = mock()
self.assertRaises(TypeError, instance)
instance(a='a')
instance.assert_called_once_with(a='a')
instance('a')
instance.assert_called_with('a')
mock = create_autospec(Callable())
mock(a='a')
mock.assert_called_once_with(a='a')
self.assertRaises(TypeError, mock)
mock('a')
mock.assert_called_with('a')
def test_signature_noncallable(self):
class NonCallable(object):
def __init__(self):
pass
mock = create_autospec(NonCallable)
instance = mock()
mock.assert_called_once_with()
self.assertRaises(TypeError, mock, 'a')
self.assertRaises(TypeError, instance)
self.assertRaises(TypeError, instance, 'a')
mock = create_autospec(NonCallable())
self.assertRaises(TypeError, mock)
self.assertRaises(TypeError, mock, 'a')
def test_create_autospec_none(self):
class Foo(object):
bar = None
mock = create_autospec(Foo)
none = mock.bar
self.assertNotIsInstance(none, type(None))
none.foo()
none.foo.assert_called_once_with()
def test_autospec_functions_with_self_in_odd_place(self):
class Foo(object):
def f(a, self):
pass
a = create_autospec(Foo)
a.f(self=10)
a.f.assert_called_with(self=10)
def test_autospec_property(self):
class Foo(object):
@property
def foo(self):
return 3
foo = create_autospec(Foo)
mock_property = foo.foo
# no spec on properties
self.assertTrue(isinstance(mock_property, MagicMock))
mock_property(1, 2, 3)
mock_property.abc(4, 5, 6)
mock_property.assert_called_once_with(1, 2, 3)
mock_property.abc.assert_called_once_with(4, 5, 6)
def test_autospec_slots(self):
class Foo(object):
__slots__ = ['a']
foo = create_autospec(Foo)
mock_slot = foo.a
# no spec on slots
mock_slot(1, 2, 3)
mock_slot.abc(4, 5, 6)
mock_slot.assert_called_once_with(1, 2, 3)
mock_slot.abc.assert_called_once_with(4, 5, 6)
class TestCallList(unittest.TestCase):
def test_args_list_contains_call_list(self):
mock = Mock()
self.assertIsInstance(mock.call_args_list, _CallList)
mock(1, 2)
mock(a=3)
mock(3, 4)
mock(b=6)
for kall in call(1, 2), call(a=3), call(3, 4), call(b=6):
self.assertTrue(kall in mock.call_args_list)
calls = [call(a=3), call(3, 4)]
self.assertTrue(calls in mock.call_args_list)
calls = [call(1, 2), call(a=3)]
self.assertTrue(calls in mock.call_args_list)
calls = [call(3, 4), call(b=6)]
self.assertTrue(calls in mock.call_args_list)
calls = [call(3, 4)]
self.assertTrue(calls in mock.call_args_list)
self.assertFalse(call('fish') in mock.call_args_list)
self.assertFalse([call('fish')] in mock.call_args_list)
def test_call_list_str(self):
mock = Mock()
mock(1, 2)
mock.foo(a=3)
mock.foo.bar().baz('fish', cat='dog')
expected = (
"[call(1, 2),\n"
" call.foo(a=3),\n"
" call.foo.bar(),\n"
" call.foo.bar().baz('fish', cat='dog')]"
)
self.assertEqual(str(mock.mock_calls), expected)
def test_propertymock(self):
p = patch('%s.SomeClass.one' % __name__, new_callable=PropertyMock)
mock = p.start()
try:
SomeClass.one
mock.assert_called_once_with()
s = SomeClass()
s.one
mock.assert_called_with()
self.assertEqual(mock.mock_calls, [call(), call()])
s.one = 3
self.assertEqual(mock.mock_calls, [call(), call(), call(3)])
finally:
p.stop()
def test_propertymock_returnvalue(self):
m = MagicMock()
p = PropertyMock()
type(m).foo = p
returned = m.foo
p.assert_called_once_with()
self.assertIsInstance(returned, MagicMock)
self.assertNotIsInstance(returned, PropertyMock)
if __name__ == '__main__':
unittest.main()
|
andrewebdev/django-ostinato
|
refs/heads/master
|
demo/blog/models.py
|
1
|
from django.db import models
from ostinato.blog.models import BlogEntryBase
from ostinato.pages.registry import page_content
from taggit.managers import TaggableManager
from website.models import SEOPage
class Entry(BlogEntryBase):
tags = TaggableManager()
class Meta:
verbose_name_plural = "Entries"
@models.permalink
def get_absolute_url(self):
if self.publish_date:
return ("blog_entry_detail", [], {
'year': self.publish_date.year,
'month': self.publish_date.strftime('%m'),
'day': self.publish_date.strftime('%d'),
'slug': self.slug,
})
else:
return ("blog_entry_preview", [self.id], {})
@page_content.register
class LandingPage(SEOPage):
max_latest_entries = models.IntegerField(default=10,
help_text="The maximum number of latest entries to display")
class ContentOptions:
template = 'blog/landing_page.html'
view = 'blog.views.LandingPageView'
|
sdh11/gnuradio
|
refs/heads/master
|
gr-analog/python/analog/qa_pll_freqdet.py
|
7
|
#!/usr/bin/env python
#
# Copyright 2004,2007,2010-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import division
import math
from gnuradio import gr, gr_unittest, analog, blocks
class test_pll_freqdet(gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block()
def tearDown (self):
self.tb = None
def test_pll_freqdet(self):
expected_result = (0.0,
4.338889228818161e-08,
0.3776331578612825,
1.0993741049896133,
2.1332509128284287,
3.448827166947317,
5.017193050406445,
6.810936277840595,
8.804128662605573,
10.972292025122194,
13.292363360097312,
15.742678902380248,
18.302902979158944,
20.954030233328815,
23.678333003762834,
26.459293141999492,
29.2815901542755,
32.13105969864019,
34.99462836613535,
37.860284035876894,
40.71702547869386,
43.5548208542428,
46.364569172614004,
49.138038040003174,
51.86783994277676,
54.547378886619114,
57.17080592915505,
59.73298657053974,
62.229444428114014,
64.65634937843706,
67.01044048049889,
69.28902004673668,
71.48990028218192,
73.61137363954212,
75.65217724529884,
77.61146325478951,
79.48876920728905,
81.28396466515709,
82.9972452848542,
84.62912095897468,
86.18033873945902,
87.65188876657749,
89.0449983399466,
90.36106669970881,
91.6016768844999,
92.76854829957963,
93.86354857479924,
94.88865206171563,
95.84592204664062,
96.73751075064077,
97.56564154258655,
98.33257336525031,
99.04061259327368,
99.69208931723288,
100.28935141465512,
100.83475862103487,
101.33065881389933,
101.77937615484109,
102.18323480545271,
102.54452335342484,
102.8654948125462,
103.14836662270359,
103.39530879191456,
103.6084320383601,
103.78982336428665,
103.94148676616939,
104.06536695064705,
104.16337305045634,
104.23733119256288,
104.28900821409572,
104.32008794641274,
104.33220678900258,
104.32694185151738,
104.30578723783803,
104.27016590404165,
104.22144151636876,
104.16091845122337,
104.08982993720561,
104.00932619714447,
103.9205337379343,
103.82447234476369,
103.72213808688659,
103.6144440277858,
103.50225579907487,
103.38636788456353,
103.26755105212685,
103.14649306386876,
103.02383425002395,
102.90019122489248,
102.7761213129379,
102.65211069081985,
102.5286218192634,
102.40608158509168,
102.28486944325857,
102.16532927481605,
102.04778124488143,
101.93248622873554,
101.81969324369186,
101.70961573316195,
101.60243156665544)
sampling_freq = 10e3
freq = sampling_freq / 100
loop_bw = math.pi / 100.0
maxf = 1
minf = -1
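        # maxf/minf bound the PLL's frequency estimate in normalized
        # radians/sample; the detector output is scaled back to Hz below
        # using sampling_freq / (2*pi).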
src = analog.sig_source_c(sampling_freq, analog.GR_COS_WAVE, freq, 1.0)
pll = analog.pll_freqdet_cf(loop_bw, maxf, minf)
head = blocks.head(gr.sizeof_float, int (freq))
dst = blocks.vector_sink_f()
self.tb.connect(src, pll, head)
self.tb.connect(head, dst)
self.tb.run()
dst_data = dst.data()
# convert it from normalized frequency to absolute frequency (Hz)
dst_data = [i*(sampling_freq / (2*math.pi)) for i in dst_data]
self.assertFloatTuplesAlmostEqual(expected_result, dst_data, 3)
if __name__ == '__main__':
gr_unittest.run(test_pll_freqdet, "test_pll_freqdet.xml")
|
gangadhar-kadam/sms-erpnext
|
refs/heads/master
|
buying/doctype/purchase_receipt_item_supplied/purchase_receipt_item_supplied.py
|
483
|
# ERPNext - web based ERP (http://erpnext.com)
# Copyright (C) 2012 Web Notes Technologies Pvt Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import webnotes
class DocType:
def __init__(self, d, dl):
self.doc, self.doclist = d, dl
|
ActiveState/code
|
refs/heads/master
|
recipes/Python/474127_Simulating_concurrency/recipe-474127.py
|
1
|
import types
class method_pseudo_threads(object):
"""
Implements pseudo threads for class methods.
"""
_result = None
def __init__(self, caller, instance, *args, **kw):
self.caller = caller
self.this_function = self.func(instance, *args, **kw)
def next(self):
return self.this_function.next()
def call(self, function, *args, **kwds):
"""
Check if function is a method of an instance of pseudo_threads.
If so, call this method via the pseudo_thread mechanism
"""
special = hasattr(function, "_parallel")
if special:
return (None, function(self.this_function, *args, **kwds))
else:
return (function(*args, **kwds), self.this_function)
def Return(self, value):
"Return value to the caller"
return (value, self.caller)
def start(self, thread, *args, **kwds):
"Start a new pseudo_thread thread, which runs in parallel to the current thread"
return (None, [self.this_function, thread(None, *args, **kwds)])
def run(self):
"""
Calls next for all running threads.
"""
queue = [self.next()]
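        # Each queue entry is a (last_result, generator) pair. Advancing a
        # generator yields either another (result, continuation) pair to
        # keep running, a list of continuations (a fork created via
        # start()), or a falsy continuation meaning that thread finished.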
iterations = ticks = 0
while queue:
iterations += 1
newqueue = []
for result, continuation in queue:
ticks += 1
method_pseudo_threads._result = result
result, continuations = continuation.next()
if type(continuations) == types.ListType:
for continuation in continuations:
newqueue.append((None, continuation))
else:
if continuations:
newqueue.append((result, continuations))
queue = newqueue
self.iterations, self.ticks = iterations, ticks
return self._result
def parallel(function):
"""
Decorator to turn a method into a pseudo_thread.
    The method itself should be written as a generator:
    def method(th, self, *args, **kwds): ...
    Use th.call to call another method which is a pseudo thread.
    Use th.Return to return a value to the caller.
    Use th.start to launch another pseudo thread that runs in parallel.
    self is the reference to the enclosing instance.
"""
class p(method_pseudo_threads):
name = "parallel_class_" + function.func_name
p.func = function
def glue(self, caller, *args, **kwds):
thread = p(caller, self, *args, **kwds)
yield (None, thread)
glue._parallel = True
glue.func_name = "parallel_" + function.func_name
return glue
class start(method_pseudo_threads):
def func(self, instance, *args, **kw):
yield (None, instance)
yield (None, None)
# Example:
class ackermann(object):
@parallel
def acker(th, self, m, n):
call, Return = th.call, th.Return
if m == 0:
yield th.Return(n+1)
elif m > 0 and n == 0:
yield call(self.acker, m-1, 1)
yield Return(th._result)
elif m > 0 and n > 0:
yield call(self.acker, m, n-1)
yield call(self.acker, m-1, th._result)
yield Return(th._result)
else:
assert 0
@parallel
def print_ackermann(th, self, m, n):
yield th.call(self.acker, m, n)
        print "Ackermann(", m, ",", n, ")=", th._result
yield th.Return(None)
@parallel
def start(th, self, i, j):
for i1 in range(i):
for j1 in range(j):
yield th.start(self.print_ackermann, i1, j1)
yield th.Return(None)
def version2():
ack = ackermann()
th = ack.start(None, 4, 5)
start(None, th).run()
if __name__ == '__main__':
version2()
|
karwa/swift
|
refs/heads/master
|
test/Driver/Inputs/crash-after-generating-pch.py
|
28
|
#!/usr/bin/env python
# crash.py - Sends SIGKILL to self. -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import os
import platform
import signal
import sys
assert sys.argv[1] == '-frontend'
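# Only the PCH generation job (-emit-pch) is allowed to run to completion;
# any other frontend job simulates a compiler crash.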
if '-emit-pch' not in sys.argv:
if platform.system() == 'Windows':
exit(-2)
else:
os.kill(os.getpid(), signal.SIGKILL)
outputFile = sys.argv[sys.argv.index('-o') + 1]
# Update the output file mtime, or create it if necessary.
# From http://stackoverflow.com/a/1160227.
with open(outputFile, 'a'):
os.utime(outputFile, None)
|
IOArmory/quarterbackpython
|
refs/heads/master
|
lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/compat.py
|
2942
|
######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
if sys.version_info < (3, 0):
base_str = (str, unicode)
else:
base_str = (bytes, str)
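# Iterating over a byte string yields 1-character str objects on Python 2
# (which need ord()) but plain ints on Python 3; wrap_ord() papers over
# that difference.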
def wrap_ord(a):
if sys.version_info < (3, 0) and isinstance(a, base_str):
return ord(a)
else:
return a
|
lukeiwanski/tensorflow
|
refs/heads/master
|
tensorflow/python/platform/app_test.py
|
201
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for our flags implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
flags.DEFINE_boolean('myflag', False, '')
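# app.run() consumes the flags defined above (--myflag) and forwards any
# unrecognized arguments ('--passthrough', 'extra') to main() via argv,
# which is exactly what this test asserts.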
def main(argv):
if (len(argv) != 3):
print("Length of argv was not 3: ", argv)
sys.exit(-1)
if argv[1] != "--passthrough":
print("--passthrough argument not in argv")
sys.exit(-1)
if argv[2] != "extra":
print("'extra' argument not in argv")
sys.exit(-1)
if __name__ == '__main__':
sys.argv.extend(["--myflag", "--passthrough", "extra"])
app.run()
|
sujeet4github/MyLangUtils
|
refs/heads/master
|
LangPython/oreilly-intro-to-flask-video/venv/lib/python3.6/site-packages/pip/_vendor/html5lib/_trie/_base.py
|
354
|
from __future__ import absolute_import, division, unicode_literals
from collections import Mapping
class Trie(Mapping):
"""Abstract base class for tries"""
def keys(self, prefix=None):
# pylint:disable=arguments-differ
keys = super(Trie, self).keys()
if prefix is None:
return set(keys)
# Python 2.6: no set comprehensions
return set([x for x in keys if x.startswith(prefix)])
def has_keys_with_prefix(self, prefix):
for key in self.keys():
if key.startswith(prefix):
return True
return False
def longest_prefix(self, prefix):
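        """Return the longest key in the trie that is a prefix of ``prefix``.

        ``prefix`` itself is checked first, then progressively shorter
        slices; raises KeyError when no key matches.
        """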
if prefix in self:
return prefix
for i in range(1, len(prefix) + 1):
if prefix[:-i] in self:
return prefix[:-i]
raise KeyError(prefix)
def longest_prefix_item(self, prefix):
lprefix = self.longest_prefix(prefix)
return (lprefix, self[lprefix])
|
sinperwolf/shadowsocks
|
refs/heads/master
|
shadowsocks/udprelay.py
|
924
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# SOCKS5 UDP Request
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# SOCKS5 UDP Response
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# shadowsocks UDP Request (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Response (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Request and Response (after encrypted)
# +-------+--------------+
# | IV | PAYLOAD |
# +-------+--------------+
# | Fixed | Variable |
# +-------+--------------+
# HOW TO NAME THINGS
# ------------------
# `dest` means destination server, which is from DST fields in the SOCKS5
# request
# `local` means local server of shadowsocks
# `remote` means remote server of shadowsocks
# `client` means UDP clients that connect to other servers
# `server` means the UDP server that handles user requests
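#
# For example, a local-side request for dest 8.8.8.8:53 (ATYP 0x01 = IPv4
# per SOCKS5) carries, after the 3-byte RSV/FRAG prefix is stripped:
#
#   01 08 08 08 08 00 35 <payload bytes>
#
# and this plaintext is what gets encrypted and sent to the remote server.
# (Illustrative values only, not part of the original source.)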
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import logging
import struct
import errno
import random
from shadowsocks import encrypt, eventloop, lru_cache, common, shell
from shadowsocks.common import parse_header, pack_addr
BUF_SIZE = 65536
def client_key(source_addr, server_af):
# notice this is server af, not dest af
return '%s:%s:%d' % (source_addr[0], source_addr[1], server_af)
class UDPRelay(object):
def __init__(self, config, dns_resolver, is_local, stat_callback=None):
self._config = config
if is_local:
self._listen_addr = config['local_address']
self._listen_port = config['local_port']
self._remote_addr = config['server']
self._remote_port = config['server_port']
else:
self._listen_addr = config['server']
self._listen_port = config['server_port']
self._remote_addr = None
self._remote_port = None
self._dns_resolver = dns_resolver
self._password = common.to_bytes(config['password'])
self._method = config['method']
self._timeout = config['timeout']
self._is_local = is_local
self._cache = lru_cache.LRUCache(timeout=config['timeout'],
close_callback=self._close_client)
self._client_fd_to_server_addr = \
lru_cache.LRUCache(timeout=config['timeout'])
self._dns_cache = lru_cache.LRUCache(timeout=300)
self._eventloop = None
self._closed = False
self._sockets = set()
if 'forbidden_ip' in config:
self._forbidden_iplist = config['forbidden_ip']
else:
self._forbidden_iplist = None
addrs = socket.getaddrinfo(self._listen_addr, self._listen_port, 0,
socket.SOCK_DGRAM, socket.SOL_UDP)
if len(addrs) == 0:
raise Exception("can't get addrinfo for %s:%d" %
(self._listen_addr, self._listen_port))
af, socktype, proto, canonname, sa = addrs[0]
server_socket = socket.socket(af, socktype, proto)
server_socket.bind((self._listen_addr, self._listen_port))
server_socket.setblocking(False)
self._server_socket = server_socket
self._stat_callback = stat_callback
def _get_a_server(self):
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
if type(server) == list:
server = random.choice(server)
logging.debug('chosen server: %s:%d', server, server_port)
return server, server_port
def _close_client(self, client):
if hasattr(client, 'close'):
self._sockets.remove(client.fileno())
self._eventloop.remove(client)
client.close()
else:
# just an address
pass
def _handle_server(self):
server = self._server_socket
data, r_addr = server.recvfrom(BUF_SIZE)
        if not data:
            logging.debug('UDP handle_server: data is empty')
            return
if self._stat_callback:
self._stat_callback(self._listen_port, len(data))
if self._is_local:
frag = common.ord(data[2])
if frag != 0:
logging.warn('drop a message since frag is not 0')
return
else:
data = data[3:]
        else:
            # flag 0 = decrypt: unwrap the encrypted payload from the client
            data = encrypt.encrypt_all(self._password, self._method, 0, data)
if not data:
logging.debug('UDP handle_server: data is empty after decrypt')
return
header_result = parse_header(data)
if header_result is None:
return
addrtype, dest_addr, dest_port, header_length = header_result
if self._is_local:
server_addr, server_port = self._get_a_server()
else:
server_addr, server_port = dest_addr, dest_port
addrs = self._dns_cache.get(server_addr, None)
if addrs is None:
addrs = socket.getaddrinfo(server_addr, server_port, 0,
socket.SOCK_DGRAM, socket.SOL_UDP)
if not addrs:
# drop
return
else:
self._dns_cache[server_addr] = addrs
af, socktype, proto, canonname, sa = addrs[0]
key = client_key(r_addr, af)
client = self._cache.get(key, None)
if not client:
# TODO async getaddrinfo
if self._forbidden_iplist:
if common.to_str(sa[0]) in self._forbidden_iplist:
logging.debug('IP %s is in forbidden list, drop' %
common.to_str(sa[0]))
# drop
return
client = socket.socket(af, socktype, proto)
client.setblocking(False)
self._cache[key] = client
self._client_fd_to_server_addr[client.fileno()] = r_addr
self._sockets.add(client.fileno())
self._eventloop.add(client, eventloop.POLL_IN, self)
if self._is_local:
data = encrypt.encrypt_all(self._password, self._method, 1, data)
if not data:
return
else:
data = data[header_length:]
if not data:
return
try:
client.sendto(data, (server_addr, server_port))
except IOError as e:
err = eventloop.errno_from_exception(e)
if err in (errno.EINPROGRESS, errno.EAGAIN):
pass
else:
shell.print_exception(e)
def _handle_client(self, sock):
data, r_addr = sock.recvfrom(BUF_SIZE)
if not data:
logging.debug('UDP handle_client: data is empty')
return
if self._stat_callback:
self._stat_callback(self._listen_port, len(data))
if not self._is_local:
addrlen = len(r_addr[0])
if addrlen > 255:
# drop
return
data = pack_addr(r_addr[0]) + struct.pack('>H', r_addr[1]) + data
response = encrypt.encrypt_all(self._password, self._method, 1,
data)
if not response:
return
else:
data = encrypt.encrypt_all(self._password, self._method, 0,
data)
if not data:
return
header_result = parse_header(data)
if header_result is None:
return
# addrtype, dest_addr, dest_port, header_length = header_result
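            # prepend RSV (2 bytes) + FRAG (1 byte) = b'\x00\x00\x00' to
            # form the SOCKS5 UDP response header described at the top of
            # this file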
response = b'\x00\x00\x00' + data
client_addr = self._client_fd_to_server_addr.get(sock.fileno())
if client_addr:
self._server_socket.sendto(response, client_addr)
else:
            # this packet came from an address we don't know about,
            # so simply drop it
pass
def add_to_loop(self, loop):
if self._eventloop:
raise Exception('already add to loop')
if self._closed:
raise Exception('already closed')
self._eventloop = loop
server_socket = self._server_socket
self._eventloop.add(server_socket,
eventloop.POLL_IN | eventloop.POLL_ERR, self)
loop.add_periodic(self.handle_periodic)
def handle_event(self, sock, fd, event):
if sock == self._server_socket:
if event & eventloop.POLL_ERR:
logging.error('UDP server_socket err')
self._handle_server()
elif sock and (fd in self._sockets):
if event & eventloop.POLL_ERR:
logging.error('UDP client_socket err')
self._handle_client(sock)
def handle_periodic(self):
if self._closed:
if self._server_socket:
self._server_socket.close()
self._server_socket = None
for sock in self._sockets:
sock.close()
logging.info('closed UDP port %d', self._listen_port)
self._cache.sweep()
self._client_fd_to_server_addr.sweep()
def close(self, next_tick=False):
logging.debug('UDP close')
self._closed = True
if not next_tick:
if self._eventloop:
self._eventloop.remove_periodic(self.handle_periodic)
self._eventloop.remove(self._server_socket)
self._server_socket.close()
for client in list(self._cache.values()):
client.close()
|
joshloyal/scikit-learn
|
refs/heads/master
|
benchmarks/bench_rcv1_logreg_convergence.py
|
58
|
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
import gc
import time
from sklearn.externals.joblib import Memory
from sklearn.linear_model import (LogisticRegression, SGDClassifier)
from sklearn.datasets import fetch_rcv1
from sklearn.linear_model.sag import get_auto_step_size
try:
import lightning.classification as lightning_clf
except ImportError:
lightning_clf = None
m = Memory(cachedir='.', verbose=0)
# compute logistic loss
def get_loss(w, intercept, myX, myy, C):
n_samples = myX.shape[0]
w = w.ravel()
p = np.mean(np.log(1. + np.exp(-myy * (myX.dot(w) + intercept))))
print("%f + %f" % (p, w.dot(w) / 2. / C / n_samples))
p += w.dot(w) / 2. / C / n_samples
return p
# We use joblib to cache individual fits. Note that we do not pass the dataset
# as an argument, because hashing it would be too slow; instead we assume that
# the dataset never changes.
@m.cache()
def bench_one(name, clf_type, clf_params, n_iter):
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
except:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
try:
C = 1.0 / clf.alpha / n_samples
except:
C = clf.C
try:
intercept = clf.intercept_
except:
intercept = 0.
train_loss = get_loss(clf.coef_, intercept, X, y, C)
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
return train_loss, train_score, test_score, duration
def bench(clfs):
for (name, clf, iter_range, train_losses, train_scores,
test_scores, durations) in clfs:
print("training %s" % name)
clf_type = type(clf)
clf_params = clf.get_params()
for n_iter in iter_range:
gc.collect()
train_loss, train_score, test_score, duration = bench_one(
name, clf_type, clf_params, n_iter)
train_losses.append(train_loss)
train_scores.append(train_score)
test_scores.append(test_score)
durations.append(duration)
print("classifier: %s" % name)
print("train_loss: %.8f" % train_loss)
print("train_score: %.8f" % train_score)
print("test_score: %.8f" % test_score)
print("time for fit: %.8f seconds" % duration)
print("")
print("")
return clfs
def plot_train_losses(clfs):
plt.figure()
for (name, _, _, train_losses, _, _, durations) in clfs:
plt.plot(durations, train_losses, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train loss")
def plot_train_scores(clfs):
plt.figure()
for (name, _, _, _, train_scores, _, durations) in clfs:
plt.plot(durations, train_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train score")
plt.ylim((0.92, 0.96))
def plot_test_scores(clfs):
plt.figure()
for (name, _, _, _, _, test_scores, durations) in clfs:
plt.plot(durations, test_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("test score")
plt.ylim((0.92, 0.96))
def plot_dloss(clfs):
plt.figure()
pobj_final = []
for (name, _, _, train_losses, _, _, durations) in clfs:
pobj_final.append(train_losses[-1])
indices = np.argsort(pobj_final)
pobj_best = pobj_final[indices[0]]
for (name, _, _, train_losses, _, _, durations) in clfs:
log_pobj = np.log(abs(np.array(train_losses) - pobj_best)) / np.log(10)
plt.plot(durations, log_pobj, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("log(best - train_loss)")
def get_max_squared_sum(X):
"""Get the maximum row-wise sum of squares"""
return np.sum(X ** 2, axis=1).max()
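# (used below to derive the SAG/SVRG step size via get_auto_step_size)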
rcv1 = fetch_rcv1()
X = rcv1.data
n_samples, n_features = X.shape
# consider the binary classification problem 'CCAT' vs the rest
ccat_idx = rcv1.target_names.tolist().index('CCAT')
y = rcv1.target.tocsc()[:, ccat_idx].toarray().ravel().astype(np.float64)
y[y == 0] = -1
# parameters
C = 1.
fit_intercept = True
tol = 1.0e-14
# max_iter range
sgd_iter_range = list(range(1, 121, 10))
newton_iter_range = list(range(1, 25, 3))
lbfgs_iter_range = list(range(1, 242, 12))
liblinear_iter_range = list(range(1, 37, 3))
liblinear_dual_iter_range = list(range(1, 85, 6))
sag_iter_range = list(range(1, 37, 3))
clfs = [
("LR-liblinear",
LogisticRegression(C=C, tol=tol,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_iter_range, [], [], [], []),
("LR-liblinear-dual",
LogisticRegression(C=C, tol=tol, dual=True,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_dual_iter_range, [], [], [], []),
("LR-SAG",
LogisticRegression(C=C, tol=tol,
solver="sag", fit_intercept=fit_intercept),
sag_iter_range, [], [], [], []),
("LR-newton-cg",
LogisticRegression(C=C, tol=tol, solver="newton-cg",
fit_intercept=fit_intercept),
newton_iter_range, [], [], [], []),
("LR-lbfgs",
LogisticRegression(C=C, tol=tol,
solver="lbfgs", fit_intercept=fit_intercept),
lbfgs_iter_range, [], [], [], []),
("SGD",
SGDClassifier(alpha=1.0 / C / n_samples, penalty='l2', loss='log',
fit_intercept=fit_intercept, verbose=0),
sgd_iter_range, [], [], [], [])]
if lightning_clf is not None and not fit_intercept:
alpha = 1. / C / n_samples
    # compute the same step_size as in LR-SAG
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha, "log",
fit_intercept)
clfs.append(
("Lightning-SVRG",
lightning_clf.SVRGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
clfs.append(
("Lightning-SAG",
lightning_clf.SAGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
# We keep only 200 features, to have a dense dataset,
# and compare to lightning SAG, which seems incorrect in the sparse case.
X_csc = X.tocsc()
nnz_in_each_features = X_csc.indptr[1:] - X_csc.indptr[:-1]
X = X_csc[:, np.argsort(nnz_in_each_features)[-200:]]
X = X.toarray()
print("dataset: %.3f MB" % (X.nbytes / 1e6))
# Split training and testing. Switch train and test subset compared to
# LYRL2004 split, to have a larger training dataset.
n = 23149
X_test = X[:n, :]
y_test = y[:n]
X = X[n:, :]
y = y[n:]
clfs = bench(clfs)
plot_train_scores(clfs)
plot_test_scores(clfs)
plot_train_losses(clfs)
plot_dloss(clfs)
plt.show()
|
lygaret/django-testshell
|
refs/heads/master
|
testshell/models.py
|
1
|
# In order to register the application, we need a model.py,
# even if it's empty. Nothing to see here.
|
vveerava/Openstack
|
refs/heads/master
|
neutron/tests/functional/db/test_migrations.py
|
2
|
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import pprint
import alembic
import alembic.autogenerate
import alembic.migration
import mock
from oslo.config import cfg
from oslo.db.sqlalchemy import test_base
from oslo.db.sqlalchemy import test_migrations
import sqlalchemy
from neutron.db.migration import cli as migration
from neutron.db.migration.models import head as head_models
from neutron.openstack.common.fixture import config
LOG = logging.getLogger(__name__)
cfg.CONF.import_opt('core_plugin', 'neutron.common.config')
CORE_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin'
# These tables are still in the neutron database, but their models have moved
# to the separate advanced services repositories. We skip the migration checks
# for these tables for now. The checks will be re-instated soon in the tests
# for each separate repository.
# TODO(akamyshnikova): delete these lists when the tables are removed from
# neutron database.
EXTERNAL_VPNAAS_TABLES = ['vpnservices', 'ipsecpolicies', 'ipsecpeercidrs',
'ipsec_site_connections', 'cisco_csr_identifier_map',
'ikepolicies']
EXTERNAL_LBAAS_TABLES = ['vips', 'sessionpersistences', 'pools',
'healthmonitors', 'poolstatisticss', 'members',
'poolloadbalanceragentbindings', 'embrane_pool_port',
'poolmonitorassociations']
EXTERNAL_FWAAS_TABLES = ['firewall_rules', 'firewalls', 'firewall_policies']
EXTERNAL_TABLES = (EXTERNAL_FWAAS_TABLES + EXTERNAL_LBAAS_TABLES +
EXTERNAL_VPNAAS_TABLES)
class _TestModelsMigrations(test_migrations.ModelsMigrationsSync):
'''Test for checking of equality models state and migrations.
For the opportunistic testing you need to set up a db named
'openstack_citest' with user 'openstack_citest' and password
'openstack_citest' on localhost.
The test will then use that db and user/password combo to run the tests.
For PostgreSQL on Ubuntu this can be done with the following commands::
sudo -u postgres psql
postgres=# create user openstack_citest with createdb login password
'openstack_citest';
postgres=# create database openstack_citest with owner
openstack_citest;
For MySQL on Ubuntu this can be done with the following commands::
mysql -u root
>create database openstack_citest;
>grant all privileges on openstack_citest.* to
openstack_citest@localhost identified by 'openstack_citest';
Output is a list that contains information about differences between db and
models. Output example::
[('add_table',
Table('bat', MetaData(bind=None),
Column('info', String(), table=<bat>), schema=None)),
('remove_table',
Table(u'bar', MetaData(bind=None),
Column(u'data', VARCHAR(), table=<bar>), schema=None)),
('add_column',
None,
'foo',
Column('data', Integer(), table=<foo>)),
('remove_column',
None,
'foo',
Column(u'old_data', VARCHAR(), table=None)),
[('modify_nullable',
None,
'foo',
u'x',
{'existing_server_default': None,
'existing_type': INTEGER()},
True,
False)]]
* ``remove_*`` means that there is extra table/column/constraint in db;
* ``add_*`` means that it is missing in db;
    * ``modify_*`` means that a column in the db has the wrong
      type/nullable/server_default set. Each element contains:
* what should be modified,
* schema,
* table,
* column,
* existing correct column parameters,
* right value,
* wrong value.
'''
def setUp(self):
patch = mock.patch.dict('sys.modules', {
'heleosapi': mock.MagicMock(),
'midonetclient': mock.MagicMock(),
'midonetclient.neutron': mock.MagicMock(),
})
patch.start()
self.addCleanup(patch.stop)
super(_TestModelsMigrations, self).setUp()
self.cfg = self.useFixture(config.Config())
self.cfg.config(core_plugin=CORE_PLUGIN)
self.alembic_config = migration.get_alembic_config()
self.alembic_config.neutron_config = cfg.CONF
def db_sync(self, engine):
cfg.CONF.set_override('connection', engine.url, group='database')
migration.do_alembic_command(self.alembic_config, 'upgrade', 'head')
cfg.CONF.clear_override('connection', group='database')
def get_engine(self):
return self.engine
def get_metadata(self):
return head_models.get_metadata()
def include_object(self, object_, name, type_, reflected, compare_to):
if type_ == 'table' and (name == 'alembic_version'
or name in EXTERNAL_TABLES):
return False
return super(_TestModelsMigrations, self).include_object(
object_, name, type_, reflected, compare_to)
def test_models_sync(self):
# drop all tables after a test run
self.addCleanup(self._cleanup)
# run migration scripts
self.db_sync(self.get_engine())
with self.get_engine().connect() as conn:
opts = {
'include_object': self.include_object,
'compare_type': self.compare_type,
'compare_server_default': self.compare_server_default,
}
mc = alembic.migration.MigrationContext.configure(conn, opts=opts)
# compare schemas and fail with diff, if it's not empty
diff1 = alembic.autogenerate.compare_metadata(mc,
self.get_metadata())
insp = sqlalchemy.engine.reflection.Inspector.from_engine(
self.get_engine())
dialect = self.get_engine().dialect.name
self.check_mysql_engine(dialect, insp)
diff2 = self.check_foreign_keys(self.get_metadata(),
self.get_engine())
result = filter(self.remove_unrelated_errors, diff1 + diff2)
if result:
msg = pprint.pformat(result, indent=2, width=20)
self.fail("Models and migration scripts aren't in sync:\n%s" % msg)
def check_mysql_engine(self, dialect, insp):
if dialect != 'mysql':
return
# Test that table creation on mysql only builds InnoDB tables
tables = insp.get_table_names()
self.assertTrue(len(tables) > 0,
"No tables found. Wrong schema?")
noninnodb = [table for table in tables if
insp.get_table_options(table)['mysql_engine'] != 'InnoDB'
and table != 'alembic_version']
self.assertEqual(0, len(noninnodb), "%s non InnoDB tables created" %
noninnodb)
    # Remove differences that are not real mistakes, just dialect-specific
    # quirks (e.g. indexes that MySQL creates implicitly for foreign keys)
def remove_unrelated_errors(self, element):
insp = sqlalchemy.engine.reflection.Inspector.from_engine(
self.get_engine())
dialect = self.get_engine().dialect.name
if isinstance(element, tuple):
if dialect == 'mysql' and element[0] == 'remove_index':
table_name = element[1].table.name
for fk in insp.get_foreign_keys(table_name):
if fk['name'] == element[1].name:
return False
cols = [c.name for c in element[1].expressions]
for col in cols:
if col in insp.get_pk_constraint(
table_name)['constrained_columns']:
return False
else:
for modified, _, table, column, _, _, new in element:
if modified == 'modify_default' and dialect == 'mysql':
constrained = insp.get_pk_constraint(table)
if column in constrained['constrained_columns']:
return False
return True
class TestModelsMigrationsMysql(_TestModelsMigrations,
test_base.MySQLOpportunisticTestCase):
pass
class TestModelsMigrationsPsql(_TestModelsMigrations,
test_base.PostgreSQLOpportunisticTestCase):
pass
|
thesoftwarejedi/bitcoin
|
refs/heads/master
|
qa/rpc-tests/blockstore.py
|
98
|
# BlockStore: a helper class that keeps a map of blocks and implements
# helper functions for responding to getheaders and getdata,
# and for constructing a getheaders message
#
from mininode import *
import dbm
class BlockStore(object):
def __init__(self, datadir):
self.blockDB = dbm.open(datadir + "/blocks", 'c')
self.currentBlock = 0L
def close(self):
self.blockDB.close()
def get(self, blockhash):
serialized_block = None
try:
serialized_block = self.blockDB[repr(blockhash)]
except KeyError:
return None
f = cStringIO.StringIO(serialized_block)
ret = CBlock()
ret.deserialize(f)
ret.calc_sha256()
return ret
# Note: this pulls full blocks out of the database just to retrieve
# the headers -- perhaps we could keep a separate data structure
# to avoid this overhead.
def headers_for(self, locator, hash_stop, current_tip=None):
if current_tip is None:
current_tip = self.currentBlock
current_block = self.get(current_tip)
if current_block is None:
return None
response = msg_headers()
headersList = [ CBlockHeader(current_block) ]
maxheaders = 2000
while (headersList[0].sha256 not in locator.vHave):
prevBlockHash = headersList[0].hashPrevBlock
prevBlock = self.get(prevBlockHash)
if prevBlock is not None:
headersList.insert(0, CBlockHeader(prevBlock))
else:
break
headersList = headersList[:maxheaders] # truncate if we have too many
hashList = [x.sha256 for x in headersList]
index = len(headersList)
if (hash_stop in hashList):
index = hashList.index(hash_stop)+1
response.headers = headersList[:index]
return response
def add_block(self, block):
block.calc_sha256()
try:
self.blockDB[repr(block.sha256)] = bytes(block.serialize())
except TypeError as e:
print "Unexpected error: ", sys.exc_info()[0], e.args
self.currentBlock = block.sha256
def get_blocks(self, inv):
responses = []
for i in inv:
if (i.type == 2): # MSG_BLOCK
block = self.get(i.hash)
if block is not None:
responses.append(msg_block(block))
return responses
def get_locator(self, current_tip=None):
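        # Build a block locator: walk back from the tip, taking every hash
        # for roughly the first 10 blocks and then doubling the step size,
        # which yields the exponentially spaced locator getheaders expects.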
if current_tip is None:
current_tip = self.currentBlock
r = []
counter = 0
step = 1
lastBlock = self.get(current_tip)
while lastBlock is not None:
r.append(lastBlock.hashPrevBlock)
for i in range(step):
lastBlock = self.get(lastBlock.hashPrevBlock)
if lastBlock is None:
break
counter += 1
if counter > 10:
step *= 2
locator = CBlockLocator()
locator.vHave = r
return locator
class TxStore(object):
def __init__(self, datadir):
self.txDB = dbm.open(datadir + "/transactions", 'c')
def close(self):
self.txDB.close()
def get(self, txhash):
serialized_tx = None
try:
serialized_tx = self.txDB[repr(txhash)]
except KeyError:
return None
f = cStringIO.StringIO(serialized_tx)
ret = CTransaction()
ret.deserialize(f)
ret.calc_sha256()
return ret
def add_transaction(self, tx):
tx.calc_sha256()
try:
self.txDB[repr(tx.sha256)] = bytes(tx.serialize())
except TypeError as e:
print "Unexpected error: ", sys.exc_info()[0], e.args
def get_transactions(self, inv):
responses = []
for i in inv:
if (i.type == 1): # MSG_TX
tx = self.get(i.hash)
if tx is not None:
responses.append(msg_tx(tx))
return responses
|
jiadaizhao/LeetCode
|
refs/heads/master
|
1101-1200/1171-Remove Zero Sum Consecutive Nodes from Linked List/1171-Remove Zero Sum Consecutive Nodes from Linked List.py
|
1
|
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def removeZeroSumSublists(self, head: ListNode) -> ListNode:
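        # Prefix-sum technique: map each running sum to the last node that
        # produced it. When a sum repeats, the nodes in between add up to
        # zero, so splice that whole stretch out (after evicting its
        # intermediate prefix sums from the table).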
dummy = ListNode(0)
curr = dummy
dummy.next = head
s = 0
table = {}
while curr:
s += curr.val
if s in table:
next = table[s].next
temp = s + next.val
while temp != s:
del table[temp]
next = next.next
temp += next.val
table[s].next = curr.next
else:
table[s] = curr
curr = curr.next
return dummy.next
|
vincepandolfo/django
|
refs/heads/master
|
tests/admin_custom_urls/urls.py
|
405
|
from django.conf.urls import url
from .models import site
urlpatterns = [
url(r'^admin/', site.urls),
]
|
fangxingli/hue
|
refs/heads/master
|
desktop/core/ext-py/simplejson/simplejson/tests/test_default.py
|
261
|
from unittest import TestCase
import simplejson as json
class TestDefault(TestCase):
def test_default(self):
        self.assertEqual(
            json.dumps(type, default=repr),
            json.dumps(repr(type)))
|
Mj258/weiboapi
|
refs/heads/master
|
srapyDemo/envs/Lib/site-packages/pyasn1_modules/rfc3447.py
|
127
|
#
# PKCS#1 syntax
#
# ASN.1 source from:
# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1.asn
#
# Sample captures could be obtained with "openssl genrsa" command
#
from pyasn1_modules.rfc2437 import *
class OtherPrimeInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('prime', univ.Integer()),
namedtype.NamedType('exponent', univ.Integer()),
namedtype.NamedType('coefficient', univ.Integer())
)
class OtherPrimeInfos(univ.SequenceOf):
componentType = OtherPrimeInfo()
subtypeSpec = univ.SequenceOf.subtypeSpec + \
constraint.ValueSizeConstraint(1, MAX)
class RSAPrivateKey(univ.Sequence):
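    # version 0 = two-prime RSA; version 1 ('multi') means multi-prime RSA,
    # where otherPrimeInfos must be present (see RFC 3447, appendix A.1.2)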
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('two-prime', 0), ('multi', 1)))),
namedtype.NamedType('modulus', univ.Integer()),
namedtype.NamedType('publicExponent', univ.Integer()),
namedtype.NamedType('privateExponent', univ.Integer()),
namedtype.NamedType('prime1', univ.Integer()),
namedtype.NamedType('prime2', univ.Integer()),
namedtype.NamedType('exponent1', univ.Integer()),
namedtype.NamedType('exponent2', univ.Integer()),
namedtype.NamedType('coefficient', univ.Integer()),
namedtype.OptionalNamedType('otherPrimeInfos', OtherPrimeInfos())
)
|
gchp/django
|
refs/heads/master
|
tests/queryset_pickle/tests.py
|
51
|
from __future__ import unicode_literals
import datetime
import pickle
import unittest
from django.db import models
from django.test import TestCase
from django.utils import six
from django.utils.version import get_version
from .models import Container, Event, Group, Happening, M2MModel
class PickleabilityTestCase(TestCase):
def setUp(self):
Happening.objects.create() # make sure the defaults are working (#20158)
def assert_pickles(self, qs):
self.assertEqual(list(pickle.loads(pickle.dumps(qs))), list(qs))
def test_related_field(self):
g = Group.objects.create(name="Ponies Who Own Maybachs")
self.assert_pickles(Event.objects.filter(group=g.id))
def test_datetime_callable_default_all(self):
self.assert_pickles(Happening.objects.all())
def test_datetime_callable_default_filter(self):
self.assert_pickles(Happening.objects.filter(when=datetime.datetime.now()))
def test_string_as_default(self):
self.assert_pickles(Happening.objects.filter(name="test"))
def test_standalone_method_as_default(self):
self.assert_pickles(Happening.objects.filter(number1=1))
@unittest.skipIf(six.PY2, "Field doesn't exist on Python 2.")
def test_staticmethod_as_default(self):
self.assert_pickles(Happening.objects.filter(number2=1))
def test_filter_reverse_fk(self):
self.assert_pickles(Group.objects.filter(event=1))
def test_doesnotexist_exception(self):
# Ticket #17776
original = Event.DoesNotExist("Doesn't exist")
unpickled = pickle.loads(pickle.dumps(original))
# Exceptions are not equal to equivalent instances of themselves, so
# can't just use assertEqual(original, unpickled)
self.assertEqual(original.__class__, unpickled.__class__)
self.assertEqual(original.args, unpickled.args)
def test_manager_pickle(self):
pickle.loads(pickle.dumps(Happening.objects))
def test_model_pickle(self):
"""
Test that a model not defined on module level is picklable.
"""
original = Container.SomeModel(pk=1)
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
# Also, deferred dynamic model works
Container.SomeModel.objects.create(somefield=1)
original = Container.SomeModel.objects.defer('somefield')[0]
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
self.assertEqual(original.somefield, reloaded.somefield)
def test_model_pickle_m2m(self):
"""
        Intentionally test the automatically created through model.
"""
m1 = M2MModel.objects.create()
g1 = Group.objects.create(name='foof')
m1.groups.add(g1)
m2m_through = M2MModel._meta.get_field('groups').remote_field.through
original = m2m_through.objects.get()
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
def test_model_pickle_dynamic(self):
class Meta:
proxy = True
dynclass = type(str("DynamicEventSubclass"), (Event, ),
{'Meta': Meta, '__module__': Event.__module__})
original = dynclass(pk=1)
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
self.assertIs(reloaded.__class__, dynclass)
def test_specialized_queryset(self):
self.assert_pickles(Happening.objects.values('name'))
self.assert_pickles(Happening.objects.values('name').dates('when', 'year'))
# With related field (#14515)
self.assert_pickles(
Event.objects.select_related('group').order_by('title').values_list('title', 'group__name')
)
def test_pickle_prefetch_related_idempotence(self):
g = Group.objects.create(name='foo')
groups = Group.objects.prefetch_related('event_set')
# First pickling
groups = pickle.loads(pickle.dumps(groups))
self.assertQuerysetEqual(groups, [g], lambda x: x)
# Second pickling
groups = pickle.loads(pickle.dumps(groups))
self.assertQuerysetEqual(groups, [g], lambda x: x)
def test_pickle_prefetch_related_with_m2m_and_objects_deletion(self):
"""
#24831 -- Cached properties on ManyToOneRel created in QuerySet.delete()
caused subsequent QuerySet pickling to fail.
"""
g = Group.objects.create(name='foo')
m2m = M2MModel.objects.create()
m2m.groups.add(g)
Group.objects.all().delete()
m2ms = M2MModel.objects.prefetch_related('groups')
m2ms = pickle.loads(pickle.dumps(m2ms))
self.assertQuerysetEqual(m2ms, [m2m], lambda x: x)
def test_annotation_with_callable_default(self):
# Happening.when has a callable default of datetime.datetime.now.
qs = Happening.objects.annotate(latest_time=models.Max('when'))
self.assert_pickles(qs)
def test_missing_django_version_unpickling(self):
"""
#21430 -- Verifies a warning is raised for querysets that are
unpickled without a Django version
"""
qs = Group.missing_django_version_objects.all()
msg = "Pickled queryset instance's Django version is not specified."
with self.assertRaisesMessage(RuntimeWarning, msg):
pickle.loads(pickle.dumps(qs))
def test_unsupported_unpickle(self):
"""
        #21430 -- Verifies a warning is raised for querysets that are
        unpickled with a different Django version than the current one
"""
qs = Group.previous_django_version_objects.all()
msg = "Pickled queryset instance's Django version 1.0 does not match the current version %s." % get_version()
with self.assertRaisesMessage(RuntimeWarning, msg):
pickle.loads(pickle.dumps(qs))
|
jbalogh/zamboni
|
refs/heads/master
|
apps/browse/tests.py
|
1
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from dateutil.parser import parse as parse_dt
import re
from urlparse import urlparse
from django import http
from django.conf import settings
from django.core.cache import cache
from django.utils import http as urllib
from jingo.helpers import datetime as datetime_filter
import mock
from nose import SkipTest
from nose.tools import eq_, assert_raises, nottest
from pyquery import PyQuery as pq
import amo
import amo.tests
from amo.urlresolvers import reverse
from amo.helpers import absolutify, numberfmt, urlparams
from addons.tests.test_views import TestMobile
from addons.models import (Addon, AddonCategory, Category, AppSupport, Feature,
Persona)
from addons.utils import FeaturedManager
from applications.models import Application
from bandwagon.models import Collection, CollectionAddon, FeaturedCollection
from browse import views, feeds
from browse.views import locale_display_name, AddonFilter, ThemeFilter
from translations.models import Translation
from translations.query import order_by_translation
from versions.models import Version
@nottest
def test_listing_sort(self, sort, key=None, reverse=True, sel_class='opt'):
r = self.client.get(urlparams(self.url, sort=sort))
eq_(r.status_code, 200)
sel = pq(r.content)('#sorter ul > li.selected')
eq_(sel.find('a').attr('class'), sel_class)
eq_(r.context['sorting'], sort)
if key:
a = r.context['addons'].object_list
eq_(list(a), sorted(a, key=lambda x: getattr(x, key), reverse=reverse))
@nottest
def test_default_sort(self, sort, key=None):
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(r.context['sorting'], sort)
r = self.client.get(urlparams(self.url, sort='xxx'))
eq_(r.status_code, 200)
eq_(r.context['sorting'], sort)
test_listing_sort(self, sort, key)
class ExtensionTestCase(amo.tests.ESTestCase):
es = True
@classmethod
def setUpClass(cls):
super(ExtensionTestCase, cls).setUpClass()
        cls.setUpIndex()
def setUp(self):
super(ExtensionTestCase, self).setUp()
self.url = reverse('browse.es.extensions')
class TestUpdatedSort(ExtensionTestCase):
# This needs to run in its own class for isolation.
def test_updated_sort(self):
r = self.client.get(urlparams(self.url, sort='updated'))
addons = r.context['addons'].object_list
assert list(addons)
eq_(list(addons),
sorted(addons, key=lambda x: x.last_updated, reverse=True))
class TestESExtensions(ExtensionTestCase):
def test_landing(self):
r = self.client.get(self.url)
self.assertTemplateUsed(r, 'browse/extensions.html')
self.assertTemplateUsed(r, 'addons/impala/listing/items.html')
eq_(r.context['sorting'], 'popular')
eq_(r.context['category'], None)
doc = pq(r.content)
eq_(doc('body').hasClass('s-featured'), True)
eq_(doc('.addon-listing .listview').length, 0)
def test_name_sort(self):
r = self.client.get(urlparams(self.url, sort='name'))
addons = r.context['addons'].object_list
assert list(addons)
eq_(list(addons), sorted(addons, key=lambda x: x.name))
def test_created_sort(self):
r = self.client.get(urlparams(self.url, sort='created'))
addons = r.context['addons'].object_list
assert list(addons)
eq_(list(addons),
sorted(addons, key=lambda x: x.created, reverse=True))
def test_popular_sort(self):
r = self.client.get(urlparams(self.url, sort='popular'))
addons = r.context['addons'].object_list
assert list(addons)
eq_(list(addons),
sorted(addons, key=lambda x: x.weekly_downloads, reverse=True))
def test_rating_sort(self):
r = self.client.get(urlparams(self.url, sort='rating'))
addons = r.context['addons'].object_list
assert list(addons)
eq_(list(addons),
sorted(addons, key=lambda x: x.bayesian_rating, reverse=True))
def test_category(self):
# Stick one add-on in a category, make sure search finds it.
addon = Addon.objects.filter(status=amo.STATUS_PUBLIC,
disabled_by_user=False)[0]
c = Category.objects.create(application_id=amo.FIREFOX.id,
slug='alerts', type=addon.type)
AddonCategory.objects.create(category=c, addon=addon)
addon.save()
self.refresh()
cat_url = reverse('browse.es.extensions', args=['alerts'])
r = self.client.get(urlparams(cat_url))
addons = r.context['addons'].object_list
eq_(list(addons), [addon])
def test_invalid_sort(self):
r = self.client.get(urlparams(self.url, sort='wut'))
addons = r.context['addons'].object_list
assert list(addons)
eq_(list(addons),
sorted(addons, key=lambda x: x.weekly_downloads, reverse=True))
def test_locale_display_name():
def check(locale, english, native):
actual = locale_display_name(locale)
eq_(actual, (english, native))
check('el', 'Greek', u'Ελληνικά')
check('el-XX', 'Greek', u'Ελληνικά')
assert_raises(KeyError, check, 'fake-lang', '', '')
class TestListing(amo.tests.TestCase):
fixtures = ['base/apps', 'base/category', 'base/featured',
'addons/featured', 'addons/listed', 'base/collections',
'bandwagon/featured_collections']
def setUp(self):
self.reset_featured_addons()
self.url = reverse('browse.extensions')
def test_default_sort(self):
r = self.client.get(self.url)
eq_(r.context['sorting'], 'featured')
def test_featured_sort(self):
r = self.client.get(urlparams(self.url, sort='featured'))
sel = pq(r.content)('#sorter ul > li.selected')
eq_(sel.find('a').attr('class'), 'opt')
eq_(sel.text(), 'Featured')
def test_mostusers_sort(self):
r = self.client.get(urlparams(self.url, sort='users'))
sel = pq(r.content)('#sorter ul > li.selected')
eq_(sel.find('a').attr('class'), 'opt')
eq_(sel.text(), 'Most Users')
a = r.context['addons'].object_list
eq_(list(a),
sorted(a, key=lambda x: x.average_daily_users, reverse=True))
def test_toprated_sort(self):
r = self.client.get(urlparams(self.url, sort='rating'))
sel = pq(r.content)('#sorter ul > li.selected')
eq_(sel.find('a').attr('class'), 'opt')
eq_(sel.text(), 'Top Rated')
a = r.context['addons'].object_list
eq_(list(a), sorted(a, key=lambda x: x.bayesian_rating, reverse=True))
def test_newest_sort(self):
r = self.client.get(urlparams(self.url, sort='created'))
sel = pq(r.content)('#sorter ul > li.selected')
eq_(sel.find('a').attr('class'), 'opt')
eq_(sel.text(), 'Newest')
a = r.context['addons'].object_list
eq_(list(a), sorted(a, key=lambda x: x.created, reverse=True))
def test_name_sort(self):
r = self.client.get(urlparams(self.url, sort='name'))
sel = pq(r.content)('#sorter ul > li.selected')
eq_(sel.find('a').attr('class'), 'extra-opt')
eq_(sel.text(), 'Name')
a = r.context['addons'].object_list
eq_(list(a), sorted(a, key=lambda x: x.name))
def test_weeklydownloads_sort(self):
r = self.client.get(urlparams(self.url, sort='popular'))
sel = pq(r.content)('#sorter ul > li.selected')
eq_(sel.find('a').attr('class'), 'extra-opt')
eq_(sel.text(), 'Weekly Downloads')
a = r.context['addons'].object_list
eq_(list(a), sorted(a, key=lambda x: x.weekly_downloads, reverse=True))
def test_updated_sort(self):
r = self.client.get(urlparams(self.url, sort='updated'))
sel = pq(r.content)('#sorter ul > li.selected')
eq_(sel.find('a').attr('class'), 'extra-opt')
eq_(sel.text(), 'Recently Updated')
a = r.context['addons'].object_list
eq_(list(a), sorted(a, key=lambda x: x.last_updated, reverse=True))
def test_upandcoming_sort(self):
r = self.client.get(urlparams(self.url, sort='hotness'))
sel = pq(r.content)('#sorter ul > li.selected')
eq_(sel.find('a').attr('class'), 'extra-opt')
eq_(sel.text(), 'Up & Coming')
a = r.context['addons'].object_list
eq_(list(a), sorted(a, key=lambda x: x.hotness, reverse=True))
def test_added_date(self):
doc = pq(self.client.get(urlparams(self.url, sort='created')).content)
for item in doc('.items .item'):
item = pq(item)
addon_id = item('.install').attr('data-addon')
ts = Addon.objects.get(id=addon_id).created
eq_(item('.updated').text(), 'Added %s' % datetime_filter(ts))
def test_updated_date(self):
doc = pq(self.client.get(urlparams(self.url, sort='updated')).content)
for item in doc('.items .item'):
item = pq(item)
addon_id = item('.install').attr('data-addon')
ts = Addon.objects.get(id=addon_id).last_updated
eq_(item('.updated').text(), 'Updated %s' % datetime_filter(ts))
def test_users_adu_unit(self):
doc = pq(self.client.get(urlparams(self.url, sort='users')).content)
for item in doc('.items .item'):
item = pq(item)
addon_id = item('.install').attr('data-addon')
adu = Addon.objects.get(id=addon_id).average_daily_users
eq_(item('.adu').text(),
'%s user%s' % (numberfmt(adu), 's' if adu != 1 else ''))
def test_popular_adu_unit(self):
doc = pq(self.client.get(urlparams(self.url, sort='popular')).content)
for item in doc('.items .item'):
item = pq(item)
addon_id = item('.install').attr('data-addon')
adu = Addon.objects.get(id=addon_id).weekly_downloads
eq_(item('.adu').text(),
'%s weekly download%s' % (numberfmt(adu),
's' if adu != 1 else ''))
class TestLanguageTools(amo.tests.TestCase):
fixtures = ['browse/test_views']
def setUp(self):
super(TestLanguageTools, self).setUp()
cache.clear()
self.url = reverse('browse.language-tools')
response = self.client.get(self.url, follow=True)
# For some reason the context doesn't get loaded the first time.
response = self.client.get(self.url, follow=True)
self.locales = list(response.context['locales'])
def test_sorting(self):
"""The locales should be sorted by English display name."""
displays = [locale.display for _, locale in self.locales]
eq_(displays, sorted(displays))
def test_native_missing_region(self):
"""
If we had to strip a locale's region to find a display name, we
append it to the native name for disambiguation.
"""
el = dict(self.locales)['el-XX']
assert el.native.endswith(' (el-xx)')
def test_missing_locale(self):
"""If we don't know about a locale, show the addon name and locale."""
wa = dict(self.locales)['wa']
eq_(wa.display, 'Walloon Language Pack (wa)')
eq_(wa.native, '')
def test_packs_and_dicts(self):
ca = dict(self.locales)['ca-valencia']
eq_(len(ca.dicts), 1)
eq_(len(ca.packs), 3)
def test_empty_target_locale(self):
"""Make sure nothing breaks with empty target locales."""
for addon in Addon.objects.all():
addon.target_locale = ''
addon.save()
response = self.client.get(self.url, follow=True)
eq_(response.status_code, 200)
eq_(list(response.context['locales']), [])
def test_null_target_locale(self):
"""Make sure nothing breaks with null target locales."""
for addon in Addon.objects.all():
addon.target_locale = None
addon.save()
response = self.client.get(self.url, follow=True)
eq_(response.status_code, 200)
eq_(list(response.context['locales']), [])
class TestThemes(amo.tests.TestCase):
fixtures = ('base/category', 'base/addon_6704_grapple', 'base/addon_3615')
def setUp(self):
super(TestThemes, self).setUp()
# Make all the add-ons themes.
for addon in Addon.objects.all():
addon.type = amo.ADDON_THEME
addon.save()
for category in Category.objects.all():
category.type = amo.ADDON_THEME
category.save()
self.url = reverse('browse.themes')
def test_unreviewed(self):
pop = urlparams(self.url, sort='popular')
        # Only 2 without unreviewed.
response = self.client.get(pop)
eq_(len(response.context['addons'].object_list), 2)
response = self.client.get(pop)
eq_(len(response.context['addons'].object_list), 2)
def test_default_sort(self):
test_default_sort(self, 'users', 'average_daily_users')
def test_rating_sort(self):
test_listing_sort(self, 'rating', 'bayesian_rating')
def test_newest_sort(self):
test_listing_sort(self, 'created', 'created')
def test_name_sort(self):
test_listing_sort(self, 'name', 'name', reverse=False,
sel_class='extra-opt')
def test_featured_sort(self):
test_listing_sort(self, 'featured', reverse=False,
sel_class='extra-opt')
def test_downloads_sort(self):
test_listing_sort(self, 'popular', 'weekly_downloads',
sel_class='extra-opt')
def test_updated_sort(self):
test_listing_sort(self, 'updated', 'last_updated',
sel_class='extra-opt')
def test_upandcoming_sort(self):
test_listing_sort(self, 'hotness', 'hotness', sel_class='extra-opt')
def test_category_sidebar(self):
c = Category.objects.filter(weight__gte=0).values_list('id', flat=True)
doc = pq(self.client.get(self.url).content)
for id in c:
eq_(doc('#side-categories #c-%s' % id).length, 1)
class TestCategoryPages(amo.tests.TestCase):
fixtures = ['base/apps', 'base/category', 'base/addon_3615',
'base/featured', 'addons/featured', 'browse/nameless-addon']
def setUp(self):
patcher = mock.patch.object(settings, 'NEW_FEATURES', False)
patcher.start()
self.addCleanup(patcher.stop)
def test_browsing_urls(self):
"""Every browse page URL exists."""
for _, slug in amo.ADDON_SLUGS.items():
view = 'apps.list' if slug == 'apps' else 'browse.%s' % slug
assert reverse(view)
def test_matching_opts(self):
"""Every filter on landing pages is available on listing pages."""
for key, _ in views.CategoryLandingFilter.opts:
if key != 'featured':
assert key in dict(views.AddonFilter.opts)
@mock.patch('browse.views.category_landing')
def test_goto_category_landing(self, landing_mock):
"""We hit a landing page if there's a category and no sorting."""
landing_mock.return_value = http.HttpResponse()
self.client.get(reverse('browse.extensions'))
assert not landing_mock.called
category = Category.objects.all()[0]
category_url = reverse('browse.extensions', args=[category.slug])
self.client.get('%s?sort=created' % category_url)
assert not landing_mock.called
self.client.get(category_url)
assert landing_mock.called
# Category with fewer than 5 add-ons bypasses landing page.
category.count = 4
category.save()
self.client.get(category_url)
eq_(landing_mock.call_count, 1)
def test_creatured_addons(self):
"""Make sure the creatured add-ons are for the right category."""
# Featured in bookmarks.
url = reverse('browse.extensions', args=['bookmarks'])
response = self.client.get(url, follow=True)
creatured = response.context['filter'].all()['featured']
eq_(len(creatured), 1)
eq_(creatured[0].id, 3615)
# Not featured in search-tools.
url = reverse('browse.extensions', args=['search-tools'])
response = self.client.get(url, follow=True)
creatured = response.context['filter'].all()['featured']
eq_(len(creatured), 0)
def test_creatured_only_public(self):
"""Make sure the creatured add-ons are all public."""
url = reverse('browse.creatured', args=['bookmarks'])
r = self.client.get(url, follow=True)
addons = r.context['addons']
for a in addons:
assert a.status == amo.STATUS_PUBLIC, "%s is not public" % a.name
old_count = len(addons)
addons[0].status = amo.STATUS_UNREVIEWED
addons[0].save()
r = self.client.get(url, follow=True)
addons = r.context['addons']
for a in addons:
assert a.status == amo.STATUS_PUBLIC, ("Altered %s is featured"
% a.name)
eq_(len(addons), old_count - 1, "The number of addons is the same.")
def test_sorting_nameless(self):
"""Nameless add-ons are dropped from the sort."""
qs = Addon.objects.all()
ids = order_by_translation(qs, 'name')
assert 57132 in [a.id for a in qs]
assert 57132 not in [a.id for a in ids]
class TestFeeds(amo.tests.TestCase):
fixtures = ['base/apps', 'base/category', 'base/featured',
'addons/featured', 'addons/listed', 'base/collections',
'bandwagon/featured_collections']
def setUp(self):
self.reset_featured_addons()
self.url = reverse('browse.extensions')
self.rss_url = reverse('browse.extensions.rss')
self.filter = AddonFilter
def _check_feed(self, browse_url, rss_url, sort='featured'):
"""
Check RSS feed URLs and that the results on the listing pages match
those for their respective RSS feeds.
"""
# Check URLs.
r = self.client.get(browse_url, follow=True)
doc = pq(r.content)
rss_url += '?sort=%s' % sort
eq_(doc('link[type="application/rss+xml"]').attr('href'), rss_url)
eq_(doc('#subscribe').attr('href'), rss_url)
# Ensure that the RSS items match those on the browse listing pages.
r = self.client.get(rss_url)
rss_doc = pq(r.content)
pg_items = doc('.items .item')
rss_items = rss_doc('item')
for pg_item, rss_item in zip(pg_items, rss_items):
pg_item, rss_item = pq(pg_item), pq(rss_item)
pg_url = absolutify(pg_item.find('h3 a').attr('href'))
rss_url = rss_item.find('link').text()
abs_url = pg_url.split('?')[0]
assert rss_url.endswith(abs_url), 'Unexpected URL: %s' % abs_url
if sort in ('added', 'updated'):
# Check timestamps.
pg_ts = pg_item.find('.updated').text().strip('Added Updated')
rss_ts = rss_item.find('pubDate').text()
# Look at YMD, since we don't have h:m on listing pages.
eq_(parse_dt(pg_ts).isocalendar(),
parse_dt(rss_ts).isocalendar())
def _check_sort_urls(self, items, opts):
items = sorted(items, key=lambda x: x.get('href'))
options = getattr(self.filter, opts)
options = sorted(options, key=lambda x: x[0])
for item, options in zip(items, options):
item = pq(item)
slug, title = options
url = '%s?sort=%s' % (self.url, slug)
eq_(item.attr('href'), url)
eq_(item.text(), unicode(title))
self._check_feed(url, self.rss_url, slug)
def test_feed(self):
eq_(self.client.get(self.rss_url).status_code, 200)
def test_sort_opts_urls(self):
r = self.client.get(self.url, follow=True)
s = pq(r.content)('#sorter')
self._check_feed(self.url, self.rss_url, 'featured')
self._check_sort_urls(s.find('a.opt'), 'opts')
self._check_sort_urls(s.find('a.extra-opt'), 'extras')
class TestFeaturedLocale(amo.tests.TestCase):
fixtures = ['base/apps', 'base/category', 'base/addon_3615',
'base/featured', 'addons/featured', 'browse/nameless-addon']
def setUp(self):
patcher = mock.patch.object(settings, 'NEW_FEATURES', False)
patcher.start()
self.addCleanup(patcher.stop)
self.addon = Addon.objects.get(pk=3615)
self.persona = Addon.objects.get(pk=15679)
self.extension = Addon.objects.get(pk=2464)
self.category = Category.objects.get(slug='bookmarks')
self.url = reverse('browse.creatured', args=['bookmarks'])
cache.clear()
def change_addoncategory(self, addon, locale='es-ES'):
ac = addon.addoncategory_set.all()[0]
ac.feature_locales = locale
ac.save()
self.reset()
def change_addon(self, addon, locale='es-ES'):
feature = addon.feature_set.all()[0]
feature.locale = locale
feature.save()
self.reset()
def reset(self):
cache.clear()
FeaturedManager.redis().flushall()
self.reset_featured_addons()
def list_featured(self, content):
        # Not sure we want to get into testing randomness
        # between multiple executions of a page, but this is a quick
        # way to print out the results and check yourself that they
        # are changing.
doc = pq(content)
ass = doc('.featured-inner .item a')
        rx = re.compile(r'/(en-US|es-ES)/firefox/addon/(\d+)/$')
for a in ass:
mtch = rx.match(a.attrib['href'])
if mtch:
print mtch.group(2)
def test_creatured_random_caching(self):
rnd = AddonCategory.creatured_random
cat = Category.objects.get(pk=22)
self.assertNumQueries(0, rnd, cat, 'en-US')
self.assertNumQueries(0, rnd, cat, 'en-US')
self.assertNumQueries(0, rnd, cat, 'es-ES')
def test_featured_random_caching(self):
rnd = Addon.featured_random
self.assertNumQueries(0, rnd, amo.FIREFOX, 'en-US')
self.assertNumQueries(0, rnd, amo.FIREFOX, 'es-ES')
self.assertNumQueries(0, rnd, amo.THUNDERBIRD, 'es-ES')
self.assertNumQueries(0, rnd, amo.THUNDERBIRD, 'en-US')
def test_creatured_locale_en_US(self):
res = self.client.get(self.url)
assert self.addon in res.context['addons']
def test_creatured_locale_nones(self):
self.change_addoncategory(self.addon, '')
res = self.client.get(self.url)
assert self.addon in res.context['addons']
self.change_addoncategory(self.addon, None)
res = self.client.get(self.url)
assert self.addon in res.context['addons']
def test_creatured_locale_many(self):
self.change_addoncategory(self.addon, 'en-US,es-ES')
res = self.client.get(self.url)
assert self.addon in res.context['addons']
res = self.client.get(self.url.replace('en-US', 'es-ES'))
assert self.addon in res.context['addons']
def test_creatured_locale_not_en_US(self):
self.change_addoncategory(self.addon, 'es-ES')
res = self.client.get(self.url)
assert self.addon not in res.context['addons']
def test_creatured_locale_es_ES(self):
res = self.client.get(self.url.replace('en-US', 'es-ES'))
assert self.addon in res.context['addons']
def test_featured_locale_en_US(self):
res = self.client.get(reverse('browse.extensions') + '?sort=featured')
assert self.extension in res.context['addons']
def test_featured_locale_not_persona_en_US(self):
res = self.client.get(reverse('browse.extensions') + '?sort=featured')
assert not self.persona in res.context['addons']
def test_featured_locale_es_ES(self):
self.change_addon(self.extension, 'es-ES')
url = reverse('browse.extensions') + '?sort=featured'
res = self.client.get(url)
assert self.extension not in res.context['addons']
        res = self.client.get(url.replace('en-US', 'es-ES'))
        assert self.extension in res.context['addons']
def test_featured_extensions_no_category_en_US(self):
addon = self.extension
res = self.client.get(reverse('browse.extensions'))
assert addon in res.context['addons'].object_list
def test_featured_extensions_with_category_es_ES(self):
addon = self.addon
res = self.client.get(reverse('browse.extensions', args=['bookmarks']))
assert addon in res.context['filter'].all()['featured']
self.change_addoncategory(addon, 'es-ES')
res = self.client.get(reverse('browse.extensions', args=['bookmarks']))
assert addon not in res.context['filter'].all()['featured']
def test_featured_persona_no_category_en_US(self):
addon = self.persona
url = reverse('browse.personas')
res = self.client.get(url)
assert addon in res.context['featured']
self.change_addon(addon, 'es-ES')
res = self.client.get(url)
assert addon not in res.context['featured']
res = self.client.get(url.replace('en-US', 'es-ES'))
assert addon in res.context['featured']
def test_featured_persona_category_en_US(self):
addon = self.persona
category = Category.objects.get(id=22)
category.update(type=amo.ADDON_PERSONA)
addon.addoncategory_set.create(category=category, feature=True)
self.reset()
url = reverse('browse.personas', args=[category.slug])
res = self.client.get(url)
assert addon in res.context['featured']
self.change_addoncategory(addon, 'es-ES')
res = self.client.get(url)
assert addon not in res.context['featured']
res = self.client.get(url.replace('en-US', 'es-ES'))
assert addon in res.context['featured']
def test_homepage(self):
url = reverse('home')
res = self.client.get(url)
assert self.extension in res.context['featured']
self.change_addon(self.extension, 'es-ES')
res = self.client.get(url)
assert self.extension not in res.context['featured']
res = self.client.get(url.replace('en-US', 'es-ES'))
assert self.extension in res.context['featured']
def test_homepage_persona(self):
res = self.client.get(reverse('home'))
assert self.persona not in res.context['featured']
def test_homepage_filter(self):
# Ensure that the base homepage filter is applied.
res = self.client.get(reverse('home'))
listed = [p.pk for p in Addon.objects
.listed(amo.FIREFOX)
.exclude(type=amo.ADDON_PERSONA)]
featured = Addon.featured_random(amo.FIREFOX, 'en-US')
actual = [p.pk for p in res.context['featured']]
eq_(sorted(actual), sorted(set(listed) & set(featured)))
def test_homepage_listed_single(self):
listed = [p.pk for p in Addon.objects.listed(amo.FIREFOX)]
eq_(listed.count(7661), 1)
addon = Addon.objects.get(pk=7661)
addon.update(status=amo.STATUS_PUBLIC)
listed = [p.pk for p in Addon.objects.listed(amo.FIREFOX)]
eq_(listed.count(7661), 1)
def test_homepage_order(self):
# Make these apps listed.
for pk in [1003, 3481]:
addon = Addon.objects.get(pk=pk)
addon.update(status=amo.STATUS_PUBLIC)
addon.appsupport_set.create(app_id=1)
# Note 1003 and 3481 are now en-US.
# And 7661 and 2464 are now None.
# The order should be random within those boundaries.
another = Addon.objects.get(id=1003)
self.change_addon(another, 'en-US')
self.reset_featured_addons()
url = reverse('home')
res = self.client.get(url)
items = res.context['featured']
eq_([1003, 3481], sorted([i.pk for i in items[0:2]]))
eq_([2464, 7661], sorted([i.pk for i in items[2:]]))
res = self.client.get(url.replace('en-US', 'es-ES'))
items = res.context['featured']
eq_([2464, 7661], sorted([i.pk for i in items]))
self.change_addon(another, 'es-ES')
res = self.client.get(url.replace('en-US', 'es-ES'))
items = res.context['featured']
eq_(items[0].pk, 1003)
eq_([1003, 2464, 7661], sorted([i.pk for i in items]))
def test_featured_ids(self):
another = Addon.objects.get(id=1003)
self.change_addon(another, 'en-US')
items = Addon.featured_random(amo.FIREFOX, 'en-US')
# The order should be random within those boundaries.
eq_([1003, 3481], sorted(items[0:2]))
eq_([1001, 2464, 7661, 15679], sorted(items[2:]))
def test_featured_duplicated(self):
another = Addon.objects.get(id=1003)
self.change_addon(another, 'en-US')
another.feature_set.create(application_id=amo.FIREFOX.id,
locale=None,
start=datetime.today(),
end=datetime.today())
eq_(Addon.featured_random(amo.FIREFOX, 'en-US').count(1003), 1)
class TestNewFeaturedLocale(TestFeaturedLocale):
fixtures = (TestFeaturedLocale.fixtures +
['base/collections', 'addons/featured', 'base/featured',
'bandwagon/featured_collections',
'base/addon_3615_featuredcollection'])
# TODO(cvan): Merge with above once new featured add-ons are enabled.
def setUp(self):
super(TestNewFeaturedLocale, self).setUp()
patcher = mock.patch.object(settings, 'NEW_FEATURES', True)
patcher.start()
self.reset_featured_addons()
self.addCleanup(patcher.stop)
def test_featured_random_caching(self):
raise SkipTest() # We're no longer caching `featured_random`.
def test_creatured_random_caching(self):
raise SkipTest() # We're no longer caching `creatured_random`.
def change_addon(self, addon, locale='es-ES'):
fc = FeaturedCollection.objects.filter(collection__addons=addon.id)[0]
feature = FeaturedCollection.objects.create(locale=locale,
application=Application.objects.get(id=amo.FIREFOX.id),
collection=Collection.objects.create())
c = CollectionAddon.objects.filter(addon=addon,
collection=fc.collection)[0]
c.collection = feature.collection
c.save()
self.reset()
def change_addoncategory(self, addon, locale='es-ES'):
CollectionAddon.objects.filter(addon=addon).delete()
locales = (locale or '').split(',')
for locale in locales:
c = CollectionAddon.objects.create(addon=addon,
collection=Collection.objects.create())
FeaturedCollection.objects.create(locale=locale,
application=Application.objects.get(id=amo.FIREFOX.id),
collection=c.collection)
self.reset()
def test_featured_ids(self):
# TODO(cvan): Change the TestFeaturedLocale test
# accordingly after we switch over to the new features.
FeaturedCollection.objects.filter(collection__addons=3615)[0].delete()
super(TestNewFeaturedLocale, self).test_featured_ids()
def test_homepage_order(self):
# TODO(cvan): Change the TestFeaturedLocale test
# accordingly after we switch over to the new features.
FeaturedCollection.objects.filter(collection__addons=3615)[0].delete()
super(TestNewFeaturedLocale, self).test_homepage_order()
def test_creatured_locale_es_ES(self):
"""Ensure 'en-US'-creatured add-ons do not exist for other locales."""
res = self.client.get(self.url.replace('en-US', 'es-ES'))
assert self.addon not in res.context['addons']
class TestListingByStatus(amo.tests.TestCase):
fixtures = ['base/apps', 'base/addon_3615']
def setUp(self):
self.addon = Addon.objects.get(id=3615)
def get_addon(self, addon_status, file_status):
self.addon.current_version.all_files[0].update(status=file_status)
self.addon.update(status=addon_status, _current_version=None)
self.addon.update_version()
return Addon.objects.get(id=3615)
def check(self, exp):
r = self.client.get(reverse('browse.extensions') + '?sort=created')
addons = list(r.context['addons'].object_list)
eq_(addons, exp)
def test_public_public_listed(self):
self.get_addon(amo.STATUS_PUBLIC, amo.STATUS_PUBLIC)
self.check([self.addon])
def test_public_nom_unlisted(self):
self.get_addon(amo.STATUS_PUBLIC, amo.STATUS_NOMINATED)
self.check([])
def test_public_lite_unlisted(self):
self.get_addon(amo.STATUS_PUBLIC, amo.STATUS_LITE)
self.check([])
def test_lite_unreviewed_unlisted(self):
self.get_addon(amo.STATUS_LITE, amo.STATUS_UNREVIEWED)
self.check([])
def test_lite_lite_listed(self):
self.get_addon(amo.STATUS_LITE, amo.STATUS_LITE)
self.check([self.addon])
def test_lite_lan_listed(self):
self.get_addon(amo.STATUS_LITE, amo.STATUS_LITE_AND_NOMINATED)
self.check([self.addon])
def test_lan_unreviewed_unlisted(self):
self.get_addon(amo.STATUS_LITE_AND_NOMINATED, amo.STATUS_UNREVIEWED)
self.check([])
def test_lan_lite_listed(self):
self.get_addon(amo.STATUS_LITE_AND_NOMINATED, amo.STATUS_LITE)
self.check([self.addon])
def test_lan_public_listed(self):
self.get_addon(amo.STATUS_LITE_AND_NOMINATED, amo.STATUS_PUBLIC)
self.check([self.addon])
def test_unreviewed_public_unlisted(self):
self.get_addon(amo.STATUS_UNREVIEWED, amo.STATUS_PUBLIC)
self.check([])
def test_nom_public_unlisted(self):
self.get_addon(amo.STATUS_NOMINATED, amo.STATUS_PUBLIC)
self.check([])
class BaseSearchToolsTest(amo.tests.TestCase):
fixtures = ('base/apps', 'base/featured', 'addons/featured',
'base/category', 'addons/listed')
def setUp(self):
super(BaseSearchToolsTest, self).setUp()
# Transform bookmarks into a search category:
Category.objects.filter(slug='bookmarks').update(type=amo.ADDON_SEARCH)
def setup_featured_tools_and_extensions(self):
# Pretend all Add-ons are search-related:
Addon.objects.update(type=amo.ADDON_SEARCH)
# One will be an extension in the search category:
limon = Addon.objects.get(
name__localized_string='Limon free English-Hebrew dictionary')
limon.type = amo.ADDON_EXTENSION
limon.status = amo.STATUS_PUBLIC
limon.save()
AppSupport(addon=limon, app_id=amo.FIREFOX.id).save()
# Another will be a search add-on in the search category:
readit = Addon.objects.get(name__localized_string='Read It Later')
readit.type = amo.ADDON_SEARCH
readit.status = amo.STATUS_PUBLIC
readit.save()
# Un-feature all others:
Feature.objects.all().delete()
        # Feature foxy:
foxy = Addon.objects.get(name__localized_string='FoxyProxy Standard')
Feature(addon=foxy, application_id=amo.FIREFOX.id,
start=datetime.now(),
end=datetime.now() + timedelta(days=30)).save()
# Feature Limon Dictionary and Read It Later as a category feature:
s = Category.objects.get(slug='search-tools')
s.addoncategory_set.add(AddonCategory(addon=limon, feature=True))
s.addoncategory_set.add(AddonCategory(addon=readit, feature=True))
s.save()
self.reset_featured_addons()
class TestSearchToolsPages(BaseSearchToolsTest):
def test_landing_page(self):
self.setup_featured_tools_and_extensions()
response = self.client.get(reverse('browse.search-tools'))
eq_(response.status_code, 200)
doc = pq(response.content)
# Should have only featured add-ons:
eq_(sorted([a.name.localized_string
for a in response.context['addons'].object_list]),
[u'FoxyProxy Standard', u'Limon free English-Hebrew dictionary',
u'Read It Later'])
# Ensure that all heading links have the proper base URL
# between the category / no category cases.
sort_links = [urlparse(a.attrib['href']).path for a in
doc('.listing-header ul li a')]
eq_(set(sort_links), set([reverse('browse.search-tools')]))
def test_sidebar_extensions_links(self):
response = self.client.get(reverse('browse.search-tools'))
eq_(response.status_code, 200)
doc = pq(response.content)
links = doc('#search-tools-sidebar a')
eq_([a.text.strip() for a in links], [
# Search Extensions
'Most Popular', 'Recently Added',
# Search Providers
'Bookmarks'])
search_ext_url = urlparse(reverse('browse.extensions',
kwargs=dict(category='search-tools')))
eq_(urlparse(links[0].attrib['href']).path, search_ext_url.path)
eq_(urlparse(links[1].attrib['href']).path, search_ext_url.path)
def test_additional_resources(self):
for prefix, app in (
('/en-US/firefox', amo.FIREFOX.pretty),
('/en-US/seamonkey', amo.SEAMONKEY.pretty)):
app = unicode(app) # get the proxied unicode obj
response = self.client.get('%s/search-tools/' % prefix)
eq_(response.status_code, 200)
doc = pq(response.content)
txt = doc('#additional-resources ul li:eq(0)').text()
assert txt.endswith(app), "Expected %r got: %r" % (app, txt)
def test_search_tools_arent_friends_with_everyone(self):
# Search tools only show up for Firefox
response = self.client.get('/en-US/thunderbird/search-tools/')
doc = pq(response.content)
assert not doc('#search-tools-sidebar')
def test_other_pages_exclude_extensions(self):
# pretend all Add-ons are search-related:
Addon.objects.update(type=amo.ADDON_SEARCH)
# randomly make one an extension to be sure it is filtered out:
Addon.objects.valid()[0].update(type=amo.ADDON_EXTENSION)
for sort_key in ('name', 'updated', 'created', 'popular', 'rating'):
url = reverse('browse.search-tools') + '?sort=' + sort_key
r = self.client.get(url)
all_addons = r.context['addons'].object_list
assert len(all_addons)
for addon in all_addons:
assert addon.type == amo.ADDON_SEARCH, (
"sort=%s; Unexpected Add-on type for %r" % (
sort_key, addon))
def test_no_featured_addons_by_category(self):
Feature.objects.all().delete()
# Pretend Foxy is a bookmarks related search add-on
foxy = Addon.objects.get(name__localized_string='FoxyProxy Standard')
foxy.type = amo.ADDON_SEARCH
foxy.save()
bookmarks = Category.objects.get(slug='bookmarks')
bookmarks.addoncategory_set.add(
AddonCategory(addon=foxy, feature=False))
bookmarks.save()
response = self.client.get(reverse('browse.search-tools',
args=('bookmarks',)))
eq_(response.status_code, 200)
doc = pq(response.content)
eq_([a.name.localized_string
for a in response.context['addons'].object_list],
[u'FoxyProxy Standard'])
eq_(response.context['filter'].field, 'popular')
eq_(doc('title').text(),
'Bookmarks :: Search Tools :: Add-ons for Firefox')
# Ensure that all heading links have the proper base URL
# between the category / no category cases.
sort_links = [urlparse(a.attrib['href']).path for a in
doc('.listing-header ul li a')]
eq_(set(sort_links), set([reverse('browse.search-tools',
args=('bookmarks',))]))
def test_rss_links_per_page(self):
def get_link(url):
r = self.client.get(url)
eq_(r.status_code, 200)
doc = pq(r.content)
return doc('head link[type="application/rss+xml"]').attr('href')
eq_(get_link(reverse('browse.search-tools')),
reverse('browse.search-tools.rss') + '?sort=featured')
eq_(get_link(reverse('browse.search-tools') + '?sort=name'),
reverse('browse.search-tools.rss') + '?sort=name')
eq_(get_link(reverse('browse.search-tools', args=('bookmarks',))),
reverse('browse.search-tools.rss',
args=('bookmarks',)) + '?sort=popular')
class TestSearchToolsFeed(BaseSearchToolsTest):
def test_featured_search_tools(self):
self.setup_featured_tools_and_extensions()
url = reverse('browse.search-tools.rss') + '?sort=featured'
r = self.client.get(url)
eq_(r.status_code, 200)
doc = pq(r.content)
eq_(doc('rss channel title')[0].text,
'Search Tools :: Add-ons for Firefox')
link = doc('rss channel link')[0].text
rel_link = reverse('browse.search-tools.rss') + '?sort=featured'
assert link.endswith(rel_link), ('Unexpected link: %r' % link)
eq_(doc('rss channel description')[0].text,
"Search tools and search-related extensions")
# There should be two features: one search tool and one extension.
eq_(sorted([e.text for e in doc('rss channel item title')]),
['FoxyProxy Standard 2.17',
'Limon free English-Hebrew dictionary 0.5.3',
'Read It Later 2.0.3'])
def test_search_tools_no_sorting(self):
url = reverse('browse.search-tools.rss')
r = self.client.get(url)
eq_(r.status_code, 200)
doc = pq(r.content)
link = doc('rss channel link')[0].text
rel_link = reverse('browse.search-tools.rss') + '?sort=popular'
assert link.endswith(rel_link), ('Unexpected link: %r' % link)
def test_search_tools_by_name(self):
# Pretend Foxy is a search add-on
(Addon.objects.filter(name__localized_string='FoxyProxy Standard')
.update(type=amo.ADDON_SEARCH))
url = reverse('browse.search-tools.rss') + '?sort=name'
r = self.client.get(url)
eq_(r.status_code, 200)
doc = pq(r.content)
eq_(doc('rss channel description')[0].text, 'Search tools')
# There should be only search tools.
eq_([e.text for e in doc('rss channel item title')],
['FoxyProxy Standard 2.17'])
def test_search_tools_within_a_category(self):
# Pretend Foxy is the only bookmarks related search add-on
AddonCategory.objects.all().delete()
foxy = Addon.objects.get(name__localized_string='FoxyProxy Standard')
foxy.type = amo.ADDON_SEARCH
foxy.save()
bookmarks = Category.objects.get(slug='bookmarks')
bookmarks.addoncategory_set.add(
AddonCategory(addon=foxy, feature=False))
bookmarks.save()
url = reverse('browse.search-tools.rss',
args=('bookmarks',)) + '?sort=popular'
r = self.client.get(url)
eq_(r.status_code, 200)
doc = pq(r.content)
eq_(doc('rss channel title')[0].text,
'Bookmarks :: Search Tools :: Add-ons for Firefox')
link = doc('rss channel link')[0].text
rel_link = reverse('browse.search-tools.rss',
args=('bookmarks',)) + '?sort=popular'
assert link.endswith(rel_link), ('Unexpected link: %r' % link)
eq_(doc('rss channel description')[0].text,
"Search tools relating to Bookmarks")
eq_([e.text for e in doc('rss channel item title')],
['FoxyProxy Standard 2.17'])
def test_non_ascii_titles(self):
bookmarks = Category.objects.get(slug='bookmarks')
bookmarks.name = u'Ivan Krstić'
bookmarks.save()
url = reverse('browse.search-tools.rss',
args=('bookmarks',))
r = self.client.get(url)
eq_(r.status_code, 200)
doc = pq(r.content)
eq_(doc('rss channel title')[0].text,
u'Ivan Krstić :: Search Tools :: Add-ons for Firefox')
class TestLegacyRedirects(amo.tests.TestCase):
fixtures = ('base/category.json',)
def test_types(self):
def redirects(from_, to):
r = self.client.get('/en-US/firefox' + from_)
self.assertRedirects(r, '/en-US/firefox' + to, status_code=301,
msg_prefix="Redirection failed: %s" % to)
redirects('/browse/type:1', '/extensions/')
redirects('/browse/type:1/', '/extensions/')
redirects('/browse/type:1/cat:all', '/extensions/')
redirects('/browse/type:1/cat:all/', '/extensions/')
redirects('/browse/type:1/cat:72', '/extensions/alerts-updates/')
redirects('/browse/type:1/cat:72/', '/extensions/alerts-updates/')
redirects('/browse/type:1/cat:72/sort:newest/format:rss',
'/extensions/alerts-updates/format:rss?sort=created')
redirects('/browse/type:1/cat:72/sort:weeklydownloads/format:rss',
'/extensions/alerts-updates/format:rss?sort=popular')
redirects('/browse/type:2', '/themes/')
redirects('/browse/type:3', '/language-tools/')
redirects('/browse/type:4', '/search-tools/')
redirects('/search-engines', '/search-tools/')
# redirects('/browse/type:7', '/plugins/')
redirects('/recommended', '/extensions/?sort=featured')
redirects('/featured', '/extensions/?sort=featured')
redirects('/recommended/format:rss', '/featured/format:rss')
class TestFeaturedPage(amo.tests.TestCase):
fixtures = ['base/apps', 'base/featured', 'addons/featured']
@mock.patch.object(settings, 'NEW_FEATURES', False)
def test_featured_addons(self):
"""Make sure that only featured extensions are shown."""
url = reverse('browse.extensions') + '?sort=featured'
response = self.client.get(url)
# But not in the content.
featured = (Addon.objects.listed(amo.FIREFOX)
.filter(type=amo.ADDON_EXTENSION)
.values_list('id', flat=True))
eq_(sorted(featured),
sorted(a.id for a in response.context['addons']))
class TestCategoriesFeed(amo.tests.TestCase):
def setUp(self):
self.feed = feeds.CategoriesRss()
self.u = u'Ελληνικά'
self.wut = Translation(localized_string=self.u, locale='el')
self.feed.request = mock.Mock()
self.feed.request.APP.pretty = self.u
self.category = Category(name=self.u)
self.addon = Addon(name=self.u, id=2, type=1, slug='xx')
self.addon._current_version = Version(version='v%s' % self.u)
def test_title(self):
eq_(self.feed.title(self.category),
u'%s :: Add-ons for %s' % (self.wut, self.u))
def test_item_title(self):
eq_(self.feed.item_title(self.addon),
u'%s v%s' % (self.u, self.u))
def test_item_guid(self):
t = self.feed.item_guid(self.addon)
url = u'/addon/%s/versions/v%s' % (self.addon.slug,
urllib.urlquote(self.u))
assert t.endswith(url), t
class TestFeaturedFeed(amo.tests.TestCase):
fixtures = ['addons/featured', 'base/addon_3615', 'base/apps',
'base/collections', 'base/featured', 'base/users']
def setUp(self):
patcher = mock.patch.object(settings, 'NEW_FEATURES', False)
patcher.start()
self.addCleanup(patcher.stop)
def test_feed_elements_present(self):
url = reverse('browse.featured.rss')
r = self.client.get(url, follow=True)
doc = pq(r.content)
eq_(doc('rss channel title')[0].text,
'Featured Add-ons :: Add-ons for Firefox')
assert doc('rss channel link')[0].text.endswith('/en-US/firefox/')
eq_(doc('rss channel description')[0].text,
"Here's a few of our favorite add-ons to help you get "
"started customizing Firefox.")
eq_(len(doc('rss channel item')),
Addon.objects.featured(amo.FIREFOX).count())
class TestNewFeaturedFeed(TestFeaturedFeed):
fixtures = TestFeaturedFeed.fixtures + ['bandwagon/featured_collections']
def setUp(self):
patcher = mock.patch.object(settings, 'NEW_FEATURES', True)
patcher.start()
self.addCleanup(patcher.stop)
class TestPersonas(amo.tests.TestCase):
fixtures = ('base/apps', 'base/featured', 'addons/featured',
'addons/persona')
def test_personas_grid(self):
"""Show grid page if there are fewer than 5 Personas."""
base = (Addon.objects.public().filter(type=amo.ADDON_PERSONA)
.extra(select={'_app': amo.FIREFOX.id}))
eq_(base.count(), 2)
r = self.client.get(reverse('browse.personas'))
self.assertTemplateUsed(r, 'browse/personas/grid.html')
eq_(r.status_code, 200)
assert 'is_homepage' in r.context
def test_personas_landing(self):
"""Show landing page if there are greater than 4 Personas."""
for i in xrange(3):
a = Addon(type=amo.ADDON_PERSONA)
a.name = 'persona-%s' % i
a.all_categories = []
a.save()
v = Version.objects.get(addon=Addon.objects.get(id=15679))
v.addon = a
v.pk = None
v.save()
p = Persona(addon_id=a.id, persona_id=i)
p.save()
a.persona = p
a._current_version = v
a.status = amo.STATUS_PUBLIC
a.save()
base = (Addon.objects.public().filter(type=amo.ADDON_PERSONA)
.extra(select={'_app': amo.FIREFOX.id}))
eq_(base.count(), 5)
r = self.client.get(reverse('browse.personas'))
self.assertTemplateUsed(r, 'browse/personas/category_landing.html')
def test_personas_category_landing(self):
"""Ensure we hit a grid page if there's a category and no sorting."""
grid = 'browse/personas/grid.html'
landing = 'browse/personas/category_landing.html'
category = Category(type=amo.ADDON_PERSONA, slug='abc',
application=Application.objects.get(id=amo.FIREFOX.id))
category.save()
category_url = reverse('browse.personas', args=[category.slug])
r = self.client.get(category_url + '?sort=created')
self.assertTemplateUsed(r, grid)
r = self.client.get(category_url)
self.assertTemplateUsed(r, grid)
# Category with 5 add-ons should bring us to a landing page.
category.count = 5
category.save()
r = self.client.get(category_url)
self.assertTemplateUsed(r, landing)
class TestMobileFeatured(TestMobile):
def test_featured(self):
r = self.client.get(reverse('browse.extensions') + '?sort=featured')
eq_(r.status_code, 200)
self.assertTemplateUsed(r, 'browse/mobile/extensions.html')
eq_(r.context['sorting'], 'featured')
class TestMobileExtensions(TestMobile):
def test_extensions(self):
r = self.client.get(reverse('browse.extensions'))
eq_(r.status_code, 200)
self.assertTemplateUsed(r, 'browse/mobile/extensions.html')
self.assertTemplateUsed(r, 'addons/listing/items_mobile.html')
eq_(r.context['category'], None)
eq_(pq(r.content)('.addon-listing .listview').length, 1)
def test_category(self):
cat = Category.objects.all()[0]
r = self.client.get(reverse('browse.extensions', args=[cat.slug]))
eq_(r.status_code, 200)
self.assertTemplateUsed(r, 'browse/mobile/extensions.html')
self.assertTemplateUsed(r, 'addons/listing/items_mobile.html')
eq_(r.context['category'], cat)
eq_(pq(r.content)('.addon-listing .listview').length, 1)
class TestMobilePersonas(TestMobile):
fixtures = TestMobile.fixtures + ['addons/persona']
def test_personas_home(self):
r = self.client.get(reverse('browse.personas'))
eq_(r.status_code, 200)
self.assertTemplateUsed(r,
'browse/personas/mobile/category_landing.html')
eq_(r.context['category'], None)
assert 'is_homepage' in r.context
def test_personas_home_title(self):
r = self.client.get(reverse('browse.personas'))
doc = pq(r.content)
eq_(doc('title').text(), 'Personas :: Add-ons for Firefox')
def test_personas_search(self):
r = self.client.get(reverse('browse.personas'))
eq_(r.context['search_cat'], 'personas')
s = pq(r.content)('#search')
eq_(s.attr('action'), reverse('search.search'))
eq_(s.find('input[name=q]').attr('placeholder'), 'search for personas')
eq_(s.find('input[name=cat]').val(), 'personas')
def _create_persona_cat(self):
category = Category(type=amo.ADDON_PERSONA, slug='xxx',
application_id=amo.FIREFOX.id)
category.save()
return category
def test_personas_grid(self):
"""Ensure we always hit grid page if there's a category or sorting."""
grid = 'browse/personas/mobile/grid.html'
category = self._create_persona_cat()
category_url = reverse('browse.personas', args=[category.slug])
# Even if the category has 5 add-ons.
category.count = 5
category.save()
r = self.client.get(category_url)
self.assertTemplateUsed(r, grid)
# Show the grid page even with sorting.
r = self.client.get(reverse('browse.personas') + '?sort=created')
self.assertTemplateUsed(r, grid)
r = self.client.get(category_url + '?sort=created')
self.assertTemplateUsed(r, grid)
def test_personas_category_title(self):
r = self.client.get(reverse('browse.personas',
args=[self._create_persona_cat().slug]))
doc = pq(r.content)
eq_(doc('title').text(), 'None Personas :: Add-ons for Firefox')
def test_personas_sorting_title(self):
r = self.client.get(reverse('browse.personas') + '?sort=up-and-coming')
doc = pq(r.content)
eq_(doc('title').text(), 'Up & Coming Personas :: Add-ons for Firefox')
|
daimajia/duktape
|
refs/heads/master
|
util/autofix_debuglog_calls.py
|
15
|
#!/usr/bin/python
#
# Automatically fix one-line broken debug log calls. Adds a missing
# wrapper for such lines, e.g. changes:
#
# DUK_DPRINT(...);
#
# into:
#
# DUK_D(DUK_DPRINT(...));
#
# Does not handle multiline log calls.
#
# Usage:
#
# $ python autofix_debuglog_calls.py src/*.c
#
# WARNING: works in place, so commit any changes before running, then
# check diff.
#
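# Note: re_callsite below requires the terminating semicolon on the same
# line, so a call split across lines, e.g. (illustrative):
#
#     DUK_DPRINT("value: %d",
#                (int) val);
#
# is left untouched and must be wrapped by hand.
#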
import os
import sys
import re
re_callsite = re.compile(r'^\s*(DUK_D+PRINT).*?;$')
wrappers = {
'DUK_DPRINT': 'DUK_D',
'DUK_DDPRINT': 'DUK_DD',
'DUK_DDDPRINT': 'DUK_DDD'
}
warnings = []
def process(filename):
f = open(filename, 'rb')
output = []
linenumber = 0
fixes = 0
for line in f:
linenumber += 1
if 'DPRINT' not in line:
output.append(line)
continue
m = re_callsite.match(line)
if m is None:
output.append(line)
continue
log_macro = m.group(1)
log_wrapper = wrappers[log_macro]
line = line.replace(log_macro, log_wrapper + '(' + log_macro) # DUK_DPRINT( -> DUK_D(DUK_DPRINT(
line = line.replace(');', '));') # ...); -> ...));
output.append(line)
fixes += 1
f.close()
if fixes > 0:
print '%s: %d fixes' % (filename, fixes)
f = open(filename, 'wb')
f.write(''.join(output))
f.close()
def main():
for filename in sys.argv[1:]:
process(filename)
if __name__ == '__main__':
main()
|
shakamunyi/sahara
|
refs/heads/master
|
sahara/api/middleware/log_exchange.py
|
5
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# It's based on debug middleware from oslo-incubator
"""Debug middleware"""
from __future__ import print_function
import sys
from oslo_middleware import base
import six
import webob.dec
class LogExchange(base.Middleware):
"""Helper class that returns debug information.
Can be inserted into any WSGI application chain to get information about
the request and response.
"""
@webob.dec.wsgify
def __call__(self, req):
print(("*" * 40) + " REQUEST ENVIRON")
for key, value in req.environ.items():
print(key, "=", value)
if req.is_body_readable:
print(('*' * 40) + " REQUEST BODY")
if req.content_type == 'application/json':
print(req.json)
else:
print(req.body)
print()
resp = req.get_response(self.application)
print(("*" * 40) + " RESPONSE HEADERS")
for (key, value) in six.iteritems(resp.headers):
print(key, "=", value)
print()
resp.app_iter = self.print_generator(resp.app_iter)
return resp
@staticmethod
def print_generator(app_iter):
"""Prints the contents of a wrapper string iterator when iterated."""
print(("*" * 40) + " RESPONSE BODY")
for part in app_iter:
sys.stdout.write(part)
sys.stdout.flush()
yield part
print()
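
# Illustrative sketch (not part of the original module): LogExchange wraps any
# WSGI callable through oslo_middleware.base.Middleware's constructor.
# ``demo_app`` below is a hypothetical application used only to show the wiring.
if __name__ == '__main__':
    from wsgiref.simple_server import make_server

    def demo_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello\n']

    # Every request/response exchanged with demo_app is printed to stdout.
    make_server('localhost', 8080, LogExchange(demo_app)).serve_forever()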
|
lukauskas/means
|
refs/heads/master
|
src/means/util/decorators.py
|
2
|
from functools import wraps
def cache(func):
    """Memoize ``func`` on its positional arguments.

    Cached results live for the lifetime of the process; arguments must be
    hashable, and keyword arguments are not supported.
    """
    cache = {}
@wraps(func)
def wrap(*args):
if args not in cache:
cache[args] = func(*args)
return cache[args]
return wrap
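
# Illustrative usage (not part of the original module): memoizing a
# hypothetical pure function with the decorator above.
if __name__ == '__main__':
    @cache
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    assert fib(30) == 832040  # repeated calls with the same argument hit the cache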
|
zzxuanyuan/root-compressor-dummy
|
refs/heads/compressionbench
|
interpreter/llvm/src/utils/llvm-build/llvmbuild/main.py
|
50
|
from __future__ import absolute_import
import filecmp
import os
import sys
import llvmbuild.componentinfo as componentinfo
import llvmbuild.configutil as configutil
from llvmbuild.util import fatal, note
###
def cmake_quote_string(value):
"""
cmake_quote_string(value) -> str
Return a quoted form of the given value that is suitable for use in CMake
language files.
"""
# Currently, we only handle escaping backslashes.
value = value.replace("\\", "\\\\")
return value
def cmake_quote_path(value):
"""
cmake_quote_path(value) -> str
Return a quoted form of the given value that is suitable for use in CMake
language files.
"""
    # CMake has a bug in its Makefile generator that doesn't properly quote
# strings it generates. So instead of using proper quoting, we just use "/"
# style paths. Currently, we only handle escaping backslashes.
value = value.replace("\\", "/")
return value
def mk_quote_string_for_target(value):
"""
mk_quote_string_for_target(target_name) -> str
Return a quoted form of the given target_name suitable for including in a
Makefile as a target name.
"""
# The only quoting we currently perform is for ':', to support msys users.
return value.replace(":", "\\:")
def make_install_dir(path):
"""
make_install_dir(path) -> None
Create the given directory path for installation, including any parents.
"""
# os.makedirs considers it an error to be called with an existent path.
if not os.path.exists(path):
os.makedirs(path)
###
class LLVMProjectInfo(object):
@staticmethod
def load_infos_from_path(llvmbuild_source_root):
def recurse(subpath):
# Load the LLVMBuild file.
llvmbuild_path = os.path.join(llvmbuild_source_root + subpath,
'LLVMBuild.txt')
if not os.path.exists(llvmbuild_path):
fatal("missing LLVMBuild.txt file at: %r" % (llvmbuild_path,))
# Parse the components from it.
common,info_iter = componentinfo.load_from_path(llvmbuild_path,
subpath)
for info in info_iter:
yield info
# Recurse into the specified subdirectories.
for subdir in common.get_list("subdirectories"):
for item in recurse(os.path.join(subpath, subdir)):
yield item
return recurse("/")
@staticmethod
def load_from_path(source_root, llvmbuild_source_root):
infos = list(
LLVMProjectInfo.load_infos_from_path(llvmbuild_source_root))
return LLVMProjectInfo(source_root, infos)
def __init__(self, source_root, component_infos):
# Store our simple ivars.
self.source_root = source_root
self.component_infos = list(component_infos)
self.component_info_map = None
self.ordered_component_infos = None
def validate_components(self):
"""validate_components() -> None
Validate that the project components are well-defined. Among other
things, this checks that:
- Components have valid references.
- Components references do not form cycles.
We also construct the map from component names to info, and the
topological ordering of components.
"""
# Create the component info map and validate that component names are
# unique.
self.component_info_map = {}
for ci in self.component_infos:
existing = self.component_info_map.get(ci.name)
if existing is not None:
# We found a duplicate component name, report it and error out.
fatal("found duplicate component %r (at %r and %r)" % (
ci.name, ci.subpath, existing.subpath))
self.component_info_map[ci.name] = ci
# Disallow 'all' as a component name, which is a special case.
if 'all' in self.component_info_map:
fatal("project is not allowed to define 'all' component")
# Add the root component.
if '$ROOT' in self.component_info_map:
fatal("project is not allowed to define $ROOT component")
self.component_info_map['$ROOT'] = componentinfo.GroupComponentInfo(
'/', '$ROOT', None)
self.component_infos.append(self.component_info_map['$ROOT'])
# Topologically order the component information according to their
# component references.
def visit_component_info(ci, current_stack, current_set):
            # Check for cycles.
if ci in current_set:
# We found a cycle, report it and error out.
cycle_description = ' -> '.join(
'%r (%s)' % (ci.name, relation)
for relation,ci in current_stack)
fatal("found cycle to %r after following: %s -> %s" % (
ci.name, cycle_description, ci.name))
# If we have already visited this item, we are done.
if ci not in components_to_visit:
return
# Otherwise, mark the component info as visited and traverse.
components_to_visit.remove(ci)
# Validate the parent reference, which we treat specially.
if ci.parent is not None:
parent = self.component_info_map.get(ci.parent)
if parent is None:
fatal("component %r has invalid reference %r (via %r)" % (
ci.name, ci.parent, 'parent'))
ci.set_parent_instance(parent)
for relation,referent_name in ci.get_component_references():
# Validate that the reference is ok.
referent = self.component_info_map.get(referent_name)
if referent is None:
fatal("component %r has invalid reference %r (via %r)" % (
ci.name, referent_name, relation))
# Visit the reference.
current_stack.append((relation,ci))
current_set.add(ci)
visit_component_info(referent, current_stack, current_set)
current_set.remove(ci)
current_stack.pop()
# Finally, add the component info to the ordered list.
self.ordered_component_infos.append(ci)
# FIXME: We aren't actually correctly checking for cycles along the
# parent edges. Haven't decided how I want to handle this -- I thought
# about only checking cycles by relation type. If we do that, it falls
# out easily. If we don't, we should special case the check.
self.ordered_component_infos = []
components_to_visit = sorted(
set(self.component_infos),
key = lambda c: c.name)
while components_to_visit:
visit_component_info(components_to_visit[0], [], set())
# Canonicalize children lists.
for c in self.ordered_component_infos:
c.children.sort(key = lambda c: c.name)
def print_tree(self):
def visit(node, depth = 0):
print('%s%-40s (%s)' % (' '*depth, node.name, node.type_name))
for c in node.children:
visit(c, depth + 1)
visit(self.component_info_map['$ROOT'])
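
    # Example output shape for print_tree (illustrative; the component names
    # here are hypothetical): indentation tracks depth in the component tree.
    #
    #   $ROOT                                    (Group)
    #    Libraries                               (Group)
    #     Support                                (Library)
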
def write_components(self, output_path):
# Organize all the components by the directory their LLVMBuild file
# should go in.
info_basedir = {}
for ci in self.component_infos:
# Ignore the $ROOT component.
if ci.parent is None:
continue
info_basedir[ci.subpath] = info_basedir.get(ci.subpath, []) + [ci]
# Compute the list of subdirectories to scan.
subpath_subdirs = {}
for ci in self.component_infos:
# Ignore root components.
if ci.subpath == '/':
continue
# Otherwise, append this subpath to the parent list.
parent_path = os.path.dirname(ci.subpath)
subpath_subdirs[parent_path] = parent_list = subpath_subdirs.get(
parent_path, set())
parent_list.add(os.path.basename(ci.subpath))
# Generate the build files.
for subpath, infos in info_basedir.items():
# Order the components by name to have a canonical ordering.
infos.sort(key = lambda ci: ci.name)
# Format the components into llvmbuild fragments.
fragments = []
# Add the common fragments.
subdirectories = subpath_subdirs.get(subpath)
if subdirectories:
fragment = """\
subdirectories = %s
""" % (" ".join(sorted(subdirectories)),)
fragments.append(("common", fragment))
# Add the component fragments.
num_common_fragments = len(fragments)
for ci in infos:
fragment = ci.get_llvmbuild_fragment()
if fragment is None:
continue
name = "component_%d" % (len(fragments) - num_common_fragments)
fragments.append((name, fragment))
if not fragments:
continue
assert subpath.startswith('/')
directory_path = os.path.join(output_path, subpath[1:])
# Create the directory if it does not already exist.
if not os.path.exists(directory_path):
os.makedirs(directory_path)
# In an effort to preserve comments (which aren't parsed), read in
# the original file and extract the comments. We only know how to
# associate comments that prefix a section name.
f = open(infos[0]._source_path)
comments_map = {}
comment_block = ""
for ln in f:
if ln.startswith(';'):
comment_block += ln
elif ln.startswith('[') and ln.endswith(']\n'):
comments_map[ln[1:-2]] = comment_block
else:
comment_block = ""
f.close()
            # Create the LLVMBuild file.
file_path = os.path.join(directory_path, 'LLVMBuild.txt')
f = open(file_path, "w")
# Write the header.
header_fmt = ';===- %s %s-*- Conf -*--===;'
header_name = '.' + os.path.join(subpath, 'LLVMBuild.txt')
header_pad = '-' * (80 - len(header_fmt % (header_name, '')))
header_string = header_fmt % (header_name, header_pad)
f.write("""\
%s
;
; The LLVM Compiler Infrastructure
;
; This file is distributed under the University of Illinois Open Source
; License. See LICENSE.TXT for details.
;
;===------------------------------------------------------------------------===;
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
; http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;
""" % header_string)
            # Write out each fragment.
for name,fragment in fragments:
comment = comments_map.get(name)
if comment is not None:
f.write(comment)
f.write("[%s]\n" % name)
f.write(fragment)
if fragment is not fragments[-1][1]:
f.write('\n')
f.close()
def write_library_table(self, output_path, enabled_optional_components):
# Write out the mapping from component names to required libraries.
#
# We do this in topological order so that we know we can append the
# dependencies for added library groups.
entries = {}
for c in self.ordered_component_infos:
# Skip optional components which are not enabled.
if c.type_name == 'OptionalLibrary' \
and c.name not in enabled_optional_components:
continue
# Skip target groups which are not enabled.
tg = c.get_parent_target_group()
if tg and not tg.enabled:
continue
# Only certain components are in the table.
if c.type_name not in ('Library', 'OptionalLibrary', \
'LibraryGroup', 'TargetGroup'):
continue
# Compute the llvm-config "component name". For historical reasons,
# this is lowercased based on the library name.
llvmconfig_component_name = c.get_llvmconfig_component_name()
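            # For example (illustrative): a library component named
            # 'BitReader' maps to the llvm-config component name 'bitreader'.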
# Get the library name, or None for LibraryGroups.
if c.type_name == 'Library' or c.type_name == 'OptionalLibrary':
library_name = c.get_prefixed_library_name()
is_installed = c.installed
else:
library_name = None
is_installed = True
# Get the component names of all the required libraries.
required_llvmconfig_component_names = [
self.component_info_map[dep].get_llvmconfig_component_name()
for dep in c.required_libraries]
# Insert the entries for library groups we should add to.
for dep in c.add_to_library_groups:
entries[dep][2].append(llvmconfig_component_name)
# Add the entry.
entries[c.name] = (llvmconfig_component_name, library_name,
required_llvmconfig_component_names,
is_installed)
# Convert to a list of entries and sort by name.
entries = list(entries.values())
# Create an 'all' pseudo component. We keep the dependency list small by
# only listing entries that have no other dependents.
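        # For example (hypothetical): if 'support' appears in the dependency
        # list of 'core', it is dropped from root_entries and reached via
        # 'core', rather than being listed directly under 'all'.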
root_entries = set(e[0] for e in entries)
for _,_,deps,_ in entries:
root_entries -= set(deps)
entries.append(('all', None, root_entries, True))
entries.sort()
# Compute the maximum number of required libraries, plus one so there is
# always a sentinel.
max_required_libraries = max(len(deps)
for _,_,deps,_ in entries) + 1
# Write out the library table.
make_install_dir(os.path.dirname(output_path))
f = open(output_path+'.new', 'w')
f.write("""\
//===- llvm-build generated file --------------------------------*- C++ -*-===//
//
// Component Library Dependency Table
//
// Automatically generated file, do not edit!
//
//===----------------------------------------------------------------------===//
""")
f.write('struct AvailableComponent {\n')
f.write(' /// The name of the component.\n')
f.write(' const char *Name;\n')
f.write('\n')
f.write(' /// The name of the library for this component (or NULL).\n')
f.write(' const char *Library;\n')
f.write('\n')
f.write(' /// Whether the component is installed.\n')
f.write(' bool IsInstalled;\n')
f.write('\n')
f.write('\
/// The list of libraries required when linking this component.\n')
f.write(' const char *RequiredLibraries[%d];\n' % (
max_required_libraries))
f.write('} AvailableComponents[%d] = {\n' % len(entries))
for name,library_name,required_names,is_installed in entries:
if library_name is None:
library_name_as_cstr = 'nullptr'
else:
library_name_as_cstr = '"%s"' % library_name
if is_installed:
is_installed_as_cstr = 'true'
else:
is_installed_as_cstr = 'false'
f.write(' { "%s", %s, %s, { %s } },\n' % (
name, library_name_as_cstr, is_installed_as_cstr,
', '.join('"%s"' % dep
for dep in required_names)))
f.write('};\n')
f.close()
if not os.path.isfile(output_path):
os.rename(output_path+'.new', output_path)
elif filecmp.cmp(output_path, output_path+'.new'):
os.remove(output_path+'.new')
else:
os.remove(output_path)
os.rename(output_path+'.new', output_path)
def get_required_libraries_for_component(self, ci, traverse_groups = False):
"""
get_required_libraries_for_component(component_info) -> iter
Given a Library component info descriptor, return an iterator over all
of the directly required libraries for linking with this component. If
traverse_groups is True, then library and target groups will be
traversed to include their required libraries.
"""
assert ci.type_name in ('Library', 'OptionalLibrary', 'LibraryGroup', 'TargetGroup')
for name in ci.required_libraries:
# Get the dependency info.
dep = self.component_info_map[name]
# If it is a library, yield it.
if dep.type_name == 'Library' or dep.type_name == 'OptionalLibrary':
yield dep
continue
# Otherwise if it is a group, yield or traverse depending on what
# was requested.
if dep.type_name in ('LibraryGroup', 'TargetGroup'):
if not traverse_groups:
yield dep
continue
for res in self.get_required_libraries_for_component(dep, True):
yield res
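    # A hedged usage sketch for the generator above (the component name is
    # illustrative):
    #
    #   ci = self.component_info_map['Core']
    #   direct = list(self.get_required_libraries_for_component(ci))
    #   flat = list(self.get_required_libraries_for_component(ci, True))
    #
    # 'direct' may contain LibraryGroup/TargetGroup infos; 'flat' expands
    # them down to Library and OptionalLibrary infos.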
def get_fragment_dependencies(self):
"""
get_fragment_dependencies() -> iter
Compute the list of files (as absolute paths) on which the output
fragments depend (i.e., files for which a modification should trigger a
rebuild of the fragment).
"""
# Construct a list of all the dependencies of the Makefile fragment
# itself. These include all the LLVMBuild files themselves, as well as
# all of our own sources.
#
# Many components may come from the same file, so we make sure to unique
# these.
build_paths = set()
for ci in self.component_infos:
p = os.path.join(self.source_root, ci.subpath[1:], 'LLVMBuild.txt')
if p not in build_paths:
yield p
build_paths.add(p)
# Gather the list of necessary sources by just finding all loaded
# modules that are inside the LLVM source tree.
for module in sys.modules.values():
# Find the module path.
if not hasattr(module, '__file__'):
continue
path = getattr(module, '__file__')
if not path:
continue
# Strip off any compiled suffix.
if os.path.splitext(path)[1] in ['.pyc', '.pyo', '.pyd']:
path = path[:-1]
# If the path exists and is in the source tree, consider it a
# dependency.
if (path.startswith(self.source_root) and os.path.exists(path)):
yield path
def foreach_cmake_library(self, f,
enabled_optional_components,
skip_disabled,
skip_not_installed):
for ci in self.ordered_component_infos:
# Skip optional components which are not enabled.
if ci.type_name == 'OptionalLibrary' \
and ci.name not in enabled_optional_components:
continue
# We only write the information for libraries currently.
if ci.type_name not in ('Library', 'OptionalLibrary'):
continue
# Skip disabled targets.
if skip_disabled:
tg = ci.get_parent_target_group()
if tg and not tg.enabled:
continue
# Skip targets that will not be installed
if skip_not_installed and not ci.installed:
continue
f(ci)
def write_cmake_fragment(self, output_path, enabled_optional_components):
"""
write_cmake_fragment(output_path) -> None
Generate a CMake fragment which includes all of the collated LLVMBuild
information in a format that is easily digestible by a CMake. The exact
contents of this are closely tied to how the CMake configuration
integrates LLVMBuild, see CMakeLists.txt in the top-level.
"""
dependencies = list(self.get_fragment_dependencies())
# Write out the CMake fragment.
make_install_dir(os.path.dirname(output_path))
f = open(output_path, 'w')
# Write the header.
header_fmt = '\
#===-- %s - LLVMBuild Configuration for LLVM %s-*- CMake -*--===#'
header_name = os.path.basename(output_path)
header_pad = '-' * (80 - len(header_fmt % (header_name, '')))
header_string = header_fmt % (header_name, header_pad)
f.write("""\
%s
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
#
# This file contains the LLVMBuild project information in a format easily
# consumed by the CMake based build system.
#
# This file is autogenerated by llvm-build, do not edit!
#
#===------------------------------------------------------------------------===#
""" % header_string)
# Write the dependency information in the best way we can.
f.write("""
# LLVMBuild CMake fragment dependencies.
#
# CMake has no builtin way to declare that the configuration depends on
# a particular file. However, a side effect of configure_file is to add
# said input file to CMake's internal dependency list. So, we use that
# and a dummy output file to communicate the dependency information to
# CMake.
#
# FIXME: File a CMake RFE to get a properly supported version of this
# feature.
""")
for dep in dependencies:
f.write("""\
configure_file(\"%s\"
${CMAKE_CURRENT_BINARY_DIR}/DummyConfigureOutput)\n""" % (
cmake_quote_path(dep),))
# Write the properties we use to encode the required library dependency
# information in a form CMake can easily use directly.
f.write("""
# Explicit library dependency information.
#
# The following property assignments effectively create a map from component
# names to required libraries, in a way that is easily accessed from CMake.
""")
self.foreach_cmake_library(
lambda ci:
f.write("""\
set_property(GLOBAL PROPERTY LLVMBUILD_LIB_DEPS_%s %s)\n""" % (
ci.get_prefixed_library_name(), " ".join(sorted(
dep.get_prefixed_library_name()
for dep in self.get_required_libraries_for_component(ci)))))
,
enabled_optional_components,
skip_disabled = False,
skip_not_installed = False # Dependency info must be emitted for internals libs too
)
f.close()
def write_cmake_exports_fragment(self, output_path, enabled_optional_components):
"""
write_cmake_exports_fragment(output_path) -> None
Generate a CMake fragment which includes LLVMBuild library
dependencies expressed similarly to how CMake would write
them via install(EXPORT).
"""
dependencies = list(self.get_fragment_dependencies())
# Write out the CMake exports fragment.
make_install_dir(os.path.dirname(output_path))
f = open(output_path, 'w')
f.write("""\
# Explicit library dependency information.
#
# The following property assignments tell CMake about link
# dependencies of libraries imported from LLVM.
""")
self.foreach_cmake_library(
lambda ci:
f.write("""\
set_property(TARGET %s PROPERTY IMPORTED_LINK_INTERFACE_LIBRARIES %s)\n""" % (
ci.get_prefixed_library_name(), " ".join(sorted(
dep.get_prefixed_library_name()
for dep in self.get_required_libraries_for_component(ci)))))
,
enabled_optional_components,
skip_disabled = True,
skip_not_installed = True # Do not export internal libraries like gtest
)
f.close()
def write_make_fragment(self, output_path, enabled_optional_components):
"""
write_make_fragment(output_path) -> None
Generate a Makefile fragment which includes all of the collated
LLVMBuild information in a format that is easily digestible by a
Makefile. The exact contents of this are closely tied to how the LLVM
Makefiles integrate LLVMBuild, see Makefile.rules in the top-level.
"""
dependencies = list(self.get_fragment_dependencies())
# Write out the Makefile fragment.
make_install_dir(os.path.dirname(output_path))
f = open(output_path, 'w')
# Write the header.
header_fmt = '\
#===-- %s - LLVMBuild Configuration for LLVM %s-*- Makefile -*--===#'
header_name = os.path.basename(output_path)
header_pad = '-' * (80 - len(header_fmt % (header_name, '')))
header_string = header_fmt % (header_name, header_pad)
f.write("""\
%s
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
#
# This file contains the LLVMBuild project information in a format easily
# consumed by the Makefile based build system.
#
# This file is autogenerated by llvm-build, do not edit!
#
#===------------------------------------------------------------------------===#
""" % header_string)
# Write the dependencies for the fragment.
#
# FIXME: Technically, we need to properly quote for Make here.
f.write("""\
# Clients must explicitly enable LLVMBUILD_INCLUDE_DEPENDENCIES to get
# these dependencies. This is a compromise to help improve the
# performance of recursive Make systems.
""")
f.write('ifeq ($(LLVMBUILD_INCLUDE_DEPENDENCIES),1)\n')
f.write("# The dependencies for this Makefile fragment itself.\n")
f.write("%s: \\\n" % (mk_quote_string_for_target(output_path),))
for dep in dependencies:
f.write("\t%s \\\n" % (dep,))
f.write('\n')
# Generate dummy rules for each of the dependencies, so that things
# continue to work correctly if any of those files are moved or removed.
f.write("""\
# The dummy targets to allow proper regeneration even when files are moved or
# removed.
""")
for dep in dependencies:
f.write("%s:\n" % (mk_quote_string_for_target(dep),))
f.write('endif\n')
f.write("""
# List of libraries to be exported for use by applications.
# See 'cmake/modules/Makefile'.
LLVM_LIBS_TO_EXPORT :=""")
self.foreach_cmake_library(
lambda ci:
f.write(' \\\n %s' % ci.get_prefixed_library_name())
,
enabled_optional_components,
skip_disabled = True,
skip_not_installed = True # Do not export internal libraries like gtest
)
f.write('\n')
f.close()
def add_magic_target_components(parser, project, opts):
"""add_magic_target_components(project, opts) -> None
Add the "magic" target based components to the project, which can only be
determined based on the target configuration options.
This currently is responsible for populating the required_libraries list of
the "all-targets", "Native", "NativeCodeGen", and "Engine" components.
"""
# Determine the available targets.
available_targets = dict((ci.name,ci)
for ci in project.component_infos
if ci.type_name == 'TargetGroup')
# Find the configured native target.
# We handle a few special cases of target names here for historical
# reasons, as these are the names configure currently comes up with.
native_target_name = { 'x86' : 'X86',
'x86_64' : 'X86',
'Unknown' : None }.get(opts.native_target,
opts.native_target)
if native_target_name is None:
native_target = None
else:
native_target = available_targets.get(native_target_name)
if native_target is None:
parser.error("invalid native target: %r (not in project)" % (
opts.native_target,))
if native_target.type_name != 'TargetGroup':
parser.error("invalid native target: %r (not a target)" % (
opts.native_target,))
# Find the list of targets to enable.
if opts.enable_targets is None:
enable_targets = available_targets.values()
else:
# We support both space separated and semi-colon separated lists.
if opts.enable_targets == '':
enable_target_names = []
elif ' ' in opts.enable_targets:
enable_target_names = opts.enable_targets.split()
else:
enable_target_names = opts.enable_targets.split(';')
enable_targets = []
for name in enable_target_names:
target = available_targets.get(name)
if target is None:
parser.error("invalid target to enable: %r (not in project)" % (
name,))
if target.type_name != 'TargetGroup':
parser.error("invalid target to enable: %r (not a target)" % (
name,))
enable_targets.append(target)
# Find the special library groups we are going to populate. We enforce that
# these appear in the project (instead of just adding them) so that they at
# least have an explicit representation in the project LLVMBuild files (and
# comments explaining how they are populated).
def find_special_group(name):
info = info_map.get(name)
if info is None:
fatal("expected project to contain special %r component" % (
name,))
if info.type_name != 'LibraryGroup':
fatal("special component %r should be a LibraryGroup" % (
name,))
if info.required_libraries:
fatal("special component %r must have empty %r list" % (
name, 'required_libraries'))
if info.add_to_library_groups:
fatal("special component %r must have empty %r list" % (
name, 'add_to_library_groups'))
info._is_special_group = True
return info
info_map = dict((ci.name, ci) for ci in project.component_infos)
all_targets = find_special_group('all-targets')
native_group = find_special_group('Native')
native_codegen_group = find_special_group('NativeCodeGen')
engine_group = find_special_group('Engine')
# Set the enabled bit in all the target groups, and append to the
# all-targets list.
for ci in enable_targets:
all_targets.required_libraries.append(ci.name)
ci.enabled = True
# If we have a native target, then that defines the native and
# native_codegen libraries.
if native_target and native_target.enabled:
native_group.required_libraries.append(native_target.name)
native_codegen_group.required_libraries.append(
'%sCodeGen' % native_target.name)
# If we have a native target with a JIT, use that for the engine. Otherwise,
# use the interpreter.
if native_target and native_target.enabled and native_target.has_jit:
engine_group.required_libraries.append('MCJIT')
engine_group.required_libraries.append(native_group.name)
else:
engine_group.required_libraries.append('Interpreter')
def main():
from optparse import OptionParser, OptionGroup
parser = OptionParser("usage: %prog [options]")
group = OptionGroup(parser, "Input Options")
group.add_option("", "--source-root", dest="source_root", metavar="PATH",
help="Path to the LLVM source (inferred if not given)",
action="store", default=None)
group.add_option("", "--llvmbuild-source-root",
dest="llvmbuild_source_root",
help=(
"If given, an alternate path to search for LLVMBuild.txt files"),
action="store", default=None, metavar="PATH")
group.add_option("", "--build-root", dest="build_root", metavar="PATH",
help="Path to the build directory (if needed) [%default]",
action="store", default=None)
parser.add_option_group(group)
group = OptionGroup(parser, "Output Options")
group.add_option("", "--print-tree", dest="print_tree",
help="Print out the project component tree [%default]",
action="store_true", default=False)
group.add_option("", "--write-llvmbuild", dest="write_llvmbuild",
help="Write out the LLVMBuild.txt files to PATH",
action="store", default=None, metavar="PATH")
group.add_option("", "--write-library-table",
dest="write_library_table", metavar="PATH",
help="Write the C++ library dependency table to PATH",
action="store", default=None)
group.add_option("", "--write-cmake-fragment",
dest="write_cmake_fragment", metavar="PATH",
help="Write the CMake project information to PATH",
action="store", default=None)
group.add_option("", "--write-cmake-exports-fragment",
dest="write_cmake_exports_fragment", metavar="PATH",
help="Write the CMake exports information to PATH",
action="store", default=None)
group.add_option("", "--write-make-fragment",
dest="write_make_fragment", metavar="PATH",
help="Write the Makefile project information to PATH",
action="store", default=None)
group.add_option("", "--configure-target-def-file",
dest="configure_target_def_files",
help="""Configure the given file at SUBPATH (relative to
the inferred or given source root, and with a '.in' suffix) by replacing certain
substitution variables with lists of targets that support certain features (for
example, targets with AsmPrinters) and write the result to the build root (as
given by --build-root) at the same SUBPATH""",
metavar="SUBPATH", action="append", default=None)
parser.add_option_group(group)
group = OptionGroup(parser, "Configuration Options")
group.add_option("", "--native-target",
dest="native_target", metavar="NAME",
help=("Treat the named target as the 'native' one, if "
"given [%default]"),
action="store", default=None)
group.add_option("", "--enable-targets",
dest="enable_targets", metavar="NAMES",
help=("Enable the given space or semi-colon separated "
"list of targets, or all targets if not present"),
action="store", default=None)
group.add_option("", "--enable-optional-components",
dest="optional_components", metavar="NAMES",
help=("Enable the given space or semi-colon separated "
"list of optional components"),
action="store", default="")
parser.add_option_group(group)
(opts, args) = parser.parse_args()
# Determine the LLVM source path, if not given.
source_root = opts.source_root
if source_root:
if not os.path.exists(os.path.join(source_root, 'lib', 'IR',
'Function.cpp')):
parser.error('invalid LLVM source root: %r' % source_root)
else:
llvmbuild_path = os.path.dirname(__file__)
llvm_build_path = os.path.dirname(llvmbuild_path)
utils_path = os.path.dirname(llvm_build_path)
source_root = os.path.dirname(utils_path)
if not os.path.exists(os.path.join(source_root, 'lib', 'IR',
'Function.cpp')):
parser.error('unable to infer LLVM source root, please specify')
# Construct the LLVM project information.
llvmbuild_source_root = opts.llvmbuild_source_root or source_root
project_info = LLVMProjectInfo.load_from_path(
source_root, llvmbuild_source_root)
# Add the magic target based components.
add_magic_target_components(parser, project_info, opts)
# Validate the project component info.
project_info.validate_components()
# Print the component tree, if requested.
if opts.print_tree:
project_info.print_tree()
# Write out the components, if requested. This is useful for auto-upgrading
# the schema.
if opts.write_llvmbuild:
project_info.write_components(opts.write_llvmbuild)
# Write out the required library table, if requested.
if opts.write_library_table:
project_info.write_library_table(opts.write_library_table,
opts.optional_components)
# Write out the make fragment, if requested.
if opts.write_make_fragment:
project_info.write_make_fragment(opts.write_make_fragment,
opts.optional_components)
# Write out the cmake fragment, if requested.
if opts.write_cmake_fragment:
project_info.write_cmake_fragment(opts.write_cmake_fragment,
opts.optional_components)
if opts.write_cmake_exports_fragment:
project_info.write_cmake_exports_fragment(opts.write_cmake_exports_fragment,
opts.optional_components)
# Configure target definition files, if requested.
if opts.configure_target_def_files:
# Verify we were given a build root.
if not opts.build_root:
parser.error("must specify --build-root when using "
"--configure-target-def-file")
# Create the substitution list.
available_targets = [ci for ci in project_info.component_infos
if ci.type_name == 'TargetGroup']
substitutions = [
("@LLVM_ENUM_TARGETS@",
' '.join('LLVM_TARGET(%s)' % ci.name
for ci in available_targets)),
("@LLVM_ENUM_ASM_PRINTERS@",
' '.join('LLVM_ASM_PRINTER(%s)' % ci.name
for ci in available_targets
if ci.has_asmprinter)),
("@LLVM_ENUM_ASM_PARSERS@",
' '.join('LLVM_ASM_PARSER(%s)' % ci.name
for ci in available_targets
if ci.has_asmparser)),
("@LLVM_ENUM_DISASSEMBLERS@",
' '.join('LLVM_DISASSEMBLER(%s)' % ci.name
for ci in available_targets
if ci.has_disassembler))]
# Configure the given files.
for subpath in opts.configure_target_def_files:
inpath = os.path.join(source_root, subpath + '.in')
outpath = os.path.join(opts.build_root, subpath)
result = configutil.configure_file(inpath, outpath, substitutions)
if not result:
note("configured file %r hasn't changed" % outpath)
if __name__ == '__main__':
main()
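
# A hedged usage sketch (these flags are all defined in main() above; the
# paths are illustrative):
#
#   python llvm-build --source-root /path/to/llvm \
#       --native-target x86_64 --enable-targets 'X86;ARM' \
#       --write-library-table /path/to/build/LibraryDependencies.inc \
#       --write-cmake-fragment /path/to/build/LLVMBuild.cmake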
|
kaltsimon/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/malemotion.py
|
108
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
class MalemotionIE(InfoExtractor):
_VALID_URL = r'https?://malemotion\.com/video/(.+?)\.(?P<id>.+?)(#|$)'
_TEST = {
'url': 'http://malemotion.com/video/bete-de-concours.ltc',
'md5': '3013e53a0afbde2878bc39998c33e8a5',
'info_dict': {
'id': 'ltc',
'ext': 'mp4',
'title': 'Bête de Concours',
'age_limit': 18,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_url = compat_urllib_parse_unquote(self._search_regex(
r'<source type="video/mp4" src="(.+?)"', webpage, 'video URL'))
video_title = self._html_search_regex(
            r'<title>(.*?)</title>', webpage, 'title')
video_thumbnail = self._search_regex(
r'<video .+?poster="(.+?)"', webpage, 'thumbnail', fatal=False)
formats = [{
'url': video_url,
'ext': 'mp4',
'format_id': 'mp4',
'preference': 1,
}]
self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
'title': video_title,
'thumbnail': video_thumbnail,
'age_limit': 18,
}
|
hongbin/magnum
|
refs/heads/master
|
magnum/common/safe_utils.py
|
16
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions that won't produce circular imports."""
import inspect
def getcallargs(function, *args, **kwargs):
"""This is a simplified inspect.getcallargs (2.7+).
It should be replaced when python >= 2.7 is standard.
"""
keyed_args = {}
argnames, varargs, keywords, defaults = inspect.getargspec(function)
keyed_args.update(kwargs)
# NOTE(alaski) the implicit 'self' or 'cls' argument shows up in
# argnames but not in args or kwargs. Uses 'in' rather than '==' because
# some tests use 'self2'.
if 'self' in argnames[0] or 'cls' == argnames[0]:
# The function may not actually be a method or have __self__.
# Typically seen when it's stubbed with mox.
if inspect.ismethod(function) and hasattr(function, '__self__'):
keyed_args[argnames[0]] = function.__self__
else:
keyed_args[argnames[0]] = None
remaining_argnames = filter(lambda x: x not in keyed_args, argnames)
keyed_args.update(dict(zip(remaining_argnames, args)))
if defaults:
num_defaults = len(defaults)
for argname, value in zip(argnames[-num_defaults:], defaults):
if argname not in keyed_args:
keyed_args[argname] = value
return keyed_args
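
# A hedged example of the helper above (the function and arguments are
# illustrative):
#
#   def connect(self, host, port=22):
#       pass
#
#   getcallargs(connect, 'example.org')
#   # -> {'self': None, 'host': 'example.org', 'port': 22}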
|
mheap/ansible
|
refs/heads/devel
|
lib/ansible/plugins/action/ce_config.py
|
29
|
#
# Copyright 2015 Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.ce import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._task.args.get('src'):
try:
self._handle_template()
except ValueError as exc:
                return dict(failed=True, msg=to_text(exc))
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
if self._task.args.get('backup') and result.get('__backup__'):
# User requested backup and no error occurred in module.
# NOTE: If there is a parameter error, _backup key may not be in results.
filepath = self._write_backup(task_vars['inventory_hostname'],
result['__backup__'])
result['backup_path'] = filepath
# strip out any keys that have two leading and two trailing
# underscore characters
        for key in list(result.keys()):
if PRIVATE_KEYS_RE.match(key):
del result[key]
return result
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _write_backup(self, host, contents):
backup_path = self._get_working_path() + '/backup'
if not os.path.exists(backup_path):
os.mkdir(backup_path)
for fn in glob.glob('%s/%s*' % (backup_path, host)):
os.remove(fn)
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
open(filename, 'w').write(contents)
return filename
def _handle_template(self):
src = self._task.args.get('src')
working_path = self._get_working_path()
        if os.path.isabs(src) or urlsplit(src).scheme:
source = src
else:
source = self._loader.path_dwim_relative(working_path, 'templates', src)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError('path specified in src not found')
try:
with open(source, 'r') as f:
template_data = to_text(f.read())
except IOError:
            raise ValueError('unable to load src file')
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
if hasattr(self._task, "_block:"):
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
self._templar.environment.loader.searchpath = searchpath
self._task.args['src'] = self._templar.template(template_data)
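
# A hedged sketch of the backup path produced by _write_backup above (the
# hostname and timestamp are illustrative):
#
#   <working-path>/backup/switch01_config.2018-01-01@12:00:00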
|
GoogleCloudPlatform/cloud-opensource-python
|
refs/heads/master
|
compatibility_lib/compatibility_lib/test_fake_compatibility_store.py
|
1
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for compatibility_store.CompatibilityStore."""
import datetime
import unittest
from unittest import mock
from compatibility_lib import compatibility_store
from compatibility_lib import fake_compatibility_store
from compatibility_lib import package
PACKAGE_1 = package.Package("package1")
PACKAGE_2 = package.Package("package2")
PACKAGE_3 = package.Package("package3")
PACKAGE_4 = package.Package("package4")
RECENT_DEPS_1 = {
'package1': {
'current_time': datetime.datetime.utcnow(),
'installed_version': '1.13.0',
'installed_version_time': datetime.datetime.utcnow(),
'is_latest': 1,
'latest_version': '1.13.0',
'latest_version_time': datetime.datetime.utcnow(),
},
'six': {
'current_time': datetime.datetime.utcnow(),
'installed_version': '1.2.3',
'installed_version_time': datetime.datetime.utcnow(),
'is_latest': 1,
'latest_version': '1.2.3',
'latest_version_time': datetime.datetime.utcnow(),
}
}
OLD_DEP_VERSIONS_1 = {
'package1': {
'current_time': datetime.datetime.utcnow(),
'installed_version': '1.12.0',
'installed_version_time': datetime.datetime.utcnow(),
'is_latest': 1,
'latest_version': '1.12.0',
'latest_version_time': datetime.datetime.utcnow(),
},
'six': {
'current_time': datetime.datetime.utcnow(),
'installed_version': '1.2.2',
'installed_version_time': datetime.datetime.utcnow(),
'is_latest': 1,
'latest_version': '1.2.2',
'latest_version_time': datetime.datetime.utcnow(),
}
}
PACKAGE_1_PY2_CR = compatibility_store.CompatibilityResult(
packages=[PACKAGE_1],
python_major_version=2,
status=compatibility_store.Status.SUCCESS,
)
PACKAGE_1_PY2_OLD_CR = compatibility_store.CompatibilityResult(
packages=[PACKAGE_1],
python_major_version=2,
status=compatibility_store.Status.SUCCESS,
timestamp=datetime.datetime(2018, 1, 1),
)
PACKAGE_1_PY3_CR = compatibility_store.CompatibilityResult(
packages=[PACKAGE_1],
python_major_version=3,
status=compatibility_store.Status.SUCCESS,
)
PACKAGE_1_PY3_CR_WITH_RECENT_DEPS = compatibility_store.CompatibilityResult(
packages=[PACKAGE_1],
python_major_version=3,
status=compatibility_store.Status.SUCCESS,
dependency_info=RECENT_DEPS_1,
)
PACKAGE_1_PY3_CR_WITH_OLD_DEPS_VERS = compatibility_store.CompatibilityResult(
packages=[PACKAGE_1],
python_major_version=3,
status=compatibility_store.Status.SUCCESS,
dependency_info=OLD_DEP_VERSIONS_1,
)
PACKAGE_2_PY2_CR = compatibility_store.CompatibilityResult(
packages=[PACKAGE_2],
python_major_version=2,
status=compatibility_store.Status.SUCCESS,
)
PACKAGE_2_PY3_CR = compatibility_store.CompatibilityResult(
packages=[PACKAGE_2],
python_major_version=3,
status=compatibility_store.Status.SUCCESS,
)
PACKAGE_3_PY2_CR = compatibility_store.CompatibilityResult(
packages=[PACKAGE_3],
python_major_version=2,
status=compatibility_store.Status.SUCCESS,
)
PACKAGE_3_PY3_CR = compatibility_store.CompatibilityResult(
packages=[PACKAGE_3],
python_major_version=3,
status=compatibility_store.Status.SUCCESS,
)
PACKAGE_1_AND_2_PY2_CR = compatibility_store.CompatibilityResult(
packages=[PACKAGE_1, PACKAGE_2],
python_major_version=2,
status=compatibility_store.Status.SUCCESS,
)
PACKAGE_1_AND_2_PY2_OLD_CR = compatibility_store.CompatibilityResult(
packages=[PACKAGE_1, PACKAGE_2],
python_major_version=2,
status=compatibility_store.Status.SUCCESS,
timestamp=datetime.datetime(2018, 1, 1),
)
PACKAGE_1_AND_2_PY3_CR = compatibility_store.CompatibilityResult(
packages=[PACKAGE_1, PACKAGE_2],
python_major_version=3,
status=compatibility_store.Status.SUCCESS,
)
@mock.patch('compatibility_lib.fake_compatibility_store.configs.PKG_LIST',
[
'package1',
'package2',
'package3',
'package4',])
class TestCompatibilityStore(unittest.TestCase):
def setUp(self):
self._store = fake_compatibility_store.CompatibilityStore()
def test_get_self_compatibility(self):
crs = [PACKAGE_1_PY2_CR, PACKAGE_1_PY2_OLD_CR, PACKAGE_1_PY3_CR,
PACKAGE_2_PY2_CR]
self._store.save_compatibility_statuses(crs)
self.assertEqual(
frozenset(self._store.get_self_compatibility(PACKAGE_1)),
frozenset([PACKAGE_1_PY2_CR, PACKAGE_1_PY3_CR]))
self.assertEqual(
frozenset(self._store.get_self_compatibility(PACKAGE_2)),
frozenset([PACKAGE_2_PY2_CR]))
def test_get_self_compatibility_no_result(self):
crs = [PACKAGE_1_PY2_CR, PACKAGE_1_PY2_OLD_CR, PACKAGE_1_PY3_CR,
PACKAGE_2_PY2_CR,
PACKAGE_1_AND_2_PY2_CR, PACKAGE_1_AND_2_PY3_CR]
self._store.save_compatibility_statuses(crs)
self.assertFalse(
frozenset(self._store.get_self_compatibility(PACKAGE_3)))
def test_get_self_compatibilities(self):
crs = [PACKAGE_1_PY2_CR, PACKAGE_1_PY2_OLD_CR, PACKAGE_1_PY3_CR,
PACKAGE_2_PY2_CR,
PACKAGE_3_PY2_CR, PACKAGE_3_PY3_CR,
PACKAGE_1_AND_2_PY2_CR, PACKAGE_1_AND_2_PY3_CR]
self._store.save_compatibility_statuses(crs)
self.assertEqual(
self._store.get_self_compatibilities([PACKAGE_1, PACKAGE_2]),
{
PACKAGE_1: [PACKAGE_1_PY2_CR, PACKAGE_1_PY3_CR],
PACKAGE_2: [PACKAGE_2_PY2_CR]
})
def test_get_self_compatibilities_no_results(self):
crs = [PACKAGE_1_PY2_CR, PACKAGE_1_PY2_OLD_CR, PACKAGE_1_PY3_CR,
PACKAGE_2_PY2_CR,
PACKAGE_3_PY2_CR, PACKAGE_3_PY3_CR,
PACKAGE_1_AND_2_PY2_CR, PACKAGE_1_AND_2_PY3_CR]
self._store.save_compatibility_statuses(crs)
self.assertEqual(
self._store.get_self_compatibilities(
[PACKAGE_1, PACKAGE_2, PACKAGE_4]),
{
PACKAGE_1: [PACKAGE_1_PY2_CR, PACKAGE_1_PY3_CR],
PACKAGE_2: [PACKAGE_2_PY2_CR],
PACKAGE_4: [],
})
def test_get_pair_compatibility(self):
crs = [PACKAGE_1_AND_2_PY2_CR, PACKAGE_1_AND_2_PY2_OLD_CR,
PACKAGE_1_AND_2_PY3_CR]
self._store.save_compatibility_statuses(crs)
self.assertEqual(
frozenset(
self._store.get_pair_compatibility([PACKAGE_1, PACKAGE_2])),
frozenset([PACKAGE_1_AND_2_PY2_CR, PACKAGE_1_AND_2_PY3_CR]))
def test_get_pair_compatibility_no_results(self):
crs = [PACKAGE_1_AND_2_PY2_CR, PACKAGE_1_AND_2_PY3_CR]
self._store.save_compatibility_statuses(crs)
self.assertFalse(
frozenset(
self._store.get_pair_compatibility([PACKAGE_1, PACKAGE_3])))
def test_get_pairwise_compatibility_for_package(self):
crs = [PACKAGE_1_AND_2_PY2_CR, PACKAGE_1_AND_2_PY2_OLD_CR,
PACKAGE_1_AND_2_PY3_CR]
self._store.save_compatibility_statuses(crs)
actual_results = self._store.get_pairwise_compatibility_for_package(
PACKAGE_1.install_name)
expected_results = {
frozenset([PACKAGE_1, PACKAGE_2]):
[PACKAGE_1_AND_2_PY2_CR, PACKAGE_1_AND_2_PY3_CR]}
self.assertEqual(actual_results.keys(), expected_results.keys())
for actual_key, actual_results in actual_results.items():
self.assertEqual(frozenset(actual_results),
frozenset(expected_results[actual_key]))
def test_get_pairwise_compatibility_for_package_no_results(self):
crs = [PACKAGE_1_AND_2_PY2_CR, PACKAGE_1_AND_2_PY2_OLD_CR,
PACKAGE_1_AND_2_PY3_CR, PACKAGE_3_PY2_CR, PACKAGE_3_PY3_CR]
self._store.save_compatibility_statuses(crs)
actual_results = self._store.get_pairwise_compatibility_for_package(
PACKAGE_3.install_name)
self.assertFalse(actual_results)
def test_get_compatibility_combinations(self):
crs = [PACKAGE_1_AND_2_PY2_CR, PACKAGE_1_AND_2_PY2_OLD_CR,
PACKAGE_1_AND_2_PY3_CR]
self._store.save_compatibility_statuses(crs)
self.assertEqual(
frozenset(self._store.get_compatibility_combinations(
[PACKAGE_1, PACKAGE_2])),
frozenset({frozenset([PACKAGE_1, PACKAGE_2]): [
PACKAGE_1_AND_2_PY2_CR, PACKAGE_1_AND_2_PY3_CR]})
)
def test_get_compatibility_combinations_no_results(self):
crs = [PACKAGE_1_AND_2_PY2_CR, PACKAGE_1_AND_2_PY2_OLD_CR,
PACKAGE_1_AND_2_PY3_CR]
self._store.save_compatibility_statuses(crs)
self.assertEqual(
frozenset(self._store.get_compatibility_combinations(
[PACKAGE_1, PACKAGE_2, PACKAGE_3])),
frozenset({
frozenset([PACKAGE_1, PACKAGE_2]): [PACKAGE_1_AND_2_PY2_CR,
PACKAGE_1_AND_2_PY3_CR],
frozenset([PACKAGE_1, PACKAGE_3]): [],
frozenset([PACKAGE_2, PACKAGE_3]): [],
}))
def test_get_dependency_info(self):
self._store.save_compatibility_statuses(
[PACKAGE_1_PY3_CR_WITH_RECENT_DEPS])
self.assertEqual(
self._store.get_dependency_info('package1'),
RECENT_DEPS_1)
def test_get_dependency_info_old_and_new(self):
self._store.save_compatibility_statuses(
[PACKAGE_1_PY3_CR_WITH_OLD_DEPS_VERS,
PACKAGE_1_PY3_CR_WITH_RECENT_DEPS])
self.assertEqual(
self._store.get_dependency_info('package1'),
RECENT_DEPS_1)
|
jandd/python-pkiutils
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup for pkiutils.
"""
__author__ = 'Jan Dittberner'
from setuptools import setup, find_packages
with open('README.rst', 'r') as readme:
DESCRIPTION = readme.read()
DESCRIPTION += "\n"
with open('LICENSE', 'r') as license_file:
    DESCRIPTION += license_file.read()
setup(
name='pkiutils',
description='a set of public key infrastructure utilities',
long_description=DESCRIPTION,
install_requires=['pycrypto', 'pyasn1', 'pyasn1_modules', 'netaddr'],
    version='0.1',
author=__author__,
author_email='jan@dittberner.info',
url='https://github.com/jandd/python-pkiutils',
packages=find_packages(),
license='MIT',
)
|