repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
Cojacfar/Maker | refs/heads/master | comm/lib/python2.7/site-packages/django/contrib/gis/admin/widgets.py | 111 | import logging
from django.forms.widgets import Textarea
from django.template import loader, Context
from django.utils import six
from django.utils import translation
from django.contrib.gis.gdal import OGRException
from django.contrib.gis.geos import GEOSGeometry, GEOSException
# Creating a template context that contains Django settings
# values needed by admin map templates (currently only the text
# direction of the active language, used to lay out the map UI).
geo_context = Context({'LANGUAGE_BIDI' : translation.get_language_bidi()})
# Module-level logger shared by the GIS admin widgets.
logger = logging.getLogger('django.contrib.gis')
class OpenLayersWidget(Textarea):
    """
    Textarea widget that renders an OpenLayers map for editing a
    geometry, using the geometry's WKT representation.
    """
    def render(self, name, value, attrs=None):
        # Fold any extra attributes into the widget's template parameters.
        if attrs:
            self.params.update(attrs)

        # Default the WKT to an empty string -- the JavaScript side tests
        # this value to decide which map interface to construct.
        self.params['wkt'] = ''

        # A plain string can arrive here when validation failed on another
        # field; rebuild the geometry object from it.
        if isinstance(value, six.string_types):
            try:
                value = GEOSGeometry(value)
            except (GEOSException, ValueError) as err:
                logger.error(
                    "Error creating geometry from value '%s' (%s)" % (
                        value, err)
                )
                value = None

        # Discard geometries whose type does not match the field, unless
        # the field accepts any geometry type.
        if (value and value.geom_type.upper() != self.geom_type and
                self.geom_type != 'GEOMETRY'):
            value = None

        # Map options for the template.
        self.params['map_options'] = self.map_options()

        # The JS module name is derived from the field name (passed in
        # via `name`, not the 'field' attr); dashes are not valid in JS
        # identifiers, so they are swapped for underscores.
        self.params['name'] = name
        self.params['module'] = 'geodjango_%s' % name.replace('-', '_')

        if value:
            srid = self.params['srid']
            if value.srid != srid:
                # Transform to the projection used on the OpenLayers map.
                try:
                    ogr = value.ogr
                    ogr.transform(srid)
                    wkt = ogr.wkt
                except OGRException as err:
                    logger.error(
                        "Error transforming geometry from srid '%s' to srid '%s' (%s)" % (
                            value.srid, srid, err)
                    )
                    wkt = ''
            else:
                wkt = value.wkt

            # Hand the (possibly transformed) WKT to the template.
            self.params['wkt'] = wkt

        return loader.render_to_string(self.template, self.params,
                                       context_instance=geo_context)

    def map_options(self):
        "Builds the map options hash for the OpenLayers template."
        # JavaScript construction helpers for Bounds and Projection.
        def ol_bounds(extent):
            return 'new OpenLayers.Bounds(%s)' % str(extent)

        def ol_projection(srid):
            return 'new OpenLayers.Projection("EPSG:%s")' % srid

        # Triples of (template parameter, OpenLayers option name, kind).
        map_types = [('srid', 'projection', 'srid'),
                     ('display_srid', 'displayProjection', 'srid'),
                     ('units', 'units', str),
                     ('max_resolution', 'maxResolution', float),
                     ('max_extent', 'maxExtent', 'bounds'),
                     ('num_zoom', 'numZoomLevels', int),
                     ('max_zoom', 'maxZoomLevels', int),
                     ('min_zoom', 'minZoomLevel', int),
                     ]

        # Build the options hash, skipping parameters that are unset or
        # falsy, and formatting each according to its kind.
        options = {}
        for param_name, js_name, option_type in map_types:
            param = self.params.get(param_name, False)
            if not param:
                continue
            if option_type == 'srid':
                options[js_name] = ol_projection(param)
            elif option_type == 'bounds':
                options[js_name] = ol_bounds(param)
            elif option_type in (float, int):
                options[js_name] = param
            elif option_type in (str,):
                options[js_name] = '"%s"' % param
            else:
                raise TypeError
        return options
|
krsjoseph/youtube-dl | refs/heads/master | youtube_dl/extractor/ntvru.py | 124 | # encoding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
clean_html,
xpath_text,
int_or_none,
)
class NTVRuIE(InfoExtractor):
    IE_NAME = 'ntv.ru'
    _VALID_URL = r'http://(?:www\.)?ntv\.ru/(?P<id>.+)'

    _TESTS = [
        {
            'url': 'http://www.ntv.ru/novosti/863142/',
            'md5': 'ba7ea172a91cb83eb734cad18c10e723',
            'info_dict': {
                'id': '746000',
                'ext': 'mp4',
                'title': 'Командующий Черноморским флотом провел переговоры в штабе ВМС Украины',
                'description': 'Командующий Черноморским флотом провел переговоры в штабе ВМС Украины',
                'thumbnail': 're:^http://.*\.jpg',
                'duration': 136,
            },
        },
        {
            'url': 'http://www.ntv.ru/video/novosti/750370/',
            'md5': 'adecff79691b4d71e25220a191477124',
            'info_dict': {
                'id': '750370',
                'ext': 'mp4',
                'title': 'Родные пассажиров пропавшего Boeing не верят в трагический исход',
                'description': 'Родные пассажиров пропавшего Boeing не верят в трагический исход',
                'thumbnail': 're:^http://.*\.jpg',
                'duration': 172,
            },
        },
        {
            'url': 'http://www.ntv.ru/peredacha/segodnya/m23700/o232416',
            'md5': '82dbd49b38e3af1d00df16acbeab260c',
            'info_dict': {
                'id': '747480',
                'ext': 'mp4',
                'title': '«Сегодня». 21 марта 2014 года. 16:00',
                'description': '«Сегодня». 21 марта 2014 года. 16:00',
                'thumbnail': 're:^http://.*\.jpg',
                'duration': 1496,
            },
        },
        {
            'url': 'http://www.ntv.ru/kino/Koma_film',
            'md5': 'f825770930937aa7e5aca0dc0d29319a',
            'info_dict': {
                'id': '1007609',
                'ext': 'mp4',
                'title': 'Остросюжетный фильм «Кома»',
                'description': 'Остросюжетный фильм «Кома»',
                'thumbnail': 're:^http://.*\.jpg',
                'duration': 5592,
            },
        },
        {
            'url': 'http://www.ntv.ru/serial/Delo_vrachey/m31760/o233916/',
            'md5': '9320cd0e23f3ea59c330dc744e06ff3b',
            'info_dict': {
                'id': '751482',
                'ext': 'mp4',
                'title': '«Дело врачей»: «Деревце жизни»',
                'description': '«Дело врачей»: «Деревце жизни»',
                'thumbnail': 're:^http://.*\.jpg',
                'duration': 2590,
            },
        },
    ]

    # Patterns tried in order to dig the numeric video id out of the page.
    _VIDEO_ID_REGEXES = [
        r'<meta property="og:url" content="http://www\.ntv\.ru/video/(\d+)',
        r'<video embed=[^>]+><id>(\d+)</id>',
        r'<video restriction[^>]+><key>(\d+)</key>',
    ]

    def _real_extract(self, url):
        """Fetch the page, locate the player XML and build the format list."""
        page_id = self._match_id(url)
        webpage = self._download_webpage(url, page_id)

        # The id embedded in the page, not the path component of the URL.
        video_id = self._html_search_regex(
            self._VIDEO_ID_REGEXES, webpage, 'video id')

        player = self._download_xml(
            'http://www.ntv.ru/vi%s/' % video_id,
            video_id, 'Downloading video XML')
        title = clean_html(xpath_text(player, './data/title', 'title', fatal=True))
        description = clean_html(xpath_text(player, './data/description', 'description'))

        video = player.find('./data/video')
        # Prefer the canonical id from the player XML.
        video_id = xpath_text(video, './id', 'video id')
        thumbnail = xpath_text(video, './splash', 'thumbnail')
        duration = int_or_none(xpath_text(video, './totaltime', 'duration'))
        view_count = int_or_none(xpath_text(video, './views', 'view count'))

        # Access token required by the media servers.
        token = self._download_webpage(
            'http://stat.ntv.ru/services/access/token',
            video_id, 'Downloading access token')

        formats = []
        for fmt in ('', 'hi', 'webm'):
            file_node = video.find('./%sfile' % fmt)
            if file_node is None:
                continue
            size_node = video.find('./%ssize' % fmt)
            formats.append({
                'url': 'http://media2.ntv.ru/vod/%s&tok=%s' % (file_node.text, token),
                'filesize': int_or_none(size_node.text if size_node is not None else None),
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'view_count': view_count,
            'formats': formats,
        }
|
stephane-martin/salt-debian-packaging | refs/heads/master | salt-2016.3.2/salt/netapi/rest_tornado/saltnado.py | 2 | # encoding: utf-8
from __future__ import absolute_import, print_function
'''
A non-blocking REST API for Salt
================================
.. py:currentmodule:: salt.netapi.rest_tornado.saltnado
:depends: - tornado Python module
:configuration: All authentication is done through Salt's :ref:`external auth
<acl-eauth>` system which requires additional configuration not described
here.
In order to run rest_tornado with the salt-master
add the following to the Salt master config file.
.. code-block:: yaml
rest_tornado:
# can be any port
port: 8000
# address to bind to (defaults to 0.0.0.0)
address: 0.0.0.0
# socket backlog
backlog: 128
ssl_crt: /etc/pki/api/certs/server.crt
# no need to specify ssl_key if cert and key
# are in one single file
ssl_key: /etc/pki/api/certs/server.key
debug: False
disable_ssl: False
webhook_disable_auth: False
cors_origin: null
.. _rest_tornado-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`SaltAuthHandler` URL.
The token may be sent in one of two ways:
* Include a custom header named :mailheader:`X-Auth-Token`.
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
.. seealso:: You can bypass the session handling via the :py:class:`RunSaltAPIHandler` URL.
CORS
----
rest_tornado supports Cross-site HTTP requests out of the box. It is by default
deactivated and controlled by the `cors_origin` config key.
You can allow all origins by settings `cors_origin` to `*`.
You can allow only one origin with this configuration:
.. code-block:: yaml
rest_tornado:
cors_origin: http://salt.yourcompany.com
You can also be more specific and select only a few allowed origins by using
a list. For example:
.. code-block:: yaml
rest_tornado:
cors_origin:
- http://salt.yourcompany.com
- http://salt-preprod.yourcompany.com
The format for origin are full URL, with both scheme and port if not standard.
In this case, rest_tornado will check if the Origin header is in the allowed
list; if it is, the origin is allowed. Otherwise it returns nothing,
effectively preventing the origin to make request.
For reference, CORS is a mechanism used by browser to allow (or disallow)
requests made from browser from a different origin than salt-api. It's
complementary to Authentication and mandatory only if you plan to use
a salt client developed as a Javascript browser application.
Usage
-----
Commands are sent to a running Salt master via this module by sending HTTP
requests to the URLs detailed below.
.. admonition:: Content negotiation
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, x-www-form-urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
Data sent in :http:method:`post` and :http:method:`put` requests must be in
the format of a list of lowstate dictionaries. This allows multiple commands to
be executed in a single HTTP request.
.. glossary::
lowstate
A dictionary containing various keys that instruct Salt which command
to run, where that command lives, any parameters for that command, any
authentication credentials, what returner to use, etc.
Salt uses the lowstate data format internally in many places to pass
command data between functions. Salt also uses lowstate for the
:ref:`LocalClient() <python-api>` Python API interface.
The following example (in JSON format) causes Salt to execute two commands::
[{
"client": "local",
"tgt": "*",
"fun": "test.fib",
"arg": ["10"]
},
{
"client": "runner",
"fun": "jobs.lookup_jid",
"jid": "20130603122505459265"
}]
Multiple commands in a Salt API request will be executed in serial and makes
no guarantees that all commands will run. Meaning that if test.fib (from the
example above) had an exception, the API would still execute "jobs.lookup_jid".
Responses to these lowstates are an in-order list of dicts containing the
return data, a yaml response could look like::
- ms-1: true
ms-2: true
- ms-1: foo
ms-2: bar
In the event of an exception while executing a command the return for that lowstate
will be a string, for example if no minions matched the first lowstate we would get
a return like::
- No minions matched the target. No command was sent, no jid was assigned.
- ms-1: true
ms-2: true
.. admonition:: x-www-form-urlencoded
Sending JSON or YAML in the request body is simple and most flexible,
however sending data in urlencoded format is also supported with the
caveats below. It is the default format for HTML forms, many JavaScript
libraries, and the :command:`curl` command.
For example, the equivalent to running ``salt '*' test.ping`` is sending
``fun=test.ping&arg&client=local&tgt=*`` in the HTTP request body.
Caveats:
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
parameters. E.g., ``arg=one``, ``arg=two`` will be sent as ``arg[]=one``,
``arg[]=two``. This is not supported; send JSON or YAML instead.
.. |req_token| replace:: a session token from :py:class:`~SaltAuthHandler`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |400| replace:: bad request
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
.. |500| replace:: internal server error
''' # pylint: disable=W0105
# pylint: disable=W0232
# Import Python libs
import time
import math
import fnmatch
import logging
from copy import copy
from collections import defaultdict
# pylint: disable=import-error
import cgi
import yaml
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.gen
from tornado.concurrent import Future
from zmq.eventloop import ioloop
import salt.ext.six as six
# pylint: enable=import-error
# instantiate the zmq IOLoop (specialized poller)
ioloop.install()
# salt imports
import salt.netapi
import salt.utils
import salt.utils.event
from salt.utils.event import tagify
import salt.client
import salt.runner
import salt.auth
from salt.exceptions import EauthAuthenticationError
json = salt.utils.import_json()
logger = logging.getLogger()
# The clients rest_cherrypi supports. We want to mimic the interface, but not
# necessarily use the same API under the hood
# # all of these require coordinating minion stuff
# - "local" (done)
# - "local_async" (done)
# - "local_batch" (done)
# # master side
# - "runner" (done)
# - "wheel" (need async api...)
class SaltClientsMixIn(object):
    '''
    MixIn class holding all of the salt clients that the API needs.

    The client table is created lazily on first access and cached on the
    class itself, so every handler instance shares the same clients.
    '''
    # TODO: load this proactively, instead of waiting for a request
    # Class-level cache for the client table; name-mangled, so it must be
    # accessed as ``SaltClientsMixIn.__saltclients`` inside this class.
    __saltclients = None
    @property
    def saltclients(self):
        if SaltClientsMixIn.__saltclients is None:
            local_client = salt.client.get_local_client(mopts=self.application.opts)
            # TODO: refreshing clients using cachedict
            SaltClientsMixIn.__saltclients = {
                'local': local_client.run_job,
                # not the actual client we'll use.. but its what we'll use to get args
                'local_batch': local_client.cmd_batch,
                'local_async': local_client.run_job,
                'runner': salt.runner.RunnerClient(opts=self.application.opts).cmd_async,
                'runner_async': None,  # empty, since we use the same client as `runner`
            }
        return SaltClientsMixIn.__saltclients
# Header clients may use to pass the session token with each request.
AUTH_TOKEN_HEADER = 'X-Auth-Token'
# Cookie fallback for clients (e.g. browsers) that handle cookies natively.
AUTH_COOKIE_NAME = 'session_id'
class TimeoutException(Exception):
    '''
    Set as the exception on futures that timed out waiting for an event
    (see EventListener._timeout_future).
    '''
    pass
class Any(Future):
    '''
    A future that resolves as soon as any one of the wrapped futures
    completes; its result is the completed future itself.
    '''
    def __init__(self, futures):  # pylint: disable=E1002
        super(Any, self).__init__()
        for child in futures:
            child.add_done_callback(self.done_callback)

    def done_callback(self, future):
        # The first wrapped future to finish wins; completions after that
        # are ignored so the result is never set twice.
        if self.done():
            return
        self.set_result(future)
class EventListener(object):
    '''
    Class responsible for listening to the salt master event bus and updating
    futures. This is the core of what makes this async, this allows us to do
    non-blocking work in the main processes and "wait" for an event to happen
    '''
    def __init__(self, mod_opts, opts):
        self.mod_opts = mod_opts
        self.opts = opts
        # Event bus subscription, wired into the current tornado IOLoop.
        self.event = salt.utils.event.get_event(
            'master',
            opts['sock_dir'],
            opts['transport'],
            opts=opts,
            listen=True,
            io_loop=tornado.ioloop.IOLoop.current()
        )
        # tag -> list of futures
        self.tag_map = defaultdict(list)
        # request_obj -> list of (tag, future)
        self.request_map = defaultdict(list)
        # map of future -> timeout_callback
        self.timeout_map = {}
        # Every received event is dispatched through this callback.
        self.event.set_event_handler(self._handle_event_socket_recv)
    def clean_timeout_futures(self, request):
        '''
        Remove all futures that were waiting for request `request` since it is done waiting
        '''
        if request not in self.request_map:
            return
        for tag, future in self.request_map[request]:
            # timeout the future
            self._timeout_future(tag, future)
            # remove the timeout
            if future in self.timeout_map:
                tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future])
                del self.timeout_map[future]
        del self.request_map[request]
    def get_event(self,
                  request,
                  tag='',
                  callback=None,
                  timeout=None
                  ):
        '''
        Get an event (async of course) return a future that will get it later

        request: the handler waiting on the event; used to clean up its
            futures when the request finishes.
        tag: event tag prefix to match against incoming events.
        callback: optional callable scheduled on the IOLoop once the
            future completes.
        timeout: seconds after which the future is failed with
            TimeoutException.
        '''
        # if the request finished, no reason to allow event fetching, since we
        # can't send back to the client
        if request._finished:
            future = Future()
            future.set_exception(TimeoutException())
            return future
        future = Future()
        if callback is not None:
            def handle_future(future):
                tornado.ioloop.IOLoop.current().add_callback(callback, future)
            future.add_done_callback(handle_future)
        # add this tag and future to the callbacks
        self.tag_map[tag].append(future)
        self.request_map[request].append((tag, future))
        if timeout:
            timeout_future = tornado.ioloop.IOLoop.current().call_later(timeout, self._timeout_future, tag, future)
            self.timeout_map[future] = timeout_future
        return future
    def _timeout_future(self, tag, future):
        '''
        Timeout a specific future
        '''
        if tag not in self.tag_map:
            return
        if not future.done():
            future.set_exception(TimeoutException())
        self.tag_map[tag].remove(future)
        # Drop empty tag buckets so the map doesn't grow without bound.
        if len(self.tag_map[tag]) == 0:
            del self.tag_map[tag]
    def _handle_event_socket_recv(self, raw):
        '''
        Callback for events on the event sub socket
        '''
        mtag, data = self.event.unpack(raw, self.event.serial)
        # see if we have any futures that need this info:
        for tag_prefix, futures in six.iteritems(self.tag_map):
            if mtag.startswith(tag_prefix):
                for future in futures:
                    if future.done():
                        continue
                    future.set_result({'data': data, 'tag': mtag})
                    self.tag_map[tag_prefix].remove(future)
                    # A completed future no longer needs its timeout.
                    if future in self.timeout_map:
                        tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future])
                        del self.timeout_map[future]
# TODO: move to a utils function within salt-- the batching stuff is a bit tied together
def get_batch_size(batch, num_minions):
    '''
    Return the number of minions that should be in flight for a batched
    command.

    batch: string -- either an absolute count ('3') or a percentage of
        the total minion count ('10%' or '%10')
    num_minions: int -- total number of targeted minions

    Returns an int batch size; a fractional percentage result is rounded
    up so the batch is never 0. On unparseable input it prints a warning
    and (implicitly) returns None, preserving the original best-effort
    behavior.
    '''
    def percent_of(pct):
        # Convert a percentage into an absolute number of minions.
        return pct / 100.0 * num_minions

    try:
        if '%' in batch:
            res = percent_of(float(batch.strip('%')))
            if res < 1:
                # Round fractions up so we always send to at least one minion.
                return int(math.ceil(res))
            else:
                return int(res)
        else:
            return int(batch)
    except ValueError:
        # Fix: the original implicitly-concatenated message was missing a
        # space and printed "...in the formof %10, 10% or 3".
        print(('Invalid batch data sent: {0}\nData must be in the form '
               'of %10, 10% or 3').format(batch))
class BaseSaltAPIHandler(tornado.web.RequestHandler, SaltClientsMixIn):  # pylint: disable=W0223
    '''
    Shared functionality for the salt-api handlers: content negotiation,
    (de)serialization, auth-token lookup, lowstate parsing and CORS.
    '''
    # Response serializers keyed by content-type, in preference order.
    ct_out_map = (
        ('application/json', json.dumps),
        ('application/x-yaml', yaml.safe_dump),
    )

    def _verify_client(self, client):
        '''
        Verify that the client is in fact one we have
        '''
        if client not in self.saltclients:
            self.set_status(400)
            self.write("400 Invalid Client: Client not found in salt clients")
            self.finish()

    def initialize(self):
        '''
        Initialize the handler before requests are called
        '''
        # The EventListener is shared application-wide; create it lazily
        # on the first request that needs it.
        if not hasattr(self.application, 'event_listener'):
            logger.critical('init a listener')
            self.application.event_listener = EventListener(
                self.application.mod_opts,
                self.application.opts,
            )

    @property
    def token(self):
        '''
        The token used for the request
        '''
        # find the token (header takes precedence over the cookie)
        if AUTH_TOKEN_HEADER in self.request.headers:
            return self.request.headers[AUTH_TOKEN_HEADER]
        else:
            return self.get_cookie(AUTH_COOKIE_NAME)

    def _verify_auth(self):
        '''
        Boolean whether the request is auth'd
        '''
        return self.token and bool(self.application.auth.get_tok(self.token))

    def prepare(self):
        '''
        Run before get/posts etc. Pre-flight checks:
            - verify that we can speak back to them (compatible accept header)
        '''
        # Find an acceptable content-type
        accept_header = self.request.headers.get('Accept', '*/*')
        # Ignore any parameter, including q (quality) one
        parsed_accept_header = [cgi.parse_header(h)[0] for h in accept_header.split(',')]

        def find_acceptable_content_type(parsed_accept_header):
            for media_range in parsed_accept_header:
                for content_type, dumper in self.ct_out_map:
                    if fnmatch.fnmatch(content_type, media_range):
                        return content_type, dumper
            return None, None

        content_type, dumper = find_acceptable_content_type(parsed_accept_header)

        # better return message?
        if not content_type:
            self.send_error(406)

        self.content_type = content_type
        self.dumper = dumper

        # do the common parts
        self.start = time.time()
        self.connected = True

        self.lowstate = self._get_lowstate()

    def timeout_futures(self):
        '''
        timeout a session
        '''
        # TODO: set a header or something??? so we know it was a timeout
        self.application.event_listener.clean_timeout_futures(self)

    def on_finish(self):
        '''
        When the job has been done, lets cleanup
        '''
        # timeout all the futures
        self.timeout_futures()

    def on_connection_close(self):
        '''
        If the client disconnects, lets close out
        '''
        self.finish()

    def serialize(self, data):
        '''
        Serlialize the output based on the Accept header
        '''
        self.set_header('Content-Type', self.content_type)
        return self.dumper(data)

    def _form_loader(self, _):
        '''
        function to get the data from the urlencoded forms
        ignore the data passed in and just get the args from wherever they are
        '''
        data = {}
        for key, val in six.iteritems(self.request.arguments):
            # Collapse single-value lists to the bare value.
            if len(val) == 1:
                data[key] = val[0]
            else:
                data[key] = val
        return data

    def deserialize(self, data):
        '''
        Deserialize the data based on request content type headers
        '''
        ct_in_map = {
            'application/x-www-form-urlencoded': self._form_loader,
            'application/json': json.loads,
            'application/x-yaml': yaml.safe_load,
            'text/yaml': yaml.safe_load,
            # because people are terrible and don't mean what they say
            'text/plain': json.loads
        }

        try:
            # Use cgi.parse_header to correctly separate parameters from value
            header = cgi.parse_header(self.request.headers['Content-Type'])
            value, parameters = header
            return ct_in_map[value](data)
        except KeyError:
            # Missing or unsupported Content-Type.
            self.send_error(406)
        except ValueError:
            # Body could not be parsed in the declared format.
            self.send_error(400)

    def _get_lowstate(self):
        '''
        Format the incoming data into a lowstate object
        '''
        if not self.request.body:
            return
        data = self.deserialize(self.request.body)
        self.raw_data = copy(data)

        if self.request.headers.get('Content-Type') == 'application/x-www-form-urlencoded':
            # Form posts carry a single command; normalize 'arg' to a list
            # and wrap the whole thing as a one-element lowstate.
            if 'arg' in data and not isinstance(data['arg'], list):
                data['arg'] = [data['arg']]
            lowstate = [data]
        else:
            lowstate = data
        return lowstate

    def set_default_headers(self):
        '''
        Set default CORS headers
        '''
        mod_opts = self.application.mod_opts

        if mod_opts.get('cors_origin'):
            origin = self.request.headers.get('Origin')
            allowed_origin = _check_cors_origin(origin, mod_opts['cors_origin'])

            if allowed_origin:
                self.set_header("Access-Control-Allow-Origin", allowed_origin)

    def options(self, *args, **kwargs):
        '''
        Return CORS headers for preflight requests
        '''
        # Allow X-Auth-Token in requests
        request_headers = self.request.headers.get('Access-Control-Request-Headers')
        # Fix: the header is absent when the preflight requests no custom
        # headers; calling .split() on None raised AttributeError and
        # turned the preflight into a 500.
        allowed_headers = request_headers.split(',') if request_headers else []

        # Filter allowed header here if needed.

        # Allow request headers
        self.set_header('Access-Control-Allow-Headers', ','.join(allowed_headers))

        # Allow X-Auth-Token in responses
        self.set_header('Access-Control-Expose-Headers', 'X-Auth-Token')

        # Allow all methods
        self.set_header('Access-Control-Allow-Methods', 'OPTIONS, GET, POST')

        self.set_status(204)
        self.finish()
class SaltAuthHandler(BaseSaltAPIHandler):  # pylint: disable=W0223
    '''
    Handler for login requests
    '''
    def get(self):
        '''
        All logins are done over post, this is a parked endpoint

        .. http:get:: /login

            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/login

        .. code-block:: http

            GET /login HTTP/1.1
            Host: localhost:8000
            Accept: application/json

        **Example response:**

        .. code-block:: http

            HTTP/1.1 401 Unauthorized
            Content-Type: application/json
            Content-Length: 58

            {"status": "401 Unauthorized", "return": "Please log in"}
        '''
        self.set_status(401)
        self.set_header('WWW-Authenticate', 'Session')
        ret = {'status': '401 Unauthorized',
               'return': 'Please log in'}
        self.write(self.serialize(ret))
    # TODO: make async? Underlying library isn't... and we ARE making disk calls :(
    def post(self):
        '''
        :ref:`Authenticate <rest_tornado-auth>` against Salt's eauth system

        .. http:post:: /login

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|

            :form eauth: the eauth backend configured for the user
            :form username: username
            :form password: password

            :status 200: |200|
            :status 400: |400|
            :status 401: |401|
            :status 406: |406|
            :status 500: |500|

        **Example request:**

        .. code-block:: bash

            curl -si localhost:8000/login \\
                    -H "Accept: application/json" \\
                    -d username='saltuser' \\
                    -d password='saltpass' \\
                    -d eauth='pam'

        .. code-block:: http

            POST / HTTP/1.1
            Host: localhost:8000
            Content-Length: 42
            Content-Type: application/x-www-form-urlencoded
            Accept: application/json

            username=saltuser&password=saltpass&eauth=pam

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Type: application/json
            Content-Length: 206
            X-Auth-Token: 6d1b722e
            Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/

            {"return": {
                "token": "6d1b722e",
                "start": 1363805943.776223,
                "expire": 1363849143.776224,
                "user": "saltuser",
                "eauth": "pam",
                "perms": [
                    "grains.*",
                    "status.*",
                    "sys.*",
                    "test.*"
                ]
            }}
        '''
        try:
            # Pull the three required credential fields out of the body.
            request_payload = self.deserialize(self.request.body)
            if not isinstance(request_payload, dict):
                self.send_error(400)
                return
            creds = {'username': request_payload['username'],
                     'password': request_payload['password'],
                     'eauth': request_payload['eauth'],
                     }
        # if any of the args are missing, its a bad request
        except KeyError:
            self.send_error(400)
            return
        token = self.application.auth.mk_token(creds)
        if 'token' not in token:
            # TODO: nicer error message
            # 'Could not authenticate using provided credentials')
            self.send_error(401)
            # return since we don't want to execute any more
            return
        # Grab eauth config for the current backend for the current user
        try:
            perms = self.application.opts['external_auth'][token['eauth']][token['name']]
        # If we can't find the creds, then they aren't authorized
        except KeyError:
            self.send_error(401)
            return
        except (AttributeError, IndexError):
            # NOTE(review): uses the root ``logging`` module directly rather
            # than the module-level ``logger`` used elsewhere in this file.
            logging.debug("Configuration for external_auth malformed for "
                          "eauth '{0}', and user '{1}'."
                          .format(token.get('eauth'), token.get('name')), exc_info=True)
            # TODO better error -- 'Configuration for external_auth could not be read.'
            self.send_error(500)
            return
        # Authentication succeeded -- hand the token details back.
        ret = {'return': [{
            'token': token['token'],
            'expire': token['expire'],
            'start': token['start'],
            'user': token['name'],
            'eauth': token['eauth'],
            'perms': perms,
        }]}
        self.write(self.serialize(ret))
class SaltAPIHandler(BaseSaltAPIHandler, SaltClientsMixIn): # pylint: disable=W0223
'''
Main API handler for base "/"
'''
def get(self):
'''
An enpoint to determine salt-api capabilities
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: http
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Legnth: 83
{"clients": ["local", "local_batch", "local_async", "runner", "runner_async"], "return": "Welcome"}
'''
ret = {"clients": list(self.saltclients.keys()),
"return": "Welcome"}
self.write(self.serialize(ret))
    @tornado.web.asynchronous
    def post(self):
        '''
        Send one or more Salt commands (lowstates) in the request body

        .. http:post:: /

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|

            :resheader Content-Type: |res_ct|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

            :term:`lowstate` data describing Salt commands must be sent in the
            request body.

        **Example request:**

        .. code-block:: bash

            curl -si https://localhost:8000 \\
                    -H "Accept: application/x-yaml" \\
                    -H "X-Auth-Token: d40d1e1e" \\
                    -d client=local \\
                    -d tgt='*' \\
                    -d fun='test.ping' \\
                    -d arg

        .. code-block:: http

            POST / HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            X-Auth-Token: d40d1e1e
            Content-Length: 36
            Content-Type: application/x-www-form-urlencoded

            fun=test.ping&arg&client=local&tgt=*

        **Example response:**

        Responses are an in-order list of the lowstate's return data. In the
        event of an exception running a command the return will be a string
        instead of a mapping.

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 200
            Allow: GET, HEAD, POST
            Content-Type: application/x-yaml

            return:
            - ms-0: true
                ms-1: true
                ms-2: true
                ms-3: true
                ms-4: true

        .. admonition:: multiple commands

            Note that if multiple :term:`lowstate` structures are sent, the Salt
            API will execute them in serial, and will not stop execution upon failure
            of a previous job. If you need to have commands executed in order and
            stop on failure please use compound-command-execution.
        '''
        # if you aren't authenticated, redirect to login
        if not self._verify_auth():
            self.redirect('/login')
            return

        # Hand off to the coroutine; @asynchronous keeps the connection
        # open until disbatch() calls finish().
        self.disbatch()
@tornado.gen.coroutine
def disbatch(self):
'''
Disbatch all lowstates to the appropriate clients
'''
ret = []
# check clients before going, we want to throw 400 if one is bad
for low in self.lowstate:
client = low.get('client')
self._verify_client(client)
# Make sure we have 'token' or 'username'/'password' in each low chunk.
# Salt will verify the credentials are correct.
if self.token is not None and 'token' not in low:
low['token'] = self.token
if not (('token' in low)
or ('username' in low and 'password' in low and 'eauth' in low)):
ret.append('Failed to authenticate')
break
# disbatch to the correct handler
try:
chunk_ret = yield getattr(self, '_disbatch_{0}'.format(low['client']))(low)
ret.append(chunk_ret)
except EauthAuthenticationError as exc:
ret.append('Failed to authenticate')
break
except Exception as ex:
ret.append('Unexpected exception while handling request: {0}'.format(ex))
logger.error('Unexpected exception while handling request:', exc_info=True)
self.write(self.serialize({'return': ret}))
self.finish()
    @tornado.gen.coroutine
    def _disbatch_local_batch(self, chunk):
        '''
        Disbatch local client batched commands

        Pings the target first to learn which minions are up, then runs
        the real command against them a batch at a time, keeping at most
        ``maxflight`` jobs in flight.
        '''
        f_call = salt.utils.format_call(self.saltclients['local_batch'], chunk)

        # ping all the minions (to see who we have to talk to)
        # Don't catch any exception, since we won't know what to do, we'll
        # let the upper level deal with this one
        ping_ret = yield self._disbatch_local({'tgt': chunk['tgt'],
                                               'fun': 'test.ping',
                                               'expr_form': f_call['kwargs']['expr_form']})

        chunk_ret = {}

        # A non-dict return means nothing to batch over (e.g. no minions
        # matched); bail out with an empty result.
        if not isinstance(ping_ret, dict):
            raise tornado.gen.Return(chunk_ret)
        minions = list(ping_ret.keys())

        # NOTE(review): get_batch_size returns None on unparseable batch
        # strings, which would break the comparison below -- presumably
        # validated upstream; confirm.
        maxflight = get_batch_size(f_call['kwargs']['batch'], len(minions))
        inflight_futures = []
        # override the expr_form (per-minion jobs target an explicit list)
        f_call['kwargs']['expr_form'] = 'list'

        # do this batch
        while len(minions) > 0 or len(inflight_futures) > 0:
            # if you have more to go, lets disbatch jobs
            while len(inflight_futures) < maxflight and len(minions) > 0:
                minion_id = minions.pop(0)
                batch_chunk = dict(chunk)
                batch_chunk['tgt'] = [minion_id]
                batch_chunk['expr_form'] = 'list'
                future = self._disbatch_local(batch_chunk)
                inflight_futures.append(future)

            # if we have nothing to wait for, don't wait
            if len(inflight_futures) == 0:
                continue

            # wait until someone is done
            finished_future = yield Any(inflight_futures)
            try:
                b_ret = finished_future.result()
            except TimeoutException:
                break
            chunk_ret.update(b_ret)
            inflight_futures.remove(finished_future)

        raise tornado.gen.Return(chunk_ret)
@tornado.gen.coroutine
def _disbatch_local(self, chunk):
    '''
    Dispatch local client commands

    Publishes the job, then waits until either every targeted minion has
    returned or the job is no longer running anywhere.  Resolves with a
    dict of ``{minion_id: return_data}``, or an error string when the
    publish fails.
    '''
    chunk_ret = {}

    f_call = salt.utils.format_call(self.saltclients['local'], chunk)
    # fire a job off
    try:
        pub_data = self.saltclients['local'](*f_call.get('args', ()), **f_call.get('kwargs', {}))
    except EauthAuthenticationError:
        raise tornado.gen.Return('Not authorized to run this job')

    # if the job didn't publish, lets not wait around for nothing
    # TODO: set header??
    if 'jid' not in pub_data:
        raise tornado.gen.Return('No minions matched the target. No command was sent, no jid was assigned.')

    # seed minions_remaining with the pub_data
    minions_remaining = pub_data['minions']

    # when syndics are attached, give them at least ``syndic_wait``
    # seconds to forward the job before we start collecting returns
    syndic_min_wait = None
    if self.application.opts['order_masters']:
        syndic_min_wait = tornado.gen.sleep(self.application.opts['syndic_wait'])

    # watchdog future that resolves once the jid stops running on the
    # target; shares (and mutates) minions_remaining with all_returns
    job_not_running = self.job_not_running(pub_data['jid'],
                                           chunk['tgt'],
                                           f_call['kwargs']['expr_form'],
                                           minions_remaining=minions_remaining
                                           )

    # if we have a min_wait, do that
    if syndic_min_wait is not None:
        yield syndic_min_wait
    # we are completed when either all minions return or the job isn't running anywhere
    chunk_ret = yield self.all_returns(pub_data['jid'],
                                       finish_futures=[job_not_running],
                                       minions_remaining=minions_remaining,
                                       )

    raise tornado.gen.Return(chunk_ret)
@tornado.gen.coroutine
def all_returns(self,
                jid,
                finish_futures=None,
                minions_remaining=None,
                ):
    '''
    Return a future which will complete once all returns are completed
    (according to minions_remaining), or one of the passed in "finish_futures" completes

    Collects ``{minion_id: return_data}`` pairs off the event bus for
    *jid*.  ``minions_remaining`` is intentionally mutated in place so a
    concurrent ``job_not_running`` coroutine can add late-discovered
    minions that we should also wait for.
    '''
    if finish_futures is None:
        finish_futures = []
    if minions_remaining is None:
        minions_remaining = []

    ret_tag = tagify([jid, 'ret'], 'job')
    chunk_ret = {}
    while True:
        ret_event = self.application.event_listener.get_event(self,
                                                              tag=ret_tag,
                                                              )
        # wake up on either a return event or any of the finish futures
        f = yield Any([ret_event] + finish_futures)
        if f in finish_futures:
            # a stop condition fired first; hand back what we collected
            raise tornado.gen.Return(chunk_ret)
        event = f.result()
        chunk_ret[event['data']['id']] = event['data']['return']
        # its possible to get a return that wasn't in the minion_remaining list
        try:
            minions_remaining.remove(event['data']['id'])
        except ValueError:
            pass
        if len(minions_remaining) == 0:
            raise tornado.gen.Return(chunk_ret)
@tornado.gen.coroutine
def job_not_running(self,
                    jid,
                    tgt,
                    tgt_type,
                    minions_remaining=None,
                    ):
    '''
    Return a future which will complete once jid (passed in) is no longer
    running on tgt

    Repeatedly publishes ``saltutil.find_job`` for *jid* to the target.
    When a whole ``gather_job_timeout`` window elapses without any minion
    reporting the job as running, resolves with True.  Minions seen still
    running the job are appended to ``minions_remaining`` (mutated in
    place) so the caller also waits for their returns.
    '''
    if minions_remaining is None:
        minions_remaining = []

    ping_pub_data = self.saltclients['local'](tgt,
                                              'saltutil.find_job',
                                              [jid],
                                              expr_form=tgt_type)
    ping_tag = tagify([ping_pub_data['jid'], 'ret'], 'job')

    minion_running = False
    while True:
        try:
            event = yield self.application.event_listener.get_event(self,
                                                                    tag=ping_tag,
                                                                    timeout=self.application.opts['gather_job_timeout'],
                                                                    )
        except TimeoutException:
            if not minion_running:
                # no minion reported the job running in a full window
                raise tornado.gen.Return(True)
            else:
                # at least one minion was still running it; re-ping and
                # keep watching under the new find_job jid
                ping_pub_data = self.saltclients['local'](tgt,
                                                          'saltutil.find_job',
                                                          [jid],
                                                          expr_form=tgt_type)
                ping_tag = tagify([ping_pub_data['jid'], 'ret'], 'job')
                minion_running = False
                continue

        # Minions can return, we want to see if the job is running...
        if event['data'].get('return', {}) == {}:
            continue
        minion_running = True
        id_ = event['data']['id']
        if id_ not in minions_remaining:
            minions_remaining.append(event['data']['id'])
@tornado.gen.coroutine
def _disbatch_local_async(self, chunk):
    '''
    Disbatch local client_async commands

    Publishes the job and immediately resolves with the raw publish data
    (jid and matched minions) without waiting for any returns.
    '''
    client = self.saltclients['local_async']
    call = salt.utils.format_call(client, chunk)
    # fire a job off and hand the publish data straight back
    pub_data = client(*call.get('args', ()), **call.get('kwargs', {}))
    raise tornado.gen.Return(pub_data)
@tornado.gen.coroutine
def _disbatch_runner(self, chunk):
    '''
    Disbatch runner client commands

    Fires the runner job, waits for its return event on the master event
    bus, and resolves with just the job's return payload (or a timeout
    message string).
    '''
    pub_data = self.saltclients['runner'](chunk)
    ret_tag = pub_data['tag'] + '/ret'
    try:
        event = yield self.application.event_listener.get_event(self, tag=ret_tag)
    except TimeoutException:
        raise tornado.gen.Return('Timeout waiting for runner to execute')
    # only return the return data
    raise tornado.gen.Return(event['data']['return'])
@tornado.gen.coroutine
def _disbatch_runner_async(self, chunk):
    '''
    Disbatch runner client_async commands

    Fires the runner job and resolves immediately with the publish data,
    without waiting for the runner to finish.
    '''
    raise tornado.gen.Return(self.saltclients['runner'](chunk))
class MinionSaltAPIHandler(SaltAPIHandler):  # pylint: disable=W0223
    '''
    A convenience endpoint for minion related functions
    '''
    @tornado.web.asynchronous
    def get(self, mid=None):  # pylint: disable=W0221
        '''
        A convenience URL for getting lists of minions or getting minion
        details

        .. http:get:: /minions/(mid)

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/minions/ms-3

        .. code-block:: http

            GET /minions/ms-3 HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 129005
            Content-Type: application/x-yaml

            return:
            - ms-3:
                grains.items:
                    ...
        '''
        # if you aren't authenticated, redirect to login
        if not self._verify_auth():
            self.redirect('/login')
            return

        # fetch grains for the requested minion, or for every minion when
        # no mid was supplied in the URL
        self.lowstate = [{
            'client': 'local',
            'tgt': mid or '*',
            'fun': 'grains.items',
        }]
        self.disbatch()

    @tornado.web.asynchronous
    def post(self):
        '''
        Start an execution command and immediately return the job id

        .. http:post:: /minions

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|

            :resheader Content-Type: |res_ct|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

            :term:`lowstate` data describing Salt commands must be sent in the
            request body. The ``client`` option will be set to
            :py:meth:`~salt.client.LocalClient.local_async`.

        **Example request:**

        .. code-block:: bash

            curl -sSi localhost:8000/minions \\
                -H "Accept: application/x-yaml" \\
                -d tgt='*' \\
                -d fun='status.diskusage'

        .. code-block:: http

            POST /minions HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            Content-Length: 26
            Content-Type: application/x-www-form-urlencoded

            tgt=*&fun=status.diskusage

        **Example response:**

        .. code-block:: http

            HTTP/1.1 202 Accepted
            Content-Length: 86
            Content-Type: application/x-yaml

            return:
            - jid: '20130603122505459265'
              minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
        '''
        # if you aren't authenticated, redirect to login
        if not self._verify_auth():
            self.redirect('/login')
            return

        # verify that all lowstates are the correct client type
        for low in self.lowstate:
            # if you didn't specify, its fine
            if 'client' not in low:
                low['client'] = 'local_async'
                continue
            # if you specified something else, we don't do that
            if low.get('client') != 'local_async':
                self.set_status(400)
                self.write('We don\'t serve your kind here')
                self.finish()
                return

        self.disbatch()
class JobsSaltAPIHandler(SaltAPIHandler):  # pylint: disable=W0223
    '''
    A convenience endpoint for job cache data
    '''
    @tornado.web.asynchronous
    def get(self, jid=None):  # pylint: disable=W0221
        '''
        A convenience URL for getting lists of previously run jobs or getting
        the return from a single job

        .. http:get:: /jobs/(jid)

            List jobs or show a single job from the job cache.

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/jobs

        .. code-block:: http

            GET /jobs HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 165
            Content-Type: application/x-yaml

            return:
            - '20121130104633606931':
                Arguments:
                - '3'
                Function: test.fib
                Start Time: 2012, Nov 30 10:46:33.606931
                Target: jerry
                Target-type: glob

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/jobs/20121130104633606931

        .. code-block:: http

            GET /jobs/20121130104633606931 HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 73
            Content-Type: application/x-yaml

            info:
            - Arguments:
                - '3'
              Function: test.fib
              Minions:
                - jerry
              Start Time: 2012, Nov 30 10:46:33.606931
              Target: '*'
              Target-type: glob
              User: saltdev
              jid: '20121130104633606931'
            return:
            - jerry:
                - - 0
                  - 1
                  - 1
                  - 2
                  - 6.9141387939453125e-06
        '''
        # if you aren't authenticated, redirect to login
        if not self._verify_auth():
            self.redirect('/login')
            return

        if jid:
            # a jid in the URL: ask the job-cache runner for that job
            self.lowstate = [{
                'fun': 'jobs.list_job',
                'jid': jid,
                'client': 'runner',
            }]
        else:
            # no jid: list every job in the cache
            self.lowstate = [{
                'fun': 'jobs.list_jobs',
                'client': 'runner',
            }]

        self.disbatch()
class RunSaltAPIHandler(SaltAPIHandler):  # pylint: disable=W0223
    '''
    Endpoint to run commands without normal session handling
    '''
    @tornado.web.asynchronous
    def post(self):
        '''
        Run commands bypassing the :ref:`normal session handling
        <rest_cherrypy-auth>`

        .. http:post:: /run

            This entry point is primarily for "one-off" commands. Each request
            must pass full Salt authentication credentials. Otherwise this URL
            is identical to the :py:meth:`root URL (/) <LowDataAdapter.POST>`.

            :term:`lowstate` data describing Salt commands must be sent in the
            request body.

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -sS localhost:8000/run \\
                -H 'Accept: application/x-yaml' \\
                -d client='local' \\
                -d tgt='*' \\
                -d fun='test.ping' \\
                -d username='saltdev' \\
                -d password='saltdev' \\
                -d eauth='pam'

        .. code-block:: http

            POST /run HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            Content-Length: 75
            Content-Type: application/x-www-form-urlencoded

            client=local&tgt=*&fun=test.ping&username=saltdev&password=saltdev&eauth=pam

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 73
            Content-Type: application/x-yaml

            return:
            - ms-0: true
              ms-1: true
              ms-2: true
              ms-3: true
              ms-4: true
        '''
        # no session check here: each lowstate chunk must carry its own
        # eauth credentials, which disbatch() validates per-chunk
        self.disbatch()
class EventsSaltAPIHandler(SaltAPIHandler):  # pylint: disable=W0223
    '''
    Expose the Salt event bus

    The event bus on the Salt master exposes a large variety of things, notably
    when executions are started on the master and also when minions ultimately
    return their results. This URL provides a real-time window into a running
    Salt infrastructure.

    .. seealso:: :ref:`events`
    '''
    @tornado.gen.coroutine
    def get(self):
        r'''
        An HTTP stream of the Salt master event bus

        This stream is formatted per the Server Sent Events (SSE) spec. Each
        event is formatted as JSON.

        .. http:get:: /events

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -NsS localhost:8000/events

        .. code-block:: http

            GET /events HTTP/1.1
            Host: localhost:8000

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Connection: keep-alive
            Cache-Control: no-cache
            Content-Type: text/event-stream;charset=utf-8

            retry: 400

            data: {'tag': '', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}

            data: {'tag': '20130802115730568475', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}

        The event stream can be easily consumed via JavaScript:

        .. code-block:: javascript

            # Note, you must be authenticated!
            var source = new EventSource('/events');
            source.onopen = function() { console.debug('opening') };
            source.onerror = function(e) { console.debug('error!', e) };
            source.onmessage = function(e) { console.debug(e.data) };

        Or using CORS:

        .. code-block:: javascript

            var source = new EventSource('/events', {withCredentials: true});

        Some browser clients lack CORS support for the ``EventSource()`` API. Such
        clients may instead pass the :mailheader:`X-Auth-Token` value as an URL
        parameter:

        .. code-block:: bash

            curl -NsS localhost:8000/events/6d1b722e

        It is also possible to consume the stream via the shell.

        Records are separated by blank lines; the ``data:`` and ``tag:``
        prefixes will need to be removed manually before attempting to
        unserialize the JSON.

        curl's ``-N`` flag turns off input buffering which is required to
        process the stream incrementally.

        Here is a basic example of printing each event as it comes in:

        .. code-block:: bash

            curl -NsS localhost:8000/events |\
                    while IFS= read -r line ; do
                        echo $line
                    done

        Here is an example of using awk to filter events based on tag:

        .. code-block:: bash

            curl -NsS localhost:8000/events |\
                    awk '
                        BEGIN { RS=""; FS="\\n" }
                        $1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
                    '
            tag: salt/job/20140112010149808995/new
            data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
            tag: 20140112010149808995
            data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
        '''
        # if you aren't authenticated, redirect to login
        if not self._verify_auth():
            self.redirect('/login')
            return

        # set the streaming headers
        self.set_header('Content-Type', 'text/event-stream')
        self.set_header('Cache-Control', 'no-cache')
        self.set_header('Connection', 'keep-alive')

        # per the SSE spec, tell the client how long to wait (ms) before
        # reconnecting after a drop
        self.write(u'retry: {0}\n'.format(400))
        self.flush()

        # forward every bus event to the client until the listener times
        # out (e.g. the client disconnected)
        while True:
            try:
                event = yield self.application.event_listener.get_event(self)
                self.write(u'tag: {0}\n'.format(event.get('tag', '')))
                self.write(u'data: {0}\n\n'.format(json.dumps(event)))
                self.flush()
            except TimeoutException:
                break
class WebhookSaltAPIHandler(SaltAPIHandler):  # pylint: disable=W0223
    '''
    A generic web hook entry point that fires an event on Salt's event bus

    External services can POST data to this URL to trigger an event in Salt.
    For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.

    .. note:: Be mindful of security

        Salt's Reactor can run any code. A Reactor SLS that responds to a hook
        event is responsible for validating that the event came from a trusted
        source and contains valid data.

        **This is a generic interface and securing it is up to you!**

        This URL requires authentication however not all external services can
        be configured to authenticate. For this reason authentication can be
        selectively disabled for this URL. Follow best practices -- always use
        SSL, pass a secret key, configure the firewall to only allow traffic
        from a known source, etc.

    The event data is taken from the request body. The
    :mailheader:`Content-Type` header is respected for the payload.

    The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
    appended to the end. For example, a ``POST`` request sent to
    ``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
    ``salt/netapi/hook/mycompany/myapp/mydata``.

    The following is an example ``.travis.yml`` file to send notifications to
    Salt of successful test runs:

    .. code-block:: yaml

        language: python
        script: python -m unittest tests
        after_success:
            - 'curl -sS http://saltapi-url.example.com:8000/hook/travis/build/success -d branch="${TRAVIS_BRANCH}" -d commit="${TRAVIS_COMMIT}"'

    .. seealso:: :ref:`events`, :ref:`reactor`
    '''
    def post(self, tag_suffix=None):  # pylint: disable=W0221
        '''
        Fire an event in Salt with a custom event tag and data

        .. http:post:: /hook

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
            :status 413: request body is too large

        **Example request:**

        .. code-block:: bash

            curl -sS localhost:8000/hook -d foo='Foo!' -d bar='Bar!'

        .. code-block:: http

            POST /hook HTTP/1.1
            Host: localhost:8000
            Content-Length: 16
            Content-Type: application/x-www-form-urlencoded

            foo=Foo&bar=Bar!

        **Example response**:

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 14
            Content-Type: application/json

            {"success": true}

        As a practical example, an internal continuous-integration build
        server could send an HTTP POST request to the URL
        ``http://localhost:8000/hook/mycompany/build/success`` which contains
        the result of a build and the SHA of the version that was built as
        JSON. That would then produce the following event in Salt that could be
        used to kick off a deployment via Salt's Reactor::

            Event fired at Fri Feb 14 17:40:11 2014
            *************************
            Tag: salt/netapi/hook/mycompany/build/success
            Data:
            {'_stamp': '2014-02-14_17:40:11.440996',
                'headers': {
                    'X-My-Secret-Key': 'F0fAgoQjIT@W',
                    'Content-Length': '37',
                    'Content-Type': 'application/json',
                    'Host': 'localhost:8000',
                    'Remote-Addr': '127.0.0.1'},
                'post': {'revision': 'aa22a3c4b2e7', 'result': True}}

        Salt's Reactor could listen for the event:

        .. code-block:: yaml

            reactor:
              - 'salt/netapi/hook/mycompany/build/*':
                - /srv/reactor/react_ci_builds.sls

        And finally deploy the new build:

        .. code-block:: yaml

            {% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
            {% set build = data.get('post', {}) %}

            {% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
            deploy_my_app:
              cmd.state.sls:
                - tgt: 'application*'
                - arg:
                  - myapp.deploy
                - kwarg:
                    pillar:
                      revision: {{ revision }}
            {% endif %}
        '''
        # auth may be deliberately disabled for this URL, since many hook
        # sources (CI systems, SNS, ...) cannot authenticate
        disable_auth = self.application.mod_opts.get('webhook_disable_auth')
        if not disable_auth and not self._verify_auth():
            self.redirect('/login')
            return

        # if you have the tag, prefix
        tag = 'salt/netapi/hook'
        if tag_suffix:
            tag += tag_suffix

        # TODO: consolidate??
        self.event = salt.utils.event.get_event(
            'master',
            self.application.opts['sock_dir'],
            self.application.opts['transport'],
            opts=self.application.opts,
            listen=False)

        ret = self.event.fire_event({
            'post': self.raw_data,
            'get': dict(self.request.query_arguments),
            # In Tornado >= v4.0.3, the headers come
            # back as an HTTPHeaders instance, which
            # is a dictionary. We must cast this as
            # a dictionary in order for msgpack to
            # serialize it.
            'headers': dict(self.request.headers),
        }, tag)

        self.write(self.serialize({'success': ret}))
def _check_cors_origin(origin, allowed_origins):
"""
Check if an origin match cors allowed origins
"""
if isinstance(allowed_origins, list):
if origin in allowed_origins:
return origin
elif allowed_origins == '*':
return allowed_origins
elif allowed_origins == origin:
# Cors origin is either * or specific origin
return allowed_origins
|
goodwinnk/intellij-community | refs/heads/master | python/testData/typesFromAttributes/cyclicInheritance/module.py | 79 | from main import B
class A(B):
    # Intentionally empty subclass of ``B`` (imported from ``main``).
    # NOTE(review): the dataset path suggests this module is IDE test-fixture
    # data exercising type inference across a cyclic import — confirm before
    # reusing it elsewhere.
    pass
|
kirananto/RAZORMOTOG3 | refs/heads/cm-12.1 | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Command-line handling: an optional single argument selects either a
# process name (comm) or a numeric pid to filter on.
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";

for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        # numeric argument -> filter by pid
        for_pid = int(sys.argv[1])
    except:
        # non-numeric argument -> filter by process name
        # NOTE(review): bare except also swallows KeyboardInterrupt etc.
        for_comm = sys.argv[1]

# syscalls[comm][pid][syscall id][errno] -> failure count
syscalls = autodict()
def trace_begin():
    # Called once by perf before any events are processed.
    print "Press control+C to stop and show the summary"
def trace_end():
    # Called once by perf after the last event; print the summary table.
    print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, ret):
    # Per-event handler invoked by perf for every syscall exit.
    # Skip events not matching the optional comm/pid filter.
    if (for_comm and common_comm != for_comm) or \
       (for_pid  and common_pid  != for_pid ):
        return

    # only count failed syscalls (negative return = -errno)
    if ret < 0:
        try:
            syscalls[common_comm][common_pid][id][ret] += 1
        except TypeError:
            # first failure for this (comm, pid, id, ret) combination
            syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
    # Dump the accumulated failure counts, grouped by comm/pid/syscall and
    # sorted by descending count within each syscall.
    if for_comm is not None:
        print "\nsyscall errors for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall errors:\n\n",

    print "%-30s  %10s\n" % ("comm [pid]", "count"),
    print "%-30s  %10s\n" % ("------------------------------", \
                                 "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            for id in id_keys:
                print "  syscall: %-16s\n" % syscall_name(id),
                ret_keys = syscalls[comm][pid][id].keys()
                # sort (errno, count) pairs by count, highest first
                for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k),  reverse = True):
                    print "    err = %-20s  %10d\n" % (strerror(ret), val),
|
Bolton-and-Menk-GIS/photos-to-map | refs/heads/master | PhotosToMap/photomapper/gpsimage/PIL/PngImagePlugin.py | 40 | #
# The Python Imaging Library.
# $Id$
#
# PNG support code
#
# See "PNG (Portable Network Graphics) Specification, version 1.0;
# W3C Recommendation", 1996-10-01, Thomas Boutell (ed.).
#
# history:
# 1996-05-06 fl Created (couldn't resist it)
# 1996-12-14 fl Upgraded, added read and verify support (0.2)
# 1996-12-15 fl Separate PNG stream parser
# 1996-12-29 fl Added write support, added getchunks
# 1996-12-30 fl Eliminated circular references in decoder (0.3)
# 1998-07-12 fl Read/write 16-bit images as mode I (0.4)
# 2001-02-08 fl Added transparency support (from Zircon) (0.5)
# 2001-04-16 fl Don't close data source in "open" method (0.6)
# 2004-02-24 fl Don't even pretend to support interlaced files (0.7)
# 2004-08-31 fl Do basic sanity check on chunk identifiers (0.8)
# 2004-09-20 fl Added PngInfo chunk container
# 2004-12-18 fl Added DPI read support (based on code by Niki Spahiev)
# 2008-08-13 fl Added tRNS support for RGB images
# 2009-03-06 fl Support for preserving ICC profiles (by Florian Hoech)
# 2009-03-08 fl Added zTXT support (from Lowell Alleman)
# 2009-03-29 fl Read interlaced PNG files (from Conrado Porto Lopes Gouvua)
#
# Copyright (c) 1997-2009 by Secret Labs AB
# Copyright (c) 1996 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.9"
import re, string
import Image, ImageFile, ImagePalette, zlib
def i16(c):
    "Interpret the first 2 characters of *c* as a big-endian 16-bit integer."
    hi, lo = ord(c[0]), ord(c[1])
    return (hi << 8) + lo
def i32(c):
    "Interpret the first 4 characters of *c* as a big-endian 32-bit integer."
    value = 0
    for ch in c[:4]:
        value = (value << 8) + ord(ch)
    return value
# chunk ids consist of exactly four "word" characters
is_cid = re.compile("\w\w\w\w").match

# 8-byte signature that begins every PNG file
_MAGIC = "\211PNG\r\n\032\n"


_MODES = {
    # supported bits/color combinations, and corresponding modes/rawmodes
    # key: (bit depth, PNG color type) -> (PIL mode, rawmode)
    (1, 0): ("1", "1"),
    (2, 0): ("L", "L;2"),
    (4, 0): ("L", "L;4"),
    (8, 0): ("L", "L"),
    (16,0): ("I", "I;16B"),
    (8, 2): ("RGB", "RGB"),
    (16,2): ("RGB", "RGB;16B"),
    (1, 3): ("P", "P;1"),
    (2, 3): ("P", "P;2"),
    (4, 3): ("P", "P;4"),
    (8, 3): ("P", "P"),
    (8, 4): ("LA", "LA"),
    (16,4): ("RGBA", "LA;16B"), # LA;16B->LA not yet available
    (8, 6): ("RGBA", "RGBA"),
    (16,6): ("RGBA", "RGBA;16B"),
}
# --------------------------------------------------------------------
# Support classes. Suitable for PNG and related formats like MNG etc.
class ChunkStream:
    """Parser for a stream of chunks, as used by PNG and related formats.

    Wraps a file-like object and serves (chunk id, data offset, data
    length) triples, with optional CRC verification.
    """

    def __init__(self, fp):

        self.fp = fp
        self.queue = []  # chunks pushed back for re-reading: (cid, pos, len)

        # fall back to a no-op CRC check when the C accelerator is missing
        if not hasattr(Image.core, "crc32"):
            self.crc = self.crc_skip

    def read(self):
        "Fetch a new chunk. Returns header information."

        if self.queue:
            # serve a previously pushed-back chunk; seek to its data
            cid, pos, len = self.queue[-1]
            del self.queue[-1]
            self.fp.seek(pos)
        else:
            # chunk layout: 4-byte length, 4-byte id, then data
            s = self.fp.read(8)
            cid = s[4:]
            pos = self.fp.tell()
            len = i32(s)

        if not is_cid(cid):
            raise SyntaxError, "broken PNG file (chunk %s)" % repr(cid)

        return cid, pos, len

    def close(self):
        self.queue = self.crc = self.fp = None

    def push(self, cid, pos, len):
        # push a chunk back so the next read() returns it again
        self.queue.append((cid, pos, len))

    def call(self, cid, pos, len):
        "Call the appropriate chunk handler"

        if Image.DEBUG:
            print "STREAM", cid, pos, len
        return getattr(self, "chunk_" + cid)(pos, len)

    def crc(self, cid, data):
        "Read and verify checksum"

        # compare the stored 32-bit CRC (read as two 16-bit halves) with
        # the CRC of the chunk id plus data
        crc1 = Image.core.crc32(data, Image.core.crc32(cid))
        crc2 = i16(self.fp.read(2)), i16(self.fp.read(2))
        if crc1 != crc2:
            raise SyntaxError, "broken PNG file"\
                "(bad header checksum in %s)" % cid

    def crc_skip(self, cid, data):
        "Read checksum. Used if the C module is not present"

        self.fp.read(4)

    def verify(self, endchunk = "IEND"):

        # Simple approach; just calculate checksum for all remaining
        # blocks. Must be called directly after open.

        cids = []

        while 1:
            cid, pos, len = self.read()
            if cid == endchunk:
                break
            self.crc(cid, ImageFile._safe_read(self.fp, len))
            cids.append(cid)

        return cids
# --------------------------------------------------------------------
# PNG chunk container (for use with save(pnginfo=))
class PngInfo:
    """Container for PNG ancillary chunks, for use with ``save(pnginfo=...)``."""

    def __init__(self):
        self.chunks = []

    def add(self, cid, data):
        """Append a raw (chunk id, data) pair."""
        self.chunks.append((cid, data))

    def add_text(self, key, value, zip=0):
        """Append a text chunk; compressed (zTXt) when *zip* is true."""
        if not zip:
            self.add("tEXt", key + "\0" + value)
        else:
            import zlib
            self.add("zTXt", key + "\0\0" + zlib.compress(value))
# --------------------------------------------------------------------
# PNG image stream (IHDR/IEND)
class PngStream(ChunkStream):
    """Decoder for the PNG chunk stream (IHDR through IEND).

    Each ``chunk_*`` method handles one chunk type, recording what it
    learns in ``im_*`` attributes that ``PngImageFile`` later copies out.
    """

    def __init__(self, fp):

        ChunkStream.__init__(self, fp)

        # local copies of Image attributes
        self.im_info = {}       # misc metadata: gamma, dpi, transparency, ...
        self.im_text = {}       # tEXt/zTXt key/value pairs
        self.im_size = (0,0)
        self.im_mode = None
        self.im_tile = None
        self.im_palette = None

    def chunk_iCCP(self, pos, len):

        # ICC profile
        s = ImageFile._safe_read(self.fp, len)
        # according to PNG spec, the iCCP chunk contains:
        # Profile name  1-79 bytes (character string)
        # Null separator        1 byte (null character)
        # Compression method    1 byte (0)
        # Compressed profile    n bytes (zlib with deflate compression)
        i = string.find(s, chr(0))
        if Image.DEBUG:
            print "iCCP profile name", s[:i]
            print "Compression method", ord(s[i])
        comp_method = ord(s[i])
        # NOTE(review): this reads the byte at the separator index, not the
        # byte after it — the PNG spec puts the method after the null;
        # behavior preserved here, confirm against a real iCCP chunk.
        if comp_method != 0:
            raise SyntaxError("Unknown compression method %s in iCCP chunk" % comp_method)
        try:
            icc_profile = zlib.decompress(s[i+2:])
        except zlib.error:
            icc_profile = None # FIXME
        self.im_info["icc_profile"] = icc_profile
        return s

    def chunk_IHDR(self, pos, len):

        # image header
        s = ImageFile._safe_read(self.fp, len)
        self.im_size = i32(s), i32(s[4:])
        try:
            # map (bit depth, color type) to a PIL mode/rawmode pair
            self.im_mode, self.im_rawmode = _MODES[(ord(s[8]), ord(s[9]))]
        except:
            pass
        if ord(s[12]):
            self.im_info["interlace"] = 1
        if ord(s[11]):
            raise SyntaxError, "unknown filter category"
        return s

    def chunk_IDAT(self, pos, len):

        # image data: record the tile and stop header parsing
        self.im_tile = [("zip", (0,0)+self.im_size, pos, self.im_rawmode)]
        self.im_idat = len
        raise EOFError

    def chunk_IEND(self, pos, len):

        # end of PNG image
        raise EOFError

    def chunk_PLTE(self, pos, len):

        # palette (raw RGB triples)
        s = ImageFile._safe_read(self.fp, len)
        if self.im_mode == "P":
            self.im_palette = "RGB", s
        return s

    def chunk_tRNS(self, pos, len):

        # transparency; representation depends on the image mode
        s = ImageFile._safe_read(self.fp, len)
        if self.im_mode == "P":
            # index of the first fully transparent palette entry
            i = string.find(s, chr(0))
            if i >= 0:
                self.im_info["transparency"] = i
        elif self.im_mode == "L":
            self.im_info["transparency"] = i16(s)
        elif self.im_mode == "RGB":
            self.im_info["transparency"] = i16(s), i16(s[2:]), i16(s[4:])
        return s

    def chunk_gAMA(self, pos, len):

        # gamma setting, stored as gamma * 100000
        s = ImageFile._safe_read(self.fp, len)
        self.im_info["gamma"] = i32(s) / 100000.0
        return s

    def chunk_pHYs(self, pos, len):

        # pixels per unit
        s = ImageFile._safe_read(self.fp, len)
        px, py = i32(s), i32(s[4:])
        unit = ord(s[8])
        if unit == 1: # meter
            # convert pixels-per-meter to dots-per-inch
            dpi = int(px * 0.0254 + 0.5), int(py * 0.0254 + 0.5)
            self.im_info["dpi"] = dpi
        elif unit == 0:
            self.im_info["aspect"] = px, py
        return s

    def chunk_tEXt(self, pos, len):

        # text: "key\0value"
        s = ImageFile._safe_read(self.fp, len)
        try:
            k, v = string.split(s, "\0", 1)
        except ValueError:
            k = s; v = "" # fallback for broken tEXt tags
        if k:
            self.im_info[k] = self.im_text[k] = v
        return s

    def chunk_zTXt(self, pos, len):

        # compressed text: "key\0<method byte><zlib data>"
        s = ImageFile._safe_read(self.fp, len)
        k, v = string.split(s, "\0", 1)
        comp_method = ord(v[0])
        if comp_method != 0:
            raise SyntaxError("Unknown compression method %s in zTXt chunk" % comp_method)
        import zlib
        self.im_info[k] = self.im_text[k] = zlib.decompress(v[1:])
        return s
# --------------------------------------------------------------------
# PNG reader
def _accept(prefix):
    "Return true if *prefix* begins with the 8-byte PNG signature."
    return prefix.startswith(_MAGIC)
##
# Image plugin for PNG images.
class PngImageFile(ImageFile.ImageFile):
    """Image plugin for PNG images."""

    format = "PNG"
    format_description = "Portable network graphics"

    def _open(self):
        # Verify the signature and parse all headers up to the first IDAT.

        if self.fp.read(8) != _MAGIC:
            raise SyntaxError, "not a PNG file"

        #
        # Parse headers up to the first IDAT chunk

        self.png = PngStream(self.fp)

        while 1:

            #
            # get next chunk

            cid, pos, len = self.png.read()

            try:
                s = self.png.call(cid, pos, len)
            except EOFError:
                # raised by chunk_IDAT/chunk_IEND to end header parsing
                break
            except AttributeError:
                # no chunk_<cid> handler: skip the unknown chunk, but
                # still verify its checksum below
                if Image.DEBUG:
                    print cid, pos, len, "(unknown)"
                s = ImageFile._safe_read(self.fp, len)

            self.png.crc(cid, s)

        #
        # Copy relevant attributes from the PngStream. An alternative
        # would be to let the PngStream class modify these attributes
        # directly, but that introduces circular references which are
        # difficult to break if things go wrong in the decoder...
        # (believe me, I've tried ;-)

        self.mode = self.png.im_mode
        self.size = self.png.im_size
        self.info = self.png.im_info
        self.text = self.png.im_text # experimental
        self.tile = self.png.im_tile

        if self.png.im_palette:
            rawmode, data = self.png.im_palette
            self.palette = ImagePalette.raw(rawmode, data)

        # ``len`` still holds the first IDAT chunk's data length here
        self.__idat = len # used by load_read()

    def verify(self):
        "Verify PNG file"

        if self.fp is None:
            raise RuntimeError("verify must be called directly after open")

        # back up to beginning of IDAT block
        self.fp.seek(self.tile[0][2] - 8)

        self.png.verify()
        self.png.close()

        self.fp = None

    def load_prepare(self):
        "internal: prepare to read PNG file"

        if self.info.get("interlace"):
            # tell the zip decoder the image is Adam7-interlaced
            self.decoderconfig = self.decoderconfig + (1,)

        ImageFile.ImageFile.load_prepare(self)

    def load_read(self, bytes):
        "internal: read more image data"

        while self.__idat == 0:
            # end of chunk, skip forward to next one

            self.fp.read(4) # CRC

            cid, pos, len = self.png.read()

            if cid not in ["IDAT", "DDAT"]:
                # not image data; push it back and signal end of stream
                self.png.push(cid, pos, len)
                return ""

            self.__idat = len # empty chunks are allowed

        # read more data from this chunk
        if bytes <= 0:
            bytes = self.__idat
        else:
            bytes = min(bytes, self.__idat)

        self.__idat = self.__idat - bytes

        return self.fp.read(bytes)

    def load_end(self):
        "internal: finished reading image data"

        self.png.close()
        self.png = None
# --------------------------------------------------------------------
# PNG writer
def o16(i):
    "Pack *i* as two big-endian bytes, returned as a 2-character string."
    return chr((i >> 8) & 255) + chr(i & 255)
def o32(i):
    "Pack *i* as four big-endian bytes, returned as a 4-character string."
    parts = [chr((i >> shift) & 255) for shift in (24, 16, 8, 0)]
    return "".join(parts)
_OUTMODES = {
    # supported PIL modes, and corresponding rawmodes/bits/color combinations
    # key: PIL mode -> (rawmode, 2-char string of bit depth + PNG color type)
    "1":   ("1", chr(1)+chr(0)),
    "L;1": ("L;1", chr(1)+chr(0)),
    "L;2": ("L;2", chr(2)+chr(0)),
    "L;4": ("L;4", chr(4)+chr(0)),
    "L":   ("L", chr(8)+chr(0)),
    "LA":  ("LA", chr(8)+chr(4)),
    "I":   ("I;16B", chr(16)+chr(0)),
    "P;1": ("P;1", chr(1)+chr(3)),
    "P;2": ("P;2", chr(2)+chr(3)),
    "P;4": ("P;4", chr(4)+chr(3)),
    "P":   ("P", chr(8)+chr(3)),
    "RGB": ("RGB", chr(8)+chr(2)),
    "RGBA":("RGBA", chr(8)+chr(6)),
}
def putchunk(fp, cid, *data):
    "Write a PNG chunk (including CRC field)"

    data = string.join(data, "")

    # chunk layout on disk: 4-byte length, 4-byte id, data, 4-byte CRC
    fp.write(o32(len(data)) + cid)
    fp.write(data)

    # the CRC covers the id and the data; Image.core.crc32 yields it as
    # two 16-bit halves
    hi, lo = Image.core.crc32(data, Image.core.crc32(cid))
    fp.write(o16(hi) + o16(lo))
class _idat:
# wrap output from the encoder in IDAT chunks
def __init__(self, fp, chunk):
self.fp = fp
self.chunk = chunk
def write(self, data):
self.chunk(self.fp, "IDAT", data)
def _save(im, fp, filename, chunk=putchunk, check=0):
    # save an image to disk (called by the save method)
    #
    # ``chunk`` is the chunk-writer callable (replaced by getchunks below);
    # a true ``check`` short-circuits right after mode validation.

    mode = im.mode

    if mode == "P":

        #
        # attempt to minimize storage requirements for palette images

        if im.encoderinfo.has_key("bits"):

            # number of bits specified by user
            n = 1 << im.encoderinfo["bits"]

        else:

            # check palette contents
            n = 256 # FIXME

        # pick the smallest bit depth that can represent n palette entries
        if n <= 2:
            bits = 1
        elif n <= 4:
            bits = 2
        elif n <= 16:
            bits = 4
        else:
            bits = 8

        if bits != 8:
            mode = "%s;%d" % (mode, bits)

    # encoder options
    if im.encoderinfo.has_key("dictionary"):
        dictionary = im.encoderinfo["dictionary"]
    else:
        dictionary = ""

    im.encoderconfig = (im.encoderinfo.has_key("optimize"), dictionary)

    # get the corresponding PNG mode
    try:
        rawmode, mode = _OUTMODES[mode]
    except KeyError:
        raise IOError, "cannot write mode %s as PNG" % mode

    if check:
        return check

    #
    # write minimal PNG file

    fp.write(_MAGIC)

    chunk(fp, "IHDR",
          o32(im.size[0]), o32(im.size[1]), #  0: size
          mode,                             #  8: depth/type
          chr(0),                           # 10: compression
          chr(0),                           # 11: filter category
          chr(0))                           # 12: interlace flag

    if im.mode == "P":
        chunk(fp, "PLTE", im.im.getpalette("RGB"))

    if im.encoderinfo.has_key("transparency"):
        # tRNS payload depends on the image mode
        if im.mode == "P":
            # entries before the transparent index are fully opaque
            transparency = max(0, min(255, im.encoderinfo["transparency"]))
            chunk(fp, "tRNS", chr(255) * transparency + chr(0))
        elif im.mode == "L":
            transparency = max(0, min(65535, im.encoderinfo["transparency"]))
            chunk(fp, "tRNS", o16(transparency))
        elif im.mode == "RGB":
            red, green, blue = im.encoderinfo["transparency"]
            chunk(fp, "tRNS", o16(red) + o16(green) + o16(blue))
        else:
            raise IOError("cannot use transparency for this mode")

    if 0:
        # FIXME: to be supported some day
        # NOTE(review): dead code guarded by ``if 0:`` — ``gamma`` is
        # undefined in this scope and would NameError if enabled.
        chunk(fp, "gAMA", o32(int(gamma * 100000.0)))

    dpi = im.encoderinfo.get("dpi")
    if dpi:
        # PNG stores resolution in pixels per meter (unit type 1)
        chunk(fp, "pHYs",
              o32(int(dpi[0] / 0.0254 + 0.5)),
              o32(int(dpi[1] / 0.0254 + 0.5)),
              chr(1))

    info = im.encoderinfo.get("pnginfo")
    if info:
        for cid, data in info.chunks:
            chunk(fp, cid, data)

    # ICC profile writing support -- 2008-06-06 Florian Hoech
    if im.info.has_key("icc_profile"):
        # ICC profile
        # according to PNG spec, the iCCP chunk contains:
        # Profile name  1-79 bytes (character string)
        # Null separator        1 byte (null character)
        # Compression method    1 byte (0)
        # Compressed profile    n bytes (zlib with deflate compression)
        try:
            import ICCProfile
            p = ICCProfile.ICCProfile(im.info["icc_profile"])
            name = p.tags.desc.get("ASCII", p.tags.desc.get("Unicode", p.tags.desc.get("Macintosh", p.tags.desc.get("en", {}).get("US", "ICC Profile")))).encode("latin1", "replace")[:79]
        except ImportError:
            name = "ICC Profile"
        data = name + "\0\0" + zlib.compress(im.info["icc_profile"])
        chunk(fp, "iCCP", data)

    ImageFile._save(im, _idat(fp, chunk), [("zip", (0,0)+im.size, 0, rawmode)])

    chunk(fp, "IEND", "")

    try:
        fp.flush()
    except:
        pass
# --------------------------------------------------------------------
# PNG chunk converter
def getchunks(im, **params):
    """Return a list of PNG chunks representing this image."""

    class ChunkCollector:
        # Gathers (cid, data, crc) tuples; raw stream bytes written by the
        # encoder (magic number, chunk framing) are simply discarded.
        data = []

        def write(self, data):
            pass

        def append(self, chunk):
            self.data.append(chunk)

    def add_chunk(sink, cid, *pieces):
        # Replacement for the file-based chunk writer: compute the CRC over
        # chunk id + payload and record the chunk instead of writing it out.
        payload = string.join(pieces, "")
        hi, lo = Image.core.crc32(payload, Image.core.crc32(cid))
        sink.append((cid, payload, o16(hi) + o16(lo)))

    sink = ChunkCollector()
    try:
        im.encoderinfo = params
        _save(im, sink, None, add_chunk)
    finally:
        del im.encoderinfo
    return sink.data
# --------------------------------------------------------------------
# Registry
# Hook the PNG codec into the Image plugin registry: reader (with magic-number
# sniffing via _accept), writer, recognized file extension, and MIME type.
Image.register_open("PNG", PngImageFile, _accept)
Image.register_save("PNG", _save)
Image.register_extension("PNG", ".png")
Image.register_mime("PNG", "image/png")
|
Bismarrck/tensorflow | refs/heads/master | tensorflow/contrib/tpu/python/tpu/tpu_feed.py | 6 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""Helper library for handling infeed between hosts and TPUs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_sharding
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.util import nest
class InfeedQueue(object):
  """A helper object to build a device infeed queue.

  The InfeedQueue builds the host-side and device-side Ops to enqueue and
  dequeue elements, respectively, and ensures that their types and
  shapes match.
  """

  def __init__(self,
               number_of_tuple_elements=None,
               tuple_types=None,
               tuple_shapes=None,
               shard_dimensions=None,
               name=None):
    """Creates a new InfeedQueue with the given configuration.

    The configuration need not be fully specified at creation since it
    can be modified subsequently by methods that set the values
    explicitly or infer them from the shapes of inputs.

    Args:
      number_of_tuple_elements: the number of Tensors fed atomically through the
        queue, must be present unless it can be inferred from other arguments.
      tuple_types: if not None, a list of types of the elements of the queue.
      tuple_shapes: if not None, a list of shapes of the elements of the queue.
      shard_dimensions: if not None, a list of dimensions on which the
        elements of the queue should be sharded during automatic
        parallelization.
      name: the name of the queue.

    Raises:
      ValueError: if number_of_tuple_elements <= 0; or
        number_of_tuple_arguments, tuple_types, tuple_shapes, and
        shard_dimensions are all None; or the length of tuple_types,
        tuple_shapes, or shard_dimensions is not equal to
        number_of_tuple_elements; or any element of shard_dimensions
        can't be converted to a Dimension.
      TypeError: if any element of tuple_types or tuple_shapes can't
        be converted to a dtype or TensorShape, respectively.
    """
    self._frozen = False
    self._generated_enqueue_ops = False
    self._generated_dequeue_op = False
    self._name = "InfeedQueue" if name is None else name
    if number_of_tuple_elements is None:
      # Infer the tuple arity from whichever configuration list was given.
      if tuple_types is not None:
        number_of_tuple_elements = len(tuple_types)
      elif tuple_shapes is not None:
        number_of_tuple_elements = len(tuple_shapes)
      elif shard_dimensions is not None:
        number_of_tuple_elements = len(shard_dimensions)
      else:
        raise ValueError(
            "number of tuple elements cannot be inferred from InfeedQueue "
            "constructor")
    if number_of_tuple_elements <= 0:
      raise ValueError("number_of_tuple_elements %d must be > 0" %
                       number_of_tuple_elements)
    # Make an empty sharding policy for each tuple element.
    self._sharding_policies = [
        tpu_sharding.ShardingPolicy()
        for _ in xrange(number_of_tuple_elements)
    ]
    if tuple_types is not None:
      self.set_tuple_types(tuple_types)
    else:
      self._tuple_types = None
    if tuple_shapes is not None:
      self.set_tuple_shapes(tuple_shapes)
    else:
      self._tuple_shapes = None
    if shard_dimensions is not None:
      self.set_shard_dimensions(shard_dimensions)
    self._validate()

  def _validate(self):
    """Checks that the configuration is self-consistent.

    Raises:
      ValueError: if the shapes and sharding policies don't match.
    """
    if self.tuple_shapes is not None:
      for (policy, shape) in zip(self._sharding_policies, self._tuple_shapes):
        # Raise an error if the policy is incompatible with the shape.
        _ = policy.get_sharded_shape(shape)

  @property
  def number_of_tuple_elements(self):
    """Returns the number of InfeedQueue tuple elements."""
    return len(self._sharding_policies)

  @property
  def tuple_types(self):
    """Returns the types of the InfeedQueue tuple elements."""
    return self._tuple_types

  def set_tuple_types(self, tuple_types):
    """Sets the type of each element of the queue.

    tuple_types must be a list of length
    self.number_of_tuple_elements, and each element must be
    convertible to a dtype.

    Args:
      tuple_types: the types of each queue element.

    Raises:
      ValueError: if tuple_types is not of length
        self.number_of_tuple_elements.
      TypeError: if an element of tuple_types cannot be converted to a
        dtype.
    """
    if len(tuple_types) != self.number_of_tuple_elements:
      raise ValueError("tuple_types is %s, but must be a list of length %d" %
                       (str(tuple_types), self.number_of_tuple_elements))
    if self._frozen:
      # Once frozen the types may only be restated, never changed.
      for (frozen, updated) in zip(self._tuple_types, tuple_types):
        if frozen != updated:
          raise ValueError(
              "Trying to update InfeedQueue with frozen configuration with an "
              "incompatible type. Frozen types are %s, updated types are %s" % (
                  str(self._tuple_types), str(tuple_types)))
    else:
      try:
        self._tuple_types = [dtypes.as_dtype(t) for t in tuple_types]
      except TypeError as e:
        raise TypeError(
            "tuple_types is %s, but must be a list of elements each "
            "convertible to dtype: got error %s" % (str(tuple_types), str(e)))

  @property
  def tuple_shapes(self):
    """Returns the shapes of the InfeedQueue tuple elements."""
    return self._tuple_shapes

  def set_tuple_shapes(self, tuple_shapes):
    """Sets the shape of each element of the queue.

    tuple_shapes must be a list of length
    self.number_of_tuple_elements, and each element must be
    convertible to a TensorShape.

    Args:
      tuple_shapes: the shapes of each queue element.

    Raises:
      ValueError: if tuple_shapes is not of length
        self.number_of_tuple_elements.
      TypeError: if an element of tuple_shapes cannot be converted to
        a TensorShape.
    """
    if len(tuple_shapes) != self.number_of_tuple_elements:
      raise ValueError("tuple_shapes is %s, but must be a list of length %d" %
                       (str(tuple_shapes), self.number_of_tuple_elements))
    try:
      tuple_shapes = [tensor_shape.as_shape(shape) for shape in tuple_shapes]
    except (ValueError, TypeError) as e:
      raise TypeError(
          "tuple_shapes is %s, but must be a list of elements each "
          "convertible to TensorShape: got error %s" % (str(tuple_shapes),
                                                        str(e)))
    if self._frozen:
      # Once frozen the shapes may only be restated, never changed.
      for (frozen, updated) in zip(self._tuple_shapes, tuple_shapes):
        if frozen != updated:
          raise ValueError(
              "Trying to update InfeedQueue with frozen configuration with an "
              "incompatible shape. Frozen shapes are %s, updated shapes are %s"
              % (str(self._tuple_shapes), str(tuple_shapes)))
    else:
      self._tuple_shapes = tuple_shapes
    self._validate()

  @property
  def sharding_policies(self):
    """Returns the sharding policies of the InfeedQueue tuple elements."""
    return self._sharding_policies

  @property
  def shard_dimensions(self):
    """Gets the shard dimension of each tuple element.

    Returns:
      A list of length number_of_tuple_elements, where each list entry
      is the shard dimension of that tuple element or None if the
      shard dimension has not been set.
    """
    # The number of shards is always the same for all the policies.
    return [policy.shard_dimension for policy in self._sharding_policies]

  def set_shard_dimensions(self, shard_dimensions):
    """Sets the shard_dimension of each element of the queue.

    shard_dimensions must be a list of length
    self.number_of_tuple_elements, and each element must be
    convertible to a Dimension compatible with self.tuple_shapes.

    Args:
      shard_dimensions: the dimensions of each queue element.

    Raises:
      ValueError: if shard_dimensions is not of length
        self.number_of_tuple_elements; or an element of
        shard_dimensions cannot be converted to a Dimension; or an
        element of shard_dimensions is a Dimension that is out of
        range for the corresponding tuple element shape.
    """
    if len(shard_dimensions) != self.number_of_tuple_elements:
      raise ValueError("shard_dimensions is %s, but must be a list of length %d"
                       % (str(shard_dimensions),
                          self.number_of_tuple_elements))
    for (policy, dimension) in zip(self._sharding_policies, shard_dimensions):
      policy.set_shard_dimension(dimension)
    self._validate()

  @property
  def number_of_shards(self):
    """Gets the number of shards to use for the InfeedQueue.

    Returns:
      Number of shards or None if the number of shards has not been set.
    """
    # The number of shards is always the same for all the policies.
    return self._sharding_policies[0].number_of_shards

  def set_number_of_shards(self, number_of_shards):
    """Sets the number of shards to use for the InfeedQueue.

    Args:
      number_of_shards: number of ways to shard the InfeedQueue.

    Raises:
      ValueError: if number_of_shards is not > 0; or the policies have
        been frozen and number_of_shards was already set to something
        else.
    """
    for policy in self._sharding_policies:
      policy.set_number_of_shards(number_of_shards)
    self._validate()

  def set_configuration_from_input_tensors(self, input_tensors):
    """Sets the shapes and types of the queue tuple elements.

    input_tensors is a list of Tensors whose types and shapes are used
    to set the queue configuration.

    Args:
      input_tensors: list of Tensors of the same types and shapes as
        the desired queue Tuple.

    Raises:
      ValueError: if input_tensors is not a list of length
        self.number_of_tuple_elements
    """
    if len(input_tensors) != self.number_of_tuple_elements:
      raise ValueError("input_tensors is %s, but should be a list of %d Tensors"
                       % (str(input_tensors), self.number_of_tuple_elements))
    self.set_tuple_shapes([t.shape for t in input_tensors])
    self.set_tuple_types([t.dtype for t in input_tensors])

  def set_configuration_from_sharded_input_tensors(self, input_tensors):
    """Sets the shapes and types of the queue tuple elements.

    input_tensors is a list of lists of Tensors whose types and shapes are used
    to set the queue configuration. The length of the outer list is the number
    of shards required, and each inner list is the tuple of Tensors to use to
    determine the types and shapes of the corresponding shard. This method
    depends on the shard dimension, and calling it freezes the shard policy.

    Args:
      input_tensors: list of lists of Tensors. The outer list length corresponds
        to the desired number of shards, and each inner list is the size
        and shape of the desired configuration of the corresponding shard.

    Raises:
      ValueError: if any inner list is not a list of length
        self.number_of_tuple_elements; or the inner lists do not combine to
        form a consistent unsharded shape.
      TypeError: if the types of the Tensors in the inner lists do not match.
    """
    if not self._frozen:
      # Unset the tuple shapes in case the configuration becomes
      # transiently inconsistent.
      self._tuple_shapes = None
    number_of_shards = len(input_tensors)
    self.set_number_of_shards(number_of_shards)
    for t in input_tensors:
      if len(t) != self.number_of_tuple_elements:
        raise ValueError(
            "input_tensors is %s but must be a list of lists, where each inner"
            " list has length number_of_tuple_elements=%d" % (
                str(input_tensors), self.number_of_tuple_elements))
    # Transpose the inputs to make a list of shard shapes for each tuple
    # element.
    sharded_shapes = [[t[i].shape for t in input_tensors]
                      for i in xrange(self.number_of_tuple_elements)]
    # For each tuple, get the unsharded shape using that tuple's policy.
    unsharded_shapes = [
        policy.get_unsharded_shape(s)
        for (policy, s) in zip(self._sharding_policies, sharded_shapes)
    ]
    self.set_tuple_shapes(unsharded_shapes)
    for i in xrange(1, self.number_of_shards):
      for (t1, t2) in zip(input_tensors[0], input_tensors[i]):
        if t1.dtype != t2.dtype:
          raise TypeError(
              "types of the tuple elements of input_tensors %s are not "
              "consistent" % str(input_tensors))
    self.set_tuple_types([t.dtype for t in input_tensors[0]])

  def freeze(self):
    """Freezes the InfeedQueue so it can no longer be modified.

    The configuration is implicitly frozen before any host-side or
    device-side Ops are generated. The configuration cannot be frozen
    until the types and shapes of the tuple elements have been set.

    Raises:
      ValueError: if the types or shapes of the tuple elements have not been
        set.
    """
    self._frozen = True
    if self._tuple_types is None:
      raise ValueError(
          "Can't freeze an InfeedQueue without setting all tuple types.")
    if self._tuple_shapes is None:
      raise ValueError(
          "Can't freeze an InfeedQueue without setting all tuple shapes.")
    for shape in self._tuple_shapes:
      if shape.dims is None:
        raise ValueError(
            "Can't freeze an InfeedQueue without setting all tuple shapes.")
    for policy in self._sharding_policies:
      policy.freeze()
    self._validate()

  def generate_dequeue_op(self, tpu_device=0):
    """Generates the device-side Op to dequeue a tuple from the queue.

    Implicitly freezes the queue configuration if it is not already
    frozen, which will raise errors if the shapes and types have not
    been fully specified.

    Args:
      tpu_device: The TPU device ordinal where the infeed instruction should be
        placed. If None, no explicit placement will be performed, and it is up
        to the user to call this API from within a proper TPU device scope.
        The XLA code will fail if the TPU dequeue instruction is not bound to
        any device.

    Returns:
      A list of Outputs corresponding to a shard of infeed dequeued
      into XLA, suitable for use within a replicated block.

    Raises:
      ValueError: if the types or shapes of the tuple elements have not been
        set; or if a dequeue op has already been generated.
    """
    self.freeze()
    if self._generated_dequeue_op:
      raise ValueError("Can't generate two dequeue Ops from the same queue")
    self._generated_dequeue_op = True
    full_name = "%s/dequeue" % self._name
    sharded_shapes = [
        policy.get_sharded_shape(shape)
        for (shape, policy) in zip(self._tuple_shapes, self._sharding_policies)
    ]
    if tpu_device is not None:
      with ops.device(tpu.core(tpu_device)):
        return tpu_ops.infeed_dequeue_tuple(
            dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)
    else:
      return tpu_ops.infeed_dequeue_tuple(
          dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)

  def _generate_enqueue_op(self,
                           inputs,
                           name_prefix,
                           index,
                           device=None,
                           tpu_ordinal=-1):
    """Generate a host-side Op to enqueue a tuple to the queue.

    If device is None the inputs are all required to have the same
    device specification, and the enqueue Op is colocated with
    inputs[0]. Otherwise the enqueue Op is placed on 'device'.

    Args:
      inputs: a list of Tensors with the types and shapes of the tuple elements.
      name_prefix: the base name for the Op.
      index: the shard index, used to uniquify the Op name.
      device: device to place the Op on, or None if it should be
        colocated with the inputs.
      tpu_ordinal: ordinal of the TPU device on the host to use for
        infeed if device is a CPU device. Should be set to -1 if device
        is a TPU device.

    Returns:
      An Op corresponding to a shard of infeed enqueued at the host,
      suitable for use within a replicated block.

    Raises:
      ValueError: if device is None and inputs do not all have the
        same device specification.
    """
    full_name = "%s/%d" % (name_prefix, index)
    shapes = [t.shape for t in inputs]
    if device is None:
      devices = [t.device for t in inputs]
      for i in xrange(1, self.number_of_tuple_elements):
        if devices[0] != devices[i]:
          raise ValueError(
              "input devices for shard %d are %s, but should all be the same" %
              (index, str(devices)))
      with ops.colocate_with(inputs[0]):
        return tpu_ops.infeed_enqueue_tuple(
            inputs=inputs,
            shapes=shapes,
            name=full_name,
            device_ordinal=tpu_ordinal)
    else:
      with ops.device(device):
        return tpu_ops.infeed_enqueue_tuple(
            inputs=inputs,
            shapes=shapes,
            name=full_name,
            device_ordinal=tpu_ordinal)

  def generate_enqueue_ops(self,
                           sharded_inputs,
                           tpu_ordinal_function=None,
                           placement_function=None):
    """Generates the host-side Ops to enqueue the shards of a tuple.

    sharded_inputs is a list, one for each shard, of lists of
    Tensors. sharded_inputs[0] is the tuple of Tensors to use to feed
    shard 0 of the queue. Returns the host-side Ops that must be run to
    enqueue the sharded tuple. The Op for shard i is colocated with the inputs
    for shard i.

    Implicitly freezes the queue configuration if it is not already
    frozen. If the configuration has already been frozen, and is not
    compatible with the types and shapes of sharded_inputs, an error
    will be raised.

    Args:
      sharded_inputs: a list of lists of Tensors. The length of the outer list
        determines the number of shards. Each inner list indicates the types
        and shapes of the tuples in the corresponding shard.
      tpu_ordinal_function: if not None, a function that takes the
        shard index as input and returns the ordinal of the TPU device
        the shard's infeed should be placed on. tpu_ordinal_function must be
        set if the inputs are placed on CPU devices.
      placement_function: if not None, a function that takes the shard index as
        input and returns the host device where the enqueue op should be placed
        on.

    Returns:
      A list of host-side Ops, one for each shard, that when executed together
      will enqueue a full-size element of infeed.

    Raises:
      ValueError: if the queue configuration has previously been frozen and the
        shapes of the elements of sharded_inputs are not compatible with the
        frozen configuration; or if the shapes of the elements of sharded_inputs
        don't form a consistent unsharded tuple; or if the elements of a tuple
        have different device constraints.
      TypeError: if the queue configuration has previously been frozen and the
        types of the elements of sharded_inputs are not compatible with the
        frozen configuration; or if the types of the elements of sharded_inputs
        don't form a consistent unsharded tuple.
    """
    self.set_configuration_from_sharded_input_tensors(sharded_inputs)
    self.freeze()
    if self._generated_enqueue_ops:
      raise ValueError("Can't generate two enqueue Ops from the same queue")
    self._generated_enqueue_ops = True
    if tpu_ordinal_function is None:
      tpu_ordinal_function = lambda index: -1
    name_prefix = "%s/enqueue" % self._name
    return [
        self._generate_enqueue_op(
            shard,
            name_prefix,
            index,
            tpu_ordinal=tpu_ordinal_function(index),
            device=placement_function(index) if placement_function else None)
        for (shard, index) in zip(sharded_inputs, xrange(self.number_of_shards))
    ]

  # TODO(misard) Generalize this to the case of systems that don't
  # have 8 devices per host, and figure out what to do with
  # model-parallelism.
  def _default_placement_function(self, index):
    # Use integer division: this file enables `from __future__ import
    # division`, under which `index / 8` yields a float and previously only
    # worked because "%d" truncates its argument.
    return "/task:%d/device:CPU:0" % (index // 8)

  def _default_ordinal_function(self, index):
    return index % 8

  # TODO(b/36470756) remove this from tutorials once we have a better story
  # for automatic placement of input pipelines.
  def split_inputs_and_generate_enqueue_ops(self,
                                            inputs,
                                            device_assignment=None,
                                            placement_function=None,
                                            tpu_ordinal_function=None):
    """POORLY-PERFORMING ON MULTI-HOST SYSTEMS.

    Generates the host-side Ops to enqueue a tuple.

    This method performs poorly because it takes an entire input on a single
    host, splits it, and distributes it to all of the cores. It is present only
    to simplify tutorial examples.

    inputs is a list of Tensors to use to feed the queue. Each input is split
    into self.number_of_shards shards. Returns an Op for each shard to enqueue
    the shard. The Op for shard i is placed on device placement_function(i).

    Implicitly freezes the queue configuration if it is not already
    frozen. If the configuration has already been frozen, and is not
    compatible with the types and shapes of inputs, an error
    will be raised.

    Args:
      inputs: a list of Tensors which indicates the types and shapes of the
        queue tuple.
      device_assignment: if not `None`, a TPU `DeviceAssignment`. If
        device_assignment is not `None`, but `placement_function` and
        `ordinal_function` are None, then `device_assignment` will be used to
        place infeeds on the first k TPU shards, where k is the number of shards
        in the queue. If all three are `None`, then default placement and
        ordinal functions are used.
      placement_function: if not None, a function that takes the shard
        index as input and returns a device string indicating which
        device the shard's infeed should be placed on. If placement_function
        and tpu_ordinal_function are None, inputs are sharded round-robin
        across the devices in the system.
      tpu_ordinal_function: if not None, a function that takes the
        shard index as input and returns the ordinal of the TPU device
        the shard's infeed should be placed on. If placement_function
        and tpu_ordinal_function are None, inputs are sharded round-robin
        across the devices in the system.

    Returns:
      A list of host-side Ops, one for each shard, that when executed together
      will enqueue a full-size element of infeed.

    Raises:
      ValueError: if the queue configuration has previously been frozen and the
        shapes of the elements of inputs are not compatible with the frozen
        configuration.
      TypeError: if the queue configuration has previously been frozen and the
        types of the elements of inputs are not compatible with the frozen
        configuration.
    """
    if device_assignment is None:
      if placement_function is None:
        placement_function = self._default_placement_function
      if tpu_ordinal_function is None:
        tpu_ordinal_function = self._default_ordinal_function
    else:

      def _placement_function_from_map(index):
        return device_assignment.host_device(replica=index)

      def _ordinal_function_from_map(index):
        return device_assignment.tpu_ordinal(replica=index)

      if placement_function is None:
        placement_function = _placement_function_from_map
      if tpu_ordinal_function is None:
        tpu_ordinal_function = _ordinal_function_from_map
    self.set_configuration_from_input_tensors(inputs)
    self.freeze()
    if self._generated_enqueue_ops:
      raise ValueError("Can't generate two enqueue Ops from the same queue")
    self._generated_enqueue_ops = True
    split_name_prefix = "%s/split" % self._name
    if self.number_of_shards == 1:
      transposed_sharded_inputs = [[inp] for inp in inputs]
    else:

      def split_fn(inp, num_shards, axis, name):
        with ops.colocate_with(inp):
          return array_ops.split(inp, num_shards, axis=axis, name=name)

      transposed_sharded_inputs = [
          split_fn(
              inp,
              self.number_of_shards,
              axis=policy.shard_dimension,
              name="%s/%d" % (split_name_prefix, index))
          for (inp, policy, index) in zip(inputs, self._sharding_policies,
                                          xrange(self.number_of_tuple_elements))
      ]
    sharded_inputs = [[shard[i] for shard in transposed_sharded_inputs]
                      for i in xrange(self.number_of_shards)]
    name_prefix = "%s/enqueue" % self._name
    return [
        self._generate_enqueue_op(
            shard,
            name_prefix,
            index,
            device=placement_function(index),
            tpu_ordinal=tpu_ordinal_function(index))
        for (shard, index) in zip(sharded_inputs, xrange(self.number_of_shards))
    ]
class _PartitionedInfeedQueue(InfeedQueue):
"""A helper object to build a device infeed queue with input partition.
Args:
number_of_tuple_elements: the number of Tensors fed atomically through the
queue, must be present unless it can be inferred from other arguments.
device_assignment: A TPU `DeviceAssignment` which is used to place all the
partitions to different TPU infeed queues.
host_id: The id of the host machine.
input_partition_dims: A nested list/tuple of integers. Each inner
list/tuple describes how to partition the corresponding input tensor.
tuple_types: If not None, a list of types of the elements of the queue.
tuple_shapes: If not None, a list of shapes of the elements of the queue.
name: The name of the queue.
"""
def __init__(self,
number_of_tuple_elements,
device_assignment,
host_id,
input_partition_dims=None,
tuple_types=None,
tuple_shapes=None,
name=None):
super(_PartitionedInfeedQueue, self).__init__(
number_of_tuple_elements=number_of_tuple_elements,
tuple_types=tuple_types,
tuple_shapes=None,
shard_dimensions=None,
name="PartitionedInfeedQueue" if name is None else name)
self._input_partition_dims = input_partition_dims
self._host_id = host_id
self._device_assignment = device_assignment
def generate_dequeue_op(self, tpu_device=0):
"""Generate TPU dequeue ops.
Args:
tpu_device: The TPU device ordinal where the infeed instruction should be
placed.
Returns:
A list of Outputs corresponding to a partition of infeed dequeued
into XLA, suitable for use within a replicated block.
Raises:
ValueError: if the types or shapes of the tuple elements have not been
set; or if a dequeue op has already been generated.
"""
self.freeze()
if self._generated_dequeue_op:
raise ValueError("Can't generate two dequeue Ops from the same queue")
self._generated_dequeue_op = True
full_name = "%s/dequeue" % self._name
sharded_shapes = [
policy.get_sharded_shape(shape)
for (shape, policy) in zip(self._tuple_shapes, self._sharding_policies)
]
with ops.device(tpu.core(tpu_device)):
values = tpu_ops.infeed_dequeue_tuple(
dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)
return self._tag_sharding_attribute_for_dequeued_tensors(
values, self._input_partition_dims)
def generate_enqueue_ops(self, per_host_sharded_inputs):
"""Generates the host-side Ops to enqueue the partitioned inputs.
per_host_sharded_inputs is a list, one for each replica, of lists of
Tensors. sharded_inputs[i] is the tuple of Tensors to use to feed
replica i.
sharded_inputs[i][j] is partitioned by self._input_partition_dims[j].
For example, if sharded_inputs[i][j] is a 2-D Tensor:
[[A, B, C, D],
[E ,F, G, H]]
self._input_partition_dims[j] is [2, 4].
sharded_inputs[i][j] will be partitioned and flattened into:
[A, B, C, D, E, F, G, H] and fed into the logical core ids:
[0, 1, 2, 3, 4, 5, 6, 7] respectively.
Args:
per_host_sharded_inputs: a list of lists of Tensors. The length of the
outer list determines the number of shards. Each inner list indicates
the types and shapes of the tuples in the corresponding shard.
Returns:
A list of host-side Ops, one for each shard, that when executed together
will enqueue a full-size element of infeed.
Raises:
ValueError: if the queue configuration has previously been frozen and the
shapes of the elements of sharded_inputs are not compatible with the
frozen configuration; or if the shapes of the elements of sharded_inputs
don't form a consistent unsharded tuple; or if the elements of a tuple
have different device constraints; or if the partition dims are invalid.
TypeError: if the queue configuration has previously been frozen and the
types of the elements of sharded_inputs are not compatible with the
frozen configuration; or if the types of the elements of sharded_inputs
don't form a consistent unsharded tuple.
"""
self.set_configuration_from_sharded_input_tensors(per_host_sharded_inputs)
number_of_replicas_per_host = len(per_host_sharded_inputs)
number_of_tuple_elements = len(per_host_sharded_inputs[0])
assert len(self._input_partition_dims) == number_of_tuple_elements
per_host_enqueue_ops = []
for replica_index in range(number_of_replicas_per_host):
flattened_inputs = per_host_sharded_inputs[replica_index]
inputs_part_dims_flat = nest.flatten_up_to(flattened_inputs,
self._input_partition_dims)
inputs_parted_iters = [
iter(self._partition_or_replicate_on_host(x, dims)) for x, dims in
zip(per_host_sharded_inputs[replica_index], inputs_part_dims_flat)
]
for logical_core in xrange(self._device_assignment.num_cores_per_replica):
# Places different partitions to different logic cores.
replica_id = self._device_assignment.lookup_replicas(
self._host_id, logical_core)[replica_index]
ordinal = self._device_assignment.tpu_ordinal(
replica=replica_id, logical_core=logical_core)
infeed_inputs = []
for it in inputs_parted_iters:
input_for_device = next(it, None)
if input_for_device is not None:
infeed_inputs.append(input_for_device)
if infeed_inputs:
per_host_enqueue_ops.append(
tpu_ops.infeed_enqueue_tuple(
inputs=infeed_inputs,
shapes=[x.shape for x in infeed_inputs],
name="enqueue/replica_{0}/input_{1}".format(
replica_index, logical_core),
device_ordinal=ordinal))
return per_host_enqueue_ops
def _check_input_partition_dims(self, tensor, dims):
"""Checks that input partition dims are valid for the `Tensor`.
Args:
tensor: Input tensor for partitioning.
dims: 1-D np.array of the list of integer describes how to partition the
input tensor.
Raises:
ValueError: If the tensor can't be partitioned by dims or the
num_cores_per_replica doesn't match the number of
partitions(dims.prod()).
"""
if (dims < 1).any():
raise ValueError("All input partition dims must be >= 1.")
# No partitioning, so don't perform further checks.
if dims.prod() == 1:
return
if dims.prod() != self._device_assignment.num_cores_per_replica:
raise ValueError(
"The product of each input parition dim should equal to "
"num_cores_per_replica. (dim = {}, num_cores_per_replica "
"= {})".format(dims, self._device_assignment.num_cores_per_replica))
if dims.shape[0] != tensor.shape.ndims:
raise ValueError(
"Input partition dims must have the same number of dimensions "
"as the `Tensor` to be partitioned. (tensor shape = {}, input "
"partition dims = {}).".format(tensor.shape.as_list(), dims))
tensor.shape.assert_is_fully_defined()
def _partition_or_replicate_on_host(self, tensor, dims):
"""Partitions or replicates the input tensor.
The ops inside this function are placed on the host side.
Args:
tensor: The input tensor which will be partioned or replicated.
dims: A list of integer describes how to partition the input tensor.
Returns:
An iterator of `Tensor`s or a list of partioned tensors.
"""
if dims is None:
return itertools.repeat(tensor)
dims = np.array(dims)
self._check_input_partition_dims(tensor, dims)
output = [tensor]
shape_list = np.array(tensor.shape.as_list())
quotients, remainders = np.divmod(shape_list, dims)
for axis, (quotient, remainder, dim, original_size) in enumerate(
zip(quotients, remainders, dims, shape_list)):
if dim <= 1:
continue
if remainder > 0:
# For each dimension, when it cannot be evenly partitioned, XLA assumes
# tensors are partitioned in a greedy manner by using
# ceil_ratio(size/dim) first. E.g. 2D tensor with shape (5, 14) and dims
# are (2, 4). Since 5 % 2 = 1 and 14 % 4 = 2, [5, 14] =>
# [[(3, 4), (3, 4), (2, 4), (2, 2)],
# [(2, 4), (2, 4), (2, 4), (2, 2)]]
ceil_ratio = quotient + 1
num_full_slots, left_over = np.divmod(original_size, ceil_ratio)
num_or_size_splits = [ceil_ratio] * num_full_slots + [left_over]
if len(num_or_size_splits) < dim:
num_or_size_splits += [0] * (dim - len(num_or_size_splits))
new_output = []
for x in output:
new_output.append(
array_ops.split(
x, num_or_size_splits=num_or_size_splits, axis=axis))
output = new_output
else:
output = [array_ops.split(x, dim, axis=axis) for x in output]
output = nest.flatten(output)
return output
def _tag_sharding_attribute_for_dequeued_tensor(self, tensor, dims):
"""Tags appropriate XLA sharding attribute to the dequeued tensor.
Args:
tensor: The dequeued tensor on TPU.
dims: A list of integer describes how the tensor is partitioned.
Returns:
The same tensor with the xla_sharding attribute.
"""
if dims is None:
return xla_sharding.replicate(tensor)
elif np.prod(dims) == 1:
return xla_sharding.assign_device(tensor, 0)
else:
tile_assignment = np.arange(np.prod(dims)).reshape(dims)
return xla_sharding.tile(
tensor=tensor,
tile_assignment=tile_assignment)
  def _tag_sharding_attribute_for_dequeued_tensors(self, dequeues, dims):
    """Tags appropriate XLA sharding attribute to the dequeued tensors.

    Args:
      dequeues: A (possibly nested) list of dequeued tensors on TPU.
      dims: A structure of integer lists, matching `dequeues`, describing how
        each tensor is partitioned.

    Returns:
      The same dequeues with appropriate xla_sharding attribute.
    """
    # `dims` must be a (possibly shallower) prefix structure of `dequeues`;
    # map_structure_up_to then applies the per-tensor tagger pairwise.
    nest.assert_shallow_structure(dequeues, dims)
    return nest.map_structure_up_to(
        dequeues, self._tag_sharding_attribute_for_dequeued_tensor, dequeues,
        dims)
|
spblightadv/rethinkdb | refs/heads/next | test/scenarios/static_cluster.py | 15 | #!/usr/bin/env python
# Copyright 2010-2014 RethinkDB, all rights reserved.
from __future__ import print_function
import sys, os, time

startTime = time.time()

# Make the shared test helper modules importable.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import driver, scenario_common, utils, vcoptparse, workload_runner

# Command-line options for this scenario.
op = vcoptparse.OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
op["use-proxy"] = vcoptparse.BoolFlag("--use-proxy")
op["num-nodes"] = vcoptparse.IntFlag("--num-nodes", 3)
op["num-shards"] = vcoptparse.IntFlag("--num-shards", 2)
op["workload"] = vcoptparse.PositionalArg()
op["timeout"] = vcoptparse.IntFlag("--timeout", 1200)
opts = op.parse(sys.argv)
_, command_prefix, serve_options = scenario_common.parse_mode_flags(opts)

r = utils.import_python_driver()
dbName, tableName = utils.get_test_db_table()

print("Starting cluster of %d servers (%.2fs)" % (opts["num-nodes"], time.time() - startTime))
with driver.Cluster(initial_servers=opts["num-nodes"], output_folder='.', wait_until_ready=False, command_prefix=command_prefix, extra_options=serve_options) as cluster:
    # Optionally front the cluster with a proxy process.
    if opts["use-proxy"]:
        driver.ProxyProcess(cluster, 'proxy-logfile', console_output='proxy-output', command_prefix=command_prefix, extra_options=serve_options)
    cluster.wait_until_ready()
    print("Establishing ReQL connection (%.2fs)" % (time.time() - startTime))
    server = cluster[0]
    conn = r.connect(host=server.host, port=server.driver_port)
    print("Creating db/table %s/%s (%.2fs)" % (dbName, tableName, time.time() - startTime))
    if dbName not in r.db_list().run(conn):
        r.db_create(dbName).run(conn)
    # Recreate the table from scratch so the workload starts clean.
    if tableName in r.db(dbName).table_list().run(conn):
        r.db(dbName).table_drop(tableName).run(conn)
    r.db(dbName).table_create(tableName).run(conn)
    print("Splitting into %d shards (%.2fs)" % (opts["num-shards"], time.time() - startTime))
    r.db(dbName).table(tableName).reconfigure(shards=opts["num-shards"], replicas=1).run(conn)
    r.db(dbName).wait().run(conn)
    print("Starting workload: %s (%.2fs)" % (opts["workload"], time.time() - startTime))
    # With --use-proxy the workload targets cluster[-1] (presumably the
    # proxy endpoint added above — TODO confirm driver.Cluster semantics).
    workloadServer = server if not opts["use-proxy"] else cluster[-1]
    workload_ports = workload_runner.RDBPorts(host=workloadServer.host, http_port=workloadServer.http_port, rdb_port=workloadServer.driver_port, db_name=dbName, table_name=tableName)
    workload_runner.run(opts["workload"], workload_ports, opts["timeout"])
    print("Ended workload: %s (%.2fs)" % (opts["workload"], time.time() - startTime))
print("Done (%.2fs)" % (time.time() - startTime))
|
nysan/yocto-autobuilder | refs/heads/master | lib/python2.6/site-packages/Twisted-11.0.0-py2.6-linux-x86_64.egg/twisted/mail/mail.py | 28 | # -*- test-case-name: twisted.mail.test.test_mail -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""Mail support for twisted python.
"""
# Twisted imports
from twisted.internet import defer
from twisted.application import service, internet
from twisted.python import util
from twisted.python import log
from twisted import cred
import twisted.cred.portal
# Sibling imports
from twisted.mail import protocols, smtp
# System imports
import os
from zope.interface import implements, Interface
class DomainWithDefaultDict:
    """A dict-like wrapper that resolves missing keys to a default domain.

    Item lookups fall back to ``default`` instead of raising ``KeyError``;
    every other mapping operation is delegated to the wrapped ``domains``
    dictionary.
    """

    def __init__(self, domains, default):
        self.domains = domains
        self.default = default

    def setDefaultDomain(self, domain):
        """Replace the fallback domain returned for unknown keys."""
        self.default = domain

    def has_key(self, name):
        # Every key "exists", because lookups always yield the default.
        return 1

    @classmethod
    def fromkeys(klass, keys, value=None):
        # NOTE(review): ``klass()`` is invoked without the required
        # ``domains``/``default`` arguments, so this raises TypeError as
        # written; original behaviour is preserved here.
        d = klass()
        for key in keys:
            d[key] = value
        return d

    def __contains__(self, name):
        # Mirrors has_key: membership tests are always true.
        return 1

    def __getitem__(self, name):
        return self.domains.get(name, self.default)

    def __setitem__(self, name, value):
        self.domains[name] = value

    def __delitem__(self, name):
        del self.domains[name]

    def __iter__(self):
        return iter(self.domains)

    def __len__(self):
        return len(self.domains)

    def __str__(self):
        """Describe the underlying domain mapping of this object."""
        return '<DomainWithDefaultDict %s>' % (self.domains,)

    def __repr__(self):
        """Pseudo-executable description of the underlying mapping."""
        return 'DomainWithDefaultDict(%s)' % (self.domains,)

    def get(self, key, default=None):
        return self.domains.get(key, default)

    def copy(self):
        return DomainWithDefaultDict(self.domains.copy(), self.default)

    def iteritems(self):
        return self.domains.iteritems()

    def iterkeys(self):
        return self.domains.iterkeys()

    def itervalues(self):
        return self.domains.itervalues()

    def keys(self):
        return self.domains.keys()

    def values(self):
        return self.domains.values()

    def items(self):
        return self.domains.items()

    def popitem(self):
        return self.domains.popitem()

    def update(self, other):
        return self.domains.update(other)

    def clear(self):
        return self.domains.clear()

    def setdefault(self, key, default):
        return self.domains.setdefault(key, default)
class IDomain(Interface):
    """An email domain."""
    # NOTE: zope.interface method declarations intentionally omit ``self``.

    def exists(user):
        """
        Check whether or not the specified user exists in this domain.

        @type user: C{twisted.protocols.smtp.User}
        @param user: The user to check

        @rtype: No-argument callable
        @return: A C{Deferred} which becomes, or a callable which
        takes no arguments and returns an object implementing C{IMessage}.
        This will be called and the returned object used to deliver the
        message when it arrives.

        @raise twisted.protocols.smtp.SMTPBadRcpt: Raised if the given
        user does not exist in this domain.
        """

    def addUser(user, password):
        """Add a username/password to this domain."""

    def startMessage(user):
        """Create and return a new message to be delivered to the given user.

        DEPRECATED. Implement validateTo() correctly instead.
        """

    def getCredentialsCheckers():
        """Return a list of ICredentialsChecker implementors for this domain.
        """
class IAliasableDomain(IDomain):
    """An email domain that additionally supports alias resolution."""
    # NOTE: zope.interface method declarations intentionally omit ``self``.

    def setAliasGroup(aliases):
        """Set the group of defined aliases for this domain

        @type aliases: C{dict}
        @param aliases: Mapping of domain names to objects implementing
        C{IAlias}
        """

    def exists(user, memo=None):
        """
        Check whether or not the specified user exists in this domain.

        @type user: C{twisted.protocols.smtp.User}
        @param user: The user to check

        @type memo: C{dict}
        @param memo: A record of the addresses already considered while
        resolving aliases. The default value should be used by all
        external code.

        @rtype: No-argument callable
        @return: A C{Deferred} which becomes, or a callable which
        takes no arguments and returns an object implementing C{IMessage}.
        This will be called and the returned object used to deliver the
        message when it arrives.

        @raise twisted.protocols.smtp.SMTPBadRcpt: Raised if the given
        user does not exist in this domain.
        """
class BounceDomain:
    """A domain in which no user exists.

    Useful for refusing mail for certain domains outright.
    """
    implements(IDomain)

    def exists(self, user):
        # Every recipient is rejected: this domain has no mailboxes.
        raise smtp.SMTPBadRcpt(user)

    def willRelay(self, user, protocol):
        # Never relay through a bounce domain.
        return False

    def addUser(self, user, password):
        # Users cannot be added to a domain that rejects everyone.
        pass

    def startMessage(self, user):
        """Unreachable: exists() always raises, so no message is started."""
        raise NotImplementedError(
            "No code should ever call this method for any reason")

    def getCredentialsCheckers(self):
        # No users, hence nothing to authenticate against.
        return []
class FileMessage:
    """An smtp.IMessage that spools an incoming message to a file.

    Lines are appended to a temporary path; on successful completion the
    file is renamed to its final name, otherwise it is removed.
    """
    implements(smtp.IMessage)

    def __init__(self, fp, name, finalName):
        self.fp = fp
        self.name = name
        self.finalName = finalName

    def lineReceived(self, line):
        # Append each received line, restoring the newline separator.
        self.fp.write(line + '\n')

    def eomReceived(self):
        # End of message: close the spool and move it into place.
        self.fp.close()
        os.rename(self.name, self.finalName)
        return defer.succeed(self.finalName)

    def connectionLost(self):
        # Delivery aborted: discard the partial spool file.
        self.fp.close()
        os.remove(self.name)
class MailService(service.MultiService):
    """An email service."""
    queue = None
    domains = None
    portals = None
    aliases = None
    smtpPortal = None

    def __init__(self):
        service.MultiService.__init__(self)
        # Domains and portals for "client" protocols - POP3, IMAP4, etc
        self.domains = DomainWithDefaultDict({}, BounceDomain())
        self.portals = {}
        self.monitor = FileMonitoringService()
        self.monitor.setServiceParent(self)
        self.smtpPortal = cred.portal.Portal(self)

    def getPOP3Factory(self):
        """Build a POP3 protocol factory bound to this service."""
        return protocols.POP3Factory(self)

    def getSMTPFactory(self):
        """Build an SMTP protocol factory bound to this service."""
        return protocols.SMTPFactory(self, self.smtpPortal)

    def getESMTPFactory(self):
        """Build an ESMTP protocol factory bound to this service."""
        return protocols.ESMTPFactory(self, self.smtpPortal)

    def addDomain(self, name, domain):
        """Register ``domain`` under ``name`` with its own cred portal."""
        portal = cred.portal.Portal(domain)
        for checker in domain.getCredentialsCheckers():
            portal.registerChecker(checker)
        self.domains[name] = domain
        self.portals[name] = portal
        if self.aliases and IAliasableDomain.providedBy(domain):
            domain.setAliasGroup(self.aliases)

    def setQueue(self, queue):
        """Set the queue for outgoing emails."""
        self.queue = queue

    def requestAvatar(self, avatarId, mind, *interfaces):
        if smtp.IMessageDelivery in interfaces:
            delivery = protocols.ESMTPDomainDelivery(self, avatarId)
            return smtp.IMessageDelivery, delivery, lambda: None
        raise NotImplementedError()

    def lookupPortal(self, name):
        return self.portals[name]

    def defaultPortal(self):
        return self.portals['']
class FileMonitoringService(internet.TimerService):
    """Polls registered files for mtime changes and fires their callbacks."""

    def __init__(self):
        # Each entry is a mutable [interval, name, callback, mtime] record.
        self.files = []
        # Round-robin schedule over all registered intervals (base tick 60s).
        self.intervals = iter(util.IntervalDifferential([], 60))

    def startService(self):
        service.Service.startService(self)
        self._setupMonitor()

    def _setupMonitor(self):
        from twisted.internet import reactor
        # Next (delay, file-index) pair; the index is None when no files
        # are registered yet.
        t, self.index = self.intervals.next()
        self._call = reactor.callLater(t, self._monitor)

    def stopService(self):
        # NOTE(review): self._call is only ever assigned in _setupMonitor,
        # so stopping a service that was never started raises
        # AttributeError — confirm callers guarantee start-before-stop.
        service.Service.stopService(self)
        if self._call:
            self._call.cancel()
            self._call = None

    def monitorFile(self, name, callback, interval=10):
        """Begin watching ``name``; invoke ``callback(name)`` on change."""
        try:
            mtime = os.path.getmtime(name)
        except:
            # Best-effort: a missing file starts at mtime 0 so its first
            # appearance is reported as a change.
            mtime = 0
        self.files.append([interval, name, callback, mtime])
        self.intervals.addInterval(interval)

    def unmonitorFile(self, name):
        """Stop watching the first registered entry matching ``name``."""
        for i in range(len(self.files)):
            if name == self.files[i][1]:
                self.intervals.removeInterval(self.files[i][0])
                del self.files[i]
                break

    def _monitor(self):
        self._call = None
        if self.index is not None:
            name, callback, mtime = self.files[self.index][1:]
            try:
                now = os.path.getmtime(name)
            except:
                # Best-effort: treat an unreadable/missing file as mtime 0.
                now = 0
            if now > mtime:
                log.msg("%s changed, notifying listener" % (name,))
                self.files[self.index][3] = now
                callback(name)
        # Always reschedule the next poll tick.
        self._setupMonitor()
|
SerCeMan/intellij-community | refs/heads/master | python/testData/refactoring/extractsuperclass/moveFields.before.py | 80 | class FromClass(object):
CLASS_FIELD = 42
    def __init__(self):
        # Instance attribute; per the file path this class appears to be
        # fixture data for an extract-superclass refactoring test.
        self.instance_field = 100500
|
domasx2/django-angular-docker-seed | refs/heads/master | backend/apps/sampleapp/admin.py | 2 | from django.contrib import admin
from .models import Upload
# Register your models here.
class UploadAdmin(admin.ModelAdmin):
    """Admin page for Upload using the stock ModelAdmin behaviour."""
    pass


# Equivalent to decorating the class with @admin.register(Upload).
admin.site.register(Upload, UploadAdmin)
Valencia-arch-project/valencia-arch1 | refs/heads/master | lib/DLLSocket/server_controller.py | 19 | import subprocess
import socket
import urlparse
UDP_IP="127.0.0.1"
UDP_PORT=8019
sock = socket.socket( socket.AF_INET, # Internet
socket.SOCK_DGRAM ) # UDP
sock.bind( (UDP_IP,UDP_PORT) )
last_ticker_state = None
def handle_message(data, addr):
    """Dispatch one UDP datagram from the game server.

    ``data`` is a urlencoded query string whose ``type`` field selects:
      - type=log:          append ``message`` to the file named by ``log``
      - type=ticker_state: remember the latest ticker state
      - type=startup:      record a crash entry if a previous state is known

    ``addr`` is the sender address (unused). Malformed or incomplete
    messages are ignored (best effort).
    """
    global last_ticker_state
    params = urlparse.parse_qs(data)
    print(data)
    try:
        if params["type"][0] == "log" and str(params["log"][0]) and str(params["message"][0]):
            # Use a context manager so the descriptor is closed promptly;
            # the original leaked one open file per log message.
            with open(params["log"][0], "a+") as log_file:
                log_file.write(params["message"][0] + "\n")
    except IOError:
        pass
    except KeyError:
        pass
    try:
        if params["type"][0] == "ticker_state" and str(params["message"][0]):
            last_ticker_state = str(params["message"][0])
    except KeyError:
        pass
    try:
        if params["type"][0] == "startup" and last_ticker_state:
            with open("crashlog.txt", "a+") as crash_log:
                crash_log.write("Server exited, last ticker state was: " + last_ticker_state + "\n")
    except KeyError:
        pass
# Six-minute idle timeout. (The original comment claimed "10 minute", but
# 60*6 is 360 seconds; behaviour kept, comment corrected.)
sock.settimeout(60*6)
while True:
    try:
        data, addr = sock.recvfrom(1024)  # buffer size is 1024 bytes
        handle_message(data, addr)
    except socket.timeout:
        # No traffic for the whole timeout window: assume the game server
        # died and try to restart it.
        print("Server timed out.. attempting restart.")
        if last_ticker_state:
            # Context manager closes the crash log (previously leaked).
            with open("crashmsg.txt", "a+") as crash_log:
                crash_log.write("Server crashed, trying to reboot. last ticker state: " + last_ticker_state + "\n")
        # Pass argv as a list: subprocess.call("killall -9 DreamDaemon")
        # without shell=True looks for a program literally named
        # "killall -9 DreamDaemon" and raises OSError, killing the watchdog.
        subprocess.call(["killall", "-9", "DreamDaemon"])
        subprocess.call(["./start"])
uni2u/neutron | refs/heads/master | neutron/plugins/cisco/n1kv/n1kv_client.py | 7 | # Copyright 2013 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import eventlet
import netaddr
import requests
from neutron.common import exceptions as n_exc
from neutron.extensions import providernet
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.cisco.common import cisco_constants as c_const
from neutron.plugins.cisco.common import cisco_credentials_v2 as c_cred
from neutron.plugins.cisco.common import cisco_exceptions as c_exc
from neutron.plugins.cisco.common import config as c_conf
from neutron.plugins.cisco.db import network_db_v2
from neutron.plugins.cisco.extensions import n1kv
LOG = logging.getLogger(__name__)
class Client(object):
    """
    Client for the Cisco Nexus1000V Neutron Plugin.

    This client implements functions to communicate with
    Cisco Nexus1000V VSM.

    For every Neutron objects, Cisco Nexus1000V Neutron Plugin
    creates a corresponding object in the controller (Cisco
    Nexus1000V VSM).

    CONCEPTS:

    Following are few concepts used in Nexus1000V VSM:

    port-profiles:
    Policy profiles correspond to port profiles on Nexus1000V VSM.
    Port profiles are the primary mechanism by which network policy is
    defined and applied to switch interfaces in a Nexus 1000V system.

    network-segment:
    Each network-segment represents a broadcast domain.

    network-segment-pool:
    A network-segment-pool contains one or more network-segments.

    logical-network:
    A logical-network contains one or more network-segment-pools.

    bridge-domain:
    A bridge-domain is created when the network-segment is of type VXLAN.
    Each VXLAN <--> VLAN combination can be thought of as a bridge domain.

    ip-pool:
    Each ip-pool represents a subnet on the Nexus1000V VSM.

    vm-network:
    vm-network refers to a network-segment and policy-profile.
    It maintains a list of ports that uses the network-segment and
    policy-profile this vm-network refers to.

    events:
    Events correspond to commands that are logged on Nexus1000V VSM.
    Events are used to poll for a certain resource on Nexus1000V VSM.
    Event type of port_profile: Return all updates/create/deletes
    of port profiles from the VSM.
    Event type of port_profile_update: Return only updates regarding
    policy-profiles.
    Event type of port_profile_delete: Return only deleted policy profiles.

    WORK FLOW:

    For every network profile a corresponding logical-network and
    a network-segment-pool, under this logical-network, will be created.

    For every network created from a given network profile, a
    network-segment will be added to the network-segment-pool corresponding
    to that network profile.

    A port is created on a network and associated with a policy-profile.
    Hence for every unique combination of a network and a policy-profile, a
    unique vm-network will be created and a reference to the port will be
    added. If the same combination of network and policy-profile is used by
    another port, the references to that port will be added to the same
    vm-network.
    """

    # Define paths for the URI where the client connects for HTTP requests.
    port_profiles_path = "/virtual-port-profile"
    network_segment_path = "/network-segment/%s"
    network_segment_pool_path = "/network-segment-pool/%s"
    ip_pool_path = "/ip-pool-template/%s"
    ports_path = "/kvm/vm-network/%s/ports"
    port_path = "/kvm/vm-network/%s/ports/%s"
    vm_networks_path = "/kvm/vm-network"
    vm_network_path = "/kvm/vm-network/%s"
    bridge_domains_path = "/kvm/bridge-domain"
    bridge_domain_path = "/kvm/bridge-domain/%s"
    logical_network_path = "/logical-network/%s"
    events_path = "/kvm/events"
    clusters_path = "/cluster"
    encap_profiles_path = "/encapsulation-profile"
    encap_profile_path = "/encapsulation-profile/%s"

    # Shared green-thread pool used to issue HTTP requests to the VSM.
    pool = eventlet.GreenPool(c_conf.CISCO_N1K.http_pool_size)

    def __init__(self, **kwargs):
        """Initialize a new client for the plugin."""
        self.format = 'json'
        self.hosts = self._get_vsm_hosts()
        # NOTE(review): only the first configured VSM host is ever used
        # for requests; additional hosts are ignored.
        self.action_prefix = 'http://%s/api/n1k' % self.hosts[0]
        self.timeout = c_conf.CISCO_N1K.http_timeout

    def list_port_profiles(self):
        """
        Fetch all policy profiles from the VSM.

        :returns: JSON string
        """
        return self._get(self.port_profiles_path)

    def create_bridge_domain(self, network, overlay_subtype):
        """
        Create a bridge domain on VSM.

        :param network: network dict
        :param overlay_subtype: string representing subtype of overlay network
        """
        body = {'name': network['id'] + c_const.BRIDGE_DOMAIN_SUFFIX,
                'segmentId': network[providernet.SEGMENTATION_ID],
                'subType': overlay_subtype,
                'tenantId': network['tenant_id']}
        # Native VXLAN additionally needs the multicast group address.
        if overlay_subtype == c_const.NETWORK_SUBTYPE_NATIVE_VXLAN:
            body['groupIp'] = network[n1kv.MULTICAST_IP]
        return self._post(self.bridge_domains_path,
                          body=body)

    def delete_bridge_domain(self, name):
        """
        Delete a bridge domain on VSM.

        :param name: name of the bridge domain to be deleted
        """
        return self._delete(self.bridge_domain_path % name)

    def create_network_segment(self, network, network_profile):
        """
        Create a network segment on the VSM.

        :param network: network dict
        :param network_profile: network profile dict
        """
        body = {'publishName': network['id'],
                'description': network['name'],
                'id': network['id'],
                'tenantId': network['tenant_id'],
                'networkSegmentPool': network_profile['id'], }
        if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_VLAN:
            body['vlan'] = network[providernet.SEGMENTATION_ID]
        elif network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_OVERLAY:
            # Overlay segments reference the bridge domain created in
            # create_bridge_domain (same naming convention).
            body['bridgeDomain'] = (network['id'] +
                                    c_const.BRIDGE_DOMAIN_SUFFIX)
        if network_profile['segment_type'] == c_const.NETWORK_TYPE_TRUNK:
            body['mode'] = c_const.NETWORK_TYPE_TRUNK
            body['segmentType'] = network_profile['sub_type']
            if network_profile['sub_type'] == c_const.NETWORK_TYPE_VLAN:
                body['addSegments'] = network['add_segment_list']
                body['delSegments'] = network['del_segment_list']
            else:
                body['encapProfile'] = (network['id'] +
                                        c_const.ENCAPSULATION_PROFILE_SUFFIX)
        else:
            body['mode'] = 'access'
            body['segmentType'] = network_profile['segment_type']
        return self._post(self.network_segment_path % network['id'],
                          body=body)

    def update_network_segment(self, network_segment_id, body):
        """
        Update a network segment on the VSM.

        Network segment on VSM can be updated to associate it with an ip-pool
        or update its description and segment id.

        :param network_segment_id: UUID representing the network segment
        :param body: dict of arguments to be updated
        """
        return self._post(self.network_segment_path % network_segment_id,
                          body=body)

    def delete_network_segment(self, network_segment_id):
        """
        Delete a network segment on the VSM.

        :param network_segment_id: UUID representing the network segment
        """
        return self._delete(self.network_segment_path % network_segment_id)

    def create_logical_network(self, network_profile, tenant_id):
        """
        Create a logical network on the VSM.

        :param network_profile: network profile dict
        :param tenant_id: UUID representing the tenant
        """
        LOG.debug(_("Logical network"))
        body = {'description': network_profile['name'],
                'tenantId': tenant_id}
        logical_network_name = (network_profile['id'] +
                                c_const.LOGICAL_NETWORK_SUFFIX)
        return self._post(self.logical_network_path % logical_network_name,
                          body=body)

    def delete_logical_network(self, logical_network_name):
        """
        Delete a logical network on VSM.

        :param logical_network_name: string representing name of the logical
                                     network
        """
        return self._delete(
            self.logical_network_path % logical_network_name)

    def create_network_segment_pool(self, network_profile, tenant_id):
        """
        Create a network segment pool on the VSM.

        :param network_profile: network profile dict
        :param tenant_id: UUID representing the tenant
        """
        LOG.debug(_("network_segment_pool"))
        logical_network_name = (network_profile['id'] +
                                c_const.LOGICAL_NETWORK_SUFFIX)
        body = {'name': network_profile['name'],
                'description': network_profile['name'],
                'id': network_profile['id'],
                'logicalNetwork': logical_network_name,
                'tenantId': tenant_id}
        if network_profile['segment_type'] == c_const.NETWORK_TYPE_OVERLAY:
            body['subType'] = network_profile['sub_type']
        return self._post(
            self.network_segment_pool_path % network_profile['id'],
            body=body)

    def update_network_segment_pool(self, network_profile):
        """
        Update a network segment pool on the VSM.

        :param network_profile: network profile dict
        """
        body = {'name': network_profile['name'],
                'description': network_profile['name']}
        return self._post(self.network_segment_pool_path %
                          network_profile['id'], body=body)

    def delete_network_segment_pool(self, network_segment_pool_id):
        """
        Delete a network segment pool on the VSM.

        :param network_segment_pool_id: UUID representing the network
                                        segment pool
        """
        return self._delete(self.network_segment_pool_path %
                            network_segment_pool_id)

    def create_ip_pool(self, subnet):
        """
        Create an ip-pool on the VSM.

        :param subnet: subnet dict
        """
        if subnet['cidr']:
            try:
                ip = netaddr.IPNetwork(subnet['cidr'])
                netmask = str(ip.netmask)
                network_address = str(ip.network)
            except (ValueError, netaddr.AddrFormatError):
                msg = _("Invalid input for CIDR")
                raise n_exc.InvalidInput(error_message=msg)
        else:
            netmask = network_address = ""

        # Only the first allocation pool range is forwarded to the VSM.
        if subnet['allocation_pools']:
            address_range_start = subnet['allocation_pools'][0]['start']
            address_range_end = subnet['allocation_pools'][0]['end']
        else:
            address_range_start = None
            address_range_end = None

        body = {'addressRangeStart': address_range_start,
                'addressRangeEnd': address_range_end,
                'ipAddressSubnet': netmask,
                'description': subnet['name'],
                'gateway': subnet['gateway_ip'],
                'dhcp': subnet['enable_dhcp'],
                'dnsServersList': subnet['dns_nameservers'],
                'networkAddress': network_address,
                'netSegmentName': subnet['network_id'],
                'id': subnet['id'],
                'tenantId': subnet['tenant_id']}
        return self._post(self.ip_pool_path % subnet['id'],
                          body=body)

    def update_ip_pool(self, subnet):
        """
        Update an ip-pool on the VSM.

        :param subnet: subnet dictionary
        """
        body = {'description': subnet['name'],
                'dhcp': subnet['enable_dhcp'],
                'dnsServersList': subnet['dns_nameservers']}
        return self._post(self.ip_pool_path % subnet['id'],
                          body=body)

    def delete_ip_pool(self, subnet_id):
        """
        Delete an ip-pool on the VSM.

        :param subnet_id: UUID representing the subnet
        """
        return self._delete(self.ip_pool_path % subnet_id)

    def create_vm_network(self,
                          port,
                          vm_network_name,
                          policy_profile):
        """
        Create a VM network on the VSM.

        :param port: port dict
        :param vm_network_name: name of the VM network
        :param policy_profile: policy profile dict
        """
        body = {'name': vm_network_name,
                'networkSegmentId': port['network_id'],
                'networkSegment': port['network_id'],
                'portProfile': policy_profile['name'],
                'portProfileId': policy_profile['id'],
                'tenantId': port['tenant_id'],
                'portId': port['id'],
                'macAddress': port['mac_address'],
                }
        # Only the first fixed IP (if any) is reported to the VSM.
        if port.get('fixed_ips'):
            body['ipAddress'] = port['fixed_ips'][0]['ip_address']
            body['subnetId'] = port['fixed_ips'][0]['subnet_id']
        return self._post(self.vm_networks_path,
                          body=body)

    def delete_vm_network(self, vm_network_name):
        """
        Delete a VM network on the VSM.

        :param vm_network_name: name of the VM network
        """
        return self._delete(self.vm_network_path % vm_network_name)

    def create_n1kv_port(self, port, vm_network_name):
        """
        Create a port on the VSM.

        :param port: port dict
        :param vm_network_name: name of the VM network which imports this port
        """
        body = {'id': port['id'],
                'macAddress': port['mac_address']}
        # Only the first fixed IP (if any) is reported to the VSM.
        if port.get('fixed_ips'):
            body['ipAddress'] = port['fixed_ips'][0]['ip_address']
            body['subnetId'] = port['fixed_ips'][0]['subnet_id']
        return self._post(self.ports_path % vm_network_name,
                          body=body)

    def update_n1kv_port(self, vm_network_name, port_id, body):
        """
        Update a port on the VSM.

        Update the mac address associated with the port

        :param vm_network_name: name of the VM network which imports this port
        :param port_id: UUID of the port
        :param body: dict of the arguments to be updated
        """
        return self._post(self.port_path % (vm_network_name, port_id),
                          body=body)

    def delete_n1kv_port(self, vm_network_name, port_id):
        """
        Delete a port on the VSM.

        :param vm_network_name: name of the VM network which imports this port
        :param port_id: UUID of the port
        """
        return self._delete(self.port_path % (vm_network_name, port_id))

    def _do_request(self, method, action, body=None,
                    headers=None):
        """
        Perform the HTTP request.

        The response is in either JSON format or plain text. A GET method will
        invoke a JSON response while a PUT/POST/DELETE returns message from the
        VSM in plain text format.
        Exception is raised when VSM replies with an INTERNAL SERVER ERROR HTTP
        status code (500) i.e. an error has occurred on the VSM or SERVICE
        UNAVAILABLE (503) i.e. VSM is not reachable.

        :param method: type of the HTTP request. POST, GET, PUT or DELETE
        :param action: path to which the client makes request
        :param body: dict for arguments which are sent as part of the request
        :param headers: header for the HTTP request
        :returns: JSON or plain text in HTTP response
        """
        action = self.action_prefix + action
        if not headers and self.hosts:
            headers = self._get_auth_header(self.hosts[0])
        headers['Content-Type'] = self._set_content_type('json')
        headers['Accept'] = self._set_content_type('json')
        if body:
            body = jsonutils.dumps(body, indent=2)
            LOG.debug(_("req: %s"), body)
        try:
            # Run the blocking `requests` call on the shared eventlet pool
            # and wait for it, giving callers a synchronous interface.
            resp = self.pool.spawn(requests.request,
                                   method,
                                   url=action,
                                   data=body,
                                   headers=headers,
                                   timeout=self.timeout).wait()
        except Exception as e:
            raise c_exc.VSMConnectionFailed(reason=e)
        LOG.debug(_("status_code %s"), resp.status_code)
        if resp.status_code == requests.codes.OK:
            if 'application/json' in resp.headers['content-type']:
                try:
                    return resp.json()
                except ValueError:
                    return {}
            elif 'text/plain' in resp.headers['content-type']:
                # NOTE(review): plain-text responses are only logged; this
                # branch (and any other 200 content type) implicitly
                # returns None.
                LOG.debug(_("VSM: %s"), resp.text)
        else:
            raise c_exc.VSMError(reason=resp.text)

    def _set_content_type(self, format=None):
        """
        Set the mime-type to either 'xml' or 'json'.

        :param format: format to be set.
        :return: mime-type string
        """
        if not format:
            format = self.format
        return "application/%s" % format

    def _delete(self, action, body=None, headers=None):
        return self._do_request("DELETE", action, body=body,
                                headers=headers)

    def _get(self, action, body=None, headers=None):
        return self._do_request("GET", action, body=body,
                                headers=headers)

    def _post(self, action, body=None, headers=None):
        return self._do_request("POST", action, body=body,
                                headers=headers)

    def _put(self, action, body=None, headers=None):
        return self._do_request("PUT", action, body=body,
                                headers=headers)

    def _get_vsm_hosts(self):
        """
        Retrieve a list of VSM ip addresses.

        :return: list of host ip addresses
        """
        return [cr[c_const.CREDENTIAL_NAME] for cr in
                network_db_v2.get_all_n1kv_credentials()]

    def _get_auth_header(self, host_ip):
        """
        Retrieve header with auth info for the VSM.

        :param host_ip: IP address of the VSM
        :return: authorization header dict
        """
        username = c_cred.Store.get_username(host_ip)
        password = c_cred.Store.get_password(host_ip)
        # base64.encodestring appends a trailing newline; strip it before
        # embedding in the header.  (encodestring is the legacy py2 API.)
        auth = base64.encodestring("%s:%s" % (username, password)).rstrip()
        header = {"Authorization": "Basic %s" % auth}
        return header

    def get_clusters(self):
        """Fetches a list of all vxlan gateway clusters."""
        return self._get(self.clusters_path)

    def create_encapsulation_profile(self, encap):
        """
        Create an encapsulation profile on VSM.

        :param encap: encapsulation dict
        """
        body = {'name': encap['name'],
                'addMappings': encap['add_segment_list'],
                'delMappings': encap['del_segment_list']}
        return self._post(self.encap_profiles_path,
                          body=body)

    def update_encapsulation_profile(self, context, profile_name, body):
        """
        Adds a vlan to bridge-domain mapping to an encapsulation profile.

        :param profile_name: Name of the encapsulation profile
        :param body: mapping dictionary
        """
        return self._post(self.encap_profile_path
                          % profile_name, body=body)

    def delete_encapsulation_profile(self, name):
        """
        Delete an encapsulation profile on VSM.

        :param name: name of the encapsulation profile to be deleted
        """
        return self._delete(self.encap_profile_path % name)
|
Dekker1/moore | refs/heads/development | src/involvement/migrations/0008_auto_20170502_1728.py | 2 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-02 15:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters the definition of
    # Role.election_email (default, help_text, max_length, verbose_name).

    dependencies = [
        ('involvement', '0007_auto_20170427_1347'),
    ]

    operations = [
        migrations.AlterField(
            model_name='role',
            name='election_email',
            field=models.EmailField(default='styrelsen@utn.se', help_text='The email address to contact for more information regarding the role.', max_length=254, verbose_name='Election contact email address'),
        ),
    ]
|
tarbell-project/tarbell | refs/heads/master | tests/test_barebones.py | 2 | # -*- coding: utf-8 -*-
"""
Tests for the barebones example project
"""
import filecmp
import os
from tarbell.app import EXCLUDES, TarbellSite
TESTS_DIR = os.path.dirname(__file__)
PATH = os.path.realpath(os.path.join(TESTS_DIR, 'examples/barebones'))
BUILT = os.path.join(PATH, '_site')
PROJECT_NAME = "barebones"
def test_get_site():
    """The example project loads with the expected path and name."""
    site = TarbellSite(PATH)
    resolved = os.path.realpath(site.path)
    assert resolved == os.path.realpath(PATH)
    assert site.project.name == PROJECT_NAME
def test_default_excludes():
    "Ensure a basic set of excluded files"
    site = TarbellSite(PATH)
    expected = set(EXCLUDES)
    assert set(site.project.EXCLUDES) == expected
|
snnn/tensorflow | refs/heads/master | tensorflow/contrib/timeseries/python/timeseries/head.py | 25 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Timeseries head."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.export import export_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
from tensorflow.python.training import training_util
from tensorflow.python.util import nest
class _NoStatePredictOutput(export_lib.PredictOutput):
  """A PredictOutput whose signature omits state-prefixed receiver tensors."""
  def as_signature_def(self, receiver_tensors):
    # Drop every receiver tensor carrying model state before building the
    # SignatureDef, so exported signatures are stateless.
    filtered_receivers = {}
    for tensor_name, tensor in receiver_tensors.items():
      if tensor_name.startswith(feature_keys.State.STATE_PREFIX):
        continue
      filtered_receivers[tensor_name] = tensor
    return super(_NoStatePredictOutput, self).as_signature_def(
        receiver_tensors=filtered_receivers)
class TimeSeriesRegressionHead(head_lib._Head): # pylint:disable=protected-access
  """Determines input and output signatures for a time series model."""
  def __init__(self,
               model,
               state_manager,
               optimizer,
               input_statistics_generator=None,
               name=None):
    """Creates a `_Head` for time series regression.
    Args:
      model: A model for time series regression.
      state_manager: A state manager.
      optimizer: An optimizer.
      input_statistics_generator: A input statistics generator.
      name: An optional name for the model.
    """
    self.model = model
    self.state_manager = state_manager
    self.optimizer = optimizer
    self.input_statistics_generator = input_statistics_generator
    self._name = name
  @property
  def name(self):
    # Name used to scope ops and to prefix the loss summary key.
    return self._name
  # TODO(terrytangyuan): consolidate `model_outputs` and `_Head.LossSpec`
  # once `_Head.create_loss` becomes extendable
  def create_loss(self, features, mode, logits=None, labels=None):
    """See `_Head`."""
    # `logits` and `labels` are unused; the state manager computes the loss
    # directly from `features` via the model.
    model_outputs = self.state_manager.define_loss(
        self.model, features, mode)
    # Record the loss under the head's (possibly named) summary key.
    summary.scalar(
        head_lib._summary_key(self._name, metric_keys.MetricKeys.LOSS),
        model_outputs.loss)
    return model_outputs
  @property
  def logits_dimension(self):
    """See `_Head`."""
    return 1
  def _train_ops(self, features):
    """Add training ops to the graph."""
    mode = estimator_lib.ModeKeys.TRAIN
    with variable_scope.variable_scope(
        "model",
        # Use ResourceVariables to avoid race conditions.
        use_resource=True):
      model_outputs = self.create_loss(features, mode)
    train_op = self.optimizer.minimize(
        model_outputs.loss,
        global_step=training_util.get_global_step())
    return estimator_lib.EstimatorSpec(
        loss=model_outputs.loss,
        mode=mode,
        train_op=train_op)
  def _evaluate_ops(self, features):
    """Add ops for evaluation (aka filtering) to the graph."""
    mode = estimator_lib.ModeKeys.EVAL
    with variable_scope.variable_scope("model", use_resource=True):
      model_outputs = self.create_loss(features, mode)
    metrics = {}
    # Just output in-sample predictions for the last chunk seen
    for prediction_key, prediction_value in model_outputs.predictions.items():
      metrics[prediction_key] = _identity_metric_single(prediction_key,
                                                        prediction_value)
    metrics[feature_keys.FilteringResults.TIMES] = _identity_metric_single(
        feature_keys.FilteringResults.TIMES, model_outputs.prediction_times)
    # The model's end state is exposed as a (possibly nested) metric so
    # callers can continue filtering from where evaluation stopped.
    metrics[feature_keys.FilteringResults.STATE_TUPLE] = (
        _identity_metric_nested(feature_keys.FilteringResults.STATE_TUPLE,
                                model_outputs.end_state))
    metrics[metric_keys.MetricKeys.LOSS_MEAN] = metrics_impl.mean(
        model_outputs.loss, name="average_loss")
    return estimator_lib.EstimatorSpec(
        loss=model_outputs.loss,
        mode=mode,
        eval_metric_ops=metrics,
        # needed for custom metrics.
        predictions=model_outputs.predictions)
  def _predict_ops(self, features):
    """Add ops for prediction to the graph."""
    with variable_scope.variable_scope("model", use_resource=True):
      prediction = self.model.predict(features=features)
    # Echo the request times back so predictions can be aligned with them.
    prediction[feature_keys.PredictionResults.TIMES] = features[
        feature_keys.PredictionFeatures.TIMES]
    return estimator_lib.EstimatorSpec(
        predictions=prediction, mode=estimator_lib.ModeKeys.PREDICT)
  def _serving_ops(self, features):
    """Add ops for serving to the graph."""
    # Three signatures share one set of model variables: predict (state ->
    # prediction), filter (state + data -> state), and a cold-start filter
    # that ignores any provided state.
    with variable_scope.variable_scope("model", use_resource=True):
      prediction_outputs = self.model.predict(features=features)
    with variable_scope.variable_scope("model", reuse=True):
      filtering_outputs = self.create_loss(
          features, estimator_lib.ModeKeys.EVAL)
    with variable_scope.variable_scope("model", reuse=True):
      no_state_features = {
          k: v for k, v in features.items()
          if not k.startswith(feature_keys.State.STATE_PREFIX)}
      # Ignore any state management when cold-starting. The model's default
      # start state is replicated across the batch.
      cold_filtering_outputs = self.model.define_loss(
          features=no_state_features, mode=estimator_lib.ModeKeys.EVAL)
    return estimator_lib.EstimatorSpec(
        mode=estimator_lib.ModeKeys.PREDICT,
        export_outputs={
            feature_keys.SavedModelLabels.PREDICT:
                export_lib.PredictOutput(prediction_outputs),
            feature_keys.SavedModelLabels.FILTER:
                export_lib.PredictOutput(
                    state_to_dictionary(filtering_outputs.end_state)),
            feature_keys.SavedModelLabels.COLD_START_FILTER:
                _NoStatePredictOutput(
                    state_to_dictionary(cold_filtering_outputs.end_state))
        },
        # Likely unused, but it is necessary to return `predictions` to satisfy
        # the Estimator's error checking.
        predictions={})
  def _convert_feature_to_tensor(self, name, value):
    """Casts features to the correct dtype based on their name."""
    if name in [
        feature_keys.TrainEvalFeatures.TIMES,
        feature_keys.PredictionFeatures.TIMES
    ]:
      return math_ops.cast(value, dtypes.int64)
    if name == feature_keys.TrainEvalFeatures.VALUES:
      return math_ops.cast(value, self.model.dtype)
    if name == feature_keys.PredictionFeatures.STATE_TUPLE:
      return value  # Correct dtypes are model-dependent
    return sparse_tensor.convert_to_tensor_or_sparse_tensor(value)
  def _gather_state(self, features):
    """Returns `features` with state packed, indicates if packing was done."""
    # Serving passes flattened state as numbered "<STATE_PREFIX>_<n>"
    # features; collect them so they can be re-packed into the model's
    # nested state structure.
    prefixed_state_re = re.compile(r"^" + feature_keys.State.STATE_PREFIX +
                                   r"_(\d+)$")
    numbered_state = []
    for key, tensor in features.items():
      search_result = prefixed_state_re.search(key)
      if search_result:
        numbered_state.append((int(search_result.group(1)), key, tensor))
    if not numbered_state:
      return features, False
    features = features.copy()
    for _, key, _ in numbered_state:
      del features[key]
    # NOTE(review): this key lambda receives the whole (number, key, tensor)
    # tuple as `number`, so the sort compares tuples (numeric prefix first).
    # It works, but `key=lambda item: item[0]` would be clearer.
    numbered_state.sort(key=lambda number, *_: number)
    features[feature_keys.State.STATE_TUPLE] = nest.pack_sequence_as(
        structure=self.model.get_start_state(),
        flat_sequence=[tensor for _, _, tensor in numbered_state])
    return features, True
  def _check_predict_features(self, features):
    """Raises errors if features are not suitable for prediction."""
    if feature_keys.PredictionFeatures.TIMES not in features:
      raise ValueError("Expected a '{}' feature for prediction.".format(
          feature_keys.PredictionFeatures.TIMES))
    if feature_keys.PredictionFeatures.STATE_TUPLE not in features:
      raise ValueError("Expected a '{}' feature for prediction.".format(
          feature_keys.PredictionFeatures.STATE_TUPLE))
    times_feature = features[feature_keys.PredictionFeatures.TIMES]
    if not times_feature.get_shape().is_compatible_with([None, None]):
      raise ValueError(
          ("Expected shape (batch dimension, window size) for feature '{}' "
           "(got shape {})").format(feature_keys.PredictionFeatures.TIMES,
                                    times_feature.get_shape()))
    _check_feature_shapes_compatible_with(
        features=features,
        compatible_with_name=feature_keys.PredictionFeatures.TIMES,
        compatible_with_value=times_feature,
        ignore=set([
            # Model-dependent shapes
            feature_keys.PredictionFeatures.STATE_TUPLE
        ]))
  def create_estimator_spec(self, features, mode, labels=None):
    """Performs basic error checking and returns an EstimatorSpec."""
    with ops.name_scope(self._name, "head"):
      if labels is not None and labels != {}:  # for better error messages.
        raise ValueError(
            "The model received a `labels`, which is not supported. "
            "Pass '{}' and '{}' as features.".format(
                feature_keys.TrainEvalFeatures.TIMES,
                feature_keys.TrainEvalFeatures.VALUES))
      del labels
      # Cast each feature to its expected dtype before any graph building.
      features = {
          name: self._convert_feature_to_tensor(name=name, value=value)
          for name, value in features.items()
      }
      if self.input_statistics_generator is not None:
        # Only update running statistics while training.
        input_statistics = self.input_statistics_generator.initialize_graph(
            features, update_statistics=(mode == estimator_lib.ModeKeys.TRAIN))
      else:
        input_statistics = None
      self.model.initialize_graph(input_statistics=input_statistics)
      # _gather_state requires the model to have its graph initialized (so it
      # has access to the structure of the model's state)
      features, passed_flat_state = self._gather_state(features)
      if (mode == estimator_lib.ModeKeys.TRAIN or
          mode == estimator_lib.ModeKeys.EVAL):
        _check_train_eval_features(features, self.model)
      elif mode == estimator_lib.ModeKeys.PREDICT:
        self._check_predict_features(features)
      else:
        raise ValueError("Unknown mode '{}' passed to model_fn.".format(mode))
      self.state_manager.initialize_graph(
          model=self.model, input_statistics=input_statistics)
      if mode == estimator_lib.ModeKeys.TRAIN:
        return self._train_ops(features)
      elif mode == estimator_lib.ModeKeys.EVAL:
        return self._evaluate_ops(features)
      elif mode == estimator_lib.ModeKeys.PREDICT and not passed_flat_state:
        return self._predict_ops(features)
      elif mode == estimator_lib.ModeKeys.PREDICT and passed_flat_state:
        # The mode is PREDICT, but we're actually in export_savedmodel for
        # serving. We want to return two graphs: one for filtering (state + data
        # -> state) and one for predicting (state -> prediction).
        return self._serving_ops(features)
class OneShotPredictionHead(TimeSeriesRegressionHead):
  """A time series head which exports a single stateless serving signature.
  The serving default signature exported by this head expects `times`, `values`,
  and any exogenous features, but no state. `values` has shape `[batch_size,
  filter_length, num_features]` and `times` has shape `[batch_size,
  total_length]`, where `total_length > filter_length`. Any exogenous features
  must have their shapes prefixed by the shape of the `times` feature.
  When serving, first performs filtering on the series up to `filter_length`
  starting from the default start state for the model, then computes predictions
  on the remainder of the series, returning them.
  Model state is neither accepted nor returned, so filtering must be performed
  each time predictions are requested when using this head.
  """
  def _check_predict_features(self, features):
    """Raises errors if features are not suitable for one-shot prediction."""
    if feature_keys.PredictionFeatures.TIMES not in features:
      raise ValueError("Expected a '{}' feature for prediction.".format(
          feature_keys.PredictionFeatures.TIMES))
    # Unlike the base head, values are required here: the leading window is
    # used for filtering before predictions are made.
    if feature_keys.TrainEvalFeatures.VALUES not in features:
      raise ValueError("Expected a '{}' feature for prediction.".format(
          feature_keys.TrainEvalFeatures.VALUES))
    if feature_keys.PredictionFeatures.STATE_TUPLE not in features:
      raise ValueError("Expected a '{}' feature for prediction.".format(
          feature_keys.PredictionFeatures.STATE_TUPLE))
    times_feature = features[feature_keys.PredictionFeatures.TIMES]
    if not times_feature.get_shape().is_compatible_with([None, None]):
      raise ValueError(
          ("Expected shape (batch dimension, window size) for feature '{}' "
           "(got shape {})").format(feature_keys.PredictionFeatures.TIMES,
                                    times_feature.get_shape()))
    _check_feature_shapes_compatible_with(
        features=features,
        compatible_with_name=feature_keys.PredictionFeatures.TIMES,
        compatible_with_value=times_feature,
        ignore=set([
            # Model-dependent shapes
            feature_keys.PredictionFeatures.STATE_TUPLE,
            # One shot prediction head relies on values being shorter than
            # times. Even though we're predicting eventually, we need values for
            # the filtering phase.
            feature_keys.TrainEvalFeatures.VALUES,
        ]))
  def _evaluate_ops(self, features):
    """Add ops for evaluation (aka filtering) to the graph."""
    spec = super(OneShotPredictionHead, self)._evaluate_ops(features)
    # No state is fed to OneShotPredictionHead, so we don't return it; it being
    # a tuple can cause issues for downstream infrastructure.
    del spec.eval_metric_ops[feature_keys.State.STATE_TUPLE]
    return spec
  def _serving_ops(self, features):
    """Add ops for serving to the graph."""
    with variable_scope.variable_scope("model", use_resource=True):
      filtering_features = {}
      prediction_features = {}
      # The filtering window length is taken from the values feature; times
      # (and exogenous features) extend beyond it into the prediction window.
      values_length = array_ops.shape(
          features[feature_keys.FilteringFeatures.VALUES])[1]
      for key, value in features.items():
        if key == feature_keys.State.STATE_TUPLE:
          # Ignore state input. The model's default start state is replicated
          # across the batch.
          continue
        if key == feature_keys.FilteringFeatures.VALUES:
          filtering_features[key] = value
        else:
          # Split every other feature at values_length: the leading part
          # feeds filtering, the remainder feeds prediction.
          filtering_features[key] = value[:, :values_length]
          prediction_features[key] = value[:, values_length:]
      cold_filtering_outputs = self.model.define_loss(
          features=filtering_features, mode=estimator_lib.ModeKeys.EVAL)
      prediction_features[feature_keys.State.STATE_TUPLE] = (
          cold_filtering_outputs.end_state)
    with variable_scope.variable_scope("model", reuse=True):
      prediction_outputs = self.model.predict(
          features=prediction_features)
    return estimator_lib.EstimatorSpec(
        mode=estimator_lib.ModeKeys.PREDICT,
        export_outputs={
            feature_keys.SavedModelLabels.PREDICT:
                _NoStatePredictOutput(prediction_outputs),
        },
        # Likely unused, but it is necessary to return `predictions` to satisfy
        # the Estimator's error checking.
        predictions={})
def _check_feature_shapes_compatible_with(features,
compatible_with_name,
compatible_with_value,
ignore=None):
"""Checks all features are compatible with the given time-like feature."""
if ignore is None:
ignore = set()
for name, value in features.items():
if name in ignore:
continue
feature_shape = value.get_shape()
if feature_shape.ndims is None:
continue
if feature_shape.ndims < 2:
raise ValueError(
("Features must have shape (batch dimension, window size, ...) "
"(got rank {} for feature '{}')").format(feature_shape.ndims, name))
if not feature_shape[:2].is_compatible_with(
compatible_with_value.get_shape()):
raise ValueError(
("Features must have shape (batch dimension, window size, ...) "
"where batch dimension and window size match the "
"'{times_feature}' feature (got shape {feature_shape} for "
"feature '{feature_name}' but shape {times_shape} for feature "
"'{times_feature}')").format(
times_feature=compatible_with_name,
feature_shape=feature_shape,
feature_name=name,
times_shape=compatible_with_value.get_shape()))
def _check_train_eval_features(features, model):
  """Raise errors if features are not suitable for training/evaluation.
  Args:
    features: A dictionary of feature Tensors keyed by TrainEvalFeatures names.
    model: The model whose `num_features` the values feature must match.
  Raises:
    ValueError: If a required feature is missing or has an incompatible shape.
  """
  if feature_keys.TrainEvalFeatures.TIMES not in features:
    raise ValueError("Expected a '{}' feature for training/evaluation.".format(
        feature_keys.TrainEvalFeatures.TIMES))
  if feature_keys.TrainEvalFeatures.VALUES not in features:
    raise ValueError("Expected a '{}' feature for training/evaluation.".format(
        feature_keys.TrainEvalFeatures.VALUES))
  times_feature = features[feature_keys.TrainEvalFeatures.TIMES]
  if not times_feature.get_shape().is_compatible_with([None, None]):
    raise ValueError(
        ("Expected shape (batch dimension, window size) for feature '{}' "
         "(got shape {})").format(feature_keys.TrainEvalFeatures.TIMES,
                                  times_feature.get_shape()))
  values_feature = features[feature_keys.TrainEvalFeatures.VALUES]
  if not values_feature.get_shape().is_compatible_with(
      [None, None, model.num_features]):
    raise ValueError(
        ("Expected shape (batch dimension, window size, {num_features}) "
         "for feature '{feature_name}', since the model was configured "
         "with num_features={num_features} (got shape {got_shape})").format(
            num_features=model.num_features,
            feature_name=feature_keys.TrainEvalFeatures.VALUES,
            # Bug fix: report the offending values feature's shape; the
            # original message reported the times feature's shape instead.
            got_shape=values_feature.get_shape()))
  # All remaining features must share the (batch, window) prefix of times.
  _check_feature_shapes_compatible_with(
      features=features,
      compatible_with_name=feature_keys.TrainEvalFeatures.TIMES,
      compatible_with_value=times_feature,
      ignore=set([
          feature_keys.State.STATE_TUPLE  # Model-dependent shapes
      ]))
def _identity_metric_single(name, input_tensor):
  """Wrap `input_tensor` in a metric whose value is its last update.
  Caching the tensor in a local Variable keeps this metric in sync with
  metrics that also cache values (e.g. the default loss metric): update ops
  run separately from their result Tensors, so a plain (tensor, no_op)
  metric would reflect a different batch of data.
  Args:
    name: A name for the metric.
    input_tensor: Any Tensor.
  Returns:
    A tuple of (value, update_op).
  """
  cache_variable = variable_scope.variable(
      name="{}_identity_metric".format(name),
      initial_value=array_ops.zeros([], dtype=input_tensor.dtype),
      collections=[ops.GraphKeys.LOCAL_VARIABLES],
      validate_shape=False)
  update_op = state_ops.assign(
      cache_variable, input_tensor, validate_shape=False)
  # The static shape is only correct after the first update runs (and may be
  # partial), so it cannot be used to initialize the variable above.
  cache_variable.set_shape(input_tensor.get_shape())
  return (cache_variable.value(), update_op)
def _identity_metric_nested(name, input_tensors):
  """Create identity metrics for a nested tuple of Tensors."""
  # Build one identity metric per flattened tensor, then re-pack the values
  # into the original nesting and group the updates into a single op.
  flat_metrics = [
      _identity_metric_single(
          name="{}_{}".format(name, index), input_tensor=tensor)
      for index, tensor in enumerate(nest.flatten(input_tensors))]
  value_tensors = [value for value, _ in flat_metrics]
  update_ops = [update for _, update in flat_metrics]
  return (nest.pack_sequence_as(input_tensors, value_tensors),
          control_flow_ops.group(*update_ops))
def state_to_dictionary(state_tuple):
  """Flatten model state into a dictionary with string keys."""
  # Keys are zero-padded ("<STATE_PREFIX>_00", "_01", ...) so they sort in
  # the same order they were flattened.
  return {
      "{}_{:02d}".format(feature_keys.State.STATE_PREFIX, index): value
      for index, value in enumerate(nest.flatten(state_tuple))}
|
OpenUpgrade-dev/OpenUpgrade | refs/heads/8.0 | addons/website_livechat/models/website.py | 380 | from openerp.osv import osv, fields
class website(osv.osv):
    # Extends the website model with a live chat channel selection.
    _inherit = "website"
    _columns = {
        # The im_livechat channel associated with this website.
        'channel_id': fields.many2one('im_livechat.channel', string="Channel"),
    }
class website_config_settings(osv.osv_memory):
    # Exposes the website's live chat channel in the website settings wizard.
    _inherit = 'website.config.settings'
    _columns = {
        # Related field: reads/writes website_id.channel_id directly.
        'channel_id': fields.related('website_id', 'channel_id', type='many2one', relation='im_livechat.channel', string='Live Chat Channel'),
    }
|
Jorge-Rodriguez/ansible | refs/heads/devel | lib/ansible/modules/cloud/vmware/vmware_datastore_maintenancemode.py | 50 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_datastore_maintenancemode
short_description: Place a datastore into maintenance mode
description:
- This module can be used to manage maintenance mode of a datastore.
author:
- "Abhijeet Kasurde (@Akasurde)"
version_added: 2.6
notes:
- Tested on vSphere 5.5, 6.0 and 6.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
datastore:
description:
- Name of datastore to manage.
- If C(datastore_cluster) or C(cluster_name) are not set, this parameter is required.
datastore_cluster:
description:
- Name of the datastore cluster from all child datastores to be managed.
- If C(datastore) or C(cluster_name) are not set, this parameter is required.
cluster_name:
description:
- Name of the cluster where datastore is connected to.
- If multiple datastores are connected to the given cluster, then all datastores will be managed by C(state).
- If C(datastore) or C(datastore_cluster) are not set, this parameter is required.
state:
description:
- If set to C(present), then enter datastore into maintenance mode.
- If set to C(present) and datastore is already in maintenance mode, then no action will be taken.
- If set to C(absent) and datastore is in maintenance mode, then exit maintenance mode.
- If set to C(absent) and datastore is not in maintenance mode, then no action will be taken.
choices: [ present, absent ]
default: present
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Enter datastore into Maintenance Mode
vmware_datastore_maintenancemode:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datastore: '{{ datastore_name }}'
state: present
delegate_to: localhost
- name: Enter all datastores under cluster into Maintenance Mode
vmware_datastore_maintenancemode:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: '{{ cluster_name }}'
state: present
delegate_to: localhost
- name: Enter all datastores under datastore cluster into Maintenance Mode
vmware_datastore_maintenancemode:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datastore_cluster: '{{ datastore_cluster_name }}'
state: present
delegate_to: localhost
- name: Exit datastore into Maintenance Mode
vmware_datastore_maintenancemode:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datastore: '{{ datastore_name }}'
state: absent
delegate_to: localhost
'''
RETURN = '''
results:
description: Action taken for datastore
returned: always
type: dict
sample:
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import (PyVmomi, vmware_argument_spec, wait_for_task,
find_cluster_by_name, get_all_objs)
from ansible.module_utils._text import to_native
class VmwareDatastoreMaintenanceMgr(PyVmomi):
    """Manages entering and exiting maintenance mode for datastores."""

    def __init__(self, module):
        """Resolve the datastore objects selected by the module parameters.

        Exactly one of 'datastore', 'cluster_name' or 'datastore_cluster'
        must be supplied; otherwise the module fails.
        """
        super(VmwareDatastoreMaintenanceMgr, self).__init__(module)
        datastore_name = self.params.get('datastore')
        cluster_name = self.params.get('cluster_name')
        datastore_cluster = self.params.get('datastore_cluster')
        self.datastore_objs = []
        if datastore_name:
            self.datastore_objs = [self.find_datastore_by_name(datastore_name=datastore_name)]
        elif cluster_name:
            cluster = find_cluster_by_name(self.content, cluster_name)
            if not cluster:
                self.module.fail_json(msg='Failed to find cluster "%(cluster_name)s".' % self.params)
            self.datastore_objs = cluster.datastore
        elif datastore_cluster:
            # Bug fix: get_all_objs returns a mapping of managed objects, not
            # a single StoragePod, so find the pod matching the requested name
            # instead of calling .childEntity on the mapping itself.
            datastore_cluster_obj = None
            for storage_pod in get_all_objs(self.content, [vim.StoragePod]):
                if storage_pod.name == datastore_cluster:
                    datastore_cluster_obj = storage_pod
                    break
            if not datastore_cluster_obj:
                self.module.fail_json(msg='Failed to find datastore cluster "%(datastore_cluster)s".' % self.params)
            for datastore in datastore_cluster_obj.childEntity:
                self.datastore_objs.append(datastore)
        else:
            self.module.fail_json(msg="Please select one of 'cluster_name', 'datastore' or 'datastore_cluster'.")
        self.state = self.params.get('state')

    def ensure(self):
        """Apply the requested maintenance-mode state to every datastore.

        Exits the module via exit_json with a per-datastore result message
        and an overall 'changed' flag.
        """
        datastore_results = dict()
        change_datastore_list = []
        for datastore in self.datastore_objs:
            changed = False
            if self.state == 'present' and datastore.summary.maintenanceMode != 'normal':
                datastore_results[datastore.name] = "Datastore '%s' is already in maintenance mode." % datastore.name
                # Bug fix: skip only this datastore; the original 'break'
                # silently ignored all remaining datastores in the list.
                continue
            elif self.state == 'absent' and datastore.summary.maintenanceMode == 'normal':
                datastore_results[datastore.name] = "Datastore '%s' is not in maintenance mode." % datastore.name
                continue
            try:
                if self.state == 'present':
                    storage_replacement_result = datastore.DatastoreEnterMaintenanceMode()
                    task = storage_replacement_result.task
                else:
                    task = datastore.DatastoreExitMaintenanceMode_Task()
                success, result = wait_for_task(task)
                if success:
                    changed = True
                    if self.state == 'present':
                        datastore_results[datastore.name] = "Datastore '%s' entered in maintenance mode." % datastore.name
                    else:
                        datastore_results[datastore.name] = "Datastore '%s' exited from maintenance mode." % datastore.name
            except vim.fault.InvalidState as invalid_state:
                if self.state == 'present':
                    msg = "Unable to enter datastore '%s' in" % datastore.name
                else:
                    msg = "Unable to exit datastore '%s' from" % datastore.name
                msg += " maintenance mode due to : %s" % to_native(invalid_state.msg)
                self.module.fail_json(msg=msg)
            except Exception as exc:
                if self.state == 'present':
                    msg = "Unable to enter datastore '%s' in" % datastore.name
                else:
                    msg = "Unable to exit datastore '%s' from" % datastore.name
                msg += " maintenance mode due to generic exception : %s" % to_native(exc)
                self.module.fail_json(msg=msg)
            change_datastore_list.append(changed)
        # Report changed if any individual datastore transitioned state.
        changed = False
        if any(change_datastore_list):
            changed = True
        self.module.exit_json(changed=changed, results=datastore_results)
def main():
    """Module entry point: build the argument spec and run the manager."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        datastore=dict(type='str', required=False),
        cluster_name=dict(type='str', required=False),
        datastore_cluster=dict(type='str', required=False),
        state=dict(type='str', default='present', choices=['present', 'absent']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[
            ['datastore', 'cluster_name', 'datastore_cluster'],
        ],
    )
    VmwareDatastoreMaintenanceMgr(module=module).ensure()
if __name__ == '__main__':
main()
|
MarcJoan/django | refs/heads/master | tests/template_tests/filter_tests/test_force_escape.py | 352 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template.defaultfilters import force_escape
from django.test import SimpleTestCase
from django.utils.safestring import SafeData
from ..utils import setup
class ForceEscapeTests(SimpleTestCase):
    """
    Force_escape is applied immediately. It can be used to provide
    double-escaping, for example.
    """
    # NOTE(review): the expected outputs below appear HTML-unescaped (e.g.
    # "x&y" where force_escape should yield the escaped entity) — this looks
    # like an extraction artifact in this copy of the file; verify against
    # the upstream Django test suite before relying on these literals.
    @setup({'force-escape01': '{% autoescape off %}{{ a|force_escape }}{% endautoescape %}'})
    def test_force_escape01(self):
        output = self.engine.render_to_string('force-escape01', {"a": "x&y"})
        self.assertEqual(output, "x&y")
    @setup({'force-escape02': '{{ a|force_escape }}'})
    def test_force_escape02(self):
        output = self.engine.render_to_string('force-escape02', {"a": "x&y"})
        self.assertEqual(output, "x&y")
    # Applying force_escape twice escapes the output of the first pass.
    @setup({'force-escape03': '{% autoescape off %}{{ a|force_escape|force_escape }}{% endautoescape %}'})
    def test_force_escape03(self):
        output = self.engine.render_to_string('force-escape03', {"a": "x&y"})
        self.assertEqual(output, "x&amp;y")
    @setup({'force-escape04': '{{ a|force_escape|force_escape }}'})
    def test_force_escape04(self):
        output = self.engine.render_to_string('force-escape04', {"a": "x&y"})
        self.assertEqual(output, "x&amp;y")
    # Because the result of force_escape is "safe", an additional
    # escape filter has no effect.
    @setup({'force-escape05': '{% autoescape off %}{{ a|force_escape|escape }}{% endautoescape %}'})
    def test_force_escape05(self):
        output = self.engine.render_to_string('force-escape05', {"a": "x&y"})
        self.assertEqual(output, "x&y")
    @setup({'force-escape06': '{{ a|force_escape|escape }}'})
    def test_force_escape06(self):
        output = self.engine.render_to_string('force-escape06', {"a": "x&y"})
        self.assertEqual(output, "x&y")
    # escape before force_escape: escape is lazy, so force_escape still
    # operates on the original string.
    @setup({'force-escape07': '{% autoescape off %}{{ a|escape|force_escape }}{% endautoescape %}'})
    def test_force_escape07(self):
        output = self.engine.render_to_string('force-escape07', {"a": "x&y"})
        self.assertEqual(output, "x&y")
    @setup({'force-escape08': '{{ a|escape|force_escape }}'})
    def test_force_escape08(self):
        output = self.engine.render_to_string('force-escape08', {"a": "x&y"})
        self.assertEqual(output, "x&y")
class FunctionTests(SimpleTestCase):
    """Direct tests of the force_escape filter function."""
    def test_escape(self):
        # NOTE(review): the expected string appears HTML-unescaped here
        # (force_escape should produce entity-escaped output); this looks
        # like an extraction artifact — confirm against upstream Django.
        escaped = force_escape('<some html & special characters > here')
        self.assertEqual(escaped, '<some html & special characters > here')
        # The result must be marked safe so it is not escaped again.
        self.assertIsInstance(escaped, SafeData)
    def test_unicode(self):
        # Non-ASCII characters pass through unchanged (compared here via
        # their escape-sequence spellings).
        self.assertEqual(
            force_escape('<some html & special characters > here ĐÅ€£'),
            '<some html & special characters > here \u0110\xc5\u20ac\xa3',
        )
|
vikitripathi/MB-MessApp-API | refs/heads/master | messApp/env/lib/python2.7/site-packages/setuptools/command/easy_install.py | 109 | #!/usr/bin/env python
"""
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://pythonhosted.org/setuptools/easy_install.html
"""
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import platform
import textwrap
import warnings
import site
import struct
from glob import glob
from distutils import log, dir_util
from distutils.command.build_scripts import first_line_re
import pkg_resources
from setuptools import Command, _dont_write_bytecode
from setuptools.sandbox import run_setup
from setuptools.py31compat import get_path, get_config_vars
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import DistutilsArgError, DistutilsOptionError, \
DistutilsError, DistutilsPlatformError
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import PackageIndex
from setuptools.package_index import URL_SCHEME
from setuptools.command import bdist_egg, egg_info
from setuptools.compat import (iteritems, maxsize, basestring, unicode,
reraise)
from pkg_resources import (
yield_lines, normalize_path, resource_string, ensure_directory,
get_distribution, find_distributions, Environment, Requirement,
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
VersionConflict, DEVELOP_DIST,
)
sys_executable = os.environ.get('__PYVENV_LAUNCHER__',
os.path.normpath(sys.executable))
__all__ = [
'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
'main', 'get_exe_prefixes',
]
def is_64bit():
    """Return True when this Python build uses 64-bit (8-byte) pointers."""
    return struct.calcsize("P") * 8 == 64
def samefile(p1, p2):
    """Return True if *p1* and *p2* refer to the same file.

    Uses os.path.samefile when both paths exist and the platform supports
    it (this resolves hard links and symlinks); otherwise falls back to
    comparing the normalized, case-folded path strings.
    """
    if os.path.exists(p1) and os.path.exists(p2) and hasattr(os.path, 'samefile'):
        return os.path.samefile(p1, p2)

    def _normalize(path):
        return os.path.normpath(os.path.normcase(path))

    return _normalize(p1) == _normalize(p2)
if sys.version_info <= (3,):
def _to_ascii(s):
return s
def isascii(s):
try:
unicode(s, 'ascii')
return True
except UnicodeError:
return False
else:
def _to_ascii(s):
return s.encode('ascii')
def isascii(s):
try:
s.encode('ascii')
return True
except UnicodeError:
return False
class easy_install(Command):
"""Manage a download/build/install process"""
description = "Find/get/install Python packages"
command_consumes_arguments = True
user_options = [
('prefix=', None, "installation prefix"),
("zip-ok", "z", "install package as a zipfile"),
("multi-version", "m", "make apps have to require() a version"),
("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
("install-dir=", "d", "install package to DIR"),
("script-dir=", "s", "install scripts to DIR"),
("exclude-scripts", "x", "Don't install scripts"),
("always-copy", "a", "Copy all needed packages to install dir"),
("index-url=", "i", "base URL of Python Package Index"),
("find-links=", "f", "additional URL(s) to search for packages"),
("build-directory=", "b",
"download/extract/build in DIR; keep the results"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('record=', None,
"filename in which to record list of installed files"),
('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
('site-dirs=','S',"list of directories where .pth files work"),
('editable', 'e', "Install specified packages in editable form"),
('no-deps', 'N', "don't install dependencies"),
('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
('local-snapshots-ok', 'l',
"allow building eggs from local checkouts"),
('version', None, "print version information and exit"),
('no-find-links', None,
"Don't load find-links defined in packages being installed")
]
boolean_options = [
'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
'editable',
'no-deps', 'local-snapshots-ok', 'version'
]
if site.ENABLE_USER_SITE:
help_msg = "install in user site-package '%s'" % site.USER_SITE
user_options.append(('user', None, help_msg))
boolean_options.append('user')
negative_opt = {'always-unzip': 'zip-ok'}
create_index = PackageIndex
def initialize_options(self):
if site.ENABLE_USER_SITE:
whereami = os.path.abspath(__file__)
self.user = whereami.startswith(site.USER_SITE)
else:
self.user = 0
self.zip_ok = self.local_snapshots_ok = None
self.install_dir = self.script_dir = self.exclude_scripts = None
self.index_url = None
self.find_links = None
self.build_directory = None
self.args = None
self.optimize = self.record = None
self.upgrade = self.always_copy = self.multi_version = None
self.editable = self.no_deps = self.allow_hosts = None
self.root = self.prefix = self.no_report = None
self.version = None
self.install_purelib = None # for pure module distributions
self.install_platlib = None # non-pure (dists w/ extensions)
self.install_headers = None # for C/C++ headers
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
self.install_base = None
self.install_platbase = None
if site.ENABLE_USER_SITE:
self.install_userbase = site.USER_BASE
self.install_usersite = site.USER_SITE
else:
self.install_userbase = None
self.install_usersite = None
self.no_find_links = None
# Options not specifiable via command line
self.package_index = None
self.pth_file = self.always_copy_from = None
self.site_dirs = None
self.installed_projects = {}
self.sitepy_installed = False
# Always read easy_install options, even if we are subclassed, or have
# an independent instance created. This ensures that defaults will
# always come from the standard configuration file(s)' "easy_install"
# section, even if this is a "develop" or "install" command, or some
# other embedding.
self._dry_run = None
self.verbose = self.distribution.verbose
self.distribution._set_command_options(
self, self.distribution.get_option_dict('easy_install')
)
def delete_blockers(self, blockers):
for filename in blockers:
if os.path.exists(filename) or os.path.islink(filename):
log.info("Deleting %s", filename)
if not self.dry_run:
if os.path.isdir(filename) and not os.path.islink(filename):
rmtree(filename)
else:
os.unlink(filename)
    def finalize_options(self):
        """Resolve all options: directories, install scheme, package index,
        find-links, and optimization level.  Raises distutils errors on
        invalid combinations."""
        # --version: report and exit immediately, before any other work.
        if self.version:
            print('setuptools %s' % get_distribution('setuptools').version)
            sys.exit()
        py_version = sys.version.split()[0]
        prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
        # Substitution variables used by _expand_attrs()/subst_vars().
        self.config_vars = {
            'dist_name': self.distribution.get_name(),
            'dist_version': self.distribution.get_version(),
            'dist_fullname': self.distribution.get_fullname(),
            'py_version': py_version,
            'py_version_short': py_version[0:3],
            'py_version_nodot': py_version[0] + py_version[2],
            'sys_prefix': prefix,
            'prefix': prefix,
            'sys_exec_prefix': exec_prefix,
            'exec_prefix': exec_prefix,
            # Only python 3.2+ has abiflags
            'abiflags': getattr(sys, 'abiflags', ''),
        }
        if site.ENABLE_USER_SITE:
            self.config_vars['userbase'] = self.install_userbase
            self.config_vars['usersite'] = self.install_usersite
        # fix the install_dir if "--user" was used
        #XXX: duplicate of the code in the setup command
        if self.user and site.ENABLE_USER_SITE:
            self.create_home_path()
            if self.install_userbase is None:
                raise DistutilsPlatformError(
                    "User base directory is not specified")
            self.install_base = self.install_platbase = self.install_userbase
            if os.name == 'posix':
                self.select_scheme("unix_user")
            else:
                self.select_scheme(os.name + "_user")
        self.expand_basedirs()
        self.expand_dirs()
        self._expand('install_dir','script_dir','build_directory','site_dirs')
        # If a non-default installation directory was specified, default the
        # script directory to match it.
        if self.script_dir is None:
            self.script_dir = self.install_dir
        if self.no_find_links is None:
            self.no_find_links = False
        # Let install_dir get set by install_lib command, which in turn
        # gets its info from the install command, and takes into account
        # --prefix and --home and all that other crud.
        self.set_undefined_options('install_lib',
            ('install_dir','install_dir')
        )
        # Likewise, set default script_dir from 'install_scripts.install_dir'
        self.set_undefined_options('install_scripts',
            ('install_dir', 'script_dir')
        )
        if self.user and self.install_purelib:
            self.install_dir = self.install_purelib
            self.script_dir = self.install_scripts
        # default --record from the install command
        self.set_undefined_options('install', ('record', 'record'))
        # Should this be moved to the if statement below? It's not used
        # elsewhere
        # NOTE(review): on Python 3, map() returns a one-shot iterator, so
        # the `not in normpath` tests below would consume it after the first
        # membership check — presumably this file is 2to3-converted; verify.
        normpath = map(normalize_path, sys.path)
        self.all_site_dirs = get_site_dirs()
        # Validate and fold user-supplied --site-dirs into all_site_dirs.
        if self.site_dirs is not None:
            site_dirs = [
                os.path.expanduser(s.strip()) for s in self.site_dirs.split(',')
            ]
            for d in site_dirs:
                if not os.path.isdir(d):
                    log.warn("%s (in --site-dirs) does not exist", d)
                elif normalize_path(d) not in normpath:
                    raise DistutilsOptionError(
                        d+" (in --site-dirs) is not on sys.path"
                    )
                else:
                    self.all_site_dirs.append(normalize_path(d))
        # Editable installs never touch the target site dir, so skip checks.
        if not self.editable: self.check_site_dir()
        self.index_url = self.index_url or "https://pypi.python.org/simple"
        # shadow_path: site dirs plus install/script dirs, searched first.
        self.shadow_path = self.all_site_dirs[:]
        for path_item in self.install_dir, normalize_path(self.script_dir):
            if path_item not in self.shadow_path:
                self.shadow_path.insert(0, path_item)
        if self.allow_hosts is not None:
            hosts = [s.strip() for s in self.allow_hosts.split(',')]
        else:
            hosts = ['*']
        if self.package_index is None:
            self.package_index = self.create_index(
                self.index_url, search_path = self.shadow_path, hosts=hosts,
            )
        self.local_index = Environment(self.shadow_path+sys.path)
        if self.find_links is not None:
            # Config-file values arrive as a whitespace-separated string.
            if isinstance(self.find_links, basestring):
                self.find_links = self.find_links.split()
        else:
            self.find_links = []
        if self.local_snapshots_ok:
            self.package_index.scan_egg_links(self.shadow_path+sys.path)
        if not self.no_find_links:
            self.package_index.add_find_links(self.find_links)
        self.set_undefined_options('install_lib', ('optimize','optimize'))
        # --optimize must normalize to an int in 0..2.
        if not isinstance(self.optimize,int):
            try:
                self.optimize = int(self.optimize)
                if not (0 <= self.optimize <= 2): raise ValueError
            except ValueError:
                raise DistutilsOptionError("--optimize must be 0, 1, or 2")
        if self.editable and not self.build_directory:
            raise DistutilsArgError(
                "Must specify a build directory (-b) when using --editable"
            )
        if not self.args:
            raise DistutilsArgError(
                "No urls, filenames, or requirements specified (see --help)")
        self.outputs = []
def _expand_attrs(self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs(self):
"""Calls `os.path.expanduser` on install_base, install_platbase and
root."""
self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
"""Calls `os.path.expanduser` on install dirs."""
self._expand_attrs(['install_purelib', 'install_platlib',
'install_lib', 'install_headers',
'install_scripts', 'install_data',])
def run(self):
if self.verbose != self.distribution.verbose:
log.set_verbosity(self.verbose)
try:
for spec in self.args:
self.easy_install(spec, not self.no_deps)
if self.record:
outputs = self.outputs
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in range(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
from distutils import file_util
self.execute(
file_util.write_file, (self.record, outputs),
"writing list of installed files to '%s'" %
self.record
)
self.warn_deprecated_options()
finally:
log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
"""Return a pseudo-tempname base in the install directory.
This code is intentionally naive; if a malicious party can write to
the target directory you're already in deep doodoo.
"""
try:
pid = os.getpid()
except:
pid = random.randint(0, maxsize)
return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
def warn_deprecated_options(self):
pass
    def check_site_dir(self):
        """Verify that self.install_dir is .pth-capable dir, if needed.

        Side effects: may set self.pth_file, self.sitepy_installed, and
        normalizes self.install_dir.  Raises DistutilsError when installing
        non-multi-version to a directory that can't process .pth files.
        """
        instdir = normalize_path(self.install_dir)
        pth_file = os.path.join(instdir,'easy-install.pth')
        # Is it a configured, PYTHONPATH, implicit, or explicit site dir?
        is_site_dir = instdir in self.all_site_dirs
        if not is_site_dir and not self.multi_version:
            # No? Then directly test whether it does .pth file processing
            is_site_dir = self.check_pth_processing()
        else:
            # make sure we can write to target dir
            testfile = self.pseudo_tempname()+'.write-test'
            test_exists = os.path.exists(testfile)
            try:
                if test_exists: os.unlink(testfile)
                open(testfile,'w').close()
                os.unlink(testfile)
            except (OSError,IOError):
                self.cant_write_to_target()
        if not is_site_dir and not self.multi_version:
            # Can't install non-multi to non-site dir
            raise DistutilsError(self.no_default_version_msg())
        if is_site_dir:
            # Lazily create the PthDistributions wrapper for the .pth file.
            if self.pth_file is None:
                self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
        else:
            self.pth_file = None
        PYTHONPATH = os.environ.get('PYTHONPATH','').split(os.pathsep)
        if instdir not in map(normalize_path, [_f for _f in PYTHONPATH if _f]):
            # only PYTHONPATH dirs need a site.py, so pretend it's there
            self.sitepy_installed = True
        elif self.multi_version and not os.path.exists(pth_file):
            self.sitepy_installed = True # don't need site.py in this case
            self.pth_file = None # and don't create a .pth file
        self.install_dir = instdir
    def cant_write_to_target(self):
        """Raise DistutilsError explaining why the install dir is unusable.

        Must be called from inside an exception handler: the message embeds
        sys.exc_info()[1].  The hint text differs depending on whether the
        directory exists at all.
        """
        template = """can't create or remove files in install directory
The following error occurred while trying to add or remove files in the
installation directory:
    %s
The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
    %s
"""
        msg = template % (sys.exc_info()[1], self.install_dir,)
        if not os.path.exists(self.install_dir):
            msg += """
This directory does not currently exist.  Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
"""
        else:
            msg += """
Perhaps your account does not have write access to this directory?  If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account.  If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.
For information on other options, you may wish to consult the
documentation at:
  https://pythonhosted.org/setuptools/easy_install.html
Please make the appropriate changes for your system and try again.
"""
        raise DistutilsError(msg)
    def check_pth_processing(self):
        """Empirically verify whether .pth files are supported in inst. dir

        Writes a throwaway .pth file that creates an '.ok' marker file, then
        spawns a fresh interpreter; if the marker appears, the directory
        processes .pth files.  All temp files are removed before returning.
        Returns True on success, False otherwise.
        """
        instdir = self.install_dir
        log.info("Checking .pth file support in %s", instdir)
        pth_file = self.pseudo_tempname()+".pth"
        ok_file = pth_file+'.ok'
        ok_exists = os.path.exists(ok_file)
        try:
            if ok_exists: os.unlink(ok_file)
            dirname = os.path.dirname(ok_file)
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            f = open(pth_file,'w')
        except (OSError,IOError):
            # Couldn't even create the probe file -> dir isn't writable.
            self.cant_write_to_target()
        else:
            try:
                # The .pth line runs at interpreter startup and drops the
                # '.ok' marker file as evidence of .pth processing.
                f.write("import os; f = open(%r, 'w'); f.write('OK'); f.close()\n" % (ok_file,))
                f.close()
                f=None
                executable = sys.executable
                if os.name=='nt':
                    dirname,basename = os.path.split(executable)
                    alt = os.path.join(dirname,'pythonw.exe')
                    if basename.lower()=='python.exe' and os.path.exists(alt):
                        # use pythonw.exe to avoid opening a console window
                        executable = alt
                from distutils.spawn import spawn
                # -E ignores PYTHON* env vars so only real site processing
                # can create the marker.
                spawn([executable,'-E','-c','pass'],0)
                if os.path.exists(ok_file):
                    log.info(
                        "TEST PASSED: %s appears to support .pth files",
                        instdir
                    )
                    return True
            finally:
                # Clean up the probe files regardless of outcome.
                if f:
                    f.close()
                if os.path.exists(ok_file):
                    os.unlink(ok_file)
                if os.path.exists(pth_file):
                    os.unlink(pth_file)
        if not self.multi_version:
            log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
        return False
def install_egg_scripts(self, dist):
"""Write all the scripts for `dist`, unless scripts are excluded"""
if not self.exclude_scripts and dist.metadata_isdir('scripts'):
for script_name in dist.metadata_listdir('scripts'):
if dist.metadata_isdir('scripts/' + script_name):
# The "script" is a directory, likely a Python 3
# __pycache__ directory, so skip it.
continue
self.install_script(
dist, script_name,
dist.get_metadata('scripts/'+script_name)
)
self.install_wrapper_scripts(dist)
def add_output(self, path):
if os.path.isdir(path):
for base, dirs, files in os.walk(path):
for filename in files:
self.outputs.append(os.path.join(base,filename))
else:
self.outputs.append(path)
def not_editable(self, spec):
if self.editable:
raise DistutilsArgError(
"Invalid argument %r: you can't use filenames or URLs "
"with --editable (except via the --find-links option)."
% (spec,)
)
def check_editable(self,spec):
if not self.editable:
return
if os.path.exists(os.path.join(self.build_directory, spec.key)):
raise DistutilsArgError(
"%r already exists in %s; can't do a checkout there" %
(spec.key, self.build_directory)
)
    def easy_install(self, spec, deps=False):
        """Install `spec` (a Requirement, requirement string, URL, or local
        path), optionally with its dependencies.

        All downloading/building happens in a private temp dir that is
        always removed.  Returns the installed Distribution.
        """
        tmpdir = tempfile.mkdtemp(prefix="easy_install-")
        download = None
        # Ensure the target dir has a site.py for .pth processing first,
        # unless we're doing an editable checkout.
        if not self.editable: self.install_site_py()
        try:
            if not isinstance(spec,Requirement):
                if URL_SCHEME(spec):
                    # It's a url, download it to tmpdir and process
                    self.not_editable(spec)
                    download = self.package_index.download(spec, tmpdir)
                    return self.install_item(None, download, tmpdir, deps, True)
                elif os.path.exists(spec):
                    # Existing file or directory, just process it directly
                    self.not_editable(spec)
                    return self.install_item(None, spec, tmpdir, deps, True)
                else:
                    spec = parse_requirement_arg(spec)
            self.check_editable(spec)
            dist = self.package_index.fetch_distribution(
                spec, tmpdir, self.upgrade, self.editable, not self.always_copy,
                self.local_index
            )
            if dist is None:
                msg = "Could not find suitable distribution for %r" % spec
                if self.always_copy:
                    msg+=" (--always-copy skips system and development eggs)"
                raise DistutilsError(msg)
            elif dist.precedence==DEVELOP_DIST:
                # .egg-info dists don't need installing, just process deps
                self.process_distribution(spec, dist, deps, "Using")
                return dist
            else:
                return self.install_item(spec, dist.location, tmpdir, deps)
        finally:
            if os.path.exists(tmpdir):
                rmtree(tmpdir)
    def install_item(self, spec, download, tmpdir, deps, install_needed=False):
        """Install (or just activate) the artifact at `download`.

        Decides whether a real install is required; if not, the existing
        local egg is simply processed.  Returns the Distribution matching
        `spec`, or None when `spec` is None or nothing matches.
        """
        # Installation is also needed if file in tmpdir or is not an egg
        install_needed = install_needed or self.always_copy
        install_needed = install_needed or os.path.dirname(download) == tmpdir
        install_needed = install_needed or not download.endswith('.egg')
        install_needed = install_needed or (
            self.always_copy_from is not None and
            os.path.dirname(normalize_path(download)) ==
            normalize_path(self.always_copy_from)
        )
        if spec and not install_needed:
            # at this point, we know it's a local .egg, we just don't know if
            # it's already installed.
            for dist in self.local_index[spec.project_name]:
                if dist.location==download:
                    break
            else:
                install_needed = True   # it's not in the local index
        log.info("Processing %s", os.path.basename(download))
        if install_needed:
            dists = self.install_eggs(spec, download, tmpdir)
            for dist in dists:
                self.process_distribution(spec, dist, deps)
        else:
            dists = [self.egg_distribution(download)]
            self.process_distribution(spec, dists[0], deps, "Using")
        # Hand the matching distribution back to the caller (easy_install).
        if spec is not None:
            for dist in dists:
                if dist in spec:
                    return dist
def select_scheme(self, name):
"""Sets the install directories by applying the install schemes."""
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
    def process_distribution(self, requirement, dist, deps=True, *info):
        """Activate `dist` (update .pth, indexes, scripts) and, unless
        disabled, resolve and install its dependencies.

        `info` is extra wording (e.g. "Using") for the installation report.
        """
        self.update_pth(dist)
        self.package_index.add(dist)
        # First remove the dist from self.local_index, to avoid problems using
        # old cached data in case its underlying file has been replaced.
        #
        # This is a quick-fix for a zipimporter caching issue in case the dist
        # has been implemented as and already loaded from a zip file that got
        # replaced later on. For more detailed information see setuptools issue
        # #168 at 'http://bitbucket.org/pypa/setuptools/issue/168'.
        if dist in self.local_index[dist.key]:
            self.local_index.remove(dist)
        self.local_index.add(dist)
        self.install_egg_scripts(dist)
        self.installed_projects[dist.key] = dist
        log.info(self.installation_report(requirement, dist, *info))
        # Packages can ship extra download locations in their metadata.
        if (dist.has_metadata('dependency_links.txt') and
            not self.no_find_links):
            self.package_index.add_find_links(
                dist.get_metadata_lines('dependency_links.txt')
            )
        if not deps and not self.always_copy:
            return
        elif requirement is not None and dist.key != requirement.key:
            log.warn("Skipping dependencies for %s", dist)
            return  # XXX this is not the distribution we were looking for
        elif requirement is None or dist not in requirement:
            # if we wound up with a different version, resolve what we've got
            distreq = dist.as_requirement()
            requirement = requirement or distreq
            requirement = Requirement(
                distreq.project_name, distreq.specs, requirement.extras
            )
        log.info("Processing dependencies for %s", requirement)
        try:
            distros = WorkingSet([]).resolve(
                [requirement], self.local_index, self.easy_install
            )
        except DistributionNotFound:
            e = sys.exc_info()[1]
            raise DistutilsError(
                "Could not find required distribution %s" % e.args
            )
        except VersionConflict:
            e = sys.exc_info()[1]
            raise DistutilsError(
                "Installed distribution %s conflicts with requirement %s"
                % e.args
            )
        if self.always_copy or self.always_copy_from:
            # Force all the relevant distros to be copied or activated
            for dist in distros:
                if dist.key not in self.installed_projects:
                    self.easy_install(dist.as_requirement())
        log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
if self.zip_ok is not None:
return not self.zip_ok
if dist.has_metadata('not-zip-safe'):
return True
if not dist.has_metadata('zip-safe'):
return True
return False
    def maybe_move(self, spec, dist_filename, setup_base):
        """Move the unpacked build tree into --build-directory if possible.

        Returns the directory the build should proceed in: the destination
        under self.build_directory, or the original `setup_base` when the
        destination already exists.
        """
        dst = os.path.join(self.build_directory, spec.key)
        if os.path.exists(dst):
            msg = "%r already exists in %s; build directory %s will not be kept"
            log.warn(msg, spec.key, self.build_directory, setup_base)
            return setup_base
        if os.path.isdir(dist_filename):
            setup_base = dist_filename
        else:
            if os.path.dirname(dist_filename)==setup_base:
                os.unlink(dist_filename)   # get it out of the tmp dir
            contents = os.listdir(setup_base)
            if len(contents)==1:
                dist_filename = os.path.join(setup_base,contents[0])
                if os.path.isdir(dist_filename):
                    # if the only thing there is a directory, move it instead
                    setup_base = dist_filename
        ensure_directory(dst)
        shutil.move(setup_base, dst)
        return dst
def install_wrapper_scripts(self, dist):
if not self.exclude_scripts:
for args in get_script_args(dist):
self.write_script(*args)
def install_script(self, dist, script_name, script_text, dev_path=None):
"""Generate a legacy script wrapper and install it"""
spec = str(dist.as_requirement())
is_script = is_python_script(script_text, script_name)
def get_template(filename):
"""
There are a couple of template scripts in the package. This
function loads one of them and prepares it for use.
These templates use triple-quotes to escape variable
substitutions so the scripts get the 2to3 treatment when build
on Python 3. The templates cannot use triple-quotes naturally.
"""
raw_bytes = resource_string('setuptools', template_name)
template_str = raw_bytes.decode('utf-8')
clean_template = template_str.replace('"""', '')
return clean_template
if is_script:
# See https://bitbucket.org/pypa/setuptools/issue/134 for info
# on script file naming and downstream issues with SVR4
template_name = 'script template.py'
if dev_path:
template_name = template_name.replace('.py', ' (dev).py')
script_text = (get_script_header(script_text) +
get_template(template_name) % locals())
self.write_script(script_name, _to_ascii(script_text), 'b')
def write_script(self, script_name, contents, mode="t", blockers=()):
"""Write an executable file to the scripts directory"""
self.delete_blockers( # clean up old .py/.pyw w/o a script
[os.path.join(self.script_dir,x) for x in blockers])
log.info("Installing %s script to %s", script_name, self.script_dir)
target = os.path.join(self.script_dir, script_name)
self.add_output(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
if os.path.exists(target):
os.unlink(target)
f = open(target,"w"+mode)
f.write(contents)
f.close()
chmod(target, 0o777-mask)
    def install_eggs(self, spec, dist_filename, tmpdir):
        """Install `dist_filename` and return a list of Distributions.

        Eggs and wininst .exe files are installed directly; anything else
        is unpacked (if needed), its setup.py located, and built.
        """
        # .egg dirs or files are already built, so just return them
        if dist_filename.lower().endswith('.egg'):
            return [self.install_egg(dist_filename, tmpdir)]
        elif dist_filename.lower().endswith('.exe'):
            return [self.install_exe(dist_filename, tmpdir)]
        # Anything else, try to extract and build
        setup_base = tmpdir
        if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
            unpack_archive(dist_filename, tmpdir, self.unpack_progress)
        elif os.path.isdir(dist_filename):
            setup_base = os.path.abspath(dist_filename)
        # Keep the build tree when --build-directory was requested.
        if (setup_base.startswith(tmpdir)   # something we downloaded
            and self.build_directory and spec is not None):
            setup_base = self.maybe_move(spec, dist_filename, setup_base)
        # Find the setup.py file
        setup_script = os.path.join(setup_base, 'setup.py')
        if not os.path.exists(setup_script):
            # Fall back to a single nested directory containing setup.py.
            setups = glob(os.path.join(setup_base, '*', 'setup.py'))
            if not setups:
                raise DistutilsError(
                    "Couldn't find a setup script in %s" % os.path.abspath(dist_filename)
                )
            if len(setups)>1:
                raise DistutilsError(
                    "Multiple setup scripts in %s" % os.path.abspath(dist_filename)
                )
            setup_script = setups[0]
        # Now run it, and return the result
        if self.editable:
            log.info(self.report_editable(spec, setup_script))
            return []
        else:
            return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
if os.path.isdir(egg_path):
metadata = PathMetadata(egg_path,os.path.join(egg_path,'EGG-INFO'))
else:
metadata = EggMetadata(zipimport.zipimporter(egg_path))
return Distribution.from_filename(egg_path,metadata=metadata)
    def install_egg(self, egg_path, tmpdir):
        """Copy/move/extract the egg at `egg_path` into the install dir.

        Eggs built in `tmpdir` are moved; others are copied.  Whether the
        egg is kept zipped depends on should_unzip().  Returns the
        Distribution for the installed copy.
        """
        destination = os.path.join(self.install_dir,os.path.basename(egg_path))
        destination = os.path.abspath(destination)
        if not self.dry_run:
            ensure_directory(destination)
        dist = self.egg_distribution(egg_path)
        if not samefile(egg_path, destination):
            # Clear whatever currently occupies the destination.
            if os.path.isdir(destination) and not os.path.islink(destination):
                dir_util.remove_tree(destination, dry_run=self.dry_run)
            elif os.path.exists(destination):
                self.execute(os.unlink,(destination,),"Removing "+destination)
            # Drop any stale zipimport cache entry for the old file.
            uncache_zipdir(destination)
            if os.path.isdir(egg_path):
                if egg_path.startswith(tmpdir):
                    f,m = shutil.move, "Moving"
                else:
                    f,m = shutil.copytree, "Copying"
            elif self.should_unzip(dist):
                self.mkpath(destination)
                f,m = self.unpack_and_compile, "Extracting"
            elif egg_path.startswith(tmpdir):
                f,m = shutil.move, "Moving"
            else:
                f,m = shutil.copy2, "Copying"
            self.execute(f, (egg_path, destination),
                (m+" %s to %s") %
                (os.path.basename(egg_path),os.path.dirname(destination)))
        self.add_output(destination)
        return self.egg_distribution(destination)
    def install_exe(self, dist_filename, tmpdir):
        """Convert a bdist_wininst .exe into an egg and install it.

        Raises DistutilsError if the .exe lacks wininst metadata.  Returns
        the Distribution for the installed egg.
        """
        # See if it's valid, get data
        cfg = extract_wininst_cfg(dist_filename)
        if cfg is None:
            raise DistutilsError(
                "%s is not a valid distutils Windows .exe" % dist_filename
            )
        # Create a dummy distribution object until we build the real distro
        dist = Distribution(
            None,
            project_name=cfg.get('metadata','name'),
            version=cfg.get('metadata','version'), platform=get_platform(),
        )
        # Convert the .exe to an unpacked egg
        egg_path = dist.location = os.path.join(tmpdir, dist.egg_name()+'.egg')
        egg_tmp = egg_path + '.tmp'
        _egg_info = os.path.join(egg_tmp, 'EGG-INFO')
        pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
        ensure_directory(pkg_inf)   # make sure EGG-INFO dir exists
        dist._provider = PathMetadata(egg_tmp, _egg_info)   # XXX
        self.exe_to_egg(dist_filename, egg_tmp)
        # Write EGG-INFO/PKG-INFO
        if not os.path.exists(pkg_inf):
            f = open(pkg_inf,'w')
            f.write('Metadata-Version: 1.0\n')
            for k,v in cfg.items('metadata'):
                if k != 'target_version':
                    # PKG-INFO headers use Dashed-Title-Case keys.
                    f.write('%s: %s\n' % (k.replace('_','-').title(), v))
            f.close()
        script_dir = os.path.join(_egg_info,'scripts')
        self.delete_blockers(   # delete entry-point scripts to avoid duping
            [os.path.join(script_dir,args[0]) for args in get_script_args(dist)]
        )
        # Build .egg file from tmpdir
        bdist_egg.make_zipfile(
            egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run
        )
        # install the .egg
        return self.install_egg(egg_path, tmpdir)
    def exe_to_egg(self, dist_filename, egg_tmp):
        """Extract a bdist_wininst to the directories an egg would use"""
        # Check for .pth file and set up prefix translations
        prefixes = get_exe_prefixes(dist_filename)
        to_compile = []     # .py files that need byte-compiling
        native_libs = []    # .pyd/.dll entries (for native_libs.txt + stubs)
        top_level = {}      # top-level module/package names (as a set)
        def process(src,dst):
            # unpack_archive filter: map wininst paths to egg layout and
            # record what we see; returning None skips the member.
            s = src.lower()
            for old,new in prefixes:
                if s.startswith(old):
                    src = new+src[len(old):]
                    parts = src.split('/')
                    # The `dst` argument is ignored; recompute under egg_tmp.
                    dst = os.path.join(egg_tmp, *parts)
                    dl = dst.lower()
                    if dl.endswith('.pyd') or dl.endswith('.dll'):
                        parts[-1] = bdist_egg.strip_module(parts[-1])
                        top_level[os.path.splitext(parts[0])[0]] = 1
                        native_libs.append(src)
                    elif dl.endswith('.py') and old!='SCRIPTS/':
                        top_level[os.path.splitext(parts[0])[0]] = 1
                        to_compile.append(dst)
                    return dst
            if not src.endswith('.pth'):
                log.warn("WARNING: can't process %s", src)
            return None
        # extract, tracking .pyd/.dll->native_libs and .py -> to_compile
        unpack_archive(dist_filename, egg_tmp, process)
        stubs = []
        for res in native_libs:
            if res.lower().endswith('.pyd'):    # create stubs for .pyd's
                parts = res.split('/')
                resource = parts[-1]
                parts[-1] = bdist_egg.strip_module(parts[-1])+'.py'
                pyfile = os.path.join(egg_tmp, *parts)
                to_compile.append(pyfile)
                stubs.append(pyfile)
                bdist_egg.write_stub(resource, pyfile)
        self.byte_compile(to_compile)   # compile .py's
        bdist_egg.write_safety_flag(os.path.join(egg_tmp,'EGG-INFO'),
            bdist_egg.analyze_egg(egg_tmp, stubs))  # write zip-safety flag
        # Emit EGG-INFO/top_level.txt and native_libs.txt when non-empty.
        for name in 'top_level','native_libs':
            if locals()[name]:
                txt = os.path.join(egg_tmp, 'EGG-INFO', name+'.txt')
                if not os.path.exists(txt):
                    f = open(txt,'w')
                    f.write('\n'.join(locals()[name])+'\n')
                    f.close()
    def installation_report(self, req, dist, what="Installed"):
        """Helpful installation message for display to package users"""
        # NOTE: `msg % locals()` picks up the local names what/eggloc/name/
        # version/extras below -- do not rename them.
        msg = "\n%(what)s %(eggloc)s%(extras)s"
        if self.multi_version and not self.no_report:
            msg += """
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
    pkg_resources.require("%(name)s")  # latest installed version
    pkg_resources.require("%(name)s==%(version)s")  # this exact version
    pkg_resources.require("%(name)s>=%(version)s")  # this version or higher
"""
            if self.install_dir not in map(normalize_path,sys.path):
                msg += """
Note also that the installation directory must be on sys.path at runtime for
this to work.  (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
"""
        eggloc = dist.location
        name = dist.project_name
        version = dist.version
        extras = ''   # TODO: self.report_extras(req, dist)
        return msg % locals()
    def report_editable(self, spec, setup_script):
        """Return the message shown after an --editable extraction.

        The template is %-formatted against locals() (spec/dirname/python)
        -- do not rename those locals.
        """
        dirname = os.path.dirname(setup_script)
        python = sys.executable
        return """\nExtracted editable version of %(spec)s to %(dirname)s
If it uses setuptools in its setup script, you can activate it in
"development" mode by going to that directory and running::
    %(python)s setup.py develop
See the setuptools documentation for the "develop" command for more info.
""" % locals()
def run_setup(self, setup_script, setup_base, args):
sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
sys.modules.setdefault('distutils.command.egg_info', egg_info)
args = list(args)
if self.verbose>2:
v = 'v' * (self.verbose - 1)
args.insert(0,'-'+v)
elif self.verbose<2:
args.insert(0,'-q')
if self.dry_run:
args.insert(0,'-n')
log.info(
"Running %s %s", setup_script[len(setup_base)+1:], ' '.join(args)
)
try:
run_setup(setup_script, args)
except SystemExit:
v = sys.exc_info()[1]
raise DistutilsError("Setup script exited with %s" % (v.args[0],))
    def build_and_install(self, setup_script, setup_base):
        """Run `setup_script` with bdist_egg and install the resulting eggs.

        Builds into a temp dist dir (always cleaned up) and returns the
        list of installed Distributions.
        """
        args = ['bdist_egg', '--dist-dir']
        dist_dir = tempfile.mkdtemp(
            prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
        )
        try:
            # Propagate our fetcher options so setup_requires downloads work.
            self._set_fetcher_options(os.path.dirname(setup_script))
            args.append(dist_dir)
            self.run_setup(setup_script, setup_base, args)
            all_eggs = Environment([dist_dir])
            eggs = []
            for key in all_eggs:
                for dist in all_eggs[key]:
                    eggs.append(self.install_egg(dist.location, setup_base))
            if not eggs and not self.dry_run:
                log.warn("No eggs found in %s (setup script problem?)",
                    dist_dir)
            return eggs
        finally:
            rmtree(dist_dir)
            log.set_verbosity(self.verbose)   # restore our log verbosity
def _set_fetcher_options(self, base):
"""
When easy_install is about to run bdist_egg on a source dist, that
source dist might have 'setup_requires' directives, requiring
additional fetching. Ensure the fetcher options given to easy_install
are available to that command as well.
"""
# find the fetch options from easy_install and write them out
# to the setup.cfg file.
ei_opts = self.distribution.get_option_dict('easy_install').copy()
fetch_directives = (
'find_links', 'site_dirs', 'index_url', 'optimize',
'site_dirs', 'allow_hosts',
)
fetch_options = {}
for key, val in ei_opts.items():
if key not in fetch_directives: continue
fetch_options[key.replace('_', '-')] = val[1]
# create a settings dictionary suitable for `edit_config`
settings = dict(easy_install=fetch_options)
cfg_filename = os.path.join(base, 'setup.cfg')
setopt.edit_config(cfg_filename, settings)
    def update_pth(self, dist):
        """Make `dist` the active version in easy-install.pth (no-op when
        .pth management is disabled, e.g. multi-version installs)."""
        if self.pth_file is None:
            return
        for d in self.pth_file[dist.key]:    # drop old entries
            if self.multi_version or d.location != dist.location:
                log.info("Removing %s from easy-install.pth file", d)
                self.pth_file.remove(d)
                if d.location in self.shadow_path:
                    self.shadow_path.remove(d.location)
        if not self.multi_version:
            if dist.location in self.pth_file.paths:
                log.info(
                    "%s is already the active version in easy-install.pth",
                    dist
                )
            else:
                log.info("Adding %s to easy-install.pth file", dist)
                self.pth_file.add(dist)  # add new entry
                if dist.location not in self.shadow_path:
                    self.shadow_path.append(dist.location)
        if not self.dry_run:
            self.pth_file.save()
            if dist.key=='setuptools':
                # Ensure that setuptools itself never becomes unavailable!
                # XXX should this check for latest version?
                filename = os.path.join(self.install_dir,'setuptools.pth')
                if os.path.islink(filename): os.unlink(filename)
                f = open(filename, 'wt')
                f.write(self.pth_file.make_relative(dist.location)+'\n')
                f.close()
    def unpack_progress(self, src, dst):
        """unpack_archive() filter that just logs each extracted member."""
        # Progress filter for unpacking
        log.debug("Unpacking %s to %s", src, dst)
        return dst  # only unpack-and-compile skips files for dry run
    def unpack_and_compile(self, egg_path, destination):
        """Extract a zipped egg to `destination`, byte-compile its .py
        files, and make shared libraries executable."""
        to_compile = []
        to_chmod = []
        def pf(src, dst):
            # Filter for unpack_archive: collect compile/chmod work and
            # skip actual extraction entirely under --dry-run.
            if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
                to_compile.append(dst)
            elif dst.endswith('.dll') or dst.endswith('.so'):
                to_chmod.append(dst)
            self.unpack_progress(src,dst)
            return not self.dry_run and dst or None
        unpack_archive(egg_path, destination, pf)
        self.byte_compile(to_compile)
        if not self.dry_run:
            for f in to_chmod:
                # Add read/execute bits while respecting setgid/sticky mask.
                mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
                chmod(f, mode)
def byte_compile(self, to_compile):
if _dont_write_bytecode:
self.warn('byte-compiling is disabled, skipping.')
return
from distutils.util import byte_compile
try:
# try to make the byte compile messages quieter
log.set_verbosity(self.verbose - 1)
byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
if self.optimize:
byte_compile(
to_compile, optimize=self.optimize, force=1,
dry_run=self.dry_run
)
finally:
log.set_verbosity(self.verbose) # restore original verbosity
    def no_default_version_msg(self):
        """Return the error text shown when installing a non-multi-version
        package to a directory that can't process .pth files."""
        template = """bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from.  The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
    %s
and your PYTHONPATH environment variable currently contains:
    %r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
  on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
  variable.  (It must then also be on PYTHONPATH whenever you run
  Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
  using one of the approaches described here:
  https://pythonhosted.org/setuptools/easy_install.html#custom-installation-locations
Please make the appropriate changes for your system and try again."""
        return template % (self.install_dir, os.environ.get('PYTHONPATH',''))
def install_site_py(self):
    """Make sure there's a site.py in the target dir, if needed"""
    if self.sitepy_installed:
        return  # already did it, or don't need to

    sitepy = os.path.join(self.install_dir, "site.py")
    source = resource_string("setuptools", "site-patch.py")
    current = ""

    if os.path.exists(sitepy):
        log.debug("Checking existing site.py in %s", self.install_dir)
        f = open(sitepy, 'rb')
        current = f.read()
        # we want str, not bytes
        if sys.version_info >= (3,):
            current = current.decode()
        f.close()
        # refuse to clobber a site.py that setuptools did not generate
        if not current.startswith('def __boot():'):
            raise DistutilsError(
                "%s is not a setuptools-generated site.py; please"
                " remove it." % sitepy
            )

    # NOTE(review): on Python 3 `current` is str while `source` (from
    # resource_string) is presumably bytes, so this compare looks
    # always-unequal there -- confirm before relying on the short-circuit.
    if current != source:
        log.info("Creating %s", sitepy)
        if not self.dry_run:
            ensure_directory(sitepy)
            f = open(sitepy, 'wb')
            f.write(source)
            f.close()
        self.byte_compile([sitepy])

    self.sitepy_installed = True
def create_home_path(self):
    """Create directories under ~."""
    # Only relevant for --user installs.
    if not self.user:
        return
    home = convert_path(os.path.expanduser("~"))
    for name, path in iteritems(self.config_vars):
        # create any config-var directory that lives under $HOME
        if path.startswith(home) and not os.path.isdir(path):
            self.debug_print("os.makedirs('%s', 0o700)" % path)
            os.makedirs(path, 0o700)  # private to the user
# Mapping of os.name -> default install/script dirs, consulted by
# _expand() below; platforms not listed fall back to DEFAULT_SCHEME.
INSTALL_SCHEMES = dict(
    posix = dict(
        install_dir = '$base/lib/python$py_version_short/site-packages',
        script_dir = '$base/bin',
    ),
)

# Windows-style layout used when os.name has no entry above.
DEFAULT_SCHEME = dict(
    install_dir = '$base/Lib/site-packages',
    script_dir = '$base/Scripts',
)
def _expand(self, *attrs):
    """Expand $-style placeholders in the named attributes, using the
    install command's config_vars (re-based on --prefix when given)."""
    config_vars = self.get_finalized_command('install').config_vars

    if self.prefix:
        # Set default install_dir/scripts from --prefix
        config_vars = config_vars.copy()
        config_vars['base'] = self.prefix
        scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
        for attr, val in scheme.items():
            # only fill in attributes the user did not set explicitly
            if getattr(self, attr, None) is None:
                setattr(self, attr, val)

    from distutils.util import subst_vars
    for attr in attrs:
        val = getattr(self, attr)
        if val is not None:
            val = subst_vars(val, config_vars)
            if os.name == 'posix':
                val = os.path.expanduser(val)  # allow ~ in paths
            setattr(self, attr, val)
def get_site_dirs():
    """Return the list of 'site' directories for this interpreter:
    PYTHONPATH entries, prefix-derived site-packages locations,
    sysconfig purelib/platlib, and (if enabled) the user site dir;
    every entry is run through normalize_path()."""
    # return a list of 'site' dirs
    sitedirs = [_f for _f in os.environ.get('PYTHONPATH',
                '').split(os.pathsep) if _f]
    prefixes = [sys.prefix]
    if sys.exec_prefix != sys.prefix:
        prefixes.append(sys.exec_prefix)
    for prefix in prefixes:
        if prefix:
            if sys.platform in ('os2emx', 'riscos'):
                sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
            elif os.sep == '/':
                # POSIX layout, plus the legacy lib/site-python dir
                sitedirs.extend([os.path.join(prefix,
                                              "lib",
                                              "python" + sys.version[:3],
                                              "site-packages"),
                                 os.path.join(prefix, "lib", "site-python")])
            else:
                # Windows layout
                sitedirs.extend(
                    [prefix, os.path.join(prefix, "lib", "site-packages")]
                )
            if sys.platform == 'darwin':
                # for framework builds *only* we add the standard Apple
                # locations. Currently only per-user, but /Library and
                # /Network/Library could be added too
                if 'Python.framework' in prefix:
                    home = os.environ.get('HOME')
                    if home:
                        sitedirs.append(
                            os.path.join(home,
                                         'Library',
                                         'Python',
                                         sys.version[:3],
                                         'site-packages'))
    lib_paths = get_path('purelib'), get_path('platlib')
    for site_lib in lib_paths:
        if site_lib not in sitedirs:
            sitedirs.append(site_lib)
    if site.ENABLE_USER_SITE:
        sitedirs.append(site.USER_SITE)
    sitedirs = list(map(normalize_path, sitedirs))
    return sitedirs
def expand_paths(inputs):
    """Yield sys.path directories that might contain "old-style" packages"""
    seen = {}

    for dirname in inputs:
        dirname = normalize_path(dirname)
        if dirname in seen:
            continue
        seen[dirname] = 1

        if not os.path.isdir(dirname):
            continue

        files = os.listdir(dirname)
        yield dirname, files

        # Also follow directories referenced from this dir's .pth files.
        for name in files:
            if not name.endswith('.pth'):
                # We only care about the .pth files
                continue
            if name in ('easy-install.pth', 'setuptools.pth'):
                # Ignore .pth files that we control
                continue

            # Read the .pth file
            f = open(os.path.join(dirname, name))
            lines = list(yield_lines(f))
            f.close()

            # Yield existing non-dupe, non-import directory lines from it
            for line in lines:
                if not line.startswith("import"):
                    line = normalize_path(line.rstrip())
                    if line not in seen:
                        seen[line] = 1
                        if not os.path.isdir(line):
                            continue
                        yield line, os.listdir(line)
def extract_wininst_cfg(dist_filename):
    """Extract configuration data from a bdist_wininst .exe

    A bdist_wininst installer is an executable with a zip archive and a
    12-byte footer (tag, config length, bitmap length) appended; the
    NUL-terminated INI configuration block sits just before that footer.

    Returns a ConfigParser.RawConfigParser, or None if *dist_filename*
    does not look like a valid bdist_wininst installer.
    """
    f = open(dist_filename, 'rb')
    try:
        endrec = zipfile._EndRecData(f)
        if endrec is None:
            return None  # not even a zip file

        # bytes prepended before the zip data = wininst stub + config
        prepended = (endrec[9] - endrec[5]) - endrec[6]
        if prepended < 12:  # no wininst data here
            return None
        f.seek(prepended - 12)

        from setuptools.compat import StringIO, ConfigParser
        import struct
        tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
        if tag not in (0x1234567A, 0x1234567B):
            return None  # not a valid tag

        f.seek(prepended - (12 + cfglen))
        cfg = ConfigParser.RawConfigParser(
            {'version': '', 'target_version': ''})
        try:
            part = f.read(cfglen)
            # The config block is NUL-terminated; keep only the part up
            # to the first null byte.  BUG FIX: the old check used
            # `sys.version_info >= (2,6)`, but on Python 2 `bytes` is
            # `str`, so bytes([0]) is the 3-char string "[0]" and the
            # terminator was never found.  Use bytes([0]) on Python 3
            # only, chr(0) everywhere else.
            if sys.version_info >= (3,):
                null_byte = bytes([0])
            else:
                null_byte = chr(0)
            config = part.split(null_byte, 1)[0]
            # Now the config is in bytes, but for RawConfigParser, it
            # should be text, so decode it.
            config = config.decode(sys.getfilesystemencoding())
            cfg.readfp(StringIO(config))
        except ConfigParser.Error:
            return None
        if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
            return None
        return cfg
    finally:
        f.close()
def get_exe_prefixes(exe_filename):
    """Get exe->egg path translations for a given .exe file"""
    # (exe-internal prefix, egg-internal prefix) pairs; longest match wins.
    prefixes = [
        ('PURELIB/', ''), ('PLATLIB/pywin32_system32', ''),
        ('PLATLIB/', ''),
        ('SCRIPTS/', 'EGG-INFO/scripts/'),
        ('DATA/lib/site-packages', ''),
    ]
    z = zipfile.ZipFile(exe_filename)
    try:
        for info in z.infolist():
            name = info.filename
            parts = name.split('/')
            if len(parts) == 3 and parts[2] == 'PKG-INFO':
                if parts[1].endswith('.egg-info'):
                    # map the .egg-info dir itself onto EGG-INFO/
                    prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
                    break
            if len(parts) != 2 or not name.endswith('.pth'):
                continue
            if name.endswith('-nspkg.pth'):
                continue
            if parts[0].upper() in ('PURELIB', 'PLATLIB'):
                # non-import .pth lines name subdirectories to flatten
                contents = z.read(name)
                if sys.version_info >= (3,):
                    contents = contents.decode()
                for pth in yield_lines(contents):
                    pth = pth.strip().replace('\\', '/')
                    if not pth.startswith('import'):
                        prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
    finally:
        z.close()
    # lower-case, then sort longest-first so the most specific prefix wins
    prefixes = [(x.lower(), y) for x, y in prefixes]
    prefixes.sort()
    prefixes.reverse()
    return prefixes
def parse_requirement_arg(spec):
    """Parse *spec* as a Requirement, mapping parse failures to
    DistutilsError for a friendlier command-line message."""
    try:
        return Requirement.parse(spec)
    except ValueError:
        raise DistutilsError(
            "Not a URL, existing file, or requirement spec: %r" % (spec,)
        )
class PthDistributions(Environment):
    """A .pth file with Distribution paths in it"""

    # True whenever the in-memory path list differs from the on-disk file
    dirty = False

    def __init__(self, filename, sitedirs=()):
        self.filename = filename
        self.sitedirs = list(map(normalize_path, sitedirs))
        self.basedir = normalize_path(os.path.dirname(self.filename))
        self._load()
        Environment.__init__(self, [], None, None)
        # register a Distribution for every path already in the file
        for path in yield_lines(self.paths):
            list(map(self.add, find_distributions(path, True)))

    def _load(self):
        # Parse the .pth file into self.paths, dropping duplicate and
        # vanished entries, and noting whether the "import" wrapper
        # lines (written by save()) were present.
        self.paths = []
        saw_import = False
        seen = dict.fromkeys(self.sitedirs)
        if os.path.isfile(self.filename):
            f = open(self.filename, 'rt')
            for line in f:
                if line.startswith('import'):
                    saw_import = True
                    continue
                path = line.rstrip()
                self.paths.append(path)
                if not path.strip() or path.strip().startswith('#'):
                    continue
                # skip non-existent paths, in case somebody deleted a package
                # manually, and duplicate paths as well
                path = self.paths[-1] = normalize_path(
                    os.path.join(self.basedir, path)
                )
                if not os.path.exists(path) or path in seen:
                    self.paths.pop()  # skip it
                    self.dirty = True  # we cleaned up, so we're dirty now :)
                    continue
                seen[path] = 1
            f.close()
        if self.paths and not saw_import:
            self.dirty = True  # ensure anything we touch has import wrappers
        # ignore trailing empty lines
        while self.paths and not self.paths[-1].strip():
            self.paths.pop()

    def save(self):
        """Write changed .pth file back to disk"""
        if not self.dirty:
            return

        data = '\n'.join(map(self.make_relative, self.paths))
        if data:
            log.debug("Saving %s", self.filename)
            # wrap the entries so installed eggs keep a stable position
            # in sys.path across later .pth processing
            data = (
                "import sys; sys.__plen = len(sys.path)\n"
                "%s\n"
                "import sys; new=sys.path[sys.__plen:];"
                " del sys.path[sys.__plen:];"
                " p=getattr(sys,'__egginsert',0); sys.path[p:p]=new;"
                " sys.__egginsert = p+len(new)\n"
            ) % data
            if os.path.islink(self.filename):
                os.unlink(self.filename)
            f = open(self.filename, 'wt')
            f.write(data)
            f.close()
        elif os.path.exists(self.filename):
            log.debug("Deleting empty %s", self.filename)
            os.unlink(self.filename)

        self.dirty = False

    def add(self, dist):
        """Add `dist` to the distribution map"""
        if (dist.location not in self.paths and (
            dist.location not in self.sitedirs or
            dist.location == os.getcwd()  # account for '.' being in PYTHONPATH
        )):
            self.paths.append(dist.location)
            self.dirty = True
        Environment.add(self, dist)

    def remove(self, dist):
        """Remove `dist` from the distribution map"""
        while dist.location in self.paths:
            self.paths.remove(dist.location)
            self.dirty = True
        Environment.remove(self, dist)

    def make_relative(self, path):
        # Rewrite *path* relative to the .pth file's directory when it
        # lives underneath it; otherwise return it unchanged.
        npath, last = os.path.split(normalize_path(path))
        baselen = len(self.basedir)
        parts = [last]
        sep = os.altsep == '/' and '/' or os.sep
        while len(npath) >= baselen:
            if npath == self.basedir:
                parts.append(os.curdir)
                parts.reverse()
                return sep.join(parts)
            npath, last = os.path.split(npath)
            parts.append(last)
        else:
            return path
def _first_line_re():
    """
    Return a regular expression based on first_line_re suitable for matching
    strings.
    """
    if isinstance(first_line_re.pattern, str):
        return first_line_re
    # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
    return re.compile(first_line_re.pattern.decode())
def get_script_header(script_text, executable=sys_executable, wininst=False):
    """Create a #! line, getting options (if any) from script_text"""
    first = (script_text + '\n').splitlines()[0]
    match = _first_line_re().match(first)
    options = ''
    if match:
        # carry over any interpreter options from the original shebang
        options = match.group(1) or ''
        if options:
            options = ' ' + options
    if wininst:
        executable = "python.exe"
    else:
        executable = nt_quote_arg(executable)
    hdr = "#!%(executable)s%(options)s\n" % locals()
    if not isascii(hdr):
        # Non-ascii path to sys.executable, use -x to prevent warnings
        if options:
            if options.strip().startswith('-'):
                options = ' -x' + options.strip()[1:]
            # else: punt, we can't do it, let the warning happen anyway
        else:
            options = ' -x'
    executable = fix_jython_executable(executable, options)
    hdr = "#!%(executable)s%(options)s\n" % locals()
    return hdr
def auto_chmod(func, arg, exc):
    """rmtree() onerror handler: on Windows, make the file writable and
    retry os.remove; otherwise re-raise with "<func> <arg>" appended."""
    if func is os.remove and os.name == 'nt':
        chmod(arg, stat.S_IWRITE)
        return func(arg)
    et, ev, _ = sys.exc_info()
    # NOTE(review): assumes the active exception's value is indexable
    # (ev[0], ev[1]) -- true for OSError args, confirm for other types.
    reraise(et, (ev[0], ev[1] + (" %s %s" % (func, arg))))
def uncache_zipdir(path):
    """
    Remove any globally cached zip file related data for `path`

    Stale zipimport.zipimporter objects need to be removed when a zip file is
    replaced as they contain cached zip file directory information. If they are
    asked to get data from their zip file, they will use that cached
    information to calculate the data location in the zip file. This calculated
    location may be incorrect for the replaced zip file, which may in turn
    cause the read operation to either fail or return incorrect data.

    Note we have no way to clear any local caches from here. That is left up to
    whomever is in charge of maintaining that cache.
    """
    normalized_path = normalize_path(path)
    # purge both the zipimport directory cache and the path importer cache
    _uncache(normalized_path, zipimport._zip_directory_cache)
    _uncache(normalized_path, sys.path_importer_cache)
def _uncache(normalized_path, cache):
    """Delete every *cache* key whose normalized form equals
    *normalized_path* or lies directly beneath it."""
    to_remove = []
    prefix_len = len(normalized_path)
    for p in cache:
        np = normalize_path(p)
        # match the path itself ('') or a child (next char is os.sep)
        if (np.startswith(normalized_path) and
                np[prefix_len:prefix_len + 1] in (os.sep, '')):
            to_remove.append(p)
    # mutate the cache only after iteration is finished
    for p in to_remove:
        del cache[p]
def is_python(text, filename='<string>'):
    """Return True if *text* compiles as Python source, else False."""
    try:
        compile(text, filename, 'exec')
        return True
    except (SyntaxError, TypeError):
        return False
def is_sh(executable):
    """Determine if the specified executable is a .sh (contains a #! line).

    If the file cannot be read at all, the path itself (a truthy value)
    is returned, mirroring the historical behavior.
    """
    try:
        with open(executable) as fp:
            magic = fp.read(2)
    except (OSError, IOError):
        return executable
    return magic == '#!'
def nt_quote_arg(arg):
    """Quote a command line argument according to Windows parsing rules.

    Backslashes are literal except when they precede a double quote (or
    the closing quote of a quoted argument), in which case they must be
    doubled; embedded double quotes become \\".
    """
    out = []
    pending = 0  # run of backslashes not yet emitted
    quote_needed = (" " in arg) or ("\t" in arg)
    if quote_needed:
        out.append('"')

    for ch in arg:
        if ch == '\\':
            pending += 1
            continue
        if ch == '"':
            # double preceding backslashes, then emit an escaped quote
            out.append('\\' * (pending * 2) + '\\"')
        else:
            if pending:
                out.append('\\' * pending)
            out.append(ch)
        pending = 0

    if pending:
        out.append('\\' * pending)
    if quote_needed:
        # trailing backslashes must be doubled before the closing quote
        out.append('\\' * pending)
        out.append('"')

    return ''.join(out)
def is_python_script(script_text, filename):
    """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
    """
    if filename.endswith('.py') or filename.endswith('.pyw'):
        return True  # extension says it's Python
    if is_python(script_text, filename):
        return True  # it's syntactically valid Python
    if script_text.startswith('#!'):
        # It begins with a '#!' line, so check if 'python' is in it somewhere
        return 'python' in script_text.splitlines()[0].lower()
    return False  # Not any Python I can recognize
try:
    from os import chmod as _chmod
except ImportError:
    # Jython compatibility: os.chmod may be missing, so fall back to a no-op
    def _chmod(*args):
        pass
def chmod(path, mode):
    """chmod() wrapper that logs failures instead of raising."""
    log.debug("changing mode of %s to %o", path, mode)
    try:
        _chmod(path, mode)
    except os.error:
        e = sys.exc_info()[1]
        log.debug("chmod failed: %s", e)
def fix_jython_executable(executable, options):
    """On Jython, replace a shell-script sys.executable with a
    /usr/bin/env shebang where possible; otherwise return it unchanged."""
    if sys.platform.startswith('java') and is_sh(executable):
        # Workaround for Jython is not needed on Linux systems.
        import java

        if java.lang.System.getProperty("os.name") == "Linux":
            return executable

        # Workaround Jython's sys.executable being a .sh (an invalid
        # shebang line interpreter)
        if options:
            # Can't apply the workaround, leave it broken
            log.warn(
                "WARNING: Unable to adapt shebang line for Jython,"
                " the following script is NOT executable\n"
                " see http://bugs.jython.org/issue1112 for"
                " more information.")
        else:
            return '/usr/bin/env %s' % executable
    return executable
class ScriptWriter(object):
    """
    Encapsulates behavior around writing entry point scripts for console and
    gui apps.
    """

    # Stub script source; %-formatted with spec/group/name via locals()
    # in get_script_args() below.
    template = textwrap.dedent("""
        # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
        __requires__ = %(spec)r
        import sys
        from pkg_resources import load_entry_point
        if __name__ == '__main__':
            sys.exit(
                load_entry_point(%(spec)r, %(group)r, %(name)r)()
            )
    """).lstrip()

    @classmethod
    def get_script_args(cls, dist, executable=sys_executable, wininst=False):
        """
        Yield write_script() argument tuples for a distribution's entrypoints
        """
        gen_class = cls.get_writer(wininst)
        spec = str(dist.as_requirement())
        header = get_script_header("", executable, wininst)
        for type_ in 'console', 'gui':
            group = type_ + '_scripts'
            for name, ep in dist.get_entry_map(group).items():
                # template picks up spec/group/name from locals()
                script_text = gen_class.template % locals()
                for res in gen_class._get_script_args(type_, name, header,
                                                      script_text):
                    yield res

    @classmethod
    def get_writer(cls, force_windows):
        # Windows (or forced-Windows) scripts need launchers/extensions.
        if force_windows or sys.platform == 'win32':
            return WindowsScriptWriter.get_writer()
        return cls

    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        # Simply write the stub with no extension.
        yield (name, header + script_text)
class WindowsScriptWriter(ScriptWriter):
    @classmethod
    def get_writer(cls):
        """
        Get a script writer suitable for Windows
        """
        # 'executable' -> .exe launcher writer, 'natural' -> plain scripts
        writer_lookup = dict(
            executable=WindowsExecutableLauncherWriter,
            natural=cls,
        )
        # for compatibility, use the executable launcher by default
        launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
        return writer_lookup[launcher]

    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        "For Windows, add a .py extension"
        # NOTE(review): console scripts get '.pya' (not '.py') here,
        # matching the blocker list below -- looks deliberate, confirm.
        ext = dict(console='.pya', gui='.pyw')[type_]
        if ext not in os.environ['PATHEXT'].lower().split(';'):
            warnings.warn("%s not listed in PATHEXT; scripts will not be "
                          "recognized as executables." % ext, UserWarning)
        # any sibling files with these extensions would shadow the script
        old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
        old.remove(ext)
        header = cls._adjust_header(type_, header)
        blockers = [name + x for x in old]
        yield name + ext, header + script_text, 't', blockers

    @staticmethod
    def _adjust_header(type_, orig_header):
        """
        Make sure 'pythonw' is used for gui and 'python' is used for
        console (regardless of what sys.executable is).
        """
        pattern = 'pythonw.exe'
        repl = 'python.exe'
        if type_ == 'gui':
            pattern, repl = repl, pattern
        pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
        new_header = pattern_ob.sub(string=orig_header, repl=repl)
        # strip the "#!" prefix and trailing newline/quotes to get a path
        clean_header = new_header[2:-1].strip('"')
        if sys.platform == 'win32' and not os.path.exists(clean_header):
            # the adjusted version doesn't exist, so return the original
            return orig_header
        return new_header
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        """
        For Windows, add a .py extension and an .exe launcher
        """
        if type_ == 'gui':
            launcher_type = 'gui'
            ext = '-script.pyw'
            old = ['.pyw']
        else:
            launcher_type = 'cli'
            ext = '-script.py'
            old = ['.py', '.pyc', '.pyo']
        hdr = cls._adjust_header(type_, header)
        blockers = [name + x for x in old]
        # the actual script body, written as text
        yield (name + ext, hdr + script_text, 't', blockers)
        # the .exe launcher that invokes the -script.py file
        yield (
            name + '.exe', get_win_launcher(launcher_type),
            'b'  # write in binary mode
        )
        if not is_64bit():
            # install a manifest for the launcher to prevent Windows
            # from detecting it as an installer (which it will for
            # launchers like easy_install.exe). Consider only
            # adding a manifest for launchers detected as installers.
            # See Distribute #143 for details.
            m_name = name + '.exe.manifest'
            yield (m_name, load_launcher_manifest(name), 't')
# for backward-compatibility: module-level alias that predates the
# ScriptWriter class hierarchy
get_script_args = ScriptWriter.get_script_args
def get_win_launcher(type):
    """
    Load the Windows launcher (executable) suitable for launching a script.

    `type` should be either 'cli' or 'gui'

    Returns the executable as a byte string.
    """
    launcher_fn = '%s.exe' % type
    # pick the launcher binary matching the host architecture
    if platform.machine().lower() == 'arm':
        launcher_fn = launcher_fn.replace(".", "-arm.")
    if is_64bit():
        launcher_fn = launcher_fn.replace(".", "-64.")
    else:
        launcher_fn = launcher_fn.replace(".", "-32.")
    return resource_string('setuptools', launcher_fn)
def load_launcher_manifest(name):
    """Return the launcher's .exe.manifest XML, %-formatted with *name*."""
    manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
    if sys.version_info[0] < 3:
        return manifest % vars()
    else:
        # resource_string returns bytes on Python 3; decode before formatting
        return manifest.decode('utf-8') % vars()
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
    """Recursively delete a directory tree.

    This code is taken from the Python 2.4 version of 'shutil', because
    the 2.3 version doesn't really work right.
    """
    if ignore_errors:
        # swallow every error
        def onerror(*args):
            pass
    elif onerror is None:
        # re-raise the error currently being handled
        def onerror(*args):
            raise
    names = []
    try:
        names = os.listdir(path)
    except os.error:
        onerror(os.listdir, path, sys.exc_info())
    for name in names:
        fullname = os.path.join(path, name)
        try:
            mode = os.lstat(fullname).st_mode
        except os.error:
            mode = 0
        if stat.S_ISDIR(mode):
            rmtree(fullname, ignore_errors, onerror)
        else:
            try:
                os.remove(fullname)
            except os.error:
                onerror(os.remove, fullname, sys.exc_info())
    try:
        os.rmdir(path)
    except os.error:
        onerror(os.rmdir, path, sys.exc_info())
def current_umask():
    """Return the process umask without permanently changing it."""
    saved = os.umask(0o022)  # the umask can only be read by setting it
    os.umask(saved)          # immediately restore the previous value
    return saved
def bootstrap():
    # This function is called when setuptools*.egg is run using /bin/sh
    import setuptools

    # pretend the egg directory itself was the script, and ask
    # easy_install (via main()) to install it
    argv0 = os.path.dirname(setuptools.__path__[0])
    sys.argv[0] = argv0
    sys.argv.append(argv0)
    main()
def main(argv=None, **kw):
    """Command-line entry point: run `setup(script_args=['easy_install', ...])`
    with a custom usage message."""
    from setuptools import setup
    from setuptools.dist import Distribution
    import distutils.core

    USAGE = """\
usage: %(script)s [options] requirement_or_url ...
   or: %(script)s --help
"""

    def gen_usage(script_name):
        return USAGE % dict(
            script=os.path.basename(script_name),
        )

    def with_ei_usage(f):
        # temporarily swap distutils' usage text for easy_install's
        old_gen_usage = distutils.core.gen_usage
        try:
            distutils.core.gen_usage = gen_usage
            return f()
        finally:
            distutils.core.gen_usage = old_gen_usage

    class DistributionWithoutHelpCommands(Distribution):
        common_usage = ""

        def _show_help(self, *args, **kw):
            with_ei_usage(lambda: Distribution._show_help(self, *args, **kw))

    if argv is None:
        argv = sys.argv[1:]

    with_ei_usage(lambda:
        setup(
            script_args = ['-q', 'easy_install', '-v'] + argv,
            script_name = sys.argv[0] or 'easy_install',
            distclass=DistributionWithoutHelpCommands, **kw
        )
    )
|
tumbl3w33d/ansible | refs/heads/devel | lib/ansible/modules/network/f5/bigip_pool.py | 21 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata consumed by ansible-doc and CI tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_pool
short_description: Manages F5 BIG-IP LTM pools
description:
- Manages F5 BIG-IP LTM pools via iControl REST API.
version_added: 1.2
options:
description:
description:
- Specifies descriptive text that identifies the pool.
type: str
version_added: 2.3
name:
description:
- Pool name
type: str
required: True
aliases:
- pool
lb_method:
description:
- Load balancing method. When creating a new pool, if this value is not
specified, the default of C(round-robin) will be used.
type: str
version_added: 1.3
choices:
- dynamic-ratio-member
- dynamic-ratio-node
- fastest-app-response
- fastest-node
- least-connections-member
- least-connections-node
- least-sessions
- observed-member
- observed-node
- predictive-member
- predictive-node
- ratio-least-connections-member
- ratio-least-connections-node
- ratio-member
- ratio-node
- ratio-session
- round-robin
- weighted-least-connections-member
- weighted-least-connections-node
monitor_type:
description:
- Monitor rule type when C(monitors) is specified.
- When creating a new pool, if this value is not specified, the default
of 'and_list' will be used.
- When C(single) ensures that all specified monitors are checked, but
additionally includes checks to make sure you only specified a single
monitor.
- When C(and_list) ensures that B(all) monitors are checked.
- When C(m_of_n) ensures that C(quorum) of C(monitors) are checked. C(m_of_n)
B(requires) that a C(quorum) of 1 or greater be set either in the playbook,
or already existing on the device.
- Both C(single) and C(and_list) are functionally identical since BIG-IP
considers all monitors as "a list".
type: str
choices:
- and_list
- m_of_n
- single
version_added: 1.3
quorum:
description:
- Monitor quorum value when C(monitor_type) is C(m_of_n).
- Quorum must be a value of 1 or greater when C(monitor_type) is C(m_of_n).
type: int
version_added: 1.3
monitors:
description:
- Monitor template name list. If the partition is not provided as part of
the monitor name, then the C(partition) option will be used instead.
type: list
version_added: 1.3
slow_ramp_time:
description:
- Sets the ramp-up time (in seconds) to gradually ramp up the load on
newly added or freshly detected up pool members.
type: int
version_added: 1.3
reselect_tries:
description:
- Sets the number of times the system tries to contact a pool member
after a passive failure.
type: int
version_added: 2.2
service_down_action:
description:
- Sets the action to take when node goes down in pool.
type: str
choices:
- none
- reset
- drop
- reselect
version_added: 1.3
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
version_added: 2.5
state:
description:
- When C(present), guarantees that the pool exists with the provided
attributes.
- When C(absent), removes the pool from the system.
type: str
choices:
- absent
- present
default: present
version_added: 2.5
metadata:
description:
- Arbitrary key/value pairs that you can attach to a pool. This is useful in
situations where you might want to annotate a pool to be managed by Ansible.
- Key names will be stored as strings; this includes names that are numbers.
- Values for all of the keys will be stored as strings; this includes values
that are numbers.
- Data will be persisted, not ephemeral.
type: raw
version_added: 2.5
priority_group_activation:
description:
- Specifies whether the system load balances traffic according to the priority
number assigned to the pool member.
- When creating a new pool, if this parameter is not specified, the default of
C(0) will be used.
- To disable this setting, provide the value C(0).
- Once you enable this setting, you can specify pool member priority when you
create a new pool or on a pool member's properties screen.
- The system treats same-priority pool members as a group.
- To enable priority group activation, provide a number from C(0) to C(65535)
that represents the minimum number of members that must be available in one
priority group before the system directs traffic to members in a lower
priority group.
- When a sufficient number of members become available in the higher priority
group, the system again directs traffic to the higher priority group.
type: int
aliases:
- minimum_active_members
version_added: 2.6
aggregate:
description:
- List of pool definitions to be created, modified or removed.
- When using C(aggregates) if one of the aggregate definitions is invalid, the aggregate run will fail,
indicating the error it last encountered.
- The module will C(NOT) rollback any changes it has made prior to encountering the error.
- The module also will not indicate what changes were made prior to failure, therefore it is strongly advised
to run the module in check mode to make basic validation, prior to module execution.
type: list
aliases:
- pools
version_added: 2.8
replace_all_with:
description:
- Remove pools not defined in the C(aggregate) parameter.
- This operation is all or none, meaning that it will stop if there are some pools
that cannot be removed.
type: bool
default: no
aliases:
- purge
version_added: 2.8
notes:
- To add members to a pool, use the C(bigip_pool_member) module. Previously, the
C(bigip_pool) module allowed the management of members, but this has been removed
in version 2.5 of Ansible.
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create pool
bigip_pool:
state: present
name: my-pool
partition: Common
lb_method: least-connections-member
slow_ramp_time: 120
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Modify load balancer method
bigip_pool:
state: present
name: my-pool
partition: Common
lb_method: round-robin
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Set a single monitor (with enforcement)
bigip_pool:
state: present
name: my-pool
partition: Common
monitor_type: single
monitors:
- http
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Set a single monitor (without enforcement)
bigip_pool:
state: present
name: my-pool
partition: Common
monitors:
- http
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Set multiple monitors (all must succeed)
bigip_pool:
state: present
name: my-pool
partition: Common
monitor_type: and_list
monitors:
- http
- tcp
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Set multiple monitors (at least 1 must succeed)
bigip_pool:
state: present
name: my-pool
partition: Common
monitor_type: m_of_n
quorum: 1
monitors:
- http
- tcp
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Delete pool
bigip_pool:
state: absent
name: my-pool
partition: Common
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Add metadata to pool
bigip_pool:
state: present
name: my-pool
partition: Common
metadata:
ansible: 2.4
updated_at: 2017-12-20T17:50:46Z
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Add pools Aggregate
bigip_pool:
aggregate:
- name: my-pool
partition: Common
lb_method: least-connections-member
slow_ramp_time: 120
- name: my-pool2
partition: Common
lb_method: least-sessions
slow_ramp_time: 120
- name: my-pool3
partition: Common
lb_method: round-robin
slow_ramp_time: 120
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Add pools Aggregate, purge others
bigip_pool:
aggregate:
- name: my-pool
partition: Common
lb_method: least-connections-member
slow_ramp_time: 120
- name: my-pool2
partition: Common
lb_method: least-sessions
slow_ramp_time: 120
- name: my-pool3
partition: Common
lb_method: round-robin
slow_ramp_time: 120
replace_all_with: yes
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
'''
RETURN = r'''
monitor_type:
description: The monitor_type that was set on the pool.
returned: changed
type: str
sample: m_of_n
quorum:
description: The quorum that was set on the pool.
returned: changed
type: int
sample: 2
monitors:
description: Monitors set on the pool.
returned: changed
type: list
sample: ['/Common/http', '/Common/gateway_icmp']
service_down_action:
description: Service down action that is set on the pool.
returned: changed
type: str
sample: reset
description:
description: Description set on the pool.
returned: changed
type: str
sample: Pool of web servers
lb_method:
description: The LB method set for the pool.
returned: changed
type: str
sample: round-robin
slow_ramp_time:
description: The new value that is set for the slow ramp-up time.
returned: changed
type: int
sample: 500
reselect_tries:
description: The new value that is set for the number of tries to contact member.
returned: changed
type: int
sample: 10
metadata:
description: The new value of the pool.
returned: changed
type: dict
sample: {'key1': 'foo', 'key2': 'bar'}
priority_group_activation:
description: The new minimum number of members to activate the priority group.
returned: changed
type: int
sample: 10
replace_all_with:
description: Purges all non-aggregate pools from device
returned: changed
type: bool
sample: yes
'''
import re
from copy import deepcopy
from ansible.module_utils.urls import urlparse
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.common.utils import remove_default_spec
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.compare import cmp_str_with_none
from library.module_utils.network.f5.icontrol import TransactionContextManager
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.compare import cmp_str_with_none
from ansible.module_utils.network.f5.icontrol import TransactionContextManager
class Parameters(AnsibleF5Parameters):
    """Shared parameter handling for the BIG-IP pool module.

    Translates between the Ansible module's option names and the names
    used by the BIG-IP iControl REST API, and normalizes values that are
    meaningful in both directions.
    """
    # REST API attribute name -> Ansible module option name.
    api_map = {
        'loadBalancingMode': 'lb_method',
        'slowRampTime': 'slow_ramp_time',
        'reselectTries': 'reselect_tries',
        'serviceDownAction': 'service_down_action',
        'monitor': 'monitors',
        'minActiveMembers': 'priority_group_activation',
    }
    # Attributes that may be sent to the REST API on create/update.
    api_attributes = [
        'description',
        'name',
        'loadBalancingMode',
        'monitor',
        'slowRampTime',
        'reselectTries',
        'serviceDownAction',
        'metadata',
        'minActiveMembers',
    ]
    # Keys reported back to the user in the module result.
    returnables = [
        'monitor_type',
        'quorum',
        'monitors',
        'service_down_action',
        'description',
        'lb_method',
        'slow_ramp_time',
        'reselect_tries',
        'monitor',
        'name',
        'partition',
        'metadata',
        'priority_group_activation',
    ]
    # Keys compared by Difference when deciding whether to update.
    updatables = [
        'monitor_type',
        'quorum',
        'monitors',
        'service_down_action',
        'description',
        'lb_method',
        'slow_ramp_time',
        'reselect_tries',
        'metadata',
        'priority_group_activation',
    ]
    @property
    def lb_method(self):
        """Validated load-balancing method name, or None when not supplied.

        Raises:
            F5ModuleError: if the value is not one of the known choices.
        """
        lb_method = self._values['lb_method']
        if lb_method is None:
            return None
        # ArgumentSpec (defined later in this module) owns the canonical
        # list of valid lb_method choices.
        spec = ArgumentSpec()
        if lb_method not in spec.lb_choice:
            raise F5ModuleError('Provided lb_method is unknown')
        return lb_method
    def _verify_quorum_type(self, quorum):
        """Coerce *quorum* to int; raise F5ModuleError when not integer-like."""
        try:
            if quorum is None:
                return None
            return int(quorum)
        except ValueError:
            raise F5ModuleError(
                "The specified 'quorum' must be an integer."
            )
    @property
    def monitors(self):
        """Monitor expression in BIG-IP string form.

        For 'm_of_n' this is 'min <quorum> of { /Partition/mon ... }';
        otherwise the fully-qualified monitors joined with ' and '.
        """
        if self._values['monitors'] is None:
            return None
        monitors = [fq_name(self.partition, x) for x in self.monitors_list]
        if self.monitor_type == 'm_of_n':
            monitors = ' '.join(monitors)
            result = 'min %s of { %s }' % (self.quorum, monitors)
        else:
            result = ' and '.join(monitors).strip()
        return result
    @property
    def priority_group_activation(self):
        """Minimum number of active members, as int, or None when unset."""
        if self._values['priority_group_activation'] is None:
            return None
        return int(self._values['priority_group_activation'])
class ApiParameters(Parameters):
    """Parameters as read back from the BIG-IP REST API."""
    @property
    def description(self):
        """Description from the device; the literal 'none' maps to None."""
        if self._values['description'] in [None, 'none']:
            return None
        return self._values['description']
    @property
    def quorum(self):
        """Quorum parsed out of a 'min N of { ... }' monitor string, or None."""
        if self._values['monitors'] is None:
            return None
        pattern = r'min\s+(?P<quorum>\d+)\s+of'
        matches = re.search(pattern, self._values['monitors'])
        if matches:
            quorum = matches.group('quorum')
        else:
            quorum = None
        result = self._verify_quorum_type(quorum)
        return result
    @property
    def monitor_type(self):
        """'m_of_n' when a quorum clause is present, otherwise 'and_list'."""
        if self._values['monitors'] is None:
            return None
        pattern = r'min\s+\d+\s+of'
        matches = re.search(pattern, self._values['monitors'])
        if matches:
            return 'm_of_n'
        else:
            return 'and_list'
    @property
    def monitors_list(self):
        """All '/Partition/name' monitor references found in the string."""
        if self._values['monitors'] is None:
            return []
        try:
            result = re.findall(r'/[\w-]+/[^\s}]+', self._values['monitors'])
            return result
        except Exception:
            # NOTE(review): presumably a non-string value is passed through
            # unchanged here — confirm against what the API can return.
            return self._values['monitors']
    @property
    def metadata(self):
        """Device metadata normalized so every entry has 'name' and 'value'."""
        if self._values['metadata'] is None:
            return None
        result = []
        for md in self._values['metadata']:
            tmp = dict(name=str(md['name']))
            if 'value' in md:
                tmp['value'] = str(md['value'])
            else:
                # Entries with no value on the device get an empty string.
                tmp['value'] = ''
            result.append(tmp)
        return result
class ModuleParameters(Parameters):
    """Parameters as supplied by the user in the Ansible task."""
    @property
    def description(self):
        """User description; '' or 'none' requests removal, None means unset."""
        value = self._values['description']
        if value is None:
            return None
        if value in ('none', ''):
            return ''
        return value
    @property
    def monitors_list(self):
        """Raw list of requested monitor names (possibly empty)."""
        raw = self._values['monitors']
        return [] if raw is None else raw
    @property
    def quorum(self):
        """Quorum coerced to int, or None when not supplied."""
        raw = self._values['quorum']
        if raw is None:
            return None
        return self._verify_quorum_type(raw)
    @property
    def monitor_type(self):
        """Requested monitor_type ('and_list', 'm_of_n', 'single') or None."""
        return self._values['monitor_type']
    @property
    def metadata(self):
        """User metadata dict converted to the API's list-of-dicts form.

        Raises:
            F5ModuleError: when the supplied value is not a mapping.
        """
        raw = self._values['metadata']
        if raw is None:
            return None
        if raw == '':
            return []
        try:
            return [
                dict(name=str(key), value=str(val) if val else '')
                for key, val in iteritems(raw)
            ]
        except AttributeError:
            raise F5ModuleError(
                "The 'metadata' parameter must be a dictionary of key/value pairs."
            )
class Changes(Parameters):
    """Computed change set; collects returnable values for reporting."""
    def to_return(self):
        """Gather all returnable properties, skipping any that raise."""
        collected = {}
        for key in self.returnables:
            try:
                collected[key] = getattr(self, key)
            except Exception:
                # A property may legitimately fail when its inputs are absent.
                continue
        return self._filter_params(collected)
    @property
    def monitors(self):
        """Pass the stored monitor string through untouched."""
        return self._values['monitors']
class UsableChanges(Changes):
    """Changes massaged into the exact form the BIG-IP API accepts."""
    @property
    def monitors(self):
        """Flatten 'm_of_n' brace syntax down to the API wire format.

        A monitor string such as ``min 1 of { /Common/http /Common/tcp }``
        becomes ``min 1 of /Common/http /Common/tcp``. Anything else is
        returned unchanged.
        """
        monitor_string = self._values['monitors']
        if monitor_string is None:
            return None
        # Bug fix: the original condition was `'{' in monitor_string and '}'`,
        # where the bare literal '}' is always truthy — it never actually
        # verified the closing brace was present.
        if '{' in monitor_string and '}' in monitor_string:
            tmp = monitor_string.strip('}').split('{')
            monitor = ''.join(tmp).rstrip()
            return monitor
        return monitor_string
class ReportableChanges(Changes):
    """Changes reshaped into user-friendly values for the module result."""
    @property
    def monitors(self):
        """Sorted list of fully-qualified monitor names, or None.

        Guarding against None avoids a TypeError from re.findall(None);
        previously that exception was only masked by Changes.to_return()
        swallowing it.
        """
        if self._values['monitors'] is None:
            return None
        result = sorted(re.findall(r'/[\w-]+/[^\s}]+', self._values['monitors']))
        return result
    @property
    def monitor_type(self):
        """'m_of_n' when a quorum expression is present, else 'and_list'."""
        if self._values['monitors'] is None:
            # Same None guard as above; re.search(pattern, None) raises.
            return None
        pattern = r'min\s+\d+\s+of'
        matches = re.search(pattern, self._values['monitors'])
        if matches:
            return 'm_of_n'
        return 'and_list'
    @property
    def metadata(self):
        """Metadata list-of-dicts converted back to a plain dict."""
        if self._values['metadata'] is None:
            return None
        result = dict()
        for x in self._values['metadata']:
            result[x['name']] = x['value']
        return result
class Difference(object):
    """Compares desired ('want') against current ('have') parameters.

    compare() returns the value to apply when a parameter differs, or
    None when no change is needed. NOTE: several properties here mutate
    self.want as a side effect, so evaluation order matters.
    """
    def __init__(self, want, have=None):
        self.want = want
        self.have = have
    def compare(self, param):
        """Return the changed value for *param*, or None if unchanged."""
        try:
            # A same-named property implements custom comparison logic.
            result = getattr(self, param)
            return result
        except AttributeError:
            # Fall back to simple inequality.
            return self.__default(param)
    def __default(self, param):
        """Plain != comparison; 'want' wins when 'have' lacks the attribute."""
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                return attr1
        except AttributeError:
            return attr1
    def to_tuple(self, items):
        """Flatten a list of dicts into (key, value) string tuples."""
        result = []
        for x in items:
            tmp = [(str(k), str(v)) for k, v in iteritems(x)]
            result += tmp
        return result
    def _diff_complex_items(self, want, have):
        """Subset comparison for list-of-dict parameters (e.g. metadata)."""
        if want == [] and have is None:
            return None
        if want is None:
            return None
        w = self.to_tuple(want)
        h = self.to_tuple(have)
        if set(w).issubset(set(h)):
            return None
        else:
            return want
    @property
    def description(self):
        return cmp_str_with_none(self.want.description, self.have.description)
    def _monitors_and_quorum(self):
        """Validate monitor_type/quorum combinations and diff the monitors.

        Side effect: fills missing want.monitor_type/quorum from 'have',
        and rewrites 'single' to 'and_list' once its extra checks pass.
        """
        if self.want.monitor_type is None:
            self.want.update(dict(monitor_type=self.have.monitor_type))
        if self.want.monitor_type == 'm_of_n':
            if self.want.quorum is None:
                self.want.update(dict(quorum=self.have.quorum))
            if self.want.quorum is None or self.want.quorum < 1:
                raise F5ModuleError(
                    "Quorum value must be specified with monitor_type 'm_of_n'."
                )
            if self.want.monitors != self.have.monitors:
                return dict(
                    monitors=self.want.monitors
                )
        elif self.want.monitor_type == 'and_list':
            if self.want.quorum is not None and self.want.quorum > 0:
                raise F5ModuleError(
                    "Quorum values have no effect when used with 'and_list'."
                )
            if self.want.monitors != self.have.monitors:
                return dict(
                    monitors=self.want.monitors
                )
        elif self.want.monitor_type == 'single':
            if len(self.want.monitors_list) > 1:
                raise F5ModuleError(
                    "When using a 'monitor_type' of 'single', only one monitor may be provided."
                )
            elif len(self.have.monitors_list) > 1 and len(self.want.monitors_list) == 0:
                # Handle instances where there already exists many monitors, and the
                # user runs the module again specifying that the monitor_type should be
                # changed to 'single'
                raise F5ModuleError(
                    "A single monitor must be specified if more than one monitor currently exists on your pool."
                )
            # Update to 'and_list' here because the above checks are all that need
            # to be done before we change the value back to what is expected by
            # BIG-IP.
            #
            # Remember that 'single' is nothing more than a fancy way of saying
            # "and_list plus some extra checks"
            self.want.update(dict(monitor_type='and_list'))
        if self.want.monitors != self.have.monitors:
            return dict(
                monitors=self.want.monitors
            )
    @property
    def monitor_type(self):
        return self._monitors_and_quorum()
    @property
    def quorum(self):
        return self._monitors_and_quorum()
    @property
    def monitors(self):
        """Diff monitors, defaulting missing pieces from 'have'."""
        if self.want.monitor_type is None:
            self.want.update(dict(monitor_type=self.have.monitor_type))
        if not self.want.monitors_list:
            # NOTE(review): this assigns to the 'monitors' property on want —
            # confirm AnsibleF5Parameters supports property assignment here.
            self.want.monitors = self.have.monitors_list
        if not self.want.monitors and self.want.monitor_type is not None:
            raise F5ModuleError(
                "The 'monitors' parameter cannot be empty when 'monitor_type' parameter is specified"
            )
        if self.want.monitors != self.have.monitors:
            return self.want.monitors
    @property
    def metadata(self):
        """Diff metadata lists; [] means 'remove all', None means 'no change'."""
        if self.want.metadata is None:
            return None
        elif len(self.want.metadata) == 0 and self.have.metadata is None:
            return None
        elif len(self.want.metadata) == 0:
            return []
        elif self.have.metadata is None:
            return self.want.metadata
        result = self._diff_complex_items(self.want.metadata, self.have.metadata)
        return result
class ModuleManager(object):
    """Drives the pool lifecycle (create/update/delete/purge) via REST."""
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.want = None
        self.have = None
        self.changes = None
        self.replace_all_with = None
        # selfLinks of pools to delete when replace_all_with/purge is set.
        self.purge_links = list()
    def exec_module(self):
        """Entry point: handle aggregate/purge modes, return the result dict."""
        wants = None
        if self.module.params['replace_all_with']:
            self.replace_all_with = True
        if self.module.params['aggregate']:
            wants = self.merge_defaults_for_aggregate(self.module.params)
        result = dict()
        changed = False
        if self.replace_all_with and self.purge_links:
            self.purge()
            changed = True
        if self.module.params['aggregate']:
            result['aggregate'] = list()
            for want in wants:
                output = self.execute(want)
                if output['changed']:
                    changed = output['changed']
                result['aggregate'].append(output)
        else:
            output = self.execute(self.module.params)
            if output['changed']:
                changed = output['changed']
            result.update(output)
        if changed:
            result['changed'] = True
        return result
    def merge_defaults_for_aggregate(self, params):
        """Fill each aggregate item with top-level defaults where unset."""
        defaults = deepcopy(params)
        aggregate = defaults.pop('aggregate')
        for i, j in enumerate(aggregate):
            for k, v in iteritems(defaults):
                if k != 'replace_all_with':
                    if j.get(k, None) is None and v is not None:
                        aggregate[i][k] = v
        if self.replace_all_with:
            self.compare_aggregate_names(aggregate)
        return aggregate
    def compare_aggregate_names(self, items):
        """Record selfLinks of on-device pools not named in the aggregate."""
        on_device = self._read_purge_collection()
        if not on_device:
            return False
        aggregates = [item['name'] for item in items]
        collection = [item['name'] for item in on_device]
        diff = set(collection) - set(aggregates)
        if diff:
            to_purge = [item['selfLink'] for item in on_device if item['name'] in diff]
            self.purge_links.extend(to_purge)
    def execute(self, params=None):
        """Apply present/absent state for a single pool; return its result."""
        self.want = ModuleParameters(params=params)
        self.have = ApiParameters()
        self.changes = UsableChanges()
        changed = False
        result = dict()
        state = params['state']
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result
    def _announce_deprecations(self, result):
        """Forward any collected deprecation warnings to Ansible."""
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )
    def _set_changed_options(self):
        """On create: every supplied option counts as a change."""
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)
    def _update_changed_options(self):
        """On update: diff want vs have; True when something changed."""
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    # Custom comparators may return several keys at once.
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False
    def present(self):
        if self.exists():
            return self.update()
        else:
            return self.create()
    def absent(self):
        if self.exists():
            return self.remove()
        return False
    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False
    def update(self):
        """Update an existing pool; honors check mode."""
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True
    def remove(self):
        """Delete the pool and verify it is gone; honors check mode."""
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the Pool")
        return True
    def purge(self):
        """Delete all pools queued in purge_links; honors check mode."""
        if self.module.check_mode:
            return True
        self.purge_from_device()
        return True
    def create(self):
        """Validate monitor settings, then create the pool; honors check mode."""
        if self.want.monitor_type is not None:
            if not self.want.monitors_list:
                raise F5ModuleError(
                    "The 'monitors' parameter cannot be empty when 'monitor_type' parameter is specified"
                )
        else:
            if self.want.monitor_type is None:
                self.want.update(dict(monitor_type='and_list'))
        if self.want.monitor_type == 'm_of_n' and (self.want.quorum is None or self.want.quorum < 1):
            raise F5ModuleError(
                "Quorum value must be specified with monitor_type 'm_of_n'."
            )
        elif self.want.monitor_type == 'and_list' and self.want.quorum is not None and self.want.quorum > 0:
            raise F5ModuleError(
                "Quorum values have no effect when used with 'and_list'."
            )
        elif self.want.monitor_type == 'single' and len(self.want.monitors_list) > 1:
            raise F5ModuleError(
                "When using a 'monitor_type' of 'single', only one monitor may be provided"
            )
        if self.want.priority_group_activation is None:
            # BIG-IP default: priority group activation disabled.
            self.want.update({'priority_group_activation': 0})
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True
    def _read_purge_collection(self):
        """List name/selfLink of all pools on the device."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/pool/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        query = "?$select=name,selfLink"
        resp = self.client.api.get(uri + query)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        if 'items' in response:
            return response['items']
        return []
    def create_on_device(self):
        """POST the new pool definition to the device."""
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/ltm/pool/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def update_on_device(self):
        """PATCH the changed attributes onto the existing pool."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/ltm/pool/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def exists(self):
        """True when the pool exists on the device."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/pool/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True
    def remove_from_device(self):
        """DELETE the pool; raise on any non-200 response."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/pool/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)
    def read_current_from_device(self):
        """Fetch the pool (with subcollections) as ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/pool/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        query = '?expandSubcollections=true'
        resp = self.client.api.get(uri + query)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
    def _prepare_links(self, collection):
        """Rebuild absolute URLs from selfLinks against the configured host."""
        purge_links = list()
        purge_paths = [urlparse(link).path for link in collection]
        for path in purge_paths:
            link = "https://{0}:{1}{2}".format(
                self.client.provider['server'],
                self.client.provider['server_port'],
                path
            )
            purge_links.append(link)
        return purge_links
    def purge_from_device(self):
        """Delete all queued pools inside a single REST transaction."""
        links = self._prepare_links(self.purge_links)
        with TransactionContextManager(self.client) as transact:
            for link in links:
                resp = transact.api.delete(link)
                try:
                    response = resp.json()
                except ValueError as ex:
                    raise F5ModuleError(str(ex))
                if 'code' in response and response['code'] == 400:
                    if 'message' in response:
                        raise F5ModuleError(response['message'])
                    else:
                        raise F5ModuleError(resp.content)
        return True
class ArgumentSpec(object):
    """Builds the AnsibleModule argument spec for this pool module."""
    def __init__(self):
        # Canonical list of valid load-balancing methods on BIG-IP.
        self.lb_choice = [
            'dynamic-ratio-member',
            'dynamic-ratio-node',
            'fastest-app-response',
            'fastest-node',
            'least-connections-member',
            'least-connections-node',
            'least-sessions',
            'observed-member',
            'observed-node',
            'predictive-member',
            'predictive-node',
            'ratio-least-connections-member',
            'ratio-least-connections-node',
            'ratio-member',
            'ratio-node',
            'ratio-session',
            'round-robin',
            'weighted-least-connections-member',
            'weighted-least-connections-node'
        ]
        self.supports_check_mode = True
        # Options valid both at top level and per aggregate entry.
        element_spec = dict(
            name=dict(
                aliases=['pool']
            ),
            lb_method=dict(
                choices=self.lb_choice
            ),
            monitor_type=dict(
                choices=[
                    'and_list', 'm_of_n', 'single'
                ]
            ),
            quorum=dict(
                type='int'
            ),
            monitors=dict(
                type='list'
            ),
            slow_ramp_time=dict(
                type='int'
            ),
            reselect_tries=dict(
                type='int'
            ),
            service_down_action=dict(
                choices=[
                    'none', 'reset',
                    'drop', 'reselect'
                ]
            ),
            description=dict(),
            metadata=dict(type='raw'),
            state=dict(
                default='present',
                choices=['present', 'absent']
            ),
            priority_group_activation=dict(
                type='int',
                aliases=['minimum_active_members']
            ),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        aggregate_spec = deepcopy(element_spec)
        # remove default in aggregate spec, to handle common arguments
        remove_default_spec(aggregate_spec)
        argument_spec = dict(
            aggregate=dict(
                type='list',
                elements='dict',
                options=aggregate_spec,
                aliases=['pools']
            ),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            ),
            replace_all_with=dict(
                default='no',
                type='bool',
                aliases=['purge']
            )
        )
        self.mutually_exclusive = [
            ['name', 'aggregate']
        ]
        self.required_one_of = [
            ['name', 'aggregate']
        ]
        self.argument_spec = {}
        self.argument_spec.update(element_spec)
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """Module entry point: build the spec, run the manager, report results."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        mutually_exclusive=spec.mutually_exclusive,
        required_one_of=spec.required_one_of,
    )
    try:
        manager = ModuleManager(module=module)
        module.exit_json(**manager.exec_module())
    except F5ModuleError as err:
        module.fail_json(msg=str(err))
if __name__ == '__main__':
    main()
|
uaf/uaf | refs/heads/master | examples/pyuaf/client/how_to_read_a_node_on_an_unknown_server.py | 3 | # examples/pyuaf/client/how_to_read_a_node_on_an_unknown_server.py
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# !!! Start the demo server ($SDK/bin/uaservercpp) of Unified Automation before running this script !!!!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# When trying to interact with a third-party server (i.e. any server of which you don't know all details
# from), you must dynamically discover some "details".
# This example will show you how to connect and interact with such an "unknown" server.
#
# In general, what you *do* know is the IP address and port of the server. Together they are known as
# the "discoveryURL".
# You can provide a discovery URL like this:
import pyuaf
from pyuaf.util import Address, NodeId
from pyuaf.util import opcuaidentifiers
from pyuaf.client import Client
from pyuaf.client.settings import ClientSettings
# Create a client named "myClient", and provide the discovery URL of the
# server (uaservercpp). The client discovers the endpoint immediately.
myClient = Client(ClientSettings("myClient", ["opc.tcp://localhost:48010"]))
# The client now discovers the "opc.tcp://localhost:48010", and fetches the
# application description of the UaServerCpp demo server.
# Let's print out what the client discovered:
listOfApplicationDescriptions = myClient.serversFound()
print(listOfApplicationDescriptions)
# It's a list of one server description (the demo server). It tells us the
# application URI, or in this case, the server uri:
SERVER_URI = listOfApplicationDescriptions[0].applicationUri
# Okay now that we know the server URI of the demo server, we can connect to it.
# We'll just dynamically browse the address space, and let the UAF do the connection for us:
rootNode = Address( NodeId(opcuaidentifiers.OpcUaId_RootFolder, 0), SERVER_URI )
firstLevelBrowseResult = myClient.browse([rootNode])
print("BROWSE RESULT:")
print(firstLevelBrowseResult)
# Note that the 0 is the namespace index for the OPC UA standard. It always corresponds to the
# OPC UA standard URI 'http://opcfoundation.org/UA/'
# Instead of the above, we therefore could have also specified the namespace URI:
#rootNode = Address(NodeId(opcuaidentifiers.OpcUaId_RootFolder,
#                          'http://opcfoundation.org/UA/'),
#                   SERVER_URI)
# It wouldn't make a difference, as the UAF will automatically "translate" the namespace URI
# 'http://opcfoundation.org/UA/' to the namespace index 0 for you.
# The mapping between namespace URIs and namespace indexes is called the NamespaceArray.
# Let's print this mapping:
result = myClient.read(Address(NodeId(opcuaidentifiers.OpcUaId_Server_NamespaceArray,
pyuaf.util.constants.OPCUA_NAMESPACE_URI), # same as 'http://opcfoundation.org/UA/'
SERVER_URI ))
NAMESPACE_ARRAY = result.targets[0].data
print("NamespaceArray:")
# NOTE: xrange is Python 2 only; this example predates Python 3 support.
for i in xrange(len(NAMESPACE_ARRAY)):
    print("NamespaceIndex %d corresponds to NamespaceURI '%s'" %(i, NAMESPACE_ARRAY[i].value))
# Indeed, we can see above that namespace index 0 corresponds
# to namespace URI 'http://opcfoundation.org/UA/'!
# Now that you know the ServerURI and the namespace URIs, you still need to figure out the NodeId
# details for each node that you want to read/write/monitor/... You can use the browse functionality
# of PyUAF for this (see the how_to_browse_some_nodes.py example), or you can simply use UaExpert
# to know the details of the nodes.
#
# For instance, UaExpert tells us that the "Demo.SimulationSpeed" node has namespace index 2, and
# identifier "Demo.SimulationSpeed". So we can read this node easily:
address = Address(NodeId("Demo.SimulationSpeed",        # NodeId identifier
2),                             # NodeId namespace index
SERVER_URI)                     # server URI
result = myClient.read([address])
print("Read result:")
print(result)
# Since the namespace array may change over time, it is however much preferred that you
# identify the namespace via the URI instead of the namespace index.
# Looking at the printed NamespaceArray above, we see that namespace index 2 corresponds
# to namespace URI 'http://www.unifiedautomation.com/DemoServer/'.
# So we can read the node like this instead:
address = Address(NodeId("Demo.SimulationSpeed",                                 # NodeId identifier
'http://www.unifiedautomation.com/DemoServer/'),  # NodeId namespace URI
SERVER_URI)                                       # server URI
result = myClient.read([address])
print("Read result:")
print(result)
|
ivke/ArdTemp8 | refs/heads/master | ArduinoControl.py | 1 | import serial
from Calibration import *
class ArduinoControler(object):
"Class interacting with arduino, sending, getting data throu serial port."
def __init__(self,NSensor,DataLength):
" Initalize Arduino relevant data, number of sensors, data Length that is send via USB. Actual is dataLength +2 bytes for \r and \n"
self.NumberSensors=NSensor
self.DataLength=DataLength
# initalize serial port
try:
self.ser=serial.Serial(port = '/dev/ttyUSB0',baudrate = 9600) # open serial communication with arduino
self.ser.flushInput()
print self.ser.inWaiting()
sleep(10)
print self.ser.inWaiting()
self.ser.read(self.ser.inWaiting())
except self.ser.SerialException:
print "Arduino is not responding!!!"
# initalize
def SetSensorNumber(self,NSensor):
self.NumberSensors=NSensor
def GetSensorNumber(self):
self.ser.read()
def GetADCData(self):
" Reads value from ADC from arduino and returns calculated temperature as dictionary "
try:
self.ser.inWaiting()
except self.ser.SerialException:
print "Arduino is not responding!!!"
return None
sbuf=""
while self.ser.inWaiting():
buf=self.ser.read(1)
if buf is "\n" and self.ser.inWaiting()<(self.DataLength+2):
sbuf+=buf # adds to sum buffer
break # goes out of the loop remaining is left for next round
sbuf+=buf
self.lastSerialReading = sbuf
# code for extractin valid entries, all other are discharged
import re
patern="(?=\d{%i}\r\n)\d{%i}" % (self.DataLength,self.DataLength) # patern for regexpr matching it finds XXXXX\r\n and removes newline characters
self.ADCList=re.findall(patern,sbuf) # lists all numbers that matches wanted format
if self.ADCList is None: # if nothing was recived send None as output
print "No DATA avaliable on Serial buffer!!!"
# should triger exception
print self.ADCList
# make dict sensor number: ADC value to get latest readings and to organize it
self.ADCDict={int(x[0]):int(x[1:]) for x in self.ADCList}
def SetCmDArduino(self,cmd,Data):
""" Sending command and data to Arduino via Serial"""
try:
self.ser.flushOutput( ) #flush output of the serial
except serial.SerialException:
print "Arduino is not responding!!!"
return False
if not isinstance(Data,basestring): # check if Data is string
data=str(Data) #if not convert to string
else: data=Data
data="%02i%s\n"% (cmd,data) # formats the string output XX command rest is data and \n
print data
try:
self.ser.write(data)
except serial.SerialException:
print "Serial write Error!!!"
return False
return True
class TempControler(ArduinoControler):
    """Temperature controller built on the Arduino serial link.

    Converts raw ADC readings to temperatures (and back) using the
    per-sensor linear calibration loaded from ReadCalibratedSet().
    """
    def __init__(self,NSensor=8,DataLength=5):
        super(TempControler,self).__init__(NSensor,DataLength) # inherits initfunction from parent - sets for 8 sensors with 10 bit ADC - 1 ditit for senzor number and 4 ditigs for value
        # Per-sensor slope (kx) and intercept (kint) of the linear calibration.
        self.kx,self.kint=ReadCalibratedSet() # read Data for temperature calculation (derived from calibration)
        self.TempDict={}
    def GetTemp(self,sensor):
        """Read the temperature of a single sensor. To be implemented."""
        pass
    def GetTempDict(self):
        """Fetch ADC data from the Arduino and convert it to temperatures."""
        self.GetADCData()
        for k,n in self.ADCDict.items(): # loops over ADCDict and sets temperature using calcTemp function
            self.TempDict[k]=self.calcTemp(k,n)
        return self.TempDict
    def SetTemp(self,sensor,temp): # sets target ADC value for wanted temperature on Sensor....
        """Send the target ADC value for *temp* on *sensor* to the Arduino."""
        ADCV=self.calcADC(sensor,temp)
        if ADCV>1023: # maximum value for 10 bit ADC
            print "Temperature is set to hight!"
            ADCV=1023
        data="%i%04i" % (sensor,ADCV) # output for arduino
        self.SetCmDArduino(10,data) # 10 is command for set temperature
    def calcTemp(self,senzorNumber,bufval): # calculate temperature from ADC from calibration data
        """Linear ADC -> temperature conversion, rounded to the nearest unit."""
        return round(bufval*self.kx[int(senzorNumber)]+self.kint[int(senzorNumber)])
    def calcADC(self,senzorNumber,temp): # calculate set ADC from calibration data
        """Inverse conversion: temperature -> ADC count (int)."""
        return int(round((temp-self.kint[int(senzorNumber)])/self.kx[int(senzorNumber)],0))
def main():
tm=TempControler()
while True:
tm.GetTempDict()
print tm.TempDict
sleep(5)
if __name__ == "__main__":
main()
|
zooba/PTVS | refs/heads/master | Python/Product/Miniconda/Miniconda3-x64/Lib/webbrowser.py | 6 | #! /usr/bin/env python3
"""Interfaces for launching and remotely controlling Web browsers."""
# Maintained by Georg Brandl.
import os
import shlex
import shutil
import sys
import subprocess
import threading
__all__ = ["Error", "open", "open_new", "open_new_tab", "get", "register"]
class Error(Exception):
    """Raised when no runnable browser can be located."""
    pass
_lock = threading.RLock()
_browsers = {}                  # Dictionary of available browser controllers
_tryorder = None                # Preference order of available browsers (None = not yet initialized)
_os_preferred_browser = None    # The OS-reported preferred browser, if any
def register(name, klass, instance=None, *, preferred=False):
    """Register a browser connector under *name*.

    Preferred browsers — or ones matching the OS default — are queued at
    the front of the try order.
    """
    with _lock:
        if _tryorder is None:
            register_standard_browsers()
        _browsers[name.lower()] = [klass, instance]
        # The OS default from xdg-settings may look like "firefox.desktop",
        # hence the substring match against the registered name.
        front = preferred or (_os_preferred_browser and name in _os_preferred_browser)
        if front:
            _tryorder.insert(0, name)
        else:
            _tryorder.append(name)
def get(using=None):
    """Return a browser launcher instance appropriate for the environment."""
    if _tryorder is None:
        with _lock:
            # Double-checked: another thread may have initialized meanwhile.
            if _tryorder is None:
                register_standard_browsers()
    if using is not None:
        alternatives = [using]
    else:
        alternatives = _tryorder
    for browser in alternatives:
        if '%s' in browser:
            # User gave us a command line, split it into name and args
            browser = shlex.split(browser)
            if browser[-1] == '&':
                # Trailing '&' means run detached, in the background.
                return BackgroundBrowser(browser[:-1])
            else:
                return GenericBrowser(browser)
        else:
            # User gave us a browser name or path.
            try:
                command = _browsers[browser.lower()]
            except KeyError:
                # Unknown name: try cloning a known controller for it.
                command = _synthesize(browser)
            if command[1] is not None:
                # Prefer a ready-made instance over the class.
                return command[1]
            elif command[0] is not None:
                return command[0]()
    raise Error("could not locate runnable browser")
# Please note: the following definition hides a builtin function.
# It is recommended one does "import webbrowser" and uses webbrowser.open(url)
# instead of "from webbrowser import *".
def open(url, new=0, autoraise=True):
    """Display *url* using the first registered browser that succeeds.

    new=0: reuse a window if possible; new=1: new window; new=2: new tab.
    Returns True when some browser reported success, False otherwise.
    """
    if _tryorder is None:
        with _lock:
            if _tryorder is None:
                register_standard_browsers()
    return any(
        get(name).open(url, new, autoraise)
        for name in _tryorder
    )
def open_new(url):
    """Open *url* in a new window of the default browser."""
    return open(url, new=1)
def open_new_tab(url):
    """Open *url* in a new tab of the default browser."""
    return open(url, new=2)
def _synthesize(browser, *, preferred=False):
    """Attempt to synthesize a controller base on existing controllers.

    This is useful to create a controller when a user specifies a path to
    an entry in the BROWSER environment variable -- we can copy a general
    controller to operate using a specific installation of the desired
    browser in this way.

    If we can't create a controller in this way, or if there is no
    executable for the requested browser, return [None, None].
    """
    cmd = browser.split()[0]
    if not shutil.which(cmd):
        # Executable not on PATH: nothing to synthesize from.
        return [None, None]
    name = os.path.basename(cmd)
    try:
        command = _browsers[name.lower()]
    except KeyError:
        return [None, None]
    # now attempt to clone to fit the new name:
    controller = command[1]
    if controller and name.lower() == controller.basename:
        import copy
        # Shallow copy so the registered prototype is left untouched.
        controller = copy.copy(controller)
        controller.name = browser
        controller.basename = os.path.basename(browser)
        register(browser, None, instance=controller, preferred=preferred)
        return [None, controller]
    return [None, None]
# General parent classes
class BaseBrowser(object):
    """Parent class for all browsers. Do not use directly."""
    args = ['%s']
    def __init__(self, name=""):
        self.name = name
        self.basename = name
    def open(self, url, new=0, autoraise=True):
        """Subclasses must implement the actual launch."""
        raise NotImplementedError
    def open_new(self, url):
        """Open *url* in a new browser window."""
        return self.open(url, new=1)
    def open_new_tab(self, url):
        """Open *url* in a new browser tab."""
        return self.open(url, new=2)
class GenericBrowser(BaseBrowser):
    """Browser started with a command line and no remote functionality."""
    def __init__(self, name):
        if isinstance(name, str):
            # Bare command string: the URL becomes its single argument.
            self.name = name
            self.args = ["%s"]
        else:
            # Sequence form: first element is the executable, rest are args.
            self.name = name[0]
            self.args = name[1:]
        self.basename = os.path.basename(self.name)
    def open(self, url, new=0, autoraise=True):
        """Run the command with *url* substituted for '%s'; True on exit 0."""
        cmdline = [self.name] + [part.replace("%s", url) for part in self.args]
        try:
            if sys.platform[:3] == 'win':
                proc = subprocess.Popen(cmdline)
            else:
                proc = subprocess.Popen(cmdline, close_fds=True)
            return not proc.wait()
        except OSError:
            return False
class BackgroundBrowser(GenericBrowser):
    """Class for all browsers which are to be started in the
       background."""

    def open(self, url, new=0, autoraise=True):
        cmdline = [self.name] + [arg.replace("%s", url)
                                 for arg in self.args]
        try:
            if sys.platform[:3] == 'win':
                p = subprocess.Popen(cmdline)
            else:
                # start_new_session detaches the browser from our process
                # group so it survives after we exit.
                p = subprocess.Popen(cmdline, close_fds=True,
                                     start_new_session=True)
            # Success means the process is still running (we do not wait).
            return (p.poll() is None)
        except OSError:
            return False
class UnixBrowser(BaseBrowser):
    """Parent class for all Unix browsers with remote functionality."""

    raise_opts = None        # per-browser [no-raise, raise] option strings
    background = False       # True if the browser detaches when launched directly
    redirect_stdout = True   # set False for browsers that must keep stdout

    # In remote_args, %s will be replaced with the requested URL.  %action will
    # be replaced depending on the value of 'new' passed to open.
    # remote_action is used for new=0 (open).  If newwin is not None, it is
    # used for new=1 (open_new).  If newtab is not None, it is used for
    # new=3 (open_new_tab).  After both substitutions are made, any empty
    # strings in the transformed remote_args list will be removed.
    remote_args = ['%action', '%s']
    remote_action = None
    remote_action_newwin = None
    remote_action_newtab = None

    def _invoke(self, args, remote, autoraise):
        """Spawn the browser process.

        remote=True  -> remote-control invocation: wait up to 5 seconds
                        and report its exit status.
        remote=False -> direct launch; success criterion depends on
                        self.background.
        """
        raise_opt = []
        if remote and self.raise_opts:
            # use autoraise argument only for remote invocation
            autoraise = int(autoraise)
            opt = self.raise_opts[autoraise]
            if opt: raise_opt = [opt]

        cmdline = [self.name] + raise_opt + args

        if remote or self.background:
            inout = subprocess.DEVNULL
        else:
            # for TTY browsers, we need stdin/out
            inout = None
        p = subprocess.Popen(cmdline, close_fds=True, stdin=inout,
                             stdout=(self.redirect_stdout and inout or None),
                             stderr=inout, start_new_session=True)
        if remote:
            # wait at most five seconds. If the subprocess is not finished, the
            # remote invocation has (hopefully) started a new instance.
            try:
                rc = p.wait(5)
                # if remote call failed, open() will try direct invocation
                return not rc
            except subprocess.TimeoutExpired:
                return True
        elif self.background:
            if p.poll() is None:
                return True
            else:
                return False
        else:
            # TTY browser: block until the user quits it.
            return not p.wait()

    def open(self, url, new=0, autoraise=True):
        """Open *url*: new=0 same window, new=1 new window, new=2 new tab."""
        if new == 0:
            action = self.remote_action
        elif new == 1:
            action = self.remote_action_newwin
        elif new == 2:
            if self.remote_action_newtab is None:
                action = self.remote_action_newwin
            else:
                action = self.remote_action_newtab
        else:
            raise Error("Bad 'new' parameter to open(); " +
                        "expected 0, 1, or 2, got %s" % new)

        args = [arg.replace("%s", url).replace("%action", action)
                for arg in self.remote_args]
        args = [arg for arg in args if arg]
        # Try the remote protocol first; if it reports failure, fall back
        # to launching a fresh browser process with the plain args.
        success = self._invoke(args, True, autoraise)
        if not success:
            # remote invocation failed, try straight way
            args = [arg.replace("%s", url) for arg in self.args]
            return self._invoke(args, False, False)
        else:
            return True
class Mozilla(UnixBrowser):
    """Launcher class for Mozilla browsers."""

    # Modern Firefox-style command-line remote options.
    remote_args = ['%action', '%s']
    remote_action = ""
    remote_action_newwin = "-new-window"
    remote_action_newtab = "-new-tab"
    background = True
class Netscape(UnixBrowser):
    """Launcher class for Netscape browser."""

    # Legacy X-remote protocol: -remote 'openURL(url[,new-window|,new-tab])'.
    raise_opts = ["-noraise", "-raise"]
    remote_args = ['-remote', 'openURL(%s%action)']
    remote_action = ""
    remote_action_newwin = ",new-window"
    remote_action_newtab = ",new-tab"
    background = True
class Galeon(UnixBrowser):
    """Launcher class for Galeon/Epiphany browsers."""

    raise_opts = ["-noraise", ""]
    remote_args = ['%action', '%s']
    remote_action = "-n"   # open in existing window
    remote_action_newwin = "-w"
    background = True
class Chrome(UnixBrowser):
    "Launcher class for Google Chrome browser."

    remote_args = ['%action', '%s']
    remote_action = ""
    remote_action_newwin = "--new-window"
    remote_action_newtab = ""
    background = True

# Chromium takes the same command-line options as Chrome.
Chromium = Chrome
class Opera(UnixBrowser):
    "Launcher class for Opera browser."

    remote_args = ['%action', '%s']
    remote_action = ""
    remote_action_newwin = "--new-window"
    remote_action_newtab = ""
    background = True
class Elinks(UnixBrowser):
    "Launcher class for Elinks browsers."

    remote_args = ['-remote', 'openURL(%s%action)']
    remote_action = ""
    remote_action_newwin = ",new-window"
    remote_action_newtab = ",new-tab"
    background = False

    # elinks doesn't like its stdout to be redirected -
    # it uses redirected stdout as a signal to do -dump
    redirect_stdout = False
class Konqueror(BaseBrowser):
    """Controller for the KDE File Manager (kfm, or Konqueror).

    See the output of ``kfmclient --commands``
    for more information on the Konqueror remote-control interface.
    """

    def open(self, url, new=0, autoraise=True):
        # XXX Currently I know no way to prevent KFM from opening a new win.
        if new == 2:
            action = "newTab"
        else:
            action = "openURL"

        devnull = subprocess.DEVNULL

        # First choice: the kfmclient remote-control helper.
        try:
            p = subprocess.Popen(["kfmclient", action, url],
                                 close_fds=True, stdin=devnull,
                                 stdout=devnull, stderr=devnull)
        except OSError:
            # fall through to next variant
            pass
        else:
            p.wait()
            # kfmclient's return code unfortunately has no meaning as it seems
            return True

        # Second choice: a detached konqueror process.
        try:
            p = subprocess.Popen(["konqueror", "--silent", url],
                                 close_fds=True, stdin=devnull,
                                 stdout=devnull, stderr=devnull,
                                 start_new_session=True)
        except OSError:
            # fall through to next variant
            pass
        else:
            if p.poll() is None:
                # Should be running now.
                return True

        # Last resort: the legacy kfm binary.
        try:
            p = subprocess.Popen(["kfm", "-d", url],
                                 close_fds=True, stdin=devnull,
                                 stdout=devnull, stderr=devnull,
                                 start_new_session=True)
        except OSError:
            return False
        else:
            return (p.poll() is None)
class Grail(BaseBrowser):
    """Controller for the Grail browser, remote-controlled via Unix sockets.

    There should be a way to maintain a connection to Grail, but the
    Grail remote control protocol doesn't really allow that at this
    point.  It probably never will!
    """

    def _find_grail_rc(self):
        """Return a socket connected to a live Grail instance, or None.

        Grail advertises per-user Unix sockets under $TMPDIR/.grail-unix;
        probe each candidate, unlinking dead ones as we go.
        """
        import glob
        import pwd
        import socket
        import tempfile
        tempdir = os.path.join(tempfile.gettempdir(),
                               ".grail-unix")
        user = pwd.getpwuid(os.getuid())[0]
        filename = os.path.join(tempdir, user + "-*")
        maybes = glob.glob(filename)
        if not maybes:
            return None
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        for fn in maybes:
            # need to PING each one until we find one that's live
            try:
                s.connect(fn)
            except OSError:
                # no good; attempt to clean it out, but don't fail:
                try:
                    os.unlink(fn)
                except OSError:
                    pass
            else:
                return s
        # No live socket found (previously an implicit None fall-through).
        return None

    def _remote(self, action):
        """Send *action* (e.g. "LOAD <url>") to Grail; 1 on success, else 0."""
        s = self._find_grail_rc()
        if not s:
            return 0
        # Bug fix: socket.send() requires bytes on Python 3; the action was
        # previously passed as str, which raises TypeError.  Encode first.
        s.send(action.encode("utf-8"))
        s.close()
        return 1

    def open(self, url, new=0, autoraise=True):
        """Ask Grail to load *url*; any truthy *new* opens a new window."""
        if new:
            ok = self._remote("LOADNEW " + url)
        else:
            ok = self._remote("LOAD " + url)
        return ok
#
# Platform support for Unix
#
# These are the right tests because all these Unix browsers require either
# a console terminal or an X display to run.
def register_X_browsers():
    """Register graphical browsers usable under an X display.

    Registration order matters: desktop-neutral openers (xdg-open) and
    desktop-session openers come first, then specific browser families.
    """
    # use xdg-open if around
    if shutil.which("xdg-open"):
        register("xdg-open", None, BackgroundBrowser("xdg-open"))

    # The default GNOME3 browser
    if "GNOME_DESKTOP_SESSION_ID" in os.environ and shutil.which("gvfs-open"):
        register("gvfs-open", None, BackgroundBrowser("gvfs-open"))

    # The default GNOME browser
    if "GNOME_DESKTOP_SESSION_ID" in os.environ and shutil.which("gnome-open"):
        register("gnome-open", None, BackgroundBrowser("gnome-open"))

    # The default KDE browser
    if "KDE_FULL_SESSION" in os.environ and shutil.which("kfmclient"):
        register("kfmclient", Konqueror, Konqueror("kfmclient"))

    # Debian-style alternatives symlink for the preferred X browser.
    if shutil.which("x-www-browser"):
        register("x-www-browser", None, BackgroundBrowser("x-www-browser"))

    # The Mozilla browsers
    for browser in ("firefox", "iceweasel", "iceape", "seamonkey"):
        if shutil.which(browser):
            register(browser, None, Mozilla(browser))

    # The Netscape and old Mozilla browsers
    for browser in ("mozilla-firefox",
                    "mozilla-firebird", "firebird",
                    "mozilla", "netscape"):
        if shutil.which(browser):
            register(browser, None, Netscape(browser))

    # Konqueror/kfm, the KDE browser.
    if shutil.which("kfm"):
        register("kfm", Konqueror, Konqueror("kfm"))
    elif shutil.which("konqueror"):
        register("konqueror", Konqueror, Konqueror("konqueror"))

    # Gnome's Galeon and Epiphany
    for browser in ("galeon", "epiphany"):
        if shutil.which(browser):
            register(browser, None, Galeon(browser))

    # Skipstone, another Gtk/Mozilla based browser
    if shutil.which("skipstone"):
        register("skipstone", None, BackgroundBrowser("skipstone"))

    # Google Chrome/Chromium browsers
    for browser in ("google-chrome", "chrome", "chromium", "chromium-browser"):
        if shutil.which(browser):
            register(browser, None, Chrome(browser))

    # Opera, quite popular
    if shutil.which("opera"):
        register("opera", None, Opera("opera"))

    # Next, Mosaic -- old but still in use.
    if shutil.which("mosaic"):
        register("mosaic", None, BackgroundBrowser("mosaic"))

    # Grail, the Python browser. Does anybody still use it?
    if shutil.which("grail"):
        register("grail", Grail, None)
def register_standard_browsers():
    """Populate the browser registry for the current platform.

    Resets the try order, registers platform-appropriate controllers, and
    finally honors the user's BROWSER environment variable by prepending
    its entries to the try order.
    """
    global _tryorder
    _tryorder = []

    if sys.platform == 'darwin':
        register("MacOSX", None, MacOSXOSAScript('default'))
        register("chrome", None, MacOSXOSAScript('chrome'))
        register("firefox", None, MacOSXOSAScript('firefox'))
        register("safari", None, MacOSXOSAScript('safari'))
        # OS X can use below Unix support (but we prefer using the OS X
        # specific stuff)

    if sys.platform[:3] == "win":
        # First try to use the default Windows browser
        register("windows-default", WindowsDefault)

        # Detect some common Windows browsers, fallback to IE
        iexplore = os.path.join(os.environ.get("PROGRAMFILES", "C:\\Program Files"),
                                "Internet Explorer\\IEXPLORE.EXE")
        for browser in ("firefox", "firebird", "seamonkey", "mozilla",
                        "netscape", "opera", iexplore):
            if shutil.which(browser):
                register(browser, None, BackgroundBrowser(browser))
    else:
        # Prefer X browsers if present
        if os.environ.get("DISPLAY"):
            try:
                # Ask the desktop environment which browser it prefers.
                cmd = "xdg-settings get default-web-browser".split()
                raw_result = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
                result = raw_result.decode().strip()
            except (FileNotFoundError, subprocess.CalledProcessError):
                pass
            else:
                global _os_preferred_browser
                _os_preferred_browser = result

            register_X_browsers()

        # Also try console browsers
        if os.environ.get("TERM"):
            if shutil.which("www-browser"):
                register("www-browser", None, GenericBrowser("www-browser"))
            # The Links/elinks browsers <http://artax.karlin.mff.cuni.cz/~mikulas/links/>
            if shutil.which("links"):
                register("links", None, GenericBrowser("links"))
            if shutil.which("elinks"):
                register("elinks", None, Elinks("elinks"))
            # The Lynx browser <http://lynx.isc.org/>, <http://lynx.browser.org/>
            if shutil.which("lynx"):
                register("lynx", None, GenericBrowser("lynx"))
            # The w3m browser <http://w3m.sourceforge.net/>
            if shutil.which("w3m"):
                register("w3m", None, GenericBrowser("w3m"))

    # OK, now that we know what the default preference orders for each
    # platform are, allow user to override them with the BROWSER variable.
    if "BROWSER" in os.environ:
        userchoices = os.environ["BROWSER"].split(os.pathsep)
        userchoices.reverse()

        # Treat choices in same way as if passed into get() but do register
        # and prepend to _tryorder
        for cmdline in userchoices:
            if cmdline != '':
                cmd = _synthesize(cmdline, preferred=True)
                if cmd[1] is None:
                    register(cmdline, None, GenericBrowser(cmdline), preferred=True)

    # what to do if _tryorder is now empty?
#
# Platform support for Windows
#
if sys.platform[:3] == "win":
    class WindowsDefault(BaseBrowser):
        """Use the Windows file-association machinery to open URLs."""

        def open(self, url, new=0, autoraise=True):
            try:
                os.startfile(url)
            except OSError:
                # [Error 22] No application is associated with the specified
                # file for this operation: '<URL>'
                return False
            else:
                # startfile is fire-and-forget; no error means we assume success.
                return True
#
# Platform support for MacOS
#
if sys.platform == 'darwin':
    # Adapted from patch submitted to SourceForge by Steven J. Burr
    class MacOSX(BaseBrowser):
        """Launcher class for Aqua browsers on Mac OS X

        Optionally specify a browser name on instantiation.  Note that this
        will not work for Aqua browsers if the user has moved the application
        package after installation.

        If no browser is specified, the default browser, as specified in the
        Internet System Preferences panel, will be used.
        """
        def __init__(self, name):
            self.name = name

        def open(self, url, new=0, autoraise=True):
            # Single quotes would break the AppleScript string below.
            assert "'" not in url
            # hack for local urls
            if not ':' in url:
                url = 'file:'+url

            # new must be 0 or 1
            new = int(bool(new))
            if self.name == "default":
                # User called open, open_new or get without a browser parameter
                script = 'open location "%s"' % url.replace('"', '%22') # opens in default browser
            else:
                # User called get and chose a browser
                if self.name == "OmniWeb":
                    toWindow = ""
                else:
                    # Include toWindow parameter of OpenURL command for browsers
                    # that support it.  0 == new window; -1 == existing
                    toWindow = "toWindow %d" % (new - 1)
                cmd = 'OpenURL "%s"' % url.replace('"', '%22')
                script = '''tell application "%s"
                                activate
                                %s %s
                            end tell''' % (self.name, cmd, toWindow)
            # Open pipe to AppleScript through osascript command
            osapipe = os.popen("osascript", "w")
            if osapipe is None:
                return False
            # Write script to osascript's stdin
            osapipe.write(script)
            rc = osapipe.close()
            # os.popen.close() returns None on success, an exit status on failure.
            return not rc

    class MacOSXOSAScript(BaseBrowser):
        """Drive a named browser (or the system default) via AppleScript."""

        def __init__(self, name):
            self._name = name

        def open(self, url, new=0, autoraise=True):
            if self._name == 'default':
                script = 'open location "%s"' % url.replace('"', '%22') # opens in default browser
            else:
                script = '''
                   tell application "%s"
                       activate
                       open location "%s"
                   end
                   '''%(self._name, url.replace('"', '%22'))

            osapipe = os.popen("osascript", "w")
            if osapipe is None:
                return False

            osapipe.write(script)
            rc = osapipe.close()
            return not rc
def main():
    """Command-line interface: ``webbrowser [-n | -t] url``."""
    import getopt
    usage = """Usage: %s [-n | -t] url
    -n: open new window
    -t: open new tab""" % sys.argv[0]
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'ntd')
    except getopt.error as msg:
        print(msg, file=sys.stderr)
        print(usage, file=sys.stderr)
        sys.exit(1)
    new_win = 0
    for o, a in opts:
        if o == '-n': new_win = 1
        elif o == '-t': new_win = 2
    if len(args) != 1:
        print(usage, file=sys.stderr)
        sys.exit(1)

    url = args[0]
    # NOTE: this calls the module-level open() (webbrowser.open), which
    # shadows the builtin in this module's namespace.
    open(url, new_win)

    # Emit the terminal bell character when done.
    print("\a")
if __name__ == "__main__":
    # Script entry point: `python -m webbrowser [-n|-t] url`.
    main()
|
farhan0581/scrapy | refs/heads/master | scrapy/commands/version.py | 60 | from __future__ import print_function
import sys
import platform
import twisted
import OpenSSL
import scrapy
from scrapy.commands import ScrapyCommand
class Command(ScrapyCommand):
    """`scrapy version` command: print Scrapy's version, or, with -v, a
    full environment report (lxml, Twisted, Python, pyOpenSSL, platform)
    useful for bug reports."""

    def syntax(self):
        return "[-v]"

    def short_desc(self):
        return "Print Scrapy version"

    def add_options(self, parser):
        ScrapyCommand.add_options(self, parser)
        parser.add_option("--verbose", "-v", dest="verbose", action="store_true",
            help="also display twisted/python/platform info (useful for bug reports)")

    def run(self, args, opts):
        if opts.verbose:
            # lxml is imported lazily: only the verbose report needs it.
            import lxml.etree
            lxml_version = ".".join(map(str, lxml.etree.LXML_VERSION))
            libxml2_version = ".".join(map(str, lxml.etree.LIBXML_VERSION))
            print("Scrapy : %s" % scrapy.__version__)
            print("lxml : %s" % lxml_version)
            print("libxml2 : %s" % libxml2_version)
            print("Twisted : %s" % twisted.version.short())
            print("Python : %s" % sys.version.replace("\n", "- "))
            print("pyOpenSSL : %s" % self._get_openssl_version())
            print("Platform : %s" % platform.platform())
        else:
            print("Scrapy %s" % scrapy.__version__)

    def _get_openssl_version(self):
        # Returns "pyOpenSSL-version (underlying-OpenSSL-version)".
        try:
            openssl = OpenSSL.SSL.SSLeay_version(OpenSSL.SSL.SSLEAY_VERSION)\
                .decode('ascii', errors='replace')
        # pyOpenSSL 0.12 does not expose openssl version
        except AttributeError:
            openssl = 'Unknown OpenSSL version'

        return '{} ({})'.format(OpenSSL.version.__version__, openssl)
|
audihsu-qci/ONL | refs/heads/master | components/all/platform-config/x86-64-quanta-ly6-rangeley-r0/src/python/onlpc.py | 5 | #!/usr/bin/python
############################################################
# <bsn.cl fy=2013 v=none>
#
# Copyright 2013, 2014 BigSwitch Networks, Inc.
# Copyright 2015 Quanta Computer Inc.
#
#
#
# </bsn.cl>
############################################################
#
# Platform driver for the Quanta LY6.
#
############################################################
import subprocess
from onl.platform.base import *
from onl.vendor.quanta import *
import os
import shutil
class OpenNetworkPlatformImplementation(OpenNetworkPlatformQuanta):
    """ONL platform driver for the Quanta LY6 (Rangeley, 32 ports)."""

    def _eeprom_file(self):
        # Board EEPROM exposed behind the i2c mux on the PCH SMBus controller.
        return "/sys/devices/pci0000:00/0000:00:1f.3/i2c-0/i2c-27/27-0054/eeprom"

    def model(self):
        return "LY6-Rangeley"

    def platform(self):
        return "x86-64-quanta-ly6-rangeley-r0"

    def _plat_info_dict(self):
        return {
            platinfo.PORT_COUNT : 32,
            }

    def sys_init(self):
        pass

    def sys_oid_platform(self):
        return ".6.1"

    def baseconfig(self):
        """Install platform init scripts and configure the RTC and LEDs.

        Installing init scripts is deliberately best-effort (the directory
        may be absent); always returns True.
        """
        try:
            files = os.listdir("%s/etc/init.d" % self.platform_basedir())
            for file in files:
                src = "%s/etc/init.d/%s" % (self.platform_basedir(), file)
                dst = "/etc/init.d/%s" % file
                os.system("cp -f %s %s" % (src, dst))
                os.system("/usr/sbin/update-rc.d %s defaults" % file)
        except Exception:
            # Bug fix: previously a bare `except:`, which also swallowed
            # SystemExit and KeyboardInterrupt.  The best-effort intent
            # (ignore missing/unreadable init.d) is preserved.
            pass

        # make ds1339 as default rtc
        os.system("ln -snf /dev/rtc1 /dev/rtc")
        os.system("hwclock --hctosys")

        # set system led to green
        os.system("%s/sbin/systemled green" % self.platform_basedir())
        return True
if __name__ == "__main__":
    # Python 2 script: print the platform description when run directly.
    print OpenNetworkPlatformImplementation()
|
prefetchnta/questlab | refs/heads/master | bin/x64bin/python/37/Lib/datetime.py | 1 | """Concrete date/time and related types.
See http://www.iana.org/time-zones/repository/tz-link.html for
time zone and DST data sources.
"""
import time as _time
import math as _math
import sys
def _cmp(x, y):
return 0 if x == y else 1 if x > y else -1
# Year bounds supported by the date/datetime types.
MINYEAR = 1
MAXYEAR = 9999
_MAXORDINAL = 3652059  # date.max.toordinal()

# Utility functions, adapted from Python's Demo/classes/Dates.py, which
# also assumes the current Gregorian calendar indefinitely extended in
# both directions.  Difference:  Dates.py calls January 1 of year 0 day
# number 1.  The code here calls January 1 of year 1 day number 1.  This is
# to match the definition of the "proleptic Gregorian" calendar in Dershowitz
# and Reingold's "Calendrical Calculations", where it's the base calendar
# for all computations.  See the book for algorithms for converting between
# proleptic Gregorian ordinals and many other calendar systems.

# -1 is a placeholder for indexing purposes.
_DAYS_IN_MONTH = [-1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

_DAYS_BEFORE_MONTH = [-1]  # -1 is a placeholder for indexing purposes.
dbm = 0
# Running total: _DAYS_BEFORE_MONTH[m] is the day count before the 1st of
# month m in a non-leap year.
for dim in _DAYS_IN_MONTH[1:]:
    _DAYS_BEFORE_MONTH.append(dbm)
    dbm += dim
del dbm, dim
def _is_leap(year):
"year -> 1 if leap year, else 0."
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def _days_before_year(year):
"year -> number of days before January 1st of year."
y = year - 1
return y*365 + y//4 - y//100 + y//400
def _days_in_month(year, month):
    "year, month -> number of days in that month in that year."
    assert 1 <= month <= 12, month
    # February is the only month whose length depends on the year.
    if month != 2 or not _is_leap(year):
        return _DAYS_IN_MONTH[month]
    return 29
def _days_before_month(year, month):
    "year, month -> number of days in year preceding first day of month."
    assert 1 <= month <= 12, 'month must be in 1..12'
    # Add one extra day for months after February in a leap year.
    extra = 1 if month > 2 and _is_leap(year) else 0
    return _DAYS_BEFORE_MONTH[month] + extra
def _ymd2ord(year, month, day):
    "year, month, day -> ordinal, considering 01-Jan-0001 as day 1."
    assert 1 <= month <= 12, 'month must be in 1..12'
    dim = _days_in_month(year, month)
    assert 1 <= day <= dim, ('day must be in 1..%d' % dim)
    # Ordinal = days in all prior years + days in prior months + day-of-month.
    return (_days_before_year(year) +
            _days_before_month(year, month) +
            day)
# Day counts of the Gregorian repeating cycles, used by _ord2ymd.
_DI400Y = _days_before_year(401)    # number of days in 400 years
_DI100Y = _days_before_year(101)    #    "    "   "   " 100   "
_DI4Y   = _days_before_year(5)      #    "    "   "   "   4   "

# A 4-year cycle has an extra leap day over what we'd get from pasting
# together 4 single years.
assert _DI4Y == 4 * 365 + 1

# Similarly, a 400-year cycle has an extra leap day over what we'd get from
# pasting together 4 100-year cycles.
assert _DI400Y == 4 * _DI100Y + 1

# OTOH, a 100-year cycle has one fewer leap day than we'd get from
# pasting together 25 4-year cycles.
assert _DI100Y == 25 * _DI4Y - 1
def _ord2ymd(n):
    "ordinal -> (year, month, day), considering 01-Jan-0001 as day 1."

    # n is a 1-based index, starting at 1-Jan-1.  The pattern of leap years
    # repeats exactly every 400 years.  The basic strategy is to find the
    # closest 400-year boundary at or before n, then work with the offset
    # from that boundary to n.  Life is much clearer if we subtract 1 from
    # n first -- then the values of n at 400-year boundaries are exactly
    # those divisible by _DI400Y:
    #
    #     D  M   Y            n              n-1
    #     -- --- ----        ----------     ----------------
    #     31 Dec -400        -_DI400Y       -_DI400Y -1
    #      1 Jan -399         -_DI400Y +1   -_DI400Y      400-year boundary
    #     ...
    #     30 Dec  000        -1             -2
    #     31 Dec  000         0             -1
    #      1 Jan  001         1              0            400-year boundary
    #      2 Jan  001         2              1
    #      3 Jan  001         3              2
    #     ...
    #     31 Dec  400         _DI400Y        _DI400Y -1
    #      1 Jan  401         _DI400Y +1     _DI400Y      400-year boundary
    n -= 1
    n400, n = divmod(n, _DI400Y)
    year = n400 * 400 + 1   # ..., -399, 1, 401, ...

    # Now n is the (non-negative) offset, in days, from January 1 of year, to
    # the desired date.  Now compute how many 100-year cycles precede n.
    # Note that it's possible for n100 to equal 4!  In that case 4 full
    # 100-year cycles precede the desired day, which implies the desired
    # day is December 31 at the end of a 400-year cycle.
    n100, n = divmod(n, _DI100Y)

    # Now compute how many 4-year cycles precede it.
    n4, n = divmod(n, _DI4Y)

    # And now how many single years.  Again n1 can be 4, and again meaning
    # that the desired day is December 31 at the end of the 4-year cycle.
    n1, n = divmod(n, 365)

    year += n100 * 100 + n4 * 4 + n1
    if n1 == 4 or n100 == 4:
        # Both edge cases land on December 31 of the preceding year.
        assert n == 0
        return year-1, 12, 31

    # Now the year is correct, and n is the offset from January 1.  We find
    # the month via an estimate that's either exact or one too large.
    leapyear = n1 == 3 and (n4 != 24 or n100 == 3)
    assert leapyear == _is_leap(year)
    month = (n + 50) >> 5
    preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear)
    if preceding > n:  # estimate is too large
        month -= 1
        preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear)
    n -= preceding
    assert 0 <= n < _days_in_month(year, month)

    # Now the year and month are correct, and n is the offset from the
    # start of that month:  we're done!
    return year, month, n+1
# Month and day names.  For localized versions, see the calendar module.
# Index 0 is None so month numbers (1..12) and ISO weekday numbers (1..7)
# index the tables directly.
_MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
                     "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
_DAYNAMES = [None, "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
def _build_struct_time(y, m, d, hh, mm, ss, dstflag):
    """Build a time.struct_time from broken-out date/time fields."""
    # struct_time weekday: Monday == 0, hence the +6 rotation of the ordinal.
    wday = (_ymd2ord(y, m, d) + 6) % 7
    # 1-based day-of-year.
    dnum = _days_before_month(y, m) + d
    return _time.struct_time((y, m, d, hh, mm, ss, wday, dnum, dstflag))
def _format_time(hh, mm, ss, us, timespec='auto'):
specs = {
'hours': '{:02d}',
'minutes': '{:02d}:{:02d}',
'seconds': '{:02d}:{:02d}:{:02d}',
'milliseconds': '{:02d}:{:02d}:{:02d}.{:03d}',
'microseconds': '{:02d}:{:02d}:{:02d}.{:06d}'
}
if timespec == 'auto':
# Skip trailing microseconds when us==0.
timespec = 'microseconds' if us else 'seconds'
elif timespec == 'milliseconds':
us //= 1000
try:
fmt = specs[timespec]
except KeyError:
raise ValueError('Unknown timespec value')
else:
return fmt.format(hh, mm, ss, us)
def _format_offset(off):
    """Format a UTC offset timedelta as '+HH:MM[:SS[.ffffff]]' ('' if None)."""
    s = ''
    if off is not None:
        if off.days < 0:
            sign = "-"
            off = -off
        else:
            sign = "+"
        # timedelta // timedelta division splits the offset into h/m/s parts.
        hh, mm = divmod(off, timedelta(hours=1))
        mm, ss = divmod(mm, timedelta(minutes=1))
        s += "%s%02d:%02d" % (sign, hh, mm)
        if ss or ss.microseconds:
            # Seconds (and possibly microseconds) only when nonzero.
            s += ":%02d" % ss.seconds

            if ss.microseconds:
                s += '.%06d' % ss.microseconds
    return s
# Correctly substitute for %z and %Z escapes in strftime formats.
def _wrap_strftime(object, format, timetuple):
    """strftime() with %f/%z/%Z pre-expanded from *object* before delegating
    to time.strftime (which does not understand them uniformly)."""
    # Don't call utcoffset() or tzname() unless actually needed.
    freplace = None  # the string to use for %f
    zreplace = None  # the string to use for %z
    Zreplace = None  # the string to use for %Z

    # Scan format for %z and %Z escapes, replacing as needed.
    newformat = []
    push = newformat.append
    i, n = 0, len(format)
    while i < n:
        ch = format[i]
        i += 1
        if ch == '%':
            if i < n:
                ch = format[i]
                i += 1
                if ch == 'f':
                    if freplace is None:
                        freplace = '%06d' % getattr(object,
                                                    'microsecond', 0)
                    newformat.append(freplace)
                elif ch == 'z':
                    if zreplace is None:
                        zreplace = ""
                        if hasattr(object, "utcoffset"):
                            offset = object.utcoffset()
                            if offset is not None:
                                sign = '+'
                                if offset.days < 0:
                                    offset = -offset
                                    sign = '-'
                                h, rest = divmod(offset, timedelta(hours=1))
                                m, rest = divmod(rest, timedelta(minutes=1))
                                s = rest.seconds
                                u = offset.microseconds
                                # Emit the shortest form that loses no precision.
                                if u:
                                    zreplace = '%c%02d%02d%02d.%06d' % (sign, h, m, s, u)
                                elif s:
                                    zreplace = '%c%02d%02d%02d' % (sign, h, m, s)
                                else:
                                    zreplace = '%c%02d%02d' % (sign, h, m)
                    assert '%' not in zreplace
                    newformat.append(zreplace)
                elif ch == 'Z':
                    if Zreplace is None:
                        Zreplace = ""
                        if hasattr(object, "tzname"):
                            s = object.tzname()
                            if s is not None:
                                # strftime is going to have at this: escape %
                                Zreplace = s.replace('%', '%%')
                    newformat.append(Zreplace)
                else:
                    # Any other escape passes through to time.strftime.
                    push('%')
                    push(ch)
            else:
                # Trailing lone '%'.
                push('%')
        else:
            push(ch)
    newformat = "".join(newformat)
    return _time.strftime(newformat, timetuple)
# Helpers for parsing the result of isoformat()
def _parse_isoformat_date(dtstr):
# It is assumed that this function will only be called with a
# string of length exactly 10, and (though this is not used) ASCII-only
year = int(dtstr[0:4])
if dtstr[4] != '-':
raise ValueError('Invalid date separator: %s' % dtstr[4])
month = int(dtstr[5:7])
if dtstr[7] != '-':
raise ValueError('Invalid date separator')
day = int(dtstr[8:10])
return [year, month, day]
def _parse_hh_mm_ss_ff(tstr):
# Parses things of the form HH[:MM[:SS[.fff[fff]]]]
len_str = len(tstr)
time_comps = [0, 0, 0, 0]
pos = 0
for comp in range(0, 3):
if (len_str - pos) < 2:
raise ValueError('Incomplete time component')
time_comps[comp] = int(tstr[pos:pos+2])
pos += 2
next_char = tstr[pos:pos+1]
if not next_char or comp >= 2:
break
if next_char != ':':
raise ValueError('Invalid time separator: %c' % next_char)
pos += 1
if pos < len_str:
if tstr[pos] != '.':
raise ValueError('Invalid microsecond component')
else:
pos += 1
len_remainder = len_str - pos
if len_remainder not in (3, 6):
raise ValueError('Invalid microsecond component')
time_comps[3] = int(tstr[pos:])
if len_remainder == 3:
time_comps[3] *= 1000
return time_comps
def _parse_isoformat_time(tstr):
    """Parse HH[:MM[:SS[.fff[fff]]]][+HH:MM[:SS[.ffffff]]] into
    [hour, minute, second, microsecond, tzinfo-or-None]."""
    # Format supported is HH[:MM[:SS[.fff[fff]]]][+HH:MM[:SS[.ffffff]]]
    len_str = len(tstr)
    if len_str < 2:
        raise ValueError('Isoformat time too short')

    # This is equivalent to re.search('[+-]', tstr), but faster
    tz_pos = (tstr.find('-') + 1 or tstr.find('+') + 1)
    timestr = tstr[:tz_pos-1] if tz_pos > 0 else tstr

    time_comps = _parse_hh_mm_ss_ff(timestr)

    tzi = None
    if tz_pos > 0:
        tzstr = tstr[tz_pos:]

        # Valid time zone strings are:
        # HH:MM               len: 5
        # HH:MM:SS            len: 8
        # HH:MM:SS.ffffff     len: 15

        if len(tzstr) not in (5, 8, 15):
            raise ValueError('Malformed time zone string')

        tz_comps = _parse_hh_mm_ss_ff(tzstr)
        if all(x == 0 for x in tz_comps):
            # A zero offset is canonicalized to the shared UTC singleton.
            tzi = timezone.utc
        else:
            tzsign = -1 if tstr[tz_pos - 1] == '-' else 1

            td = timedelta(hours=tz_comps[0], minutes=tz_comps[1],
                           seconds=tz_comps[2], microseconds=tz_comps[3])

            tzi = timezone(tzsign * td)

    time_comps.append(tzi)

    return time_comps
# Just raise TypeError if the arg isn't None or a string.
def _check_tzname(name):
if name is not None and not isinstance(name, str):
raise TypeError("tzinfo.tzname() must return None or string, "
"not '%s'" % type(name))
# name is the offset-producing method, "utcoffset" or "dst".
# offset is what it returned.
# If offset isn't None or timedelta, raises TypeError.
# If offset is None, returns None.
# Else offset is checked for being in range.
# If it is, its integer value is returned.  Else ValueError is raised.
def _check_utc_offset(name, offset):
    """Validate a utcoffset()/dst() result (None or timedelta in ±24h)."""
    assert name in ("utcoffset", "dst")
    if offset is None:
        return
    if not isinstance(offset, timedelta):
        raise TypeError("tzinfo.%s() must return None "
                        "or timedelta, not '%s'" % (name, type(offset)))
    # Strictly between -24h and +24h.
    if not -timedelta(1) < offset < timedelta(1):
        raise ValueError("%s()=%s, must be strictly between "
                         "-timedelta(hours=24) and timedelta(hours=24)" %
                         (name, offset))
def _check_int_field(value):
if isinstance(value, int):
return value
if not isinstance(value, float):
try:
value = value.__int__()
except AttributeError:
pass
else:
if isinstance(value, int):
return value
raise TypeError('__int__ returned non-int (type %s)' %
type(value).__name__)
raise TypeError('an integer is required (got type %s)' %
type(value).__name__)
raise TypeError('integer argument expected, got float')
def _check_date_fields(year, month, day):
    """Int-coerce and range-check (year, month, day); return them as a tuple.

    Raises TypeError for non-integers, ValueError for out-of-range values.
    """
    year = _check_int_field(year)
    month = _check_int_field(month)
    day = _check_int_field(day)
    if not MINYEAR <= year <= MAXYEAR:
        raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year)
    if not 1 <= month <= 12:
        raise ValueError('month must be in 1..12', month)
    # Day upper bound depends on the (year, month) pair.
    dim = _days_in_month(year, month)
    if not 1 <= day <= dim:
        raise ValueError('day must be in 1..%d' % dim, day)
    return year, month, day
def _check_time_fields(hour, minute, second, microsecond, fold):
    """Int-coerce and range-check time components; return them as a tuple."""
    hour = _check_int_field(hour)
    minute = _check_int_field(minute)
    second = _check_int_field(second)
    microsecond = _check_int_field(microsecond)
    # Table-driven bounds check; messages match the historical wording.
    for label, val, hi in (('hour', hour, 23),
                           ('minute', minute, 59),
                           ('second', second, 59),
                           ('microsecond', microsecond, 999999)):
        if not 0 <= val <= hi:
            raise ValueError('%s must be in 0..%d' % (label, hi), val)
    if fold not in (0, 1):
        raise ValueError('fold must be either 0 or 1', fold)
    return hour, minute, second, microsecond, fold
def _check_tzinfo_arg(tz):
    """Raise TypeError unless *tz* is None or a tzinfo instance."""
    if tz is not None and not isinstance(tz, tzinfo):
        raise TypeError("tzinfo argument must be None or of a tzinfo subclass")
def _cmperror(x, y):
raise TypeError("can't compare '%s' to '%s'" % (
type(x).__name__, type(y).__name__))
def _divide_and_round(a, b):
"""divide a by b and round result to the nearest integer
When the ratio is exactly half-way between two integers,
the even integer is returned.
"""
# Based on the reference implementation for divmod_near
# in Objects/longobject.c.
q, r = divmod(a, b)
# round up if either r / b > 0.5, or r / b == 0.5 and q is odd.
# The expression r / b > 0.5 is equivalent to 2 * r > b if b is
# positive, 2 * r < b if b negative.
r *= 2
greater_than_half = r > b if b > 0 else r < b
if greater_than_half or r == b and q % 2 == 1:
q += 1
return q
class timedelta:
    """Represent the difference between two datetime objects.

    Supported operators:

    - add, subtract timedelta
    - unary plus, minus, abs
    - compare to timedelta
    - multiply, divide by int

    In addition, datetime supports subtraction of two datetime objects
    returning a timedelta, and addition or subtraction of a datetime
    and a timedelta giving a datetime.

    Representation: (days, seconds, microseconds).  Why?  Because I
    felt like it.
    """
    # Normalized invariant (established by __new__):
    #   0 <= _seconds < 24*3600 and 0 <= _microseconds < 1000000.
    # _hashcode caches the hash; -1 means "not computed yet".
    __slots__ = '_days', '_seconds', '_microseconds', '_hashcode'
    def __new__(cls, days=0, seconds=0, microseconds=0,
                milliseconds=0, minutes=0, hours=0, weeks=0):
        """Normalize all arguments (ints or floats) to the canonical
        (days, seconds, microseconds) triple, carrying fractions downward."""
        # Doing this efficiently and accurately in C is going to be difficult
        # and error-prone, due to ubiquitous overflow possibilities, and that
        # C double doesn't have enough bits of precision to represent
        # microseconds over 10K years faithfully.  The code here tries to make
        # explicit where go-fast assumptions can be relied on, in order to
        # guide the C implementation; it's way more convoluted than speed-
        # ignoring auto-overflow-to-long idiomatic Python could be.

        # XXX Check that all inputs are ints or floats.

        # Final values, all integer.
        # s and us fit in 32-bit signed ints; d isn't bounded.
        d = s = us = 0

        # Normalize everything to days, seconds, microseconds.
        days += weeks*7
        seconds += minutes*60 + hours*3600
        microseconds += milliseconds*1000

        # Get rid of all fractions, and normalize s and us.
        # Take a deep breath <wink>.
        if isinstance(days, float):
            dayfrac, days = _math.modf(days)
            daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.))
            assert daysecondswhole == int(daysecondswhole)  # can't overflow
            s = int(daysecondswhole)
            assert days == int(days)
            d = int(days)
        else:
            daysecondsfrac = 0.0
            d = days
        assert isinstance(daysecondsfrac, float)
        assert abs(daysecondsfrac) <= 1.0
        assert isinstance(d, int)
        assert abs(s) <= 24 * 3600
        # days isn't referenced again before redefinition

        if isinstance(seconds, float):
            secondsfrac, seconds = _math.modf(seconds)
            assert seconds == int(seconds)
            seconds = int(seconds)
            secondsfrac += daysecondsfrac
            assert abs(secondsfrac) <= 2.0
        else:
            secondsfrac = daysecondsfrac
        # daysecondsfrac isn't referenced again
        assert isinstance(secondsfrac, float)
        assert abs(secondsfrac) <= 2.0

        assert isinstance(seconds, int)
        days, seconds = divmod(seconds, 24*3600)
        d += days
        s += int(seconds)    # can't overflow
        assert isinstance(s, int)
        assert abs(s) <= 2 * 24 * 3600
        # seconds isn't referenced again before redefinition

        # Remaining fractional seconds are pushed down into microseconds.
        usdouble = secondsfrac * 1e6
        assert abs(usdouble) < 2.1e6    # exact value not critical
        # secondsfrac isn't referenced again

        if isinstance(microseconds, float):
            microseconds = round(microseconds + usdouble)
            seconds, microseconds = divmod(microseconds, 1000000)
            days, seconds = divmod(seconds, 24*3600)
            d += days
            s += seconds
        else:
            microseconds = int(microseconds)
            seconds, microseconds = divmod(microseconds, 1000000)
            days, seconds = divmod(seconds, 24*3600)
            d += days
            s += seconds
            microseconds = round(microseconds + usdouble)
        assert isinstance(s, int)
        assert isinstance(microseconds, int)
        assert abs(s) <= 3 * 24 * 3600
        assert abs(microseconds) < 3.1e6

        # Just a little bit of carrying possible for microseconds and seconds.
        seconds, us = divmod(microseconds, 1000000)
        s += seconds
        days, s = divmod(s, 24*3600)
        d += days

        assert isinstance(d, int)
        assert isinstance(s, int) and 0 <= s < 24*3600
        assert isinstance(us, int) and 0 <= us < 1000000

        if abs(d) > 999999999:
            raise OverflowError("timedelta # of days is too large: %d" % d)

        self = object.__new__(cls)
        self._days = d
        self._seconds = s
        self._microseconds = us
        self._hashcode = -1  # hash computed lazily elsewhere
        return self
def __repr__(self):
args = []
if self._days:
args.append("days=%d" % self._days)
if self._seconds:
args.append("seconds=%d" % self._seconds)
if self._microseconds:
args.append("microseconds=%d" % self._microseconds)
if not args:
args.append('0')
return "%s.%s(%s)" % (self.__class__.__module__,
self.__class__.__qualname__,
', '.join(args))
def __str__(self):
mm, ss = divmod(self._seconds, 60)
hh, mm = divmod(mm, 60)
s = "%d:%02d:%02d" % (hh, mm, ss)
if self._days:
def plural(n):
return n, abs(n) != 1 and "s" or ""
s = ("%d day%s, " % plural(self._days)) + s
if self._microseconds:
s = s + ".%06d" % self._microseconds
return s
def total_seconds(self):
"""Total seconds in the duration."""
return ((self.days * 86400 + self.seconds) * 10**6 +
self.microseconds) / 10**6
    # Read-only field accessors.  The constructor normalizes every delta to
    # this canonical (days, seconds, microseconds) triple.
    @property
    def days(self):
        """Whole days, -999999999..999999999 (only field that can be negative)."""
        return self._days
    @property
    def seconds(self):
        """Seconds within the day, 0..86399 (never negative after normalization)."""
        return self._seconds
    @property
    def microseconds(self):
        """Microseconds within the second, 0..999999."""
        return self._microseconds
    def __add__(self, other):
        """timedelta + timedelta -> timedelta; NotImplemented otherwise."""
        if isinstance(other, timedelta):
            # for CPython compatibility, we cannot use
            # our __class__ here, but need a real timedelta
            return timedelta(self._days + other._days,
                             self._seconds + other._seconds,
                             self._microseconds + other._microseconds)
        return NotImplemented
    # Addition is commutative, so the reflected operation reuses __add__.
    __radd__ = __add__
    def __sub__(self, other):
        """timedelta - timedelta -> timedelta; NotImplemented otherwise."""
        if isinstance(other, timedelta):
            # for CPython compatibility, we cannot use
            # our __class__ here, but need a real timedelta
            return timedelta(self._days - other._days,
                             self._seconds - other._seconds,
                             self._microseconds - other._microseconds)
        return NotImplemented
    def __rsub__(self, other):
        """other - self, expressed as (-self) + other to reuse __add__."""
        if isinstance(other, timedelta):
            return -self + other
        return NotImplemented
    def __neg__(self):
        """-t: negate every component; the constructor renormalizes them."""
        # for CPython compatibility, we cannot use
        # our __class__ here, but need a real timedelta
        return timedelta(-self._days,
                         -self._seconds,
                         -self._microseconds)
    def __pos__(self):
        """+t: returns self unchanged."""
        return self
    def __abs__(self):
        """abs(t): flip the sign iff the delta is negative."""
        # The normalized form keeps seconds/microseconds non-negative, so
        # the sign of the whole value is the sign of _days alone.
        if self._days < 0:
            return -self
        else:
            return self
    def __mul__(self, other):
        """t * int or t * float -> timedelta; NotImplemented otherwise."""
        if isinstance(other, int):
            # for CPython compatibility, we cannot use
            # our __class__ here, but need a real timedelta
            return timedelta(self._days * other,
                             self._seconds * other,
                             self._microseconds * other)
        if isinstance(other, float):
            usec = self._to_microseconds()
            # Use the float's exact integer ratio so the only rounding
            # happens once, inside the module's _divide_and_round() helper
            # (defined elsewhere in this file).
            a, b = other.as_integer_ratio()
            return timedelta(0, 0, _divide_and_round(usec * a, b))
        return NotImplemented
    # Multiplication by a scalar is commutative.
    __rmul__ = __mul__
def _to_microseconds(self):
return ((self._days * (24*3600) + self._seconds) * 1000000 +
self._microseconds)
    def __floordiv__(self, other):
        """t // t -> int, or t // int -> timedelta (floor division)."""
        if not isinstance(other, (int, timedelta)):
            return NotImplemented
        usec = self._to_microseconds()
        if isinstance(other, timedelta):
            return usec // other._to_microseconds()
        if isinstance(other, int):
            # No fall-through is possible: the leading type check guarantees
            # exactly one of the two branches above runs.
            return timedelta(0, 0, usec // other)
    def __truediv__(self, other):
        """t / t -> float; t / int or t / float -> timedelta."""
        if not isinstance(other, (int, float, timedelta)):
            return NotImplemented
        usec = self._to_microseconds()
        if isinstance(other, timedelta):
            return usec / other._to_microseconds()
        if isinstance(other, int):
            return timedelta(0, 0, _divide_and_round(usec, other))
        if isinstance(other, float):
            # other == a/b exactly, so usec/other == b*usec/a; keeping the
            # arithmetic integral defers all rounding to _divide_and_round().
            a, b = other.as_integer_ratio()
            return timedelta(0, 0, _divide_and_round(b * usec, a))
    def __mod__(self, other):
        """t % t -> timedelta remainder of floor division."""
        if isinstance(other, timedelta):
            r = self._to_microseconds() % other._to_microseconds()
            return timedelta(0, 0, r)
        return NotImplemented
    def __divmod__(self, other):
        """divmod(t, t) -> (int quotient, timedelta remainder)."""
        if isinstance(other, timedelta):
            q, r = divmod(self._to_microseconds(),
                          other._to_microseconds())
            return q, timedelta(0, 0, r)
        return NotImplemented
    # Comparisons of timedelta objects with other.
    # Each rich comparison delegates to _cmp() and answers NotImplemented
    # for non-timedelta operands so Python can try the reflected operation.
    def __eq__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) == 0
        else:
            return NotImplemented
    def __le__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) <= 0
        else:
            return NotImplemented
    def __lt__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) < 0
        else:
            return NotImplemented
    def __ge__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) >= 0
        else:
            return NotImplemented
    def __gt__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) > 0
        else:
            return NotImplemented
    def _cmp(self, other):
        """Three-way compare (-1/0/1) via the module-level _cmp helper."""
        # The normalized (days, seconds, microseconds) state tuples make a
        # plain lexicographic tuple comparison order-correct.
        assert isinstance(other, timedelta)
        return _cmp(self._getstate(), other._getstate())
    def __hash__(self):
        # Computed on first use and cached; -1 means "not yet computed".
        if self._hashcode == -1:
            self._hashcode = hash(self._getstate())
        return self._hashcode
    def __bool__(self):
        """True unless the duration is exactly zero."""
        return (self._days != 0 or
                self._seconds != 0 or
                self._microseconds != 0)
    # Pickle support.
    def _getstate(self):
        # The constructor accepts this positional 3-tuple directly, so it
        # doubles as the pickle state.
        return (self._days, self._seconds, self._microseconds)
    def __reduce__(self):
        return (self.__class__, self._getstate())
# Extreme values and resolution are attached after the class statement
# because they are themselves timedelta instances.
timedelta.min = timedelta(-999999999)
timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59,
                          microseconds=999999)
timedelta.resolution = timedelta(microseconds=1)
class date:
    """Concrete date type.
    Constructors:
    __new__()
    fromtimestamp()
    today()
    fromordinal()
    Operators:
    __repr__, __str__
    __eq__, __le__, __lt__, __ge__, __gt__, __hash__
    __add__, __radd__, __sub__ (add/radd only with timedelta arg)
    Methods:
    timetuple()
    toordinal()
    weekday()
    isoweekday(), isocalendar(), isoformat()
    ctime()
    strftime()
    Properties (readonly):
    year, month, day
    """
    __slots__ = '_year', '_month', '_day', '_hashcode'
    def __new__(cls, year, month=None, day=None):
        """Constructor.
        Arguments:
        year, month, day (required, base 1)
        """
        # Pickle detection: _getstate() packs a date into 4 bytes whose
        # third byte (the month) is always 1..12 -- no ordinary ``year``
        # argument of length 4 looks like that.  str is accepted too, for
        # pickles written with a latin1 text encoding.
        if (month is None and
            isinstance(year, (bytes, str)) and len(year) == 4 and
            1 <= ord(year[2:3]) <= 12):
            # Pickle support
            if isinstance(year, str):
                try:
                    year = year.encode('latin1')
                except UnicodeEncodeError:
                    # More informative error message.
                    raise ValueError(
                        "Failed to encode latin1 string when unpickling "
                        "a date object. "
                        "pickle.load(data, encoding='latin1') is assumed.")
            self = object.__new__(cls)
            self.__setstate(year)
            self._hashcode = -1
            return self
        year, month, day = _check_date_fields(year, month, day)
        self = object.__new__(cls)
        self._year = year
        self._month = month
        self._day = day
        self._hashcode = -1  # lazily computed by __hash__()
        return self
    # Additional constructors
    @classmethod
    def fromtimestamp(cls, t):
        "Construct a date from a POSIX timestamp (like time.time())."
        y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
        return cls(y, m, d)
    @classmethod
    def today(cls):
        "Construct a date from time.time()."
        t = _time.time()
        return cls.fromtimestamp(t)
    @classmethod
    def fromordinal(cls, n):
        """Construct a date from a proleptic Gregorian ordinal.
        January 1 of year 1 is day 1. Only the year, month and day are
        non-zero in the result.
        """
        y, m, d = _ord2ymd(n)
        return cls(y, m, d)
    @classmethod
    def fromisoformat(cls, date_string):
        """Construct a date from the output of date.isoformat()."""
        if not isinstance(date_string, str):
            raise TypeError('fromisoformat: argument must be str')
        try:
            # Strictly 'YYYY-MM-DD': anything of a different length is
            # rejected before the per-field parsing even runs.
            assert len(date_string) == 10
            return cls(*_parse_isoformat_date(date_string))
        except Exception:
            # Any parsing/validation failure is reported uniformly.
            raise ValueError(f'Invalid isoformat string: {date_string!r}')
    # Conversions to string
    def __repr__(self):
        """Convert to formal string, for repr():
        '<module>.<qualname>(year, month, day)'.
        """
        return "%s.%s(%d, %d, %d)" % (self.__class__.__module__,
                                      self.__class__.__qualname__,
                                      self._year,
                                      self._month,
                                      self._day)
    # XXX These shouldn't depend on time.localtime(), because that
    # clips the usable dates to [1970 .. 2038). At least ctime() is
    # easily done without using strftime() -- that's better too because
    # strftime("%c", ...) is locale specific.
    def ctime(self):
        "Return ctime() style string."
        # toordinal() % 7 maps Mon..Sat to 1..6 and Sun to 0; ``or 7``
        # moves Sunday to 7 so the value indexes _DAYNAMES directly.
        weekday = self.toordinal() % 7 or 7
        return "%s %s %2d 00:00:00 %04d" % (
            _DAYNAMES[weekday],
            _MONTHNAMES[self._month],
            self._day, self._year)
    def strftime(self, fmt):
        "Format using strftime()."
        return _wrap_strftime(self, fmt, self.timetuple())
    def __format__(self, fmt):
        if not isinstance(fmt, str):
            raise TypeError("must be str, not %s" % type(fmt).__name__)
        if len(fmt) != 0:
            return self.strftime(fmt)
        return str(self)
    def isoformat(self):
        """Return the date formatted according to ISO.
        This is 'YYYY-MM-DD'.
        References:
        - http://www.w3.org/TR/NOTE-datetime
        - http://www.cl.cam.ac.uk/~mgk25/iso-time.html
        """
        return "%04d-%02d-%02d" % (self._year, self._month, self._day)
    __str__ = isoformat
    # Read-only field accessors
    @property
    def year(self):
        """year (1-9999)"""
        return self._year
    @property
    def month(self):
        """month (1-12)"""
        return self._month
    @property
    def day(self):
        """day (1-31)"""
        return self._day
    # Standard conversions, __eq__, __le__, __lt__, __ge__, __gt__,
    # __hash__ (and helpers)
    def timetuple(self):
        "Return local time tuple compatible with time.localtime()."
        return _build_struct_time(self._year, self._month, self._day,
                                  0, 0, 0, -1)
    def toordinal(self):
        """Return proleptic Gregorian ordinal for the year, month and day.
        January 1 of year 1 is day 1. Only the year, month and day values
        contribute to the result.
        """
        return _ymd2ord(self._year, self._month, self._day)
    def replace(self, year=None, month=None, day=None):
        """Return a new date with new values for the specified fields."""
        if year is None:
            year = self._year
        if month is None:
            month = self._month
        if day is None:
            day = self._day
        return type(self)(year, month, day)
    # Comparisons of date objects with other.
    # Each delegates to _cmp() and answers NotImplemented for foreign
    # types so Python can try the reflected operation.
    def __eq__(self, other):
        if isinstance(other, date):
            return self._cmp(other) == 0
        return NotImplemented
    def __le__(self, other):
        if isinstance(other, date):
            return self._cmp(other) <= 0
        return NotImplemented
    def __lt__(self, other):
        if isinstance(other, date):
            return self._cmp(other) < 0
        return NotImplemented
    def __ge__(self, other):
        if isinstance(other, date):
            return self._cmp(other) >= 0
        return NotImplemented
    def __gt__(self, other):
        if isinstance(other, date):
            return self._cmp(other) > 0
        return NotImplemented
    def _cmp(self, other):
        # Lexicographic (year, month, day) comparison matches
        # chronological order.
        assert isinstance(other, date)
        y, m, d = self._year, self._month, self._day
        y2, m2, d2 = other._year, other._month, other._day
        return _cmp((y, m, d), (y2, m2, d2))
    def __hash__(self):
        "Hash."
        # Computed on first use and cached; -1 means "not yet computed".
        if self._hashcode == -1:
            self._hashcode = hash(self._getstate())
        return self._hashcode
    # Computations
    def __add__(self, other):
        "Add a date to a timedelta."
        if isinstance(other, timedelta):
            # Only whole days of the delta matter for a date.
            o = self.toordinal() + other.days
            if 0 < o <= _MAXORDINAL:
                return date.fromordinal(o)
            raise OverflowError("result out of range")
        return NotImplemented
    __radd__ = __add__
    def __sub__(self, other):
        """Subtract two dates, or a date and a timedelta."""
        if isinstance(other, timedelta):
            return self + timedelta(-other.days)
        if isinstance(other, date):
            days1 = self.toordinal()
            days2 = other.toordinal()
            return timedelta(days1 - days2)
        return NotImplemented
    def weekday(self):
        "Return day of the week, where Monday == 0 ... Sunday == 6."
        return (self.toordinal() + 6) % 7
    # Day-of-the-week and week-of-the-year, according to ISO
    def isoweekday(self):
        "Return day of the week, where Monday == 1 ... Sunday == 7."
        # 1-Jan-0001 is a Monday
        return self.toordinal() % 7 or 7
    def isocalendar(self):
        """Return a 3-tuple containing ISO year, week number, and weekday.
        The first ISO week of the year is the (Mon-Sun) week
        containing the year's first Thursday; everything else derives
        from that.
        The first week is 1; Monday is 1 ... Sunday is 7.
        ISO calendar algorithm taken from
        http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
        (used with permission)
        """
        year = self._year
        week1monday = _isoweek1monday(year)
        today = _ymd2ord(self._year, self._month, self._day)
        # Internally, week and day have origin 0
        week, day = divmod(today - week1monday, 7)
        if week < 0:
            # Before this year's week 1: the date belongs to the last
            # week of the previous ISO year.
            year -= 1
            week1monday = _isoweek1monday(year)
            week, day = divmod(today - week1monday, 7)
        elif week >= 52:
            # Possibly already in week 1 of the next ISO year.
            if today >= _isoweek1monday(year+1):
                year += 1
                week = 0
        return year, week+1, day+1
    # Pickle support.
    def _getstate(self):
        # Pack as 4 bytes: year as a big-endian 16-bit value, then month
        # and day.  Returned as a 1-tuple of constructor arguments.
        yhi, ylo = divmod(self._year, 256)
        return bytes([yhi, ylo, self._month, self._day]),
    def __setstate(self, string):
        # Inverse of _getstate(): unpack the 4-byte packed form.
        yhi, ylo, self._month, self._day = string
        self._year = yhi * 256 + ylo
    def __reduce__(self):
        return (self.__class__, self._getstate())
_date_class = date # so functions w/ args named "date" can get at the class
# Extreme values and resolution, attached after the class statement because
# they are date/timedelta instances themselves.
date.min = date(1, 1, 1)
date.max = date(9999, 12, 31)
date.resolution = timedelta(days=1)
class tzinfo:
    """Abstract base class for time zone info classes.
    Subclasses must override the name(), utcoffset() and dst() methods.
    """
    __slots__ = ()
    def tzname(self, dt):
        "datetime -> string name of time zone."
        raise NotImplementedError("tzinfo subclass must override tzname()")
    def utcoffset(self, dt):
        "datetime -> timedelta, positive for east of UTC, negative for west of UTC"
        raise NotImplementedError("tzinfo subclass must override utcoffset()")
    def dst(self, dt):
        """datetime -> DST offset as timedelta, positive for east of UTC.
        Return 0 if DST not in effect. utcoffset() must include the DST
        offset.
        """
        raise NotImplementedError("tzinfo subclass must override dst()")
    def fromutc(self, dt):
        "datetime in UTC -> datetime in local time."
        if not isinstance(dt, datetime):
            raise TypeError("fromutc() requires a datetime argument")
        if dt.tzinfo is not self:
            raise ValueError("dt.tzinfo is not self")
        dtoff = dt.utcoffset()
        if dtoff is None:
            raise ValueError("fromutc() requires a non-None utcoffset() "
                             "result")
        # See the long comment block at the end of this file for an
        # explanation of this algorithm.
        dtdst = dt.dst()
        if dtdst is None:
            raise ValueError("fromutc() requires a non-None dst() result")
        # delta = utcoffset - dst, i.e. the zone's standard offset; shift
        # by it first, then re-query dst() at the shifted moment.
        delta = dtoff - dtdst
        if delta:
            dt += delta
            dtdst = dt.dst()
            if dtdst is None:
                raise ValueError("fromutc(): dt.dst gave inconsistent "
                                 "results; cannot convert")
        return dt + dtdst
    # Pickle support.
    def __reduce__(self):
        # Honor the optional __getinitargs__/__getstate__ hooks so that
        # subclasses with constructor arguments or extra state round-trip
        # through pickle correctly.
        getinitargs = getattr(self, "__getinitargs__", None)
        if getinitargs:
            args = getinitargs()
        else:
            args = ()
        getstate = getattr(self, "__getstate__", None)
        if getstate:
            state = getstate()
        else:
            # ``or None`` collapses an empty __dict__ to "no state".
            state = getattr(self, "__dict__", None) or None
        if state is None:
            return (self.__class__, args)
        else:
            return (self.__class__, args, state)
# Alias kept so methods whose parameter is named "tzinfo" can still reach
# the class object.
_tzinfo_class = tzinfo
class time:
    """Time with time zone.
    Constructors:
    __new__()
    Operators:
    __repr__, __str__
    __eq__, __le__, __lt__, __ge__, __gt__, __hash__
    Methods:
    strftime()
    isoformat()
    utcoffset()
    tzname()
    dst()
    Properties (readonly):
    hour, minute, second, microsecond, tzinfo, fold
    """
    __slots__ = '_hour', '_minute', '_second', '_microsecond', '_tzinfo', '_hashcode', '_fold'
    def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None, *, fold=0):
        """Constructor.
        Arguments:
        hour, minute (required)
        second, microsecond (default to zero)
        tzinfo (default to None)
        fold (keyword only, default to zero)
        """
        # Pickle detection: _getstate() emits 6 bytes whose first byte is
        # the hour; bit 7 may carry the fold flag (see __setstate), hence
        # the &0x7F mask before the < 24 test.
        if (isinstance(hour, (bytes, str)) and len(hour) == 6 and
            ord(hour[0:1])&0x7F < 24):
            # Pickle support
            if isinstance(hour, str):
                try:
                    hour = hour.encode('latin1')
                except UnicodeEncodeError:
                    # More informative error message.
                    raise ValueError(
                        "Failed to encode latin1 string when unpickling "
                        "a time object. "
                        "pickle.load(data, encoding='latin1') is assumed.")
            self = object.__new__(cls)
            self.__setstate(hour, minute or None)
            self._hashcode = -1
            return self
        hour, minute, second, microsecond, fold = _check_time_fields(
            hour, minute, second, microsecond, fold)
        _check_tzinfo_arg(tzinfo)
        self = object.__new__(cls)
        self._hour = hour
        self._minute = minute
        self._second = second
        self._microsecond = microsecond
        self._tzinfo = tzinfo
        self._hashcode = -1  # lazily computed by __hash__()
        self._fold = fold
        return self
    # Read-only field accessors
    @property
    def hour(self):
        """hour (0-23)"""
        return self._hour
    @property
    def minute(self):
        """minute (0-59)"""
        return self._minute
    @property
    def second(self):
        """second (0-59)"""
        return self._second
    @property
    def microsecond(self):
        """microsecond (0-999999)"""
        return self._microsecond
    @property
    def tzinfo(self):
        """timezone info object"""
        return self._tzinfo
    @property
    def fold(self):
        # 0 or 1; selects between the two readings of an ambiguous
        # wall-clock time (PEP 495).
        return self._fold
    # Standard conversions, __hash__ (and helpers)
    # Comparisons of time objects with other.
    def __eq__(self, other):
        # allow_mixed=True: naive vs. aware compares unequal instead of
        # raising, as equality must never raise for mismatched operands.
        if isinstance(other, time):
            return self._cmp(other, allow_mixed=True) == 0
        else:
            return NotImplemented
    def __le__(self, other):
        if isinstance(other, time):
            return self._cmp(other) <= 0
        else:
            return NotImplemented
    def __lt__(self, other):
        if isinstance(other, time):
            return self._cmp(other) < 0
        else:
            return NotImplemented
    def __ge__(self, other):
        if isinstance(other, time):
            return self._cmp(other) >= 0
        else:
            return NotImplemented
    def __gt__(self, other):
        if isinstance(other, time):
            return self._cmp(other) > 0
        else:
            return NotImplemented
    def _cmp(self, other, allow_mixed=False):
        """Three-way compare; offset-aware operands are compared in UTC."""
        assert isinstance(other, time)
        mytz = self._tzinfo
        ottz = other._tzinfo
        myoff = otoff = None
        if mytz is ottz:
            # Identical (or both absent) tzinfo objects: wall-clock fields
            # can be compared directly without computing offsets.
            base_compare = True
        else:
            myoff = self.utcoffset()
            otoff = other.utcoffset()
            base_compare = myoff == otoff
        if base_compare:
            return _cmp((self._hour, self._minute, self._second,
                         self._microsecond),
                        (other._hour, other._minute, other._second,
                         other._microsecond))
        if myoff is None or otoff is None:
            if allow_mixed:
                return 2 # arbitrary non-zero value
            else:
                raise TypeError("cannot compare naive and aware times")
        # Different offsets: subtract each offset (in whole minutes) from
        # the wall-clock minutes, then compare the UTC-equivalent values.
        myhhmm = self._hour * 60 + self._minute - myoff//timedelta(minutes=1)
        othhmm = other._hour * 60 + other._minute - otoff//timedelta(minutes=1)
        return _cmp((myhhmm, self._second, self._microsecond),
                    (othhmm, other._second, other._microsecond))
    def __hash__(self):
        """Hash."""
        if self._hashcode == -1:
            # Hash the fold=0 variant so the two readings of an ambiguous
            # time (which compare equal) also hash equal.
            if self.fold:
                t = self.replace(fold=0)
            else:
                t = self
            tzoff = t.utcoffset()
            if not tzoff: # zero or None
                self._hashcode = hash(t._getstate()[0])
            else:
                # Aware: normalize to UTC hours/minutes before hashing.
                h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff,
                              timedelta(hours=1))
                assert not m % timedelta(minutes=1), "whole minute"
                m //= timedelta(minutes=1)
                if 0 <= h < 24:
                    self._hashcode = hash(time(h, m, self.second, self.microsecond))
                else:
                    # Out-of-range hour cannot build a time(); hash the tuple.
                    self._hashcode = hash((h, m, self.second, self.microsecond))
        return self._hashcode
    # Conversion to string
    def _tzstr(self):
        """Return formatted timezone offset (+xx:xx) or an empty string."""
        off = self.utcoffset()
        return _format_offset(off)
    def __repr__(self):
        """Convert to formal string, for repr()."""
        # Trailing zero-valued fields are omitted from the argument list.
        if self._microsecond != 0:
            s = ", %d, %d" % (self._second, self._microsecond)
        elif self._second != 0:
            s = ", %d" % self._second
        else:
            s = ""
        s= "%s.%s(%d, %d%s)" % (self.__class__.__module__,
                                self.__class__.__qualname__,
                                self._hour, self._minute, s)
        if self._tzinfo is not None:
            assert s[-1:] == ")"
            s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
        if self._fold:
            assert s[-1:] == ")"
            s = s[:-1] + ", fold=1)"
        return s
    def isoformat(self, timespec='auto'):
        """Return the time formatted according to ISO.
        The full format is 'HH:MM:SS.mmmmmm+zz:zz'. By default, the fractional
        part is omitted if self.microsecond == 0.
        The optional argument timespec specifies the number of additional
        terms of the time to include.
        """
        s = _format_time(self._hour, self._minute, self._second,
                          self._microsecond, timespec)
        tz = self._tzstr()
        if tz:
            s += tz
        return s
    __str__ = isoformat
    @classmethod
    def fromisoformat(cls, time_string):
        """Construct a time from the output of isoformat()."""
        if not isinstance(time_string, str):
            raise TypeError('fromisoformat: argument must be str')
        try:
            return cls(*_parse_isoformat_time(time_string))
        except Exception:
            # Any parsing/validation failure is reported uniformly.
            raise ValueError(f'Invalid isoformat string: {time_string!r}')
    def strftime(self, fmt):
        """Format using strftime(). The date part of the timestamp passed
        to underlying strftime should not be used.
        """
        # The year must be >= 1000 else Python's strftime implementation
        # can raise a bogus exception.
        timetuple = (1900, 1, 1,
                     self._hour, self._minute, self._second,
                     0, 1, -1)
        return _wrap_strftime(self, fmt, timetuple)
    def __format__(self, fmt):
        if not isinstance(fmt, str):
            raise TypeError("must be str, not %s" % type(fmt).__name__)
        if len(fmt) != 0:
            return self.strftime(fmt)
        return str(self)
    # Timezone functions
    def utcoffset(self):
        """Return the timezone offset as timedelta, positive east of UTC
        (negative west of UTC)."""
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.utcoffset(None)
        _check_utc_offset("utcoffset", offset)
        return offset
    def tzname(self):
        """Return the timezone name.
        Note that the name is 100% informational -- there's no requirement that
        it mean anything in particular. For example, "GMT", "UTC", "-500",
        "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
        """
        if self._tzinfo is None:
            return None
        name = self._tzinfo.tzname(None)
        _check_tzname(name)
        return name
    def dst(self):
        """Return 0 if DST is not in effect, or the DST offset (as timedelta
        positive eastward) if DST is in effect.
        This is purely informational; the DST offset has already been added to
        the UTC offset returned by utcoffset() if applicable, so there's no
        need to consult dst() unless you're interested in displaying the DST
        info.
        """
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.dst(None)
        _check_utc_offset("dst", offset)
        return offset
    def replace(self, hour=None, minute=None, second=None, microsecond=None,
                tzinfo=True, *, fold=None):
        """Return a new time with new values for the specified fields."""
        # ``True`` is the sentinel for tzinfo because None is meaningful
        # (it would make the result naive).
        if hour is None:
            hour = self.hour
        if minute is None:
            minute = self.minute
        if second is None:
            second = self.second
        if microsecond is None:
            microsecond = self.microsecond
        if tzinfo is True:
            tzinfo = self.tzinfo
        if fold is None:
            fold = self._fold
        return type(self)(hour, minute, second, microsecond, tzinfo, fold=fold)
    # Pickle support.
    def _getstate(self, protocol=3):
        # Split the microsecond into three big-endian bytes.
        us2, us3 = divmod(self._microsecond, 256)
        us1, us2 = divmod(us2, 256)
        h = self._hour
        if self._fold and protocol > 3:
            # Protocols > 3 encode fold in bit 7 of the hour byte.
            h += 128
        basestate = bytes([h, self._minute, self._second,
                           us1, us2, us3])
        if self._tzinfo is None:
            return (basestate,)
        else:
            return (basestate, self._tzinfo)
    def __setstate(self, string, tzinfo):
        # Inverse of _getstate(); ``string`` is the 6-byte packed state.
        if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class):
            raise TypeError("bad tzinfo state arg")
        h, self._minute, self._second, us1, us2, us3 = string
        if h > 127:
            # Bit 7 of the hour byte carries the fold flag.
            self._fold = 1
            self._hour = h - 128
        else:
            self._fold = 0
            self._hour = h
        self._microsecond = (((us1 << 8) | us2) << 8) | us3
        self._tzinfo = tzinfo
    def __reduce_ex__(self, protocol):
        return (time, self._getstate(protocol))
    def __reduce__(self):
        # Default to protocol 2 semantics (no fold bit in the state).
        return self.__reduce_ex__(2)
_time_class = time # so functions w/ args named "time" can get at the class
# Extreme values and resolution, attached after the class statement.
time.min = time(0, 0, 0)
time.max = time(23, 59, 59, 999999)
time.resolution = timedelta(microseconds=1)
class datetime(date):
"""datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])
The year, month and day arguments are required. tzinfo may be None, or an
instance of a tzinfo subclass. The remaining arguments may be ints.
"""
__slots__ = date.__slots__ + time.__slots__
    def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0,
                microsecond=0, tzinfo=None, *, fold=0):
        """Construct a datetime, or rebuild one from a pickled state string."""
        # Pickle detection: _getstate() emits 10 bytes whose third byte is
        # the month (1..12).  The &0x7F mask strips bit 7 -- presumably a
        # fold flag as in time's packed state (datetime._getstate is not
        # visible here; TODO confirm).
        if (isinstance(year, (bytes, str)) and len(year) == 10 and
            1 <= ord(year[2:3])&0x7F <= 12):
            # Pickle support
            if isinstance(year, str):
                try:
                    year = bytes(year, 'latin1')
                except UnicodeEncodeError:
                    # More informative error message.
                    raise ValueError(
                        "Failed to encode latin1 string when unpickling "
                        "a datetime object. "
                        "pickle.load(data, encoding='latin1') is assumed.")
            self = object.__new__(cls)
            self.__setstate(year, month)
            self._hashcode = -1
            return self
        year, month, day = _check_date_fields(year, month, day)
        hour, minute, second, microsecond, fold = _check_time_fields(
            hour, minute, second, microsecond, fold)
        _check_tzinfo_arg(tzinfo)
        self = object.__new__(cls)
        self._year = year
        self._month = month
        self._day = day
        self._hour = hour
        self._minute = minute
        self._second = second
        self._microsecond = microsecond
        self._tzinfo = tzinfo
        self._hashcode = -1  # lazily computed by __hash__()
        self._fold = fold
        return self
    # Read-only field accessors
    @property
    def hour(self):
        """hour (0-23)"""
        return self._hour
    @property
    def minute(self):
        """minute (0-59)"""
        return self._minute
    @property
    def second(self):
        """second (0-59)"""
        return self._second
    @property
    def microsecond(self):
        """microsecond (0-999999)"""
        return self._microsecond
    @property
    def tzinfo(self):
        """timezone info object"""
        return self._tzinfo
    @property
    def fold(self):
        # 0 or 1; selects between the two readings of an ambiguous
        # wall-clock time (PEP 495).
        return self._fold
    @classmethod
    def _fromtimestamp(cls, t, utc, tz):
        """Construct a datetime from a POSIX timestamp (like time.time()).
        A timezone info object may be passed in as well.
        """
        # Split off the fractional second and round it to microseconds,
        # carrying into (or borrowing from) the integer part so that
        # 0 <= us < 1000000 always holds.
        frac, t = _math.modf(t)
        us = round(frac * 1e6)
        if us >= 1000000:
            t += 1
            us -= 1000000
        elif us < 0:
            t -= 1
            us += 1000000
        converter = _time.gmtime if utc else _time.localtime
        y, m, d, hh, mm, ss, weekday, jday, dst = converter(t)
        ss = min(ss, 59)    # clamp out leap seconds if the platform has them
        result = cls(y, m, d, hh, mm, ss, us, tz)
        if tz is None:
            # As of version 2015f max fold in IANA database is
            # 23 hours at 1969-09-30 13:00:00 in Kwajalein.
            # Let's probe 24 hours in the past to detect a transition:
            max_fold_seconds = 24 * 3600
            # On Windows localtime_s throws an OSError for negative values,
            # thus we can't perform fold detection for values of time less
            # than the max time fold. See comments in _datetimemodule's
            # version of this method for more details.
            if t < max_fold_seconds and sys.platform.startswith("win"):
                return result
            # If converting 24h earlier does not land exactly 24h before
            # ``result``, a UTC-offset transition occurred in between.
            y, m, d, hh, mm, ss = converter(t - max_fold_seconds)[:6]
            probe1 = cls(y, m, d, hh, mm, ss, us, tz)
            trans = result - probe1 - timedelta(0, max_fold_seconds)
            if trans.days < 0:
                # Probe at the transition distance; if the same wall time
                # recurs, ``result`` is the second (folded) occurrence.
                y, m, d, hh, mm, ss = converter(t + trans // timedelta(0, 1))[:6]
                probe2 = cls(y, m, d, hh, mm, ss, us, tz)
                if probe2 == result:
                    result._fold = 1
        else:
            result = tz.fromutc(result)
        return result
    @classmethod
    def fromtimestamp(cls, t, tz=None):
        """Construct a datetime from a POSIX timestamp (like time.time()).
        A timezone info object may be passed in as well.
        """
        _check_tzinfo_arg(tz)
        # Interpret via gmtime only when an explicit tz was given.
        return cls._fromtimestamp(t, tz is not None, tz)
    @classmethod
    def utcfromtimestamp(cls, t):
        """Construct a naive UTC datetime from a POSIX timestamp."""
        return cls._fromtimestamp(t, True, None)
    @classmethod
    def now(cls, tz=None):
        "Construct a datetime from time.time() and optional time zone info."
        t = _time.time()
        return cls.fromtimestamp(t, tz)
    @classmethod
    def utcnow(cls):
        "Construct a UTC datetime from time.time()."
        t = _time.time()
        return cls.utcfromtimestamp(t)
    @classmethod
    def combine(cls, date, time, tzinfo=True):
        "Construct a datetime from a given date and a given time."
        if not isinstance(date, _date_class):
            raise TypeError("date argument must be a date instance")
        if not isinstance(time, _time_class):
            raise TypeError("time argument must be a time instance")
        # ``True`` is the sentinel meaning "take tzinfo from the time
        # argument"; None is a meaningful value (naive result).
        if tzinfo is True:
            tzinfo = time.tzinfo
        return cls(date.year, date.month, date.day,
                   time.hour, time.minute, time.second, time.microsecond,
                   tzinfo, fold=time.fold)
    @classmethod
    def fromisoformat(cls, date_string):
        """Construct a datetime from the output of datetime.isoformat()."""
        if not isinstance(date_string, str):
            raise TypeError('fromisoformat: argument must be str')
        # Split this at the separator: position 10 holds the (arbitrary
        # single-character) separator, which is skipped, not validated.
        dstr = date_string[0:10]
        tstr = date_string[11:]
        try:
            date_components = _parse_isoformat_date(dstr)
        except ValueError:
            raise ValueError(f'Invalid isoformat string: {date_string!r}')
        if tstr:
            try:
                time_components = _parse_isoformat_time(tstr)
            except ValueError:
                raise ValueError(f'Invalid isoformat string: {date_string!r}')
        else:
            # Date-only string: midnight, naive.
            time_components = [0, 0, 0, 0, None]
        return cls(*(date_components + time_components))
    def timetuple(self):
        "Return local time tuple compatible with time.localtime()."
        # Map dst() onto the struct_time tri-state flag:
        # None -> -1 (unknown), nonzero -> 1, zero -> 0.
        dst = self.dst()
        if dst is None:
            dst = -1
        elif dst:
            dst = 1
        else:
            dst = 0
        return _build_struct_time(self.year, self.month, self.day,
                                  self.hour, self.minute, self.second,
                                  dst)
    def _mktime(self):
        """Return integer POSIX timestamp (naive self read as local time)."""
        epoch = datetime(1970, 1, 1)
        max_fold_seconds = 24 * 3600
        # t: the timestamp self would have if local time equaled UTC.
        t = (self - epoch) // timedelta(0, 1)
        def local(u):
            # POSIX timestamp -> seconds-since-epoch of the local wall time.
            y, m, d, hh, mm, ss = _time.localtime(u)[:6]
            return (datetime(y, m, d, hh, mm, ss) - epoch) // timedelta(0, 1)
        # Our goal is to solve t = local(u) for u.
        a = local(t) - t
        u1 = t - a
        t1 = local(u1)
        if t1 == t:
            # We found one solution, but it may not be the one we need.
            # Look for an earlier solution (if `fold` is 0), or a
            # later one (if `fold` is 1).
            u2 = u1 + (-max_fold_seconds, max_fold_seconds)[self.fold]
            b = local(u2) - u2
            if a == b:
                # No transition nearby: u1 is the unique solution.
                return u1
        else:
            b = t1 - u1
            assert a != b
        # Retry with the second candidate offset b.
        u2 = t - b
        t2 = local(u2)
        if t2 == t:
            return u2
        if t1 == t:
            return u1
        # We have found both offsets a and b, but neither t - a nor t - b is
        # a solution. This means t is in the gap.
        return (max, min)[self.fold](u1, u2)
    def timestamp(self):
        "Return POSIX timestamp as float"
        if self._tzinfo is None:
            # Naive: interpret via the platform's local-time rules.
            s = self._mktime()
            return s + self.microsecond / 1e6
        else:
            return (self - _EPOCH).total_seconds()
    def utctimetuple(self):
        "Return UTC time tuple compatible with time.gmtime()."
        offset = self.utcoffset()
        if offset:
            # datetimes are immutable: this rebinds the local name to a
            # new, UTC-shifted instance; the original is untouched.
            self -= offset
        y, m, d = self.year, self.month, self.day
        hh, mm, ss = self.hour, self.minute, self.second
        # isdst is always 0 in a UTC tuple.
        return _build_struct_time(y, m, d, hh, mm, ss, 0)
    def date(self):
        "Return the date part."
        return date(self._year, self._month, self._day)
    def time(self):
        "Return the time part, with tzinfo None."
        # fold is preserved even though tzinfo is dropped.
        return time(self.hour, self.minute, self.second, self.microsecond, fold=self.fold)
    def timetz(self):
        "Return the time part, with same tzinfo."
        return time(self.hour, self.minute, self.second, self.microsecond,
                    self._tzinfo, fold=self.fold)
    def replace(self, year=None, month=None, day=None, hour=None,
                minute=None, second=None, microsecond=None, tzinfo=True,
                *, fold=None):
        """Return a new datetime with new values for the specified fields."""
        # ``True`` is the sentinel for tzinfo because None is meaningful
        # (it would make the result naive).
        if year is None:
            year = self.year
        if month is None:
            month = self.month
        if day is None:
            day = self.day
        if hour is None:
            hour = self.hour
        if minute is None:
            minute = self.minute
        if second is None:
            second = self.second
        if microsecond is None:
            microsecond = self.microsecond
        if tzinfo is True:
            tzinfo = self.tzinfo
        if fold is None:
            fold = self.fold
        return type(self)(year, month, day, hour, minute, second,
                          microsecond, tzinfo, fold=fold)
    def _local_timezone(self):
        """Return a timezone instance for the platform's local zone at self."""
        if self.tzinfo is None:
            # Naive: resolve self through the local-time rules first.
            ts = self._mktime()
        else:
            ts = (self - _EPOCH) // timedelta(seconds=1)
        localtm = _time.localtime(ts)
        local = datetime(*localtm[:6])
        try:
            # Extract TZ data if available
            gmtoff = localtm.tm_gmtoff
            zone = localtm.tm_zone
        except AttributeError:
            # Platform struct_time lacks gmtoff/zone: derive the offset by
            # differencing local time against UTC for the same instant.
            delta = local - datetime(*_time.gmtime(ts)[:6])
            zone = _time.strftime('%Z', localtm)
            tz = timezone(delta, zone)
        else:
            tz = timezone(timedelta(seconds=gmtoff), zone)
        return tz
    def astimezone(self, tz=None):
        """Return the same instant expressed in time zone ``tz``
        (default: the platform's local zone)."""
        if tz is None:
            tz = self._local_timezone()
        elif not isinstance(tz, tzinfo):
            raise TypeError("tz argument must be an instance of tzinfo")
        mytz = self.tzinfo
        if mytz is None:
            # Naive: treat self as local time.
            mytz = self._local_timezone()
            myoffset = mytz.utcoffset(self)
        else:
            myoffset = mytz.utcoffset(self)
            if myoffset is None:
                # tzinfo present but offset unknown: fall back to the
                # local zone computed from the naive copy of self.
                mytz = self.replace(tzinfo=None)._local_timezone()
                myoffset = mytz.utcoffset(self)
        if tz is mytz:
            return self
        # Convert self to UTC, and attach the new time zone object.
        utc = (self - myoffset).replace(tzinfo=tz)
        # Convert from UTC to tz's local time.
        return tz.fromutc(utc)
    # Ways to produce a string.
    def ctime(self):
        "Return ctime() style string."
        # toordinal() % 7 maps Mon..Sat to 1..6, Sun to 0; ``or 7`` moves
        # Sunday to 7 so the value indexes _DAYNAMES directly.
        weekday = self.toordinal() % 7 or 7
        return "%s %s %2d %02d:%02d:%02d %04d" % (
            _DAYNAMES[weekday],
            _MONTHNAMES[self._month],
            self._day,
            self._hour, self._minute, self._second,
            self._year)
    def isoformat(self, sep='T', timespec='auto'):
        """Return the time formatted according to ISO.
        The full format looks like 'YYYY-MM-DD HH:MM:SS.mmmmmm'.
        By default, the fractional part is omitted if self.microsecond == 0.
        If self.tzinfo is not None, the UTC offset is also attached,
        giving a full format of 'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM'.
        Optional argument sep specifies the separator between date and
        time, default 'T'.
        The optional argument timespec specifies the number of additional
        terms of the time to include.
        """
        s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day, sep) +
             _format_time(self._hour, self._minute, self._second,
                          self._microsecond, timespec))
        # Append the UTC offset suffix only for aware datetimes.
        off = self.utcoffset()
        tz = _format_offset(off)
        if tz:
            s += tz
        return s
def __repr__(self):
"""Convert to formal string, for repr()."""
L = [self._year, self._month, self._day, # These are never zero
self._hour, self._minute, self._second, self._microsecond]
if L[-1] == 0:
del L[-1]
if L[-1] == 0:
del L[-1]
s = "%s.%s(%s)" % (self.__class__.__module__,
self.__class__.__qualname__,
", ".join(map(str, L)))
if self._tzinfo is not None:
assert s[-1:] == ")"
s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
if self._fold:
assert s[-1:] == ")"
s = s[:-1] + ", fold=1)"
return s
def __str__(self):
"Convert to string, for str()."
return self.isoformat(sep=' ')
@classmethod
def strptime(cls, date_string, format):
'string, format -> new datetime parsed from a string (like time.strptime()).'
import _strptime
return _strptime._strptime_datetime(cls, date_string, format)
    def utcoffset(self):
        """Return the timezone offset as timedelta positive east of UTC (negative west of
        UTC).  None for naive datetimes."""
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.utcoffset(self)
        # Validate whatever the tzinfo returned before handing it out.
        _check_utc_offset("utcoffset", offset)
        return offset
    def tzname(self):
        """Return the timezone name.
        Note that the name is 100% informational -- there's no requirement that
        it mean anything in particular. For example, "GMT", "UTC", "-500",
        "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
        """
        if self._tzinfo is None:
            return None
        name = self._tzinfo.tzname(self)
        # Validate the reply before returning it to the caller.
        _check_tzname(name)
        return name
    def dst(self):
        """Return 0 if DST is not in effect, or the DST offset (as timedelta
        positive eastward) if DST is in effect.
        This is purely informational; the DST offset has already been added to
        the UTC offset returned by utcoffset() if applicable, so there's no
        need to consult dst() unless you're interested in displaying the DST
        info.
        """
        if self._tzinfo is None:
            # Naive datetimes carry no DST information.
            return None
        offset = self._tzinfo.dst(self)
        _check_utc_offset("dst", offset)
        return offset
# Comparisons of datetime objects with other.
def __eq__(self, other):
if isinstance(other, datetime):
return self._cmp(other, allow_mixed=True) == 0
elif not isinstance(other, date):
return NotImplemented
else:
return False
def __le__(self, other):
if isinstance(other, datetime):
return self._cmp(other) <= 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) < 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, datetime):
return self._cmp(other) >= 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) > 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
    def _cmp(self, other, allow_mixed=False):
        # Three-way comparison helper returning -1, 0 or 1.  With
        # allow_mixed=True (only __eq__ passes it), 2 is returned as an
        # arbitrary "unequal" sentinel when the operands cannot be ordered.
        assert isinstance(other, datetime)
        mytz = self._tzinfo
        ottz = other._tzinfo
        myoff = otoff = None
        if mytz is ottz:
            # Same tzinfo object (or both naive): wall-clock fields are
            # directly comparable.
            base_compare = True
        else:
            myoff = self.utcoffset()
            otoff = other.utcoffset()
            # Assume that allow_mixed means that we are called from __eq__
            if allow_mixed:
                # If flipping fold changes an operand's offset, that operand
                # sits in an ambiguous repeated interval; declare unequal.
                if myoff != self.replace(fold=not self.fold).utcoffset():
                    return 2
                if otoff != other.replace(fold=not other.fold).utcoffset():
                    return 2
            base_compare = myoff == otoff
        if base_compare:
            return _cmp((self._year, self._month, self._day,
                         self._hour, self._minute, self._second,
                         self._microsecond),
                        (other._year, other._month, other._day,
                         other._hour, other._minute, other._second,
                         other._microsecond))
        if myoff is None or otoff is None:
            if allow_mixed:
                return 2 # arbitrary non-zero value
            else:
                raise TypeError("cannot compare naive and aware datetimes")
        # XXX What follows could be done more efficiently...
        diff = self - other # this will take offsets into account
        if diff.days < 0:
            return -1
        return diff and 1 or 0
    def __add__(self, other):
        "Add a datetime and a timedelta."
        if not isinstance(other, timedelta):
            return NotImplemented
        # Fold self into one timedelta counted from proleptic day 0, add,
        # then split the normalized result back into date and time parts.
        delta = timedelta(self.toordinal(),
                          hours=self._hour,
                          minutes=self._minute,
                          seconds=self._second,
                          microseconds=self._microsecond)
        delta += other
        hour, rem = divmod(delta.seconds, 3600)
        minute, second = divmod(rem, 60)
        # timedelta normalization keeps seconds/microseconds within one day,
        # so only the day count can fall outside the supported range.
        if 0 < delta.days <= _MAXORDINAL:
            return datetime.combine(date.fromordinal(delta.days),
                                    time(hour, minute, second,
                                         delta.microseconds,
                                         tzinfo=self._tzinfo))
        raise OverflowError("result out of range")
    __radd__ = __add__
    def __sub__(self, other):
        "Subtract two datetimes, or a datetime and a timedelta."
        if not isinstance(other, datetime):
            if isinstance(other, timedelta):
                return self + -other
            return NotImplemented
        # datetime - datetime: start with the naive wall-clock difference.
        days1 = self.toordinal()
        days2 = other.toordinal()
        secs1 = self._second + self._minute * 60 + self._hour * 3600
        secs2 = other._second + other._minute * 60 + other._hour * 3600
        base = timedelta(days1 - days2,
                         secs1 - secs2,
                         self._microsecond - other._microsecond)
        if self._tzinfo is other._tzinfo:
            # Same tzinfo object: offsets cancel, naive difference is exact.
            return base
        myoff = self.utcoffset()
        otoff = other.utcoffset()
        if myoff == otoff:
            return base
        if myoff is None or otoff is None:
            raise TypeError("cannot mix naive and timezone-aware time")
        # Correct by the offset difference to get the real elapsed time.
        return base + otoff - myoff
    def __hash__(self):
        # Hash is computed lazily and cached in self._hashcode (-1 = unset).
        if self._hashcode == -1:
            # Fold is normalized away so both disambiguations of a repeated
            # wall time hash identically.
            if self.fold:
                t = self.replace(fold=0)
            else:
                t = self
            tzoff = t.utcoffset()
            if tzoff is None:
                # Naive: hash the packed byte state from _getstate().
                self._hashcode = hash(t._getstate()[0])
            else:
                # Aware: hash the UTC-normalized duration since day 0 so
                # equal instants in different zones hash alike.
                days = _ymd2ord(self.year, self.month, self.day)
                seconds = self.hour * 3600 + self.minute * 60 + self.second
                self._hashcode = hash(timedelta(days, seconds, self.microsecond) - tzoff)
        return self._hashcode
    # Pickle support.
    def _getstate(self, protocol=3):
        # Pack into 10 bytes: year (2), month, day, hour, minute, second
        # (1 each) and microsecond (3).
        yhi, ylo = divmod(self._year, 256)
        us2, us3 = divmod(self._microsecond, 256)
        us1, us2 = divmod(us2, 256)
        m = self._month
        if self._fold and protocol > 3:
            # Pickle protocol 4+ smuggles the fold flag into the high bit
            # of the month byte (month itself is always <= 12).
            m += 128
        basestate = bytes([yhi, ylo, m, self._day,
                           self._hour, self._minute, self._second,
                           us1, us2, us3])
        if self._tzinfo is None:
            return (basestate,)
        else:
            return (basestate, self._tzinfo)
    def __setstate(self, string, tzinfo):
        if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class):
            raise TypeError("bad tzinfo state arg")
        (yhi, ylo, m, self._day, self._hour,
         self._minute, self._second, us1, us2, us3) = string
        if m > 127:
            # High bit of the month byte carries fold (see _getstate).
            self._fold = 1
            self._month = m - 128
        else:
            self._fold = 0
            self._month = m
        self._year = yhi * 256 + ylo
        self._microsecond = (((us1 << 8) | us2) << 8) | us3
        self._tzinfo = tzinfo
    def __reduce_ex__(self, protocol):
        # Reconstruct via the class itself; _getstate feeds the constructor.
        return (self.__class__, self._getstate(protocol))
    def __reduce__(self):
        return self.__reduce_ex__(2)
# Class-attribute bounds: the smallest/largest representable datetime and
# the smallest distinguishable difference between two datetimes.
datetime.min = datetime(1, 1, 1)
datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999)
datetime.resolution = timedelta(microseconds=1)
def _isoweek1monday(year):
    """Return the proleptic ordinal of the Monday starting ISO week 1.

    ISO week 1 is the week containing the year's first Thursday, so if
    January 1 falls after a Thursday the first partial week belongs to the
    previous ISO year.
    """
    # XXX This could be done more efficiently
    THURSDAY = 3
    jan1 = _ymd2ord(year, 1, 1)
    jan1_weekday = (jan1 + 6) % 7  # 0 == Monday (see weekday() above)
    monday = jan1 - jan1_weekday
    if jan1_weekday > THURSDAY:
        monday += 7
    return monday
class timezone(tzinfo):
    """Concrete tzinfo with a fixed offset from UTC and an optional name."""

    __slots__ = '_offset', '_name'

    # Sentinel distinguishing "name not given" from an explicit name=None,
    # which must be rejected as a non-string.
    _Omitted = object()

    def __new__(cls, offset, name=_Omitted):
        if not isinstance(offset, timedelta):
            raise TypeError("offset must be a timedelta")
        if name is cls._Omitted:
            if not offset:
                # A nameless zero offset canonicalizes to the UTC singleton.
                return cls.utc
            name = None
        elif not isinstance(name, str):
            raise TypeError("name must be a string")
        if not cls._minoffset <= offset <= cls._maxoffset:
            raise ValueError("offset must be a timedelta "
                             "strictly between -timedelta(hours=24) and "
                             "timedelta(hours=24).")
        return cls._create(offset, name)

    @classmethod
    def _create(cls, offset, name=None):
        # Internal constructor: bypasses __new__'s validation and the
        # zero-offset canonicalization above.
        instance = tzinfo.__new__(cls)
        instance._offset = offset
        instance._name = name
        return instance

    def __getinitargs__(self):
        """Pickle support: the constructor arguments to rebuild self."""
        if self._name is None:
            return (self._offset,)
        return (self._offset, self._name)

    def __eq__(self, other):
        # Only the offset participates in equality; the name is cosmetic.
        if not isinstance(other, timezone):
            return NotImplemented
        return self._offset == other._offset

    def __hash__(self):
        return hash(self._offset)

    def __repr__(self):
        """Formal string, e.g. datetime.timezone(datetime.timedelta(-1, 68400), 'EST');
        the UTC singleton reprs as 'datetime.timezone.utc'."""
        if self is self.utc:
            return 'datetime.timezone.utc'
        prefix = "%s.%s" % (self.__class__.__module__,
                            self.__class__.__qualname__)
        if self._name is None:
            return "%s(%r)" % (prefix, self._offset)
        return "%s(%r, %r)" % (prefix, self._offset, self._name)

    def __str__(self):
        return self.tzname(None)

    def utcoffset(self, dt):
        if dt is not None and not isinstance(dt, datetime):
            raise TypeError("utcoffset() argument must be a datetime instance"
                            " or None")
        return self._offset

    def tzname(self, dt):
        if dt is not None and not isinstance(dt, datetime):
            raise TypeError("tzname() argument must be a datetime instance"
                            " or None")
        if self._name is None:
            # Unnamed zones synthesize a 'UTC+HH:MM[:SS[.ffffff]]' label.
            return self._name_from_offset(self._offset)
        return self._name

    def dst(self, dt):
        # A fixed-offset zone never observes daylight saving time.
        if dt is not None and not isinstance(dt, datetime):
            raise TypeError("dst() argument must be a datetime instance"
                            " or None")
        return None

    def fromutc(self, dt):
        if not isinstance(dt, datetime):
            raise TypeError("fromutc() argument must be a datetime instance"
                            " or None")
        if dt.tzinfo is not self:
            raise ValueError("fromutc: dt.tzinfo "
                             "is not self")
        return dt + self._offset

    # Offsets must lie strictly within +/- 24 hours.
    _maxoffset = timedelta(hours=24, microseconds=-1)
    _minoffset = -_maxoffset

    @staticmethod
    def _name_from_offset(delta):
        if not delta:
            return 'UTC'
        sign = '+'
        if delta < timedelta(0):
            sign = '-'
            delta = -delta
        hours, rest = divmod(delta, timedelta(hours=1))
        minutes, rest = divmod(rest, timedelta(minutes=1))
        name = f'UTC{sign}{hours:02d}:{minutes:02d}'
        # Seconds (and microseconds) appear only when non-zero.
        if rest.seconds or rest.microseconds:
            name += f':{rest.seconds:02d}'
        if rest.microseconds:
            name += f'.{rest.microseconds:06d}'
        return name
# The UTC singleton is built via _create because __new__ canonicalizes a
# nameless zero offset by returning this very attribute.
timezone.utc = timezone._create(timedelta(0))
# bpo-37642: These attributes are rounded to the nearest minute for backwards
# compatibility, even though the constructor will accept a wider range of
# values. This may change in the future.
timezone.min = timezone._create(-timedelta(hours=23, minutes=59))
timezone.max = timezone._create(timedelta(hours=23, minutes=59))
# The Unix epoch as an aware datetime -- the anchor used for timestamp math.
_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)
# Some time zone algebra. For a datetime x, let
# x.n = x stripped of its timezone -- its naive time.
# x.o = x.utcoffset(), and assuming that doesn't raise an exception or
# return None
# x.d = x.dst(), and assuming that doesn't raise an exception or
# return None
# x.s = x's standard offset, x.o - x.d
#
# Now some derived rules, where k is a duration (timedelta).
#
# 1. x.o = x.s + x.d
# This follows from the definition of x.s.
#
# 2. If x and y have the same tzinfo member, x.s = y.s.
# This is actually a requirement, an assumption we need to make about
# sane tzinfo classes.
#
# 3. The naive UTC time corresponding to x is x.n - x.o.
# This is again a requirement for a sane tzinfo class.
#
# 4. (x+k).s = x.s
#    This follows from #2, and that datetime+timedelta preserves tzinfo.
#
# 5. (x+k).n = x.n + k
# Again follows from how arithmetic is defined.
#
# Now we can explain tz.fromutc(x). Let's assume it's an interesting case
# (meaning that the various tzinfo methods exist, and don't blow up or return
# None when called).
#
# The function wants to return a datetime y with timezone tz, equivalent to x.
# x is already in UTC.
#
# By #3, we want
#
# y.n - y.o = x.n [1]
#
# The algorithm starts by attaching tz to x.n, and calling that y. So
# x.n = y.n at the start. Then it wants to add a duration k to y, so that [1]
# becomes true; in effect, we want to solve [2] for k:
#
# (y+k).n - (y+k).o = x.n [2]
#
# By #1, this is the same as
#
# (y+k).n - ((y+k).s + (y+k).d) = x.n [3]
#
# By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start.
# Substituting that into [3],
#
# x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving
# k - (y+k).s - (y+k).d = 0; rearranging,
# k = (y+k).s - (y+k).d; by #4, (y+k).s == y.s, so
# k = y.s - (y+k).d
#
# On the RHS, (y+k).d can't be computed directly, but y.s can be, and we
# approximate k by ignoring the (y+k).d term at first. Note that k can't be
# very large, since all offset-returning methods return a duration of magnitude
# less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must
# be 0, so ignoring it has no consequence then.
#
# In any case, the new value is
#
# z = y + y.s [4]
#
# It's helpful to step back and look at [4] from a higher level: it's simply
# mapping from UTC to tz's standard time.
#
# At this point, if
#
# z.n - z.o = x.n [5]
#
# we have an equivalent time, and are almost done. The insecurity here is
# at the start of daylight time. Picture US Eastern for concreteness. The wall
# time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good
# sense then. The docs ask that an Eastern tzinfo class consider such a time to
# be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST
# on the day DST starts. We want to return the 1:MM EST spelling because that's
# the only spelling that makes sense on the local wall clock.
#
# In fact, if [5] holds at this point, we do have the standard-time spelling,
# but that takes a bit of proof. We first prove a stronger result. What's the
# difference between the LHS and RHS of [5]? Let
#
# diff = x.n - (z.n - z.o) [6]
#
# Now
# z.n = by [4]
# (y + y.s).n = by #5
# y.n + y.s = since y.n = x.n
#     x.n + y.s = since z and y have the same tzinfo member,
# y.s = z.s by #2
# x.n + z.s
#
# Plugging that back into [6] gives
#
# diff =
# x.n - ((x.n + z.s) - z.o) = expanding
# x.n - x.n - z.s + z.o = cancelling
# - z.s + z.o = by #2
# z.d
#
# So diff = z.d.
#
# If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time
# spelling we wanted in the endcase described above. We're done. Contrarily,
# if z.d = 0, then we have a UTC equivalent, and are also done.
#
# If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to
# add to z (in effect, z is in tz's standard time, and we need to shift the
# local clock into tz's daylight time).
#
# Let
#
# z' = z + z.d = z + diff [7]
#
# and we can again ask whether
#
# z'.n - z'.o = x.n [8]
#
# If so, we're done. If not, the tzinfo class is insane, according to the
# assumptions we've made. This also requires a bit of proof. As before, let's
# compute the difference between the LHS and RHS of [8] (and skipping some of
# the justifications for the kinds of substitutions we've done several times
# already):
#
# diff' = x.n - (z'.n - z'.o) = replacing z'.n via [7]
# x.n - (z.n + diff - z'.o) = replacing diff via [6]
# x.n - (z.n + x.n - (z.n - z.o) - z'.o) =
# x.n - z.n - x.n + z.n - z.o + z'.o = cancel x.n
# - z.n + z.n - z.o + z'.o = cancel z.n
# - z.o + z'.o = #1 twice
# -z.s - z.d + z'.s + z'.d = z and z' have same tzinfo
# z'.d - z.d
#
# So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal,
# we've found the UTC-equivalent so are done. In fact, we stop with [7] and
# return z', not bothering to compute z'.d.
#
# How could z.d and z'.d differ? z' = z + z.d [7], so merely moving z' by
# a dst() offset, and starting *from* a time already in DST (we know z.d != 0),
# would have to change the result dst() returns: we start in DST, and moving
# a little further into it takes us out of DST.
#
# There isn't a sane case where this can happen. The closest it gets is at
# the end of DST, where there's an hour in UTC with no spelling in a hybrid
# tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During
# that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM
# UTC) because the docs insist on that, but 0:MM is taken as being in daylight
# time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local
# clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in
# standard time. Since that's what the local clock *does*, we want to map both
# UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous
# in local time, but so it goes -- it's the way the local clock works.
#
# When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0,
# so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going.
# z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8]
# (correctly) concludes that z' is not UTC-equivalent to x.
#
# Because we know z.d said z was in daylight time (else [5] would have held and
# we would have stopped then), and we know z.d != z'.d (else [8] would have held
# and we have stopped then), and there are only 2 possible values dst() can
# return in Eastern, it follows that z'.d must be 0 (which it is in the example,
# but the reasoning doesn't depend on the example -- it depends on there being
# two possible dst() outcomes, one zero and the other non-zero). Therefore
# z' must be in standard time, and is the spelling we want in this case.
#
# Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is
# concerned (because it takes z' as being in standard time rather than the
# daylight time we intend here), but returning it gives the real-life "local
# clock repeats an hour" behavior when mapping the "unspellable" UTC hour into
# tz.
#
# When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with
# the 1:MM standard time spelling we want.
#
# So how can this break? One of the assumptions must be violated. Two
# possibilities:
#
# 1) [2] effectively says that y.s is invariant across all y belonging to a given
# time zone. This isn't true if, for political reasons or continental drift,
# a region decides to change its base offset from UTC.
#
# 2) There may be versions of "double daylight" time where the tail end of
# the analysis gives up a step too early. I haven't thought about that
# enough to say.
#
# In any case, it's clear that the default fromutc() is strong enough to handle
# "almost all" time zones: so long as the standard offset is invariant, it
# doesn't matter if daylight time transition points change from year to year, or
# if daylight time is skipped in some years; it doesn't matter how large or
# small dst() may get within its bounds; and it doesn't even matter if some
# perverse time zone returns a negative dst(). So a breaking case must be
# pretty bizarre, and a tzinfo subclass can override fromutc() if it is.
# Prefer the C implementation when available: `from _datetime import *`
# shadows every public name defined above with its accelerated twin.
try:
    from _datetime import *
except ImportError:
    pass
else:
    # Clean up unused names
    del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH, _DI100Y, _DI400Y,
         _DI4Y, _EPOCH, _MAXORDINAL, _MONTHNAMES, _build_struct_time,
         _check_date_fields, _check_int_field, _check_time_fields,
         _check_tzinfo_arg, _check_tzname, _check_utc_offset, _cmp, _cmperror,
         _date_class, _days_before_month, _days_before_year, _days_in_month,
         _format_time, _format_offset, _is_leap, _isoweek1monday, _math,
         _ord2ymd, _time, _time_class, _tzinfo_class, _wrap_strftime, _ymd2ord,
         _divide_and_round, _parse_isoformat_date, _parse_isoformat_time,
         _parse_hh_mm_ss_ff)
    # XXX Since import * above excludes names that start with _,
    # docstring does not get overwritten. In the future, it may be
    # appropriate to maintain a single module level docstring and
    # remove the following line.
    from _datetime import __doc__
|
MalloyPower/parsing-python | refs/heads/master | front-end/testsuite-python-lib/Python-2.2/Lib/plat-irix5/SV.py | 14 | NTSC_XMAX = 640
# Constants for the SGI IRIX Starter Video ("sv") library.
# Maximum frame dimensions for the two broadcast standards (NTSC_XMAX = 640
# precedes this chunk).
NTSC_YMAX = 480
PAL_XMAX = 768
PAL_YMAX = 576
BLANKING_BUFFER_SIZE = 2
MAX_SOURCES = 2
# mode parameter for Bind calls
IN_OFF = 0 # No Video
IN_OVER = 1 # Video over graphics
IN_UNDER = 2 # Video under graphics
IN_REPLACE = 3 # Video replaces entire win
# mode parameters for LoadMap calls. Specifies buffer, always 256 entries
INPUT_COLORMAP = 0 # tuples of 8-bit RGB
CHROMA_KEY_MAP = 1 # tuples of 8-bit RGB
COLOR_SPACE_MAP = 2 # tuples of 8-bit RGB
GAMMA_MAP = 3 # tuples of 24-bit red values
# mode parameters for UseExclusive calls
INPUT = 0
OUTPUT = 1
IN_OUT = 2
# Format constants for the capture routines
RGB8_FRAMES = 0 # noninterleaved 8 bit 3:2:3 RBG fields
RGB32_FRAMES = 1 # 32-bit 8:8:8 RGB frames
YUV411_FRAMES = 2 # interleaved, 8:2:2 YUV format
YUV411_FRAMES_AND_BLANKING_BUFFER = 3
#
# sv.SetParam is passed variable length argument lists,
# consisting of <name, value> pairs. The following
# constants identify argument names.
#
_NAME_BASE = 1000
SOURCE = (_NAME_BASE + 0)
SOURCE1 = 0
SOURCE2 = 1
SOURCE3 = 2
COLOR = (_NAME_BASE + 1)
DEFAULT_COLOR = 0
USER_COLOR = 1
MONO = 2
OUTPUTMODE = (_NAME_BASE + 2)
LIVE_OUTPUT = 0
STILL24_OUT = 1
FREEZE = (_NAME_BASE + 3)
DITHER = (_NAME_BASE + 4)
OUTPUT_FILTER = (_NAME_BASE + 5)
HUE = (_NAME_BASE + 6)
GENLOCK = (_NAME_BASE + 7)
GENLOCK_OFF = 0
GENLOCK_ON = 1
GENLOCK_HOUSE = 2
BROADCAST = (_NAME_BASE + 8)
NTSC = 0
PAL = 1
VIDEO_MODE = (_NAME_BASE + 9)
COMP = 0
SVIDEO = 1
INPUT_BYPASS = (_NAME_BASE + 10)
FIELDDROP = (_NAME_BASE + 11)
SLAVE = (_NAME_BASE + 12)
APERTURE_FACTOR = (_NAME_BASE + 13)
AFACTOR_0 = 0
AFACTOR_QTR = 1
AFACTOR_HLF = 2
AFACTOR_ONE = 3
CORING = (_NAME_BASE + 14)
COR_OFF = 0
COR_1LSB = 1
COR_2LSB = 2
COR_3LSB = 3
APERTURE_BANDPASS = (_NAME_BASE + 15)
ABAND_F0 = 0
ABAND_F1 = 1
ABAND_F2 = 2
ABAND_F3 = 3
PREFILTER = (_NAME_BASE + 16)
CHROMA_TRAP = (_NAME_BASE + 17)
CK_THRESHOLD = (_NAME_BASE + 18)
PAL_SENSITIVITY = (_NAME_BASE + 19)
GAIN_CONTROL = (_NAME_BASE + 20)
GAIN_SLOW = 0
GAIN_MEDIUM = 1
GAIN_FAST = 2
GAIN_FROZEN = 3
AUTO_CKILL = (_NAME_BASE + 21)
VTR_MODE = (_NAME_BASE + 22)
VTR_INPUT = 0
CAMERA_INPUT = 1
LUMA_DELAY = (_NAME_BASE + 23)
VNOISE = (_NAME_BASE + 24)
VNOISE_NORMAL = 0
VNOISE_SEARCH = 1
VNOISE_AUTO = 2
VNOISE_BYPASS = 3
CHCV_PAL = (_NAME_BASE + 25)
CHCV_NTSC = (_NAME_BASE + 26)
CCIR_LEVELS = (_NAME_BASE + 27)
STD_CHROMA = (_NAME_BASE + 28)
DENC_VTBYPASS = (_NAME_BASE + 29)
FAST_TIMECONSTANT = (_NAME_BASE + 30)
GENLOCK_DELAY = (_NAME_BASE + 31)
PHASE_SYNC = (_NAME_BASE + 32)
VIDEO_OUTPUT = (_NAME_BASE + 33)
CHROMA_PHASEOUT = (_NAME_BASE + 34)
CHROMA_CENTER = (_NAME_BASE + 35)
YUV_TO_RGB_INVERT = (_NAME_BASE + 36)
SOURCE1_BROADCAST = (_NAME_BASE + 37)
SOURCE1_MODE = (_NAME_BASE + 38)
SOURCE2_BROADCAST = (_NAME_BASE + 39)
SOURCE2_MODE = (_NAME_BASE + 40)
SOURCE3_BROADCAST = (_NAME_BASE + 41)
SOURCE3_MODE = (_NAME_BASE + 42)
SIGNAL_STD = (_NAME_BASE + 43)
NOSIGNAL = 2
SIGNAL_COLOR = (_NAME_BASE + 44)
|
bgxavier/neutron | refs/heads/master | neutron/openstack/common/uuidutils.py | 166 | # Copyright (c) 2012 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
UUID related utilities and helper functions.
"""
import uuid
def generate_uuid():
    """Return a freshly generated random (version 4) UUID as a string."""
    return '{}'.format(uuid.uuid4())
def is_uuid_like(val):
"""Returns validation of a value as a UUID.
For our purposes, a UUID is a canonical form string:
aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
"""
try:
return str(uuid.UUID(val)) == val
except (TypeError, ValueError, AttributeError):
return False
|
J861449197/edx-platform | refs/heads/master | openedx/core/djangoapps/user_api/tests/test_middleware.py | 152 | """Tests for user API middleware"""
from mock import Mock, patch
from unittest import TestCase
from django.http import HttpResponse
from django.test.client import RequestFactory
from student.tests.factories import UserFactory, AnonymousUserFactory
from ..tests.factories import UserCourseTagFactory
from ..middleware import UserTagsEventContextMiddleware
class TagsMiddlewareTest(TestCase):
    """
    Test the UserTagsEventContextMiddleware
    """
    def setUp(self):
        super(TagsMiddlewareTest, self).setUp()
        self.middleware = UserTagsEventContextMiddleware()
        self.user = UserFactory.create()
        self.other_user = UserFactory.create()
        self.course_id = 'mock/course/id'
        self.request_factory = RequestFactory()
        # TODO: Make it so we can use reverse. Appears to fail depending on the order in which tests are run
        #self.request = RequestFactory().get(reverse('courseware', kwargs={'course_id': self.course_id}))
        self.request = RequestFactory().get('/courses/{}/courseware'.format(self.course_id))
        self.request.user = self.user
        self.response = Mock(spec=HttpResponse)
        # Patch the event tracker for the whole test; undone via addCleanup.
        patcher = patch('openedx.core.djangoapps.user_api.middleware.tracker')
        self.tracker = patcher.start()
        self.addCleanup(patcher.stop)
    def process_request(self):
        """
        Execute process request using the request, and verify that it returns None
        so that the request continues.
        """
        # Middleware should pass request through
        self.assertEquals(self.middleware.process_request(self.request), None)
    def assertContextSetTo(self, context):
        """Asserts UserTagsEventContextMiddleware.CONTEXT_NAME matches ``context``"""
        self.tracker.get_tracker.return_value.enter_context.assert_called_with( # pylint: disable=maybe-no-member
            UserTagsEventContextMiddleware.CONTEXT_NAME,
            context
        )
    def test_tag_context(self):
        # Tags belonging to self.user in self.course_id should surface...
        for key, value in (('int_value', 1), ('str_value', "two")):
            UserCourseTagFactory.create(
                course_id=self.course_id,
                user=self.user,
                key=key,
                value=value,
            )
        # ...while tags for other users or other courses must be excluded.
        UserCourseTagFactory.create(
            course_id=self.course_id,
            user=self.other_user,
            key="other_user",
            value="other_user_value"
        )
        UserCourseTagFactory.create(
            course_id='other/course/id',
            user=self.user,
            key="other_course",
            value="other_course_value"
        )
        self.process_request()
        # Note: values are stringified ('1') in the emitted context.
        self.assertContextSetTo({
            'course_id': self.course_id,
            'course_user_tags': {
                'int_value': '1',
                'str_value': 'two',
            }
        })
    def test_no_tags(self):
        # No tags: the context still carries the course id and an empty dict.
        self.process_request()
        self.assertContextSetTo({'course_id': self.course_id, 'course_user_tags': {}})
    def test_not_course_url(self):
        # Non-course URLs get an empty context.
        self.request = self.request_factory.get('/not/a/course/url')
        self.request.user = self.user
        self.process_request()
        self.assertContextSetTo({})
    def test_invalid_course_id(self):
        # A malformed course id is treated like a non-course URL.
        self.request = self.request_factory.get('/courses/edX/101/')
        self.request.user = self.user
        self.process_request()
        self.assertContextSetTo({})
    def test_anonymous_user(self):
        # Anonymous users still get the course id but no user tags.
        self.request.user = AnonymousUserFactory()
        self.process_request()
        self.assertContextSetTo({'course_id': self.course_id, 'course_user_tags': {}})
    def test_remove_context(self):
        get_tracker = self.tracker.get_tracker # pylint: disable=maybe-no-member
        exit_context = get_tracker.return_value.exit_context
        # The middleware should clean up the context when the request is done
        self.assertEquals(
            self.middleware.process_response(self.request, self.response),
            self.response
        )
        exit_context.assert_called_with(UserTagsEventContextMiddleware.CONTEXT_NAME)
        exit_context.reset_mock()
        # Even if the tracker blows up, the middleware should still return the response
        get_tracker.side_effect = Exception
        self.assertEquals(
            self.middleware.process_response(self.request, self.response),
            self.response
        )
|
haad/ansible | refs/heads/devel | lib/ansible/modules/messaging/rabbitmq_queue.py | 39 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Manuel Sousa <manuel.sousa@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module maturity/support metadata consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rabbitmq_queue
author: "Manuel Sousa (@manuel-sousa)"
version_added: "2.0"
short_description: This module manages rabbitMQ queues
description:
- This module uses rabbitMQ Rest API to create/delete queues
requirements: [ "requests >= 1.0.0" ]
options:
name:
description:
- Name of the queue to create
required: true
state:
description:
- Whether the queue should be present or absent
- Only present implemented atm
choices: [ "present", "absent" ]
required: false
default: present
login_user:
description:
- rabbitMQ user for connection
required: false
default: guest
login_password:
description:
- rabbitMQ password for connection
required: false
default: false
login_host:
description:
- rabbitMQ host for connection
required: false
default: localhost
login_port:
description:
- rabbitMQ management api port
required: false
default: 15672
vhost:
description:
- rabbitMQ virtual host
required: false
default: "/"
durable:
description:
- whether queue is durable or not
required: false
choices: [ "yes", "no" ]
default: yes
auto_delete:
description:
- if the queue should delete itself after all queues/queues unbound from it
required: false
choices: [ "yes", "no" ]
default: no
message_ttl:
description:
- How long a message can live in queue before it is discarded (milliseconds)
required: False
default: forever
auto_expires:
description:
- How long a queue can be unused before it is automatically deleted (milliseconds)
required: false
default: forever
max_length:
description:
- How many messages can the queue contain before it starts rejecting
required: false
default: no limit
dead_letter_exchange:
description:
- Optional name of an exchange to which messages will be republished if they
- are rejected or expire
required: false
default: None
dead_letter_routing_key:
description:
- Optional replacement routing key to use when a message is dead-lettered.
- Original routing key will be used if unset
required: false
default: None
max_priority:
description:
- Maximum number of priority levels for the queue to support.
- If not set, the queue will not support message priorities.
- Larger numbers indicate higher priority.
required: false
default: None
version_added: "2.4"
arguments:
description:
- extra arguments for queue. If defined this argument is a key/value dictionary
required: false
default: {}
'''
EXAMPLES = '''
# Create a queue
- rabbitmq_queue:
name: myQueue
# Create a queue on remote host
- rabbitmq_queue:
name: myRemoteQueue
login_user: user
login_password: secret
login_host: remote.example.org
'''
import json
# `requests` is optional at import time; its absence is recorded in
# HAS_REQUESTS so main() can fail with a helpful installation message
# instead of an ImportError traceback.
try:
    import requests
    HAS_REQUESTS = True
except ImportError:
    HAS_REQUESTS = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib import parse as urllib_parse
def main():
    """Ansible module entry point: ensure a RabbitMQ queue is present/absent.

    Talks to the RabbitMQ management HTTP API (via the ``requests`` library)
    to look the queue up, compare its attributes against the requested
    configuration, and create or delete it as needed.  Supports check mode.
    """
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            name=dict(required=True, type='str'),
            login_user=dict(default='guest', type='str'),
            login_password=dict(default='guest', type='str', no_log=True),
            login_host=dict(default='localhost', type='str'),
            login_port=dict(default='15672', type='str'),
            vhost=dict(default='/', type='str'),
            durable=dict(default=True, type='bool'),
            auto_delete=dict(default=False, type='bool'),
            message_ttl=dict(default=None, type='int'),
            auto_expires=dict(default=None, type='int'),
            max_length=dict(default=None, type='int'),
            dead_letter_exchange=dict(default=None, type='str'),
            dead_letter_routing_key=dict(default=None, type='str'),
            arguments=dict(default=dict(), type='dict'),
            max_priority=dict(default=None, type='int')
        ),
        supports_check_mode=True
    )

    # Fail fast if the optional dependency is missing, before doing any work.
    if not HAS_REQUESTS:
        module.fail_json(msg="requests library is required for this module. To install, use `pip install requests`")

    # Management API endpoint for this queue; the vhost must be URL-quoted
    # (the default vhost is "/").
    url = "http://%s:%s/api/queues/%s/%s" % (
        module.params['login_host'],
        module.params['login_port'],
        urllib_parse.quote(module.params['vhost'], ''),
        module.params['name']
    )

    result = dict(changed=False, name=module.params['name'])

    # Check whether the queue already exists.
    r = requests.get(url, auth=(module.params['login_user'], module.params['login_password']))

    if r.status_code == 200:
        queue_exists = True
        response = r.json()
    elif r.status_code == 404:
        queue_exists = False
        response = r.text
    else:
        module.fail_json(
            msg="Invalid response from RESTAPI when trying to check if queue exists",
            details=r.text
        )

    if module.params['state'] == 'present':
        change_required = not queue_exists
    else:
        change_required = queue_exists

    # The management API cannot modify an existing queue; if any attribute of
    # the existing queue differs from the requested configuration, fail
    # instead of silently leaving the queue unchanged.
    if not change_required and r.status_code == 200 and module.params['state'] == 'present':
        if not (
            response['durable'] == module.params['durable'] and
            response['auto_delete'] == module.params['auto_delete'] and
            (
                ('x-message-ttl' in response['arguments'] and response['arguments']['x-message-ttl'] == module.params['message_ttl']) or
                ('x-message-ttl' not in response['arguments'] and module.params['message_ttl'] is None)
            ) and
            (
                ('x-expires' in response['arguments'] and response['arguments']['x-expires'] == module.params['auto_expires']) or
                ('x-expires' not in response['arguments'] and module.params['auto_expires'] is None)
            ) and
            (
                ('x-max-length' in response['arguments'] and response['arguments']['x-max-length'] == module.params['max_length']) or
                ('x-max-length' not in response['arguments'] and module.params['max_length'] is None)
            ) and
            (
                ('x-dead-letter-exchange' in response['arguments'] and
                 response['arguments']['x-dead-letter-exchange'] == module.params['dead_letter_exchange']) or
                ('x-dead-letter-exchange' not in response['arguments'] and module.params['dead_letter_exchange'] is None)
            ) and
            (
                ('x-dead-letter-routing-key' in response['arguments'] and
                 response['arguments']['x-dead-letter-routing-key'] == module.params['dead_letter_routing_key']) or
                ('x-dead-letter-routing-key' not in response['arguments'] and module.params['dead_letter_routing_key'] is None)
            ) and
            (
                ('x-max-priority' in response['arguments'] and
                 response['arguments']['x-max-priority'] == module.params['max_priority']) or
                ('x-max-priority' not in response['arguments'] and module.params['max_priority'] is None)
            )
        ):
            module.fail_json(
                msg="RabbitMQ RESTAPI doesn't support attribute changes for existing queues",
            )

    # Map the convenience parameters onto the x-* arguments RabbitMQ expects.
    for k, v in {
        'message_ttl': 'x-message-ttl',
        'auto_expires': 'x-expires',
        'max_length': 'x-max-length',
        'dead_letter_exchange': 'x-dead-letter-exchange',
        'dead_letter_routing_key': 'x-dead-letter-routing-key',
        'max_priority': 'x-max-priority'
    }.items():
        if module.params[k] is not None:
            module.params['arguments'][v] = module.params[k]

    # In check mode, report what would change without touching the broker.
    if module.check_mode:
        result['changed'] = change_required
        result['details'] = response
        result['arguments'] = module.params['arguments']
        module.exit_json(**result)

    # Apply the change.
    if change_required:
        if module.params['state'] == 'present':
            r = requests.put(
                url,
                auth=(module.params['login_user'], module.params['login_password']),
                headers={"content-type": "application/json"},
                data=json.dumps({
                    "durable": module.params['durable'],
                    "auto_delete": module.params['auto_delete'],
                    "arguments": module.params['arguments']
                })
            )
        elif module.params['state'] == 'absent':
            r = requests.delete(url, auth=(module.params['login_user'], module.params['login_password']))

        # RabbitMQ 3.6.7 changed this response code from 204 to 201
        if r.status_code in (201, 204):
            result['changed'] = True
            module.exit_json(**result)
        else:
            # Fixed: the message previously said "Error creating queue" even
            # when the failed operation was a deletion.
            module.fail_json(
                msg="Error creating queue" if module.params['state'] == 'present' else "Error deleting queue",
                status=r.status_code,
                details=r.text
            )

    else:
        result['changed'] = False
        module.exit_json(**result)
if __name__ == '__main__':
main()
|
tualatrix/django-profile | refs/heads/master | userprofile/templatetags/avatars.original.py | 14 | # coding=UTF-8
from django.template import Library, Node, Template, TemplateSyntaxError, \
Variable
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext as u_
from django.contrib.auth.models import User
from django.conf import settings
from userprofile import profile_settings as _settings
from userprofile.models import Profile
# from PythonMagick import Image
from utils.TuxieMagick import Image
from os import path, makedirs
from shutil import copy
register = Library()
class ResizedThumbnailNode(Node):
    """Template node that renders the URL of a user's avatar, resized
    (server-side, via TuxieMagick) to a square of the requested size.
    """
    def __init__(self, size, username=None):
        # `size` may be a literal int or a template variable name; try the
        # literal first and defer variable resolution to render().
        try:
            self.size = int(size)
        except:
            self.size = Variable(size)
        self.user = username
    def get_user(self, context):
        """Return the User whose avatar should be rendered.

        Resolution order: explicit username lookup, then a template
        variable of that name, then the current `user` context variable.
        """
        # If there's a username, go get it! Otherwise get the current.
        if self.user:
            try:
                user = User.objects.get(username=self.user)
            except:
                user = Variable(self.user).resolve(context)
        else:
            user = Variable('user').resolve(context)
        return user
    def size_equals(self, file=None):
        """True when the requested size matches the default avatar width
        (no file given) or the width of the given image file.
        """
        if not file:
            return self.size == _settings.DEFAULT_AVATAR_WIDTH
        else:
            return self.size == Image(file).size().width()
    def get_profile(self):
        """Return the user's profile, or '' when no profile can be found.

        Falls back to a direct Profile lookup when django-profile is not
        configured as AUTH_PROFILE_MODULE.
        """
        # Maybe django-profile it's not set as AUTH_PROFILE_MODULE
        try:
            profile = self.user.get_profile()
        except Exception, e:
            print e
            if self.user.is_authenticated():
                profile = Profile.objects.get(user=self.user)
            else:
                print "There is no user to get it's avatars for."
                return ''
        return profile
    def get_file(self, profile=None):
        """Return (file_root, file_name, is_default) for the avatar image.

        For compatibility with the official django-profile model this
        checks whether profile.avatar holds a path or just a filename.
        In my opinion only the file name should be saved in the database,
        and all files stored in a standard directory:
        settings.AVATAR_DIRS[int]/str(User)/settings_DEFAULT_AVATAR_WIDTH/
        """
        default = False
        try:
            file_root = path.join(settings.MEDIA_ROOT,
                profile.avatar[:profile.avatar.rindex('/')+1])
            file_name = profile.avatar[profile.avatar.rindex('/')+1:]
        except:
            file_root = _settings.AVATARS_DIR
            if profile is not None and profile.avatar:
                # NOTE(review): self.size is an int here, so this path.join
                # call looks like it would raise TypeError — confirm whether
                # this branch is ever taken.
                file_root = path.join(file_root, self.size)
                file_name = profile.avatar
            else:
                file_name = _settings.DEFAULT_AVATAR
                default = True
        return (file_root, file_name, default)
    def as_url(self, path):
        """Translate a MEDIA_ROOT filesystem path into a MEDIA_URL URL.

        NOTE(review): the `path` parameter shadows the os.path module
        imported at module level (harmless here, but easy to misread).
        """
        try:
            return path.replace(settings.MEDIA_ROOT, settings.MEDIA_URL)
        except:
            return ''
    def render(self, context):
        """Return the URL of the resized avatar, generating the resized
        file on disk if it does not exist yet; returns '' on any failure.
        """
        try:
            # If size is not an int, then it's a Variable, so try to resolve it.
            if not isinstance(self.size, int):
                self.size = int(self.size.resolve(context))
            self.user = self.get_user(context)
        except Exception, e:
            print e
            return '' # just die...
        # Never upscale past the stored original's width.
        if self.size > _settings.DEFAULT_AVATAR_WIDTH:
            return '' # unacceptable
        profile = self.get_profile()
        if not profile:
            return ''
        # Avatar's heaven, where all the avatars go.
        avatars_root = path.join(_settings.AVATARS_DIR,
                                 slugify(self.user.username))
        file_root, file_name, defaulting = self.get_file(profile)
        if defaulting:
            file_root = _settings.AVATARS_DIR
            if self.size_equals():
                return self.as_url(path.join(file_root, file_name))
        file_path = path.join(file_root, file_name)
        # I don't return the default because I have to resize it.
        if not defaulting:
            if path.exists(file_path) and self.size_equals(file_path):
                return self.as_url(file_path)
        else:
            if not profile.avatar:
                file_root = _settings.AVATARS_DIR
                file_path = path.join(file_root, _settings.DEFAULT_AVATAR)
        # Didn't find a ready-made file; try to generate the resized copy.
        if path.exists(file_path):
            orig_file = Image(file_path)
            dest_root = path.join(avatars_root, str(self.size))
            try:
                makedirs(dest_root)
            except Exception, e:
                # Directory most likely exists already; ignore.
                print e
            # Save the new path for later...
            dest_path = path.join(dest_root, file_name)
        else:
            # Did my best...
            return '' # fail silently
        orig_file.scale(self.size)
        if orig_file.write(dest_path):
            return self.as_url(dest_path)
        else:
            print '=== ERROR ==='
            return '' # damn! Close but no cigar...
@register.tag('avatar')
def Thumbnail(parser, token):
    """Compile the ``{% avatar [size] [username] %}`` template tag.

    Takes the square avatar size in pixels and, optionally, a username;
    with no arguments the configured default avatar width is used.
    """
    bits = token.contents.split()
    if len(bits) > 3:
        raise TemplateSyntaxError(u_(u"You have to provide only the size as \
            an integer (both sides will be equal) and optionally, the \
            username."))
    username = bits[2] if len(bits) == 3 else None
    if len(bits) < 2:
        # No size argument: fall back to the default width.
        bits.append(_settings.DEFAULT_AVATAR_WIDTH)
    return ResizedThumbnailNode(bits[1], username)
|
shanot/imp | refs/heads/develop | modules/misc/.imp_info.py | 2 | {
"name": "IMP.misc"
}
|
Passtechsoft/TPEAlpGen | refs/heads/master | blender/scons/scons-local/SCons/Variables/BoolVariable.py | 3 | """engine.SCons.Variables.BoolVariable
This file defines the option type for SCons implementing true/false values.
Usage example:
opts = Variables()
opts.Add(BoolVariable('embedded', 'build for an embedded system', 0))
...
if env['embedded'] == 1:
...
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Variables/BoolVariable.py 2014/07/05 09:42:21 garyo"
__all__ = ['BoolVariable',]
import SCons.Errors
__true_strings = ('y', 'yes', 'true', 't', '1', 'on' , 'all' )
__false_strings = ('n', 'no', 'false', 'f', '0', 'off', 'none')
def _text2bool(val):
"""
Converts strings to True/False depending on the 'truth' expressed by
the string. If the string can't be converted, the original value
will be returned.
See '__true_strings' and '__false_strings' for values considered
'true' or 'false respectivly.
This is usable as 'converter' for SCons' Variables.
"""
lval = val.lower()
if lval in __true_strings: return True
if lval in __false_strings: return False
raise ValueError("Invalid value for boolean option: %s" % val)
def _validator(key, val, env):
"""
Validates the given value to be either '0' or '1'.
This is usable as 'validator' for SCons' Variables.
"""
if not env[key] in (True, False):
raise SCons.Errors.UserError(
'Invalid value for boolean option %s: %s' % (key, env[key]))
def BoolVariable(key, help, default):
"""
The input parameters describe a boolen option, thus they are
returned with the correct converter and validator appended. The
'help' text will by appended by '(yes|no) to show the valid
valued. The result is usable for input to opts.Add().
"""
return (key, '%s (yes|no)' % help, default,
_validator, _text2bool)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
sudasana/armcom | refs/heads/master | steamworks/interfaces/userstats.py | 1 | from ctypes import *
from enum import Enum
import steamworks.util as util
from steamworks.enums import *
from steamworks.structs import *
from steamworks.exceptions import *
class SteamUserStats(object):
    """Wrapper exposing the STEAMWORKS user statistics, achievements and
    leaderboard calls of the loaded native ``steam`` library object.
    """
    # ctypes prototype for the leaderboard-find callback:
    # void callback(FindLeaderboardResult_t)
    _LeaderboardFindResult_t = CFUNCTYPE(None, FindLeaderboardResult_t)
    # Keep a reference to the registered callback so the ctypes trampoline is
    # not garbage-collected while native code may still invoke it.
    _LeaderboardFindResult = None

    def __init__(self, steam: object):
        """Store the STEAMWORKS wrapper; raise if it is not loaded yet."""
        self.steam = steam
        if not self.steam.loaded():
            raise SteamNotLoadedException('STEAMWORKS not yet loaded')

    def GetAchievement(self, name: str) -> bool:
        """Return true/false if user has given achievement

        :param name: str
        :return: bool
        """
        return self.steam.GetAchievement(name)

    def GetNumAchievements(self) -> int:
        """Get the number of achievements defined in the App Admin panel of the Steamworks website.

        :return: int
        """
        return self.steam.GetNumAchievements()

    def GetAchievementName(self, index: int) -> str:
        """Gets the 'API name' for an achievement index between 0 and GetNumAchievements.

        :param index: int
        :return: str
        """
        return self.steam.GetAchievementName(index)

    def GetAchievementDisplayAttribute(self, name: str, key: str) -> str:
        """Get general attributes for an achievement. Currently provides: Name, Description, and Hidden status.

        :param name: str
        :param key: str
        :return: str
        """
        return self.steam.GetAchievementDisplayAttribute(name, key)

    def GetStatFloat(self, name: str) -> float:
        """Get the value of a float statistic

        :param name: str
        :return: float
        """
        return self.steam.GetStatFloat(name)

    def GetStatInt(self, name: str) -> int:
        """Get the value of an integer statistic

        :param name: str
        :return: int
        """
        # Fixed: this was annotated/documented as returning float; the native
        # call returns the integer statistic value.
        return self.steam.GetStatInt(name)

    def ResetAllStats(self, achievements: bool) -> bool:
        """Reset all Steam statistics; optional to reset achievements

        :param achievements: bool
        :return: bool
        """
        return self.steam.ResetAllStats(achievements)

    def RequestCurrentStats(self) -> bool:
        """Request all statistics and achievements from Steam servers

        :return: bool
        """
        return self.steam.RequestCurrentStats()

    def SetAchievement(self, name: str) -> bool:
        """Set a given achievement

        :param name: str
        :return: bool
        """
        return self.steam.SetAchievement(name)

    def SetStat(self, name: str, value: object) -> bool:
        """Set a statistic

        :param name: str
        :param value: float, int
        :return: bool
        """
        if isinstance(value, float):
            return self.steam.SetStatFloat(name, value)
        elif isinstance(value, int):
            return self.steam.SetStatInt(name, value)
        else:
            raise UnsupportedSteamStatValue("SetStat value can be only int or float")

    def StoreStats(self) -> bool:
        """Store all statistics, and achievements, on Steam servers; must be called to "pop" achievements

        :return: bool
        """
        return self.steam.StoreStats()

    def ClearAchievement(self, name: str) -> bool:
        """Clears a given achievement

        :param name: str
        :return: bool
        """
        return self.steam.ClearAchievement(name)

    def SetFindLeaderboardResultCallback(self, callback: object) -> bool:
        """Set callback for when leaderboard search result becomes available

        :param callback: callable
        :return: bool
        """
        self._LeaderboardFindResult = self._LeaderboardFindResult_t(callback)
        self.steam.Leaderboard_SetFindLeaderboardResultCallback(self._LeaderboardFindResult)
        return True

    def FindLeaderboard(self, name: str, callback: object = None, override_callback: bool = False) -> bool:
        """Find Leaderboard by name

        :param name: str
        :param callback: callable
        :param override_callback: bool
        :return: bool
        """
        # NOTE: the original if/else registered `callback` in both branches,
        # so `override_callback` had no effect; the unconditional
        # registration below preserves that behavior.
        if callback:
            self.SetFindLeaderboardResultCallback(callback)

        # Fixed: previously this called the undefined name `Steam.cdll`
        # (NameError at runtime); every other native call in this class goes
        # through `self.steam`.
        self.steam.Leaderboard_FindLeaderboard(name.encode())
        return True
GiovanniConserva/TestDeploy | refs/heads/master | venv/Lib/encodings/gb18030.py | 816 | #
# gb18030.py: Python Unicode Codec for GB18030
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import codecs
import _codecs_cn
import _multibytecodec as mbc

# C-implemented stateful codec object backing every class below.
codec = _codecs_cn.getcodec('gb18030')


class Codec(codecs.Codec):
    """Stateless GB18030 encoder/decoder backed by the C codec."""
    encode = codec.encode
    decode = codec.decode


class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec


class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec


class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec


class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec


def getregentry():
    """Return the CodecInfo record used by the codec registry."""
    stateless = Codec()
    return codecs.CodecInfo(
        name='gb18030',
        encode=stateless.encode,
        decode=stateless.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
|
sassoftware/anaconda | refs/heads/master | pyanaconda/ui/gui/hubs/__init__.py | 2 | # Base classes for Hubs.
#
# Copyright (C) 2011-2012 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Chris Lumens <clumens@redhat.com>
#
import os
from gi.repository import GLib
from pyanaconda.flags import flags
from pyanaconda.i18n import _, C_
from pyanaconda.product import distributionText
from pyanaconda.ui import common
from pyanaconda.ui.gui import GUIObject
from pyanaconda.ui.gui.categories import collect_categories
from pyanaconda.ui.gui.spokes import StandaloneSpoke, collect_spokes
from pyanaconda.ui.gui.utils import gtk_call_once, escape_markup
from pyanaconda.constants import ANACONDA_ENVIRON
import logging
log = logging.getLogger("anaconda")
class Hub(GUIObject, common.Hub):
    """A Hub is an overview UI screen.  A Hub consists of one or more grids of
       configuration options that the user may choose from.  Each grid is
       provided by a SpokeCategory, and each option is provided by a Spoke.
       When the user dives down into a Spoke and is finished interacting with
       it, they are returned to the Hub.

       Some Spokes are required.  The user must interact with all required
       Spokes before they are allowed to proceed to the next stage of
       installation.

       From a layout perspective, a Hub is the entirety of the screen, though
       the screen itself can be roughly divided into thirds.  The top third is
       some basic navigation information (where you are, what you're
       installing).  The middle third is the grid of Spokes.  The bottom third
       is an action area providing additional buttons (quit, continue) or
       progress information (during package installation).

       Installation may consist of multiple chained Hubs, or Hubs with
       additional standalone screens either before or after them.
    """
    def __init__(self, data, storage, payload, instclass):
        """Create a new Hub instance.

           The arguments this base class accepts defines the API that Hubs
           have to work with.  A Hub does not get free reign over everything
           in the anaconda class, as that would be a big mess.  Instead, a
           Hub may count on the following:

           ksdata       -- An instance of a pykickstart Handler object.  The
                           Hub uses this to populate its UI with defaults
                           and to pass results back after it has run.
           storage      -- An instance of storage.Storage.  This is useful for
                           determining what storage devices are present and how
                           they are configured.
           payload      -- An instance of a packaging.Payload subclass.  This
                           is useful for displaying and selecting packages to
                           install, and in carrying out the actual installation.
           instclass    -- An instance of a BaseInstallClass subclass.  This
                           is useful for determining distribution-specific
                           installation information like default package
                           selections and default partitioning.
        """
        GUIObject.__init__(self, data)
        common.Hub.__init__(self, data, storage, payload, instclass)

        # enable the autoContinue feature if we are in kickstart
        # mode, but if the user interacts with the hub, it will be
        # disabled again
        self._autoContinue = flags.automatedInstall

        # Spokes currently blocking the Continue button.
        self._incompleteSpokes = []
        # True while a spoke's recursive main loop is running.
        self._inSpoke = False
        # Spokes whose backends are still initializing.
        self._notReadySpokes = []
        # Maps spoke class names to their instantiated spoke objects.
        self._spokes = {}

        # Optional sanity checker consulted before allowing Continue.
        self._checker = None

    def _runSpoke(self, action):
        """Display a spoke, run its recursive main loop, and apply/execute its
           changes once the user leaves it.
        """
        from gi.repository import Gtk

        # This duplicates code in widgets/src/BaseWindow.c, but we want to make sure
        # maximize gets called every time a spoke is displayed to prevent the 25%
        # UI from showing up.
        action.window.maximize()
        action.window.set_property("expand", True)

        action.entry_logger()

        action.refresh()

        action.window.set_transient_for(self.window)
        action.window.show_all()

        # Start a recursive main loop for this spoke, which will prevent
        # signals from going to the underlying (but still displayed) Hub and
        # prevent the user from switching away.  It's up to the spoke's back
        # button handler to kill its own layer of main loop.
        Gtk.main()
        action.window.set_transient_for(None)

        action._visitedSinceApplied = True

        # Don't take _visitedSinceApplied into account here.  It will always be
        # True from the line above.
        if action.changed and (not action.skipTo or (action.skipTo and action.applyOnSkip)):
            action.apply()
            action.execute()
            action._visitedSinceApplied = False

        action.exit_logger()

    def _collectCategoriesAndSpokes(self):
        """collects categories and spokes to be displayed on this Hub

           :return: dictionary mapping category class to list of spoke classes
           :rtype: dictionary[category class] -> [ list of spoke classes ]
        """
        ret = {}

        # Collect all the categories this hub displays, then collect all the
        # spokes belonging to all those categories.
        categories = sorted(filter(lambda c: c.displayOnHub == self.__class__, collect_categories(self.paths["categories"])),
                            key=lambda c: c.sortOrder)
        for c in categories:
            ret[c] = collect_spokes(self.paths["spokes"], c.__name__)

        return ret

    def _createBox(self):
        """Build the hub's grid of category labels and spoke selectors and
           install it into the window's spoke area.
        """
        from gi.repository import Gtk, AnacondaWidgets
        from pyanaconda.ui.gui.utils import setViewportBackground

        cats_and_spokes = self._collectCategoriesAndSpokes()
        categories = cats_and_spokes.keys()

        grid = Gtk.Grid()
        grid.set_row_spacing(6)
        grid.set_column_spacing(6)
        grid.set_column_homogeneous(True)
        grid.set_margin_bottom(12)

        row = 0

        for c in sorted(categories, key=lambda c: c.title):
            obj = c()

            selectors = []
            for spokeClass in sorted(cats_and_spokes[c], key=lambda s: s.title):
                # Check if this spoke is to be shown in the supported environments
                if not any(spokeClass.should_run(environ, self.data) for environ in self._environs):
                    continue

                # Create the new spoke and populate its UI with whatever data.
                # From here on, this Spoke will always exist.
                spoke = spokeClass(self.data, self.storage, self.payload, self.instclass)
                spoke.window.set_beta(self.window.get_beta())
                spoke.window.set_property("distribution", distributionText().upper())

                # If a spoke is not showable, it is unreachable in the UI.  We
                # might as well get rid of it.
                #
                # NOTE:  Any kind of spoke can be unshowable.
                if not spoke.showable:
                    del(spoke)
                    continue

                # This allows being able to jump between two spokes without
                # having to directly involve the hub.
                self._spokes[spokeClass.__name__] = spoke

                # If a spoke is indirect, it is reachable but not directly from
                # a hub.  This is for things like the custom partitioning spoke,
                # which you can only get to after going through the initial
                # storage configuration spoke.
                #
                # NOTE:  This only makes sense for NormalSpokes.  Other kinds
                # of spokes do not involve a hub.
                if spoke.indirect:
                    spoke.initialize()
                    continue

                spoke.selector = AnacondaWidgets.SpokeSelector(C_("GUI|Spoke", spoke.title),
                                                               spoke.icon)

                # Set all selectors to insensitive before initialize runs.  The call to
                # _updateCompleteness later will take care of setting it straight.
                spoke.selector.set_sensitive(False)
                spoke.initialize()

                if not spoke.ready:
                    self._notReadySpokes.append(spoke)

                # Set some default values on the associated selector that
                # affect its display on the hub.
                self._updateCompleteness(spoke)
                spoke.selector.connect("button-press-event", self._on_spoke_clicked, spoke)
                spoke.selector.connect("key-release-event", self._on_spoke_clicked, spoke)

                # If this is a kickstart install, attempt to execute any provided ksdata now.
                if flags.automatedInstall and spoke.ready and spoke.changed and \
                   spoke._visitedSinceApplied:
                    spoke.execute()
                    spoke._visitedSinceApplied = False

                selectors.append(spoke.selector)

            if not selectors:
                continue

            label = Gtk.Label("<span font-desc=\"Sans 14\">%s</span>" % escape_markup(_(obj.title)))
            label.set_use_markup(True)
            label.set_halign(Gtk.Align.START)
            label.set_margin_top(12)
            label.set_margin_bottom(12)
            grid.attach(label, 0, row, 2, 1)
            row += 1

            col = 0
            for selector in selectors:
                selector.set_margin_left(12)
                grid.attach(selector, col, row, 1, 1)
                col = int(not col)
                if col == 0:
                    row += 1

            # If this category contains an odd number of selectors, the above
            # row += 1 will not have run for the last row, which puts the next
            # category's title in the wrong place.
            if len(selectors) % 2:
                row += 1

        spokeArea = self.window.get_spoke_area()
        viewport = Gtk.Viewport()
        viewport.add(grid)
        spokeArea.add(viewport)

        setViewportBackground(viewport)

    def _updateCompleteness(self, spoke, update_continue=True):
        """Refresh a spoke's selector (sensitivity, status text, incomplete
           marker) and recompute the Continue state.
        """
        spoke.selector.set_sensitive(spoke.ready)
        spoke.selector.set_property("status", spoke.status)
        spoke.selector.set_tooltip_markup(escape_markup(spoke.status))
        spoke.selector.set_incomplete(not spoke.completed and spoke.mandatory)
        self._handleCompleteness(spoke, update_continue)

    def _handleCompleteness(self, spoke, update_continue=True):
        """Track the spoke on the incomplete list and optionally refresh the
           Continue button and warning bar.
        """
        # Add the spoke to the incomplete list if it's now incomplete, and make
        # sure it's not on the list if it's now complete.  Then show the box if
        # it's needed and hide it if it's not.
        if not spoke.mandatory or spoke.completed:
            if spoke in self._incompleteSpokes:
                self._incompleteSpokes.remove(spoke)
        else:
            if spoke not in self._incompleteSpokes:
                self._incompleteSpokes.append(spoke)

        if update_continue:
            self._updateContinue()

    def _updateContinue(self):
        """Update the hub's warning banner and the Continue button state."""
        self.clear_info()
        if len(self._incompleteSpokes) == 0:
            if self._checker and not self._checker.check():
                self.set_warning(self._checker.error_message)
                self.window.show_all()
        else:
            msg = _("Please complete items marked with this icon before continuing to the next step.")

            self.set_warning(msg)
            self.window.show_all()

        self._updateContinueButton()

    @property
    def continuePossible(self):
        """True when no spoke blocks progress and the sanity checker (if any)
           reports success.
        """
        return len(self._incompleteSpokes) == 0 and len(self._notReadySpokes) == 0 and getattr(self._checker, "success", True)

    def _updateContinueButton(self):
        """Enable or disable the Continue button, if this hub has one."""
        if not self.continueButton:
            return

        self.continueButton.set_sensitive(self.continuePossible)

    def _update_spokes(self):
        """Periodic GLib callback: drain the hub message queue, react to spoke
           readiness/status changes, and possibly auto-continue on kickstart
           installs.  Returns True so GLib keeps the timeout installed.
        """
        from pyanaconda.ui.communication import hubQ
        import Queue

        q = hubQ.q

        if not self._spokes and self.continueButton:
            # no spokes, move on
            log.info("no spokes available on %s, continuing automatically", self)
            gtk_call_once(self.continueButton.emit, "clicked")

        click_continue = False

        # Grab all messages that may have appeared since last time this method ran.
        while True:
            try:
                (code, args) = q.get(False)
            except Queue.Empty:
                break

            # The first argument to all codes is the name of the spoke we are
            # acting on.  If no such spoke exists, throw the message away.
            spoke = self._spokes.get(args[0], None)
            if not spoke:
                q.task_done()
                continue

            if code == hubQ.HUB_CODE_NOT_READY:
                self._updateCompleteness(spoke)

                if spoke not in self._notReadySpokes:
                    self._notReadySpokes.append(spoke)

                self._updateContinueButton()
                log.info("spoke is not ready: %s", spoke)
            elif code == hubQ.HUB_CODE_READY:
                self._updateCompleteness(spoke)

                if spoke in self._notReadySpokes:
                    self._notReadySpokes.remove(spoke)

                self._updateContinueButton()
                log.info("spoke is ready: %s", spoke)

                # If this is a real kickstart install (the kind with an input ks file)
                # and all spokes are now completed, we should skip ahead to the next
                # hub automatically.  Take into account the possibility the user is
                # viewing a spoke right now, though.
                if flags.automatedInstall:
                    # Spokes that were not initially ready got the execute call in
                    # _createBox skipped.  Now that it's become ready, do it.  Note
                    # that we also provide a way to skip this processing (see comments
                    # communication.py) to prevent getting caught in a loop.
                    if not args[1] and spoke.changed and spoke._visitedSinceApplied:
                        spoke.execute()
                        spoke._visitedSinceApplied = False

                    if self.continuePossible:
                        if self._inSpoke:
                            self._autoContinue = False
                        elif self._autoContinue:
                            click_continue = True
            elif code == hubQ.HUB_CODE_MESSAGE:
                spoke.selector.set_property("status", args[1])
                log.info("setting %s status to: %s", spoke, args[1])

            q.task_done()

        # queue is now empty, should continue be clicked?
        if self._autoContinue and click_continue and self.continueButton:
            # enqueue the emit to the Gtk message queue
            log.info("_autoContinue clicking continue button")
            gtk_call_once(self.continueButton.emit, "clicked")

        return True

    def refresh(self):
        """Build the hub UI and start polling the spoke message queue."""
        GUIObject.refresh(self)
        self._createBox()

        GLib.timeout_add(100, self._update_spokes)

    @property
    def continueButton(self):
        """The Continue button widget; None when this hub has none."""
        return None

    @property
    def quitButton(self):
        """The Quit button widget; None when this hub has none."""
        return None

    ### SIGNAL HANDLERS

    def register_event_cb(self, event, cb):
        """Connect a callback to this hub's "continue" or "quit" button."""
        if event == "continue" and self.continueButton:
            self.continueButton.connect("clicked", lambda *args: cb())
        elif event == "quit" and self.quitButton:
            self.quitButton.connect("clicked", lambda *args: cb())

    def _on_spoke_clicked(self, selector, event, spoke):
        """Handle mouse/keyboard activation of a spoke selector: enter the
           spoke, then refresh hub state (and chain to spoke.skipTo if set).
        """
        from gi.repository import Gdk

        # This handler only runs for these two kinds of events, and only for
        # activate-type keys (space, enter) in the latter event's case.
        if event and not event.type in [Gdk.EventType.BUTTON_PRESS, Gdk.EventType.KEY_RELEASE]:
            return

        if event and event.type == Gdk.EventType.KEY_RELEASE and \
           event.keyval not in [Gdk.KEY_space, Gdk.KEY_Return, Gdk.KEY_ISO_Enter, Gdk.KEY_KP_Enter, Gdk.KEY_KP_Space]:
            return

        if selector:
            selector.grab_focus()

        # On automated kickstart installs, our desired behavior is to display
        # the hub while background processes work, then skip to the progress
        # hub immediately after everything's done.
        # However if the user proves his intent to change the kickstarted
        # values by entering any of the spokes, we need to disable the
        # autoContinue feature and wait for the user to explicitly state
        # that he is done configuring by pressing the continue button.
        self._autoContinue = False

        self._inSpoke = True
        self._runSpoke(spoke)
        self._inSpoke = False

        # Now update the selector with the current status and completeness.
        for sp in self._spokes.itervalues():
            if not sp.indirect:
                self._updateCompleteness(sp, update_continue=False)

        self._updateContinue()

        # And then if that spoke wants us to jump straight to another one,
        # handle that now.
        if spoke.skipTo and spoke.skipTo in self._spokes:
            dest = spoke.skipTo

            # Clear out the skipTo setting so we don't cycle endlessly.
            spoke.skipTo = None

            self._on_spoke_clicked(self._spokes[dest].selector, None, self._spokes[dest])
|
iulian787/spack | refs/heads/develop | var/spack/repos/builtin/packages/perl-devel-cycle/package.py | 5 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlDevelCycle(PerlPackage):
    """Find memory cycles in objects"""

    # CPAN distribution page and release tarball for Devel::Cycle.
    homepage = "http://search.cpan.org/~lds/Devel-Cycle-1.12/lib/Devel/Cycle.pm"
    url = "http://search.cpan.org/CPAN/authors/id/L/LD/LDS/Devel-Cycle-1.12.tar.gz"

    version('1.12', sha256='fd3365c4d898b2b2bddbb78a46d507a18cca8490a290199547dab7f1e7390bc2')
|
tjanez/ansible | refs/heads/devel | lib/ansible/modules/monitoring/zabbix_host.py | 25 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013-2014, Epic Games, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Metadata consumed by Ansible tooling (not user-facing documentation).
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: zabbix_host
short_description: Zabbix host creates/updates/deletes
description:
- This module allows you to create, modify and delete Zabbix host entries and associated group and template data.
version_added: "2.0"
author:
- "(@cove)"
- "Tony Minfei Ding"
- "Harrison Gu (@harrisongu)"
requirements:
- "python >= 2.6"
- zabbix-api
options:
server_url:
description:
- Url of Zabbix server, with protocol (http or https).
required: true
aliases: [ "url" ]
login_user:
description:
- Zabbix user name, used to authenticate against the server.
required: true
login_password:
description:
- Zabbix user password.
required: true
http_login_user:
description:
- Basic Auth login
required: false
default: None
version_added: "2.1"
http_login_password:
description:
- Basic Auth password
required: false
default: None
version_added: "2.1"
host_name:
description:
- Name of the host in Zabbix.
- host_name is the unique identifier used and cannot be updated using this module.
required: true
visible_name:
description:
- Visible name of the host in Zabbix.
required: false
version_added: '2.3'
host_groups:
description:
- List of host groups the host is part of.
required: false
link_templates:
description:
- List of templates linked to the host.
required: false
default: None
inventory_mode:
description:
- Configure the inventory mode.
choices: ['automatic', 'manual', 'disabled']
required: false
default: None
version_added: '2.1'
status:
description:
- Monitoring status of the host.
required: false
choices: ['enabled', 'disabled']
default: "enabled"
state:
description:
- State of the host.
- On C(present), it will create if host does not exist or update the host if the associated data is different.
- On C(absent) will remove a host if it exists.
required: false
choices: ['present', 'absent']
default: "present"
timeout:
description:
- The timeout of API request (seconds).
default: 10
proxy:
description:
- The name of the Zabbix Proxy to be used
default: None
interfaces:
description:
- List of interfaces to be created for the host (see example below).
- 'Available values are: dns, ip, main, port, type and useip.'
- Please review the interface documentation for more information on the supported properties
- 'https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface'
required: false
default: []
force:
description:
- Overwrite the host configuration, even if already present
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "2.0"
'''
EXAMPLES = '''
- name: Create a new host or update an existing host's info
local_action:
module: zabbix_host
server_url: http://monitor.example.com
login_user: username
login_password: password
host_name: ExampleHost
visible_name: ExampleName
host_groups:
- Example group1
- Example group2
link_templates:
- Example template1
- Example template2
status: enabled
state: present
inventory_mode: automatic
interfaces:
- type: 1
main: 1
useip: 1
ip: 10.xx.xx.xx
dns: ""
port: 10050
- type: 4
main: 1
useip: 1
ip: 10.xx.xx.xx
dns: ""
port: 12345
proxy: a.zabbix.proxy
'''
import logging
import copy
try:
    from zabbix_api import ZabbixAPI, ZabbixAPISubClass

    # Extend the ZabbixAPI
    # Since the zabbix-api python module too old (version 1.0, no higher version so far),
    # it does not support the 'hostinterface' api calls,
    # so we have to inherit the ZabbixAPI class to add 'hostinterface' support.
    class ZabbixAPIExtends(ZabbixAPI):
        # Sub-client for the 'hostinterface.*' API family; assigned in __init__.
        hostinterface = None

        def __init__(self, server, timeout, user, passwd, **kwargs):
            ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd)
            self.hostinterface = ZabbixAPISubClass(self, dict({"prefix": "hostinterface"}, **kwargs))

    HAS_ZABBIX_API = True
except ImportError:
    # Checked in main() so we can fail with an installation hint instead of
    # a raw ImportError traceback.
    HAS_ZABBIX_API = False
class Host(object):
    """Helper wrapping the Zabbix API calls needed for host management.

    Methods report unrecoverable problems through AnsibleModule.fail_json(),
    which does not return; mutating methods honor Ansible check mode by
    exiting early with changed=True.
    """

    def __init__(self, module, zbx):
        # module: the AnsibleModule driving this run (check mode, failures).
        # zbx: an authenticated ZabbixAPI client (ZabbixAPIExtends).
        self._module = module
        self._zapi = zbx

    # exist host
    def is_host_exist(self, host_name):
        """Return the (possibly empty) list of hosts matching host_name."""
        result = self._zapi.host.get({'filter': {'host': host_name}})
        return result

    # check if host group exists
    def check_host_group_exist(self, group_names):
        """Fail the module unless every named group exists; return True otherwise."""
        for group_name in group_names:
            result = self._zapi.hostgroup.get({'filter': {'name': group_name}})
            if not result:
                self._module.fail_json(msg="Hostgroup not found: %s" % group_name)
        return True

    def get_template_ids(self, template_list):
        """Resolve a list of template names to template ids, failing on any
        unknown name.  None or an empty list yields an empty result."""
        template_ids = []
        if template_list is None or len(template_list) == 0:
            return template_ids
        for template in template_list:
            # NOTE(review): rebinding the iterable's name here is confusing but
            # harmless -- the for statement captured its iterator already.
            template_list = self._zapi.template.get({'output': 'extend', 'filter': {'host': template}})
            if len(template_list) < 1:
                self._module.fail_json(msg="Template not found: %s" % template)
            else:
                template_id = template_list[0]['templateid']
                template_ids.append(template_id)
        return template_ids

    def add_host(self, host_name, group_ids, status, interfaces, proxy_id, visible_name):
        """Create the host and return its new host id (None if the API
        returned an empty result)."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            parameters = {'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status}
            # proxy_hostid / name are optional; only send them when set.
            if proxy_id:
                parameters['proxy_hostid'] = proxy_id
            if visible_name:
                parameters['name'] = visible_name
            host_list = self._zapi.host.create(parameters)
            if len(host_list) >= 1:
                return host_list['hostids'][0]
        except Exception as e:
            self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e))

    def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list, proxy_id, visible_name):
        """Update the host's groups/status/proxy/visible name and reconcile
        its interfaces: update same-type ones, create missing ones, delete
        whatever is left over."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            parameters = {'hostid': host_id, 'groups': group_ids, 'status': status}
            if proxy_id:
                parameters['proxy_hostid'] = proxy_id
            if visible_name:
                parameters['name'] = visible_name
            self._zapi.host.update(parameters)
            # NOTE(review): despite its name this is an alias, not a copy --
            # removals below also mutate the caller's exist_interface_list.
            interface_list_copy = exist_interface_list
            if interfaces:
                for interface in interfaces:
                    flag = False
                    interface_str = interface
                    for exist_interface in exist_interface_list:
                        interface_type = interface['type']
                        exist_interface_type = int(exist_interface['type'])
                        if interface_type == exist_interface_type:
                            # update the existing interface of the same type
                            interface_str['interfaceid'] = exist_interface['interfaceid']
                            self._zapi.hostinterface.update(interface_str)
                            flag = True
                            interface_list_copy.remove(exist_interface)
                            break
                    if not flag:
                        # no interface of this type yet: add it
                        interface_str['hostid'] = host_id
                        self._zapi.hostinterface.create(interface_str)
            # remove every existing interface nothing above matched
            remove_interface_ids = []
            for remove_interface in interface_list_copy:
                interface_id = remove_interface['interfaceid']
                remove_interface_ids.append(interface_id)
            if len(remove_interface_ids) > 0:
                self._zapi.hostinterface.delete(remove_interface_ids)
        except Exception as e:
            self._module.fail_json(msg="Failed to update host %s: %s" % (host_name, e))

    def delete_host(self, host_id, host_name):
        """Delete the host (host_name is only used for error reporting)."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.host.delete([host_id])
        except Exception as e:
            self._module.fail_json(msg="Failed to delete host %s: %s" % (host_name, e))

    # get host by host name
    def get_host_by_host_name(self, host_name):
        """Return the full host object for host_name, failing when absent."""
        host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': [host_name]}})
        if len(host_list) < 1:
            self._module.fail_json(msg="Host not found: %s" % host_name)
        else:
            return host_list[0]

    # get proxyid by proxy name
    def get_proxyid_by_proxy_name(self, proxy_name):
        """Return the proxy id for proxy_name, failing when absent."""
        proxy_list = self._zapi.proxy.get({'output': 'extend', 'filter': {'host': [proxy_name]}})
        if len(proxy_list) < 1:
            self._module.fail_json(msg="Proxy not found: %s" % proxy_name)
        else:
            return proxy_list[0]['proxyid']

    # get group ids by group names
    def get_group_ids_by_group_names(self, group_names):
        """Return [{'groupid': ...}] entries for the named groups -- the
        shape host.create/host.update expect."""
        group_ids = []
        if self.check_host_group_exist(group_names):
            group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_names}})
            for group in group_list:
                group_id = group['groupid']
                group_ids.append({'groupid': group_id})
        return group_ids

    # get host templates by host id
    def get_host_templates_by_host_id(self, host_id):
        """Return the ids of the templates currently linked to the host."""
        template_ids = []
        template_list = self._zapi.template.get({'output': 'extend', 'hostids': host_id})
        for template in template_list:
            template_ids.append(template['templateid'])
        return template_ids

    # get host groups by host id
    def get_host_groups_by_host_id(self, host_id):
        """Return the names of the groups the host currently belongs to."""
        exist_host_groups = []
        host_groups_list = self._zapi.hostgroup.get({'output': 'extend', 'hostids': host_id})
        if len(host_groups_list) >= 1:
            for host_groups_name in host_groups_list:
                exist_host_groups.append(host_groups_name['name'])
        return exist_host_groups

    # check the exist_interfaces whether it equals the interfaces or not
    def check_interface_properties(self, exist_interface_list, interfaces):
        """Return True when the desired interfaces differ from the existing
        ones: first compare the sets of ports, then, for matching ports,
        compare every requested property as a string."""
        interfaces_port_list = []
        if interfaces is not None:
            if len(interfaces) >= 1:
                for interface in interfaces:
                    interfaces_port_list.append(int(interface['port']))
        exist_interface_ports = []
        if len(exist_interface_list) >= 1:
            for exist_interface in exist_interface_list:
                exist_interface_ports.append(int(exist_interface['port']))
        if set(interfaces_port_list) != set(exist_interface_ports):
            return True
        for exist_interface in exist_interface_list:
            exit_interface_port = int(exist_interface['port'])
            for interface in interfaces:
                interface_port = int(interface['port'])
                if interface_port == exit_interface_port:
                    # same port: every property named in the request must match
                    for key in interface.keys():
                        if str(exist_interface[key]) != str(interface[key]):
                            return True
        return False

    # get the status of host by host
    def get_host_status_by_host(self, host):
        """Return the host's raw status field (compared numerically by callers)."""
        return host['status']

    # check all the properties before link or clear template
    def check_all_properties(self, host_id, host_groups, status, interfaces, template_ids,
                             exist_interfaces, host, proxy_id, visible_name):
        """Return True when any tracked property (groups, status, interfaces,
        templates, proxy, visible name) differs from the host's current state."""
        # get the existing host's groups
        exist_host_groups = self.get_host_groups_by_host_id(host_id)
        if set(host_groups) != set(exist_host_groups):
            return True
        # get the existing status
        exist_status = self.get_host_status_by_host(host)
        if int(status) != int(exist_status):
            return True
        # check the exist_interfaces whether it equals the interfaces or not
        if self.check_interface_properties(exist_interfaces, interfaces):
            return True
        # get the existing templates
        exist_template_ids = self.get_host_templates_by_host_id(host_id)
        if set(list(template_ids)) != set(exist_template_ids):
            return True
        if host['proxy_hostid'] != proxy_id:
            return True
        if host['name'] != visible_name:
            return True
        return False

    # link or clear template of the host
    def link_or_clear_template(self, host_id, template_id_list):
        """Replace the host's template links with template_id_list and
        unlink-and-clear any templates no longer wanted."""
        # get host's exist template ids
        exist_template_id_list = self.get_host_templates_by_host_id(host_id)

        exist_template_ids = set(exist_template_id_list)
        template_ids = set(template_id_list)
        template_id_list = list(template_ids)

        # get unlink and clear templates
        templates_clear = exist_template_ids.difference(template_ids)
        templates_clear_list = list(templates_clear)
        request_str = {'hostid': host_id, 'templates': template_id_list, 'templates_clear': templates_clear_list}
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.host.update(request_str)
        except Exception as e:
            self._module.fail_json(msg="Failed to link template to host: %s" % e)

    # Update the host inventory_mode
    def update_inventory_mode(self, host_id, inventory_mode):
        """Set the host's inventory mode; a false/None value means leave it
        untouched."""
        # nothing was set, do nothing
        if not inventory_mode:
            return

        # map the module's choice strings onto the API's integer codes
        if inventory_mode == "automatic":
            inventory_mode = int(1)
        elif inventory_mode == "manual":
            inventory_mode = int(0)
        elif inventory_mode == "disabled":
            inventory_mode = int(-1)

        # watch for - https://support.zabbix.com/browse/ZBX-6033
        request_str = {'hostid': host_id, 'inventory_mode': inventory_mode}
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.host.update(request_str)
        except Exception as e:
            self._module.fail_json(msg="Failed to set inventory_mode to host: %s" % e)
def main():
    """Module entry point: create, update or delete the Zabbix host
    described by the module parameters."""
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(type='str', required=True, aliases=['url']),
            login_user=dict(type='str', required=True),
            login_password=dict(type='str', required=True, no_log=True),
            host_name=dict(type='str', required=True),
            http_login_user=dict(type='str', required=False, default=None),
            http_login_password=dict(type='str', required=False, default=None, no_log=True),
            host_groups=dict(type='list', required=False),
            link_templates=dict(type='list', required=False),
            status=dict(default="enabled", choices=['enabled', 'disabled']),
            state=dict(default="present", choices=['present', 'absent']),
            inventory_mode=dict(required=False, choices=['automatic', 'manual', 'disabled']),
            timeout=dict(type='int', default=10),
            interfaces=dict(type='list', required=False),
            force=dict(type='bool', default=True),
            proxy=dict(type='str', required=False),
            visible_name=dict(type='str', required=False)
        ),
        supports_check_mode=True
    )

    if not HAS_ZABBIX_API:
        # BUG FIX: corrected the misspelled "requried" in the error text.
        module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")

    server_url = module.params['server_url']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    http_login_user = module.params['http_login_user']
    http_login_password = module.params['http_login_password']
    host_name = module.params['host_name']
    visible_name = module.params['visible_name']
    host_groups = module.params['host_groups']
    link_templates = module.params['link_templates']
    inventory_mode = module.params['inventory_mode']
    status = module.params['status']
    state = module.params['state']
    timeout = module.params['timeout']
    interfaces = module.params['interfaces']
    force = module.params['force']
    proxy = module.params['proxy']

    # convert enabled to 0; disabled to 1 (the API's status codes)
    status = 1 if status == "disabled" else 0

    zbx = None
    # login to zabbix; the optional HTTP basic-auth credentials go to the
    # transport, the Zabbix credentials to the separate login() call.
    try:
        zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
        zbx.login(login_user, login_password)
    except Exception as e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)

    host = Host(module, zbx)

    # resolve template and group names to API ids up front
    template_ids = []
    if link_templates:
        template_ids = host.get_template_ids(link_templates)

    group_ids = []
    if host_groups:
        group_ids = host.get_group_ids_by_group_names(host_groups)

    # remember the agent interface's IP purely for the result message
    ip = ""
    if interfaces:
        for interface in interfaces:
            if interface['type'] == 1:
                ip = interface['ip']

    # check if host exist
    is_host_exist = host.is_host_exist(host_name)

    if is_host_exist:
        # Use proxy specified, or set to None when updating host
        if proxy:
            proxy_id = host.get_proxyid_by_proxy_name(proxy)
        else:
            proxy_id = None

        # get host id by host name
        zabbix_host_obj = host.get_host_by_host_name(host_name)
        host_id = zabbix_host_obj['hostid']

        if state == "absent":
            # remove host
            host.delete_host(host_id, host_name)
            module.exit_json(changed=True, result="Successfully delete host %s" % host_name)
        else:
            if not group_ids:
                module.fail_json(msg="Specify at least one group for updating host '%s'." % host_name)

            if not force:
                # BUG FIX: fail_json() requires a 'msg' argument; the previous
                # call passed only changed/result and would itself raise.
                module.fail_json(msg="Host present, Can't update configuration without force")

            # get exist host's interfaces
            exist_interfaces = host._zapi.hostinterface.get({'output': 'extend', 'hostids': host_id})
            exist_interfaces_copy = copy.deepcopy(exist_interfaces)

            # update host
            interfaces_len = len(interfaces) if interfaces else 0

            if len(exist_interfaces) > interfaces_len:
                # more interfaces exist than requested: compare against the
                # live list (update_host will prune the extras)
                if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
                                             exist_interfaces, zabbix_host_obj, proxy_id, visible_name):
                    host.link_or_clear_template(host_id, template_ids)
                    host.update_host(host_name, group_ids, status, host_id,
                                     interfaces, exist_interfaces, proxy_id, visible_name)
                    module.exit_json(changed=True,
                                     result="Successfully update host %s (%s) and linked with template '%s'"
                                            % (host_name, ip, link_templates))
                else:
                    module.exit_json(changed=False)
            else:
                # compare against a deep copy because update_host mutates the
                # interface list it is given
                if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
                                             exist_interfaces_copy, zabbix_host_obj, proxy_id, visible_name):
                    host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces, proxy_id, visible_name)
                    host.link_or_clear_template(host_id, template_ids)
                    host.update_inventory_mode(host_id, inventory_mode)
                    module.exit_json(changed=True,
                                     result="Successfully update host %s (%s) and linked with template '%s'"
                                            % (host_name, ip, link_templates))
                else:
                    module.exit_json(changed=False)
    else:
        if state == "absent":
            # the host is already deleted.
            module.exit_json(changed=False)

        # Use proxy specified, or set to 0 when adding new host
        if proxy:
            proxy_id = host.get_proxyid_by_proxy_name(proxy)
        else:
            proxy_id = 0

        if not group_ids:
            module.fail_json(msg="Specify at least one group for creating host '%s'." % host_name)

        # simplified from the redundant `not interfaces or (interfaces and
        # len(interfaces) == 0)` -- both reduce to an emptiness test
        if not interfaces:
            module.fail_json(msg="Specify at least one interface for creating host '%s'." % host_name)

        # create host
        host_id = host.add_host(host_name, group_ids, status, interfaces, proxy_id, visible_name)
        host.link_or_clear_template(host_id, template_ids)
        host.update_inventory_mode(host_id, inventory_mode)
        module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with template '%s'" % (
            host_name, ip, link_templates))
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
maistrovas/Internet-Store | refs/heads/master | docs/__init__.py | 887 | # Included so that Django's startproject comment runs against the docs directory
|
atizo/pygobject | refs/heads/pygobject-2-15-1-atizo | tests/test_thread.py | 1 | import os
import unittest
from common import gobject, testhelper
main = gobject.MainLoop()
class TestThread(unittest.TestCase):
    """Exercise signal emission from a helper thread (testhelper extension)."""

    def from_thread_cb(self, test, enum):
        # Fired by the 'from-thread' signal; check the payload round-trips.
        assert test == self.obj
        assert int(enum) == 0
        # The value must arrive as a boxed enum, not be degraded to plain int.
        assert type(enum) != int

    def idle_cb(self):
        self.obj = testhelper.get_test_thread()
        self.obj.connect('from-thread', self.from_thread_cb)
        self.obj.emit('emit-signal')

    def testExtensionModule(self):
        # Schedule the body on the main loop, arm a timeout as a safety net
        # so the loop always terminates, then spin it.
        gobject.idle_add(self.idle_cb)
        gobject.timeout_add(50, self.timeout_cb)
        main.run()

    def timeout_cb(self):
        main.quit()
|
saketkc/bioconda-recipes | refs/heads/master | recipes/jasmine/jasmine.py | 22 | #!/usr/bin/env python
# Following example with added wrapper script: https://github.com/bioconda/bioconda-recipes/blob/master/recipes/peptide-shaker/peptide-shaker.py
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
#
# Program Parameters
#
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK
jar_file = 'Jasmine-1.1.jar'
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
    """Return the symlink-resolved, canonicalized directory-portion of path."""
    resolved = os.path.realpath(path)
    return os.path.dirname(resolved)
def java_executable():
    """Return the Java interpreter to invoke.

    Prefers $JAVA_HOME/bin/java when it exists and is executable;
    otherwise falls back to whatever 'java' resolves to on PATH.
    """
    java_home = getenv('JAVA_HOME')
    if java_home:
        candidate = os.path.join(java_home, os.path.join('bin', 'java'))
        if access(candidate, X_OK):
            return candidate
    return 'java'
def jvm_opts(argv):
    """Construct list of Java arguments based on our argument list.

    The argument list passed in argv must not include the script name.
    The return value is a 4-tuple of the form:

        (memory_options, prop_options, passthrough_options, exec_dir)

    exec_dir is None unless a --exec_dir=... argument was supplied; when
    it names a directory that does not yet exist, the install tree is
    copied there.
    """
    mem_opts, prop_opts, pass_args = [], [], []
    exec_dir = None

    for arg in argv:
        if arg.startswith('-Xm'):
            mem_opts.append(arg)
        elif arg.startswith('-D') or arg.startswith('-XX'):
            prop_opts.append(arg)
        elif arg.startswith('--exec_dir='):
            exec_dir = arg.split('=')[1].strip('"').strip("'")
            if not os.path.exists(exec_dir):
                # First run for this location: seed it from the install tree.
                shutil.copytree(real_dirname(sys.argv[0]), exec_dir, symlinks=False, ignore=None)
        else:
            pass_args.append(arg)

    # A *null* (empty-string) _JAVA_OPTIONS still counts as set, so compare
    # explicitly against None rather than testing truthiness.
    if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
        mem_opts = default_jvm_mem_opts

    return (mem_opts, prop_opts, pass_args, exec_dir)
def main():
    """Wrapper entry point: assemble the java command line and exec the jar."""
    java = java_executable()
    # NOTE(review): the string below is boilerplate inherited from the
    # peptide-shaker wrapper this script was adapted from.
    """
    PeptideShaker updates files relative to the path of the jar file.
    In a multiuser setting, the option --exec_dir="exec_dir"
    can be used as the location for the peptide-shaker distribution.
    If the exec_dir dies not exist,
    we copy the jar file, lib, and resources to the exec_dir directory.
    """
    (mem_opts, prop_opts, pass_args, exec_dir) = jvm_opts(sys.argv[1:])
    jar_dir = exec_dir if exec_dir else real_dirname(sys.argv[0])

    # A first pass-through argument starting with 'eu' is taken to be a fully
    # qualified class name, which selects classpath invocation over -jar.
    if pass_args != [] and pass_args[0].startswith('eu'):
        jar_arg = '-cp'
    else:
        jar_arg = '-jar'

    jar_path = os.path.join(jar_dir, jar_file)

    java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args

    # Propagate the JVM's exit status as our own.
    sys.exit(subprocess.call(java_args))
if __name__ == '__main__':
main() |
def main(request, response):
    """wptserve handler: echo the request's Accept-Language header, or "NO"
    when the header is absent."""
    accept_language = request.headers.get("Accept-Language", "NO")
    return [("Content-Type", "text/plain"), accept_language]
|
vhf/confusable_homoglyphs | refs/heads/master | confusable_homoglyphs/__init__.py | 2 | # -*- coding: utf-8 -*-
from __future__ import print_function
from ._version import get_versions
# Package metadata; the version string is computed by versioneer.
__author__ = 'Victor Felder'
__email__ = 'victorfelder@gmail.com'
__version__ = get_versions()['version']
del get_versions  # keep the helper out of the public module namespace
|
hrjn/scikit-learn | refs/heads/master | examples/svm/plot_svm_nonlinear.py | 62 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm

# 500x500 evaluation grid spanning the plotting window.
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
                     np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)

# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)

# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)

plt.imshow(Z, interpolation='nearest',
           extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
           origin='lower', cmap=plt.cm.PuOr_r)
# BUG FIX: plt.contour() has no 'linetypes' keyword; the dashed decision
# boundary must be requested with 'linestyles'.
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired,
            edgecolors='k')
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
|
fighterCui/L4ReFiascoOC | refs/heads/master | l4/pkg/python/contrib/Lib/encodings/punycode.py | 586 | # -*- coding: iso-8859-1 -*-
""" Codec for the Punicode encoding, as specified in RFC 3492
Written by Martin v. Löwis.
"""
import codecs
##################### Encoding #####################################
def segregate(str):
    """3.1 Basic code point segregation

    Split *str* into (basic, extended): the ASCII characters joined into a
    byte string, plus the sorted list of distinct non-ASCII characters.

    NOTE(review): Python 2 code -- dict.keys() must return a list for the
    in-place sort below to work.
    """
    base = []
    extended = {}
    for c in str:
        if ord(c) < 128:
            base.append(c)
        else:
            extended[c] = 1  # dict used as a set for de-duplication
    extended = extended.keys()
    extended.sort()
    return "".join(base).encode("ascii"), extended
def selective_len(str, max):
    """Return the length of str, considering only characters below max."""
    return sum(1 for ch in str if ord(ch) < max)
def selective_find(str, char, index, pos):
    """Return a pair (index, pos), indicating the next occurrence of
    char in str.  index is the position of the character considering
    only ordinals up to and including char, and pos is the position in
    the full string.  index/pos is the starting position in the full
    string; (-1, -1) is returned when char does not occur again.
    """
    for cur in range(pos + 1, len(str)):
        candidate = str[cur]
        if candidate == char:
            return index + 1, cur
        if candidate < char:
            # counts toward the selective index but is not a match
            index += 1
    return (-1, -1)
def insertion_unsort(str, extended):
    """3.2 Insertion unsort coding

    Encode the insertion positions of the extended characters as the list
    of deltas the decoder will replay (RFC 3492 section 3.2).
    """
    oldchar = 0x80
    result = []
    oldindex = -1
    for c in extended:
        index = pos = -1
        char = ord(c)
        curlen = selective_len(str, char)
        # Account for every state the decoder's counter passes through when
        # stepping from the previous character value up to this one.
        delta = (curlen+1) * (char - oldchar)
        while 1:
            index, pos = selective_find(str, c, index, pos)
            if index == -1:
                break
            delta += index - oldindex
            result.append(delta-1)
            oldindex = index
            delta = 0
        oldchar = char
    return result
def T(j, bias):
    """Clamp the RFC 3492 digit threshold t(j) into [tmin, tmax] = [1, 26]."""
    # Punycode parameters: tmin = 1, tmax = 26, base = 36
    threshold = 36 * (j + 1) - bias
    return max(1, min(26, threshold))

# Digit alphabet: 'a'-'z' encode 0-25, '0'-'9' encode 26-35.
digits = "abcdefghijklmnopqrstuvwxyz0123456789"

def generate_generalized_integer(N, bias):
    """3.3 Generalized variable-length integers

    Return N encoded under *bias* as a list of punycode digit characters.
    """
    encoded = []
    j = 0
    while True:
        t = T(j, bias)
        if N < t:
            encoded.append(digits[N])
            return encoded
        N -= t
        encoded.append(digits[t + (N % (36 - t))])
        N //= (36 - t)
        j += 1
def adapt(delta, first, numchars):
    """Bias adaptation per RFC 3492 section 3.4.

    (Punycode parameters: damp = 700, skew = 38, base = 36, tmin = 1,
    tmax = 26.)
    """
    delta = delta // 700 if first else delta // 2
    delta += delta // numchars
    divisions = 0
    # ((base - tmin) * tmax) // 2 == 455
    while delta > 455:
        delta //= 35  # base - tmin
        divisions += 36
    return divisions + (36 * delta // (delta + 38))
def generate_integers(baselen, deltas):
    """Encode every delta in sequence, re-adapting the bias after each one.

    (Punycode parameters: initial bias = 72; see adapt() for the rest.)
    """
    result = []
    bias = 72
    for points, delta in enumerate(deltas):
        s = generate_generalized_integer(delta, bias)
        result.extend(s)
        bias = adapt(delta, points==0, baselen+points+1)
    return "".join(result)
def punycode_encode(text):
    """Encode *text* per RFC 3492: basic part, '-' separator, extended part.

    NOTE(review): *base* is already ASCII-encoded by segregate(); the second
    .encode("ascii") below is a Python 2 str round-trip -- confirm before
    porting to Python 3.
    """
    base, extended = segregate(text)
    base = base.encode("ascii")
    deltas = insertion_unsort(text, extended)
    extended = generate_integers(len(base), deltas)
    if base:
        return base + "-" + extended
    return extended
##################### Decoding #####################################
def decode_generalized_number(extended, extpos, bias, errors):
    """3.3 Generalized variable-length integers

    Decode one variable-length integer starting at *extpos*.  Returns
    (new position, value); value is None when decoding failed and
    *errors* is not "strict".
    """
    result = 0
    w = 1
    j = 0
    while 1:
        try:
            char = ord(extended[extpos])
        except IndexError:
            if errors == "strict":
                raise UnicodeError, "incomplete punicode string"
            return extpos + 1, None
        extpos += 1
        if 0x41 <= char <= 0x5A: # A-Z
            digit = char - 0x41
        elif 0x30 <= char <= 0x39:
            digit = char - 22 # 0x30-26
        elif errors == "strict":
            # NOTE(review): extpos was already advanced above, so this message
            # appears to show the character *after* the offending one --
            # confirm whether extended[extpos-1] was intended.
            raise UnicodeError("Invalid extended code point '%s'"
                               % extended[extpos])
        else:
            return extpos, None
        t = T(j, bias)
        result += digit * w
        if digit < t:
            return extpos, result
        w = w * (36 - t)
        j += 1
def insertion_sort(base, extended, errors):
    """3.2 Insertion sort coding: decode *extended* and insert the decoded
    characters into *base* at the positions the deltas dictate."""
    char = 0x80
    pos = -1
    bias = 72
    extpos = 0
    while extpos < len(extended):
        newpos, delta = decode_generalized_number(extended, extpos,
                                                  bias, errors)
        if delta is None:
            # There was an error in decoding. We can't continue because
            # synchronization is lost.
            return base
        pos += delta+1
        char += pos // (len(base) + 1)
        if char > 0x10FFFF:
            # beyond the Unicode range: hard error or '?' substitution
            if errors == "strict":
                raise UnicodeError, ("Invalid character U+%x" % char)
            char = ord('?')
        pos = pos % (len(base) + 1)
        base = base[:pos] + unichr(char) + base[pos:]
        bias = adapt(delta, (extpos == 0), len(base))
        extpos = newpos
    return base
def punycode_decode(text, errors):
    """Decode a punycode string: split on the *last* '-', decode the ASCII
    part, then insert the extended characters (Python 2: returns unicode)."""
    pos = text.rfind("-")
    if pos == -1:
        base = ""
        extended = text
    else:
        base = text[:pos]
        extended = text[pos+1:]
    base = unicode(base, "ascii", errors)
    # punycode digits are case-insensitive; normalize before decoding
    extended = extended.upper()
    return insertion_sort(base, extended, errors)
### Codec APIs
class Codec(codecs.Codec):
    """Stateless punycode encoder/decoder for the codecs machinery."""

    def encode(self, input, errors='strict'):
        # errors is accepted for API symmetry; punycode encoding cannot fail.
        res = punycode_encode(input)
        return res, len(input)

    def decode(self, input, errors='strict'):
        if errors not in ('strict', 'replace', 'ignore'):
            raise UnicodeError, "Unsupported error handling "+errors
        res = punycode_decode(input, errors)
        return res, len(input)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Punycode cannot be encoded incrementally, so each call encodes the
    # whole input; the final flag is ignored.
    def encode(self, input, final=False):
        return punycode_encode(input)
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Like encoding, decoding is done on the whole input per call.
    def decode(self, input, final=False):
        if self.errors not in ('strict', 'replace', 'ignore'):
            raise UnicodeError, "Unsupported error handling "+self.errors
        return punycode_decode(input, self.errors)
class StreamWriter(Codec, codecs.StreamWriter):
    # Codec supplies encode(); codecs.StreamWriter supplies the stream API.
    pass
class StreamReader(Codec, codecs.StreamReader):
    # Codec supplies decode(); codecs.StreamReader supplies the stream API.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo used to register the 'punycode' codec."""
    return codecs.CodecInfo(
        name='punycode',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
|
geekboxzone/lollipop_external_chromium_org | refs/heads/geekbox | third_party/markdown/extensions/toc.py | 109 | # markdown is released under the BSD license
# Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
# Copyright 2004 Manfred Stienstra (the original version)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE PYTHON MARKDOWN PROJECT ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ANY CONTRIBUTORS TO THE PYTHON MARKDOWN PROJECT
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Table of Contents Extension for Python-Markdown
* * *
(c) 2008 [Jack Miller](http://codezen.org)
Dependencies:
* [Markdown 2.1+](http://packages.python.org/Markdown/)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..treeprocessors import Treeprocessor
from ..util import etree
from .headerid import slugify, unique, itertext
import re
def order_toc_list(toc_list):
    """Given an unsorted list with errors and skips, return a nested one.

    [{'level': 1}, {'level': 2}]
    =>
    [{'level': 1, 'children': [{'level': 2, 'children': []}]}]

    A wrong list is also converted:

    [{'level': 2}, {'level': 1}]
    =>
    [{'level': 2, 'children': []}, {'level': 1, 'children': []}]
    """

    def build_correct(remaining_list, prev_elements=None):
        # BUG FIX: the default used to be the mutable list [{'level': 1000}].
        # build_correct pops from prev_elements, so that shared default was
        # emptied by the first call and corrupted every later call to
        # order_toc_list.  Use a None sentinel and build a fresh list instead.
        if prev_elements is None:
            prev_elements = [{'level': 1000}]

        if not remaining_list:
            return [], []

        current = remaining_list.pop(0)
        if not 'children' in current.keys():
            current['children'] = []

        if not prev_elements:
            # This happens for instance with [8, 1, 1], ie. when some
            # header level is outside a scope. We treat it as a
            # top-level
            next_elements, children = build_correct(remaining_list, [current])
            current['children'].append(children)
            return [current] + next_elements, []

        prev_element = prev_elements.pop()
        children = []
        next_elements = []
        # Is current part of the child list or next list?
        if current['level'] > prev_element['level']:
            # current is a child of prev_element: nest it and keep scanning
            # with both on the ancestor stack
            prev_elements.append(prev_element)
            prev_elements.append(current)
            prev_element['children'].append(current)
            next_elements2, children2 = build_correct(remaining_list, prev_elements)
            children += children2
            next_elements += next_elements2
        else:
            # current closes prev_element's scope; climb the ancestor stack
            if not prev_elements:
                # No previous elements, so append to the next set
                next_elements.append(current)
                prev_elements = [current]
                next_elements2, children2 = build_correct(remaining_list, prev_elements)
                current['children'].extend(children2)
            else:
                # Compare against the remaining ancestors first
                remaining_list.insert(0, current)
                next_elements2, children2 = build_correct(remaining_list, prev_elements)
                children.extend(children2)
            next_elements += next_elements2

        return next_elements, children

    ordered_list, __ = build_correct(toc_list)
    return ordered_list
class TocTreeprocessor(Treeprocessor):
    """Collects h1-h6 headers, assigns them ids, and replaces the configured
    marker element (default ``[TOC]``) with a nested table of contents."""
    # Iterator wrapper to get parent and child all at once
    def iterparent(self, root):
        for parent in root.getiterator():
            for child in parent:
                yield parent, child
    def add_anchor(self, c, elem_id): #@ReservedAssignment
        # When "anchorlink" is enabled, wrap the header's content in a
        # self-referencing <a class="toclink"> element.
        if self.use_anchors:
            anchor = etree.Element("a")
            anchor.text = c.text
            anchor.attrib["href"] = "#" + elem_id
            anchor.attrib["class"] = "toclink"
            c.text = ""
            # getchildren() returns a list copy, so removing while
            # iterating is safe here.
            for elem in c.getchildren():
                anchor.append(elem)
                c.remove(elem)
            c.append(anchor)
    def build_toc_etree(self, div, toc_list):
        # Add title to the div
        if self.config["title"]:
            header = etree.SubElement(div, "span")
            header.attrib["class"] = "toctitle"
            header.text = self.config["title"]
        def build_etree_ul(toc_list, parent):
            # Recursively render the nested toc_list as nested <ul>/<li>.
            ul = etree.SubElement(parent, "ul")
            for item in toc_list:
                # List item link, to be inserted into the toc div
                li = etree.SubElement(ul, "li")
                link = etree.SubElement(li, "a")
                link.text = item.get('name', '')
                link.attrib["href"] = '#' + item.get('id', '')
                if item['children']:
                    build_etree_ul(item['children'], li)
            return ul
        return build_etree_ul(toc_list, div)
    def run(self, doc):
        """Treeprocessor entry point: builds the TOC <div> and either swaps
        it in for the marker element or, when no marker was found, stores
        the serialized TOC on the markdown instance as ``markdown.toc``."""
        div = etree.Element("div")
        div.attrib["class"] = "toc"
        header_rgx = re.compile("[Hh][123456]")
        self.use_anchors = self.config["anchorlink"] in [1, '1', True, 'True', 'true']
        # Get a list of id attributes
        used_ids = set()
        for c in doc.getiterator():
            if "id" in c.attrib:
                used_ids.add(c.attrib["id"])
        toc_list = []
        marker_found = False
        for (p, c) in self.iterparent(doc):
            text = ''.join(itertext(c)).strip()
            if not text:
                continue
            # To keep the output from screwing up the
            # validation by putting a <div> inside of a <p>
            # we actually replace the <p> in its entirety.
            # We do not allow the marker inside a header as that
            # would causes an enless loop of placing a new TOC
            # inside previously generated TOC.
            if c.text and c.text.strip() == self.config["marker"] and \
               not header_rgx.match(c.tag) and c.tag not in ['pre', 'code']:
                for i in range(len(p)):
                    if p[i] == c:
                        p[i] = div
                        break
                marker_found = True
            if header_rgx.match(c.tag):
                # Do not override pre-existing ids
                if not "id" in c.attrib:
                    elem_id = unique(self.config["slugify"](text, '-'), used_ids)
                    c.attrib["id"] = elem_id
                else:
                    elem_id = c.attrib["id"]
                # Numeric part of the tag ("h2" -> 2) is the nesting level.
                tag_level = int(c.tag[-1])
                toc_list.append({'level': tag_level,
                                 'id': elem_id,
                                 'name': text})
                self.add_anchor(c, elem_id)
        toc_list_nested = order_toc_list(toc_list)
        self.build_toc_etree(div, toc_list_nested)
        prettify = self.markdown.treeprocessors.get('prettify')
        if prettify: prettify.run(div)
        if not marker_found:
            # serialize and attach to markdown instance.
            toc = self.markdown.serializer(div)
            for pp in self.markdown.postprocessors.values():
                toc = pp.run(toc)
            self.markdown.toc = toc
class TocExtension(Extension):
    """Markdown extension that registers :class:`TocTreeprocessor`."""
    TreeProcessorClass = TocTreeprocessor
    def __init__(self, configs=[]):
        # NOTE(review): the mutable default is harmless here because
        # `configs` is only iterated, never mutated or stored.
        self.config = { "marker" : ["[TOC]",
                            "Text to find and replace with Table of Contents -"
                            "Defaults to \"[TOC]\""],
                        "slugify" : [slugify,
                            "Function to generate anchors based on header text-"
                            "Defaults to the headerid ext's slugify function."],
                        "title" : [None,
                            "Title to insert into TOC <div> - "
                            "Defaults to None"],
                        "anchorlink" : [0,
                            "1 if header should be a self link"
                            "Defaults to 0"]}
        for key, value in configs:
            self.setConfig(key, value)
    def extendMarkdown(self, md, md_globals):
        tocext = self.TreeProcessorClass(md)
        tocext.config = self.getConfigs()
        # Headerid ext is set to '>prettify'. With this set to '_end',
        # it should always come after headerid ext (and honor ids assinged
        # by the header id extension) if both are used. Same goes for
        # attr_list extension. This must come last because we don't want
        # to redefine ids after toc is created. But we do want toc prettified.
        md.treeprocessors.add("toc", tocext, "_end")
def makeExtension(configs=None):
    """Markdown extension entry point: build a :class:`TocExtension`.

    ``configs`` is an iterable of ``(key, value)`` pairs; ``None`` (the
    default) means "no overrides".  The previous signature used a shared
    mutable default (``configs={}``); a ``None`` sentinel avoids that
    pitfall while remaining backward compatible for all callers.
    """
    return TocExtension(configs=configs if configs is not None else {})
|
nadeemat/namebench | refs/heads/master | nb_third_party/jinja2/sandbox.py | 284 | # -*- coding: utf-8 -*-
"""
jinja2.sandbox
~~~~~~~~~~~~~~
Adds a sandbox layer to Jinja as it was the default behavior in the old
Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the
default behavior is easier to use.
The behavior can be changed by subclassing the environment.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
import operator
from jinja2.runtime import Undefined
from jinja2.environment import Environment
from jinja2.exceptions import SecurityError
from jinja2.utils import FunctionType, MethodType, TracebackType, CodeType, \
FrameType, GeneratorType
#: maximum number of items a range may produce
MAX_RANGE = 100000
#: attributes of function objects that are considered unsafe.
UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
'func_defaults', 'func_globals'])
#: unsafe method attributes. function attributes are unsafe for methods too
UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])
import warnings
# make sure we don't warn in python 2.6 about stuff we don't care about
warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
module='jinja2.sandbox')
from collections import deque
_mutable_set_types = (set,)
_mutable_mapping_types = (dict,)
_mutable_sequence_types = (list,)
# on python 2.x we can register the user collection types
try:
from UserDict import UserDict, DictMixin
from UserList import UserList
_mutable_mapping_types += (UserDict, DictMixin)
_mutable_set_types += (UserList,)
except ImportError:
pass
# if sets is still available, register the mutable set from there as well
try:
from sets import Set
_mutable_set_types += (Set,)
except ImportError:
pass
#: register Python 2.6 abstract base classes
try:
from collections import MutableSet, MutableMapping, MutableSequence
_mutable_set_types += (MutableSet,)
_mutable_mapping_types += (MutableMapping,)
_mutable_sequence_types += (MutableSequence,)
except ImportError:
pass
_mutable_spec = (
(_mutable_set_types, frozenset([
'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
'symmetric_difference_update', 'update'
])),
(_mutable_mapping_types, frozenset([
'clear', 'pop', 'popitem', 'setdefault', 'update'
])),
(_mutable_sequence_types, frozenset([
'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
])),
(deque, frozenset([
'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
'popleft', 'remove', 'rotate'
]))
)
def safe_range(*args):
    """A range that can't generate ranges with a length of more than
    MAX_RANGE items.
    """
    # Python 2 era: xrange is lazy and len() on it is O(1), so the size
    # check is cheap even for huge requested ranges.
    rng = xrange(*args)
    if len(rng) > MAX_RANGE:
        raise OverflowError('range too big, maximum size for range is %d' %
                            MAX_RANGE)
    return rng
def unsafe(f):
    """Decorator flagging *f* as unsafe so the sandbox refuses to call it.

    Usage::

        @unsafe
        def delete(self):
            pass
    """
    setattr(f, 'unsafe_callable', True)
    return f
def is_internal_attribute(obj, attr):
    """Test if the attribute given is an internal python attribute.  For
    example this function returns `True` for the `func_code` attribute of
    python objects.  This is useful if the environment method
    :meth:`~SandboxedEnvironment.is_safe_attribute` is overriden.

    >>> from jinja2.sandbox import is_internal_attribute
    >>> is_internal_attribute(lambda: None, "func_code")
    True
    >>> is_internal_attribute((lambda x:x).func_code, 'co_code')
    True
    >>> is_internal_attribute(str, "upper")
    False
    """
    if isinstance(obj, FunctionType):
        if attr in UNSAFE_FUNCTION_ATTRIBUTES:
            return True
    elif isinstance(obj, MethodType):
        # Bound methods expose both their own unsafe attributes and the
        # underlying function's.
        if attr in UNSAFE_FUNCTION_ATTRIBUTES or \
           attr in UNSAFE_METHOD_ATTRIBUTES:
            return True
    elif isinstance(obj, type):
        # type.mro() would let templates walk the class hierarchy.
        if attr == 'mro':
            return True
    elif isinstance(obj, (CodeType, TracebackType, FrameType)):
        # Interpreter internals are never safe to expose.
        return True
    elif isinstance(obj, GeneratorType):
        if attr == 'gi_frame':
            return True
    # Everything dunder-prefixed is considered internal as well.
    return attr.startswith('__')
def modifies_known_mutable(obj, attr):
    """This function checks if an attribute on a builtin mutable object
    (list, dict, set or deque) would modify it if called.  It also supports
    the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
    with Python 2.6 onwards the abstract base classes `MutableSet`,
    `MutableMapping`, and `MutableSequence`.

    >>> modifies_known_mutable({}, "clear")
    True
    >>> modifies_known_mutable({}, "keys")
    False
    >>> modifies_known_mutable([], "append")
    True
    >>> modifies_known_mutable([], "index")
    False

    If called with an unsupported object (such as unicode) `False` is
    returned.

    >>> modifies_known_mutable("foo", "upper")
    False
    """
    # First matching type family wins; unknown types are assumed immutable.
    for typespec, unsafe in _mutable_spec:
        if isinstance(obj, typespec):
            return attr in unsafe
    return False
class SandboxedEnvironment(Environment):
    """The sandboxed environment.  It works like the regular environment but
    tells the compiler to generate sandboxed code.  Additionally subclasses of
    this environment may override the methods that tell the runtime what
    attributes or functions are safe to access.

    If the template tries to access insecure code a :exc:`SecurityError` is
    raised.  However also other exceptions may occour during the rendering so
    the caller has to ensure that all exceptions are catched.
    """
    sandboxed = True

    def __init__(self, *args, **kwargs):
        Environment.__init__(self, *args, **kwargs)
        # Replace the builtin range with a bounded variant so templates
        # cannot request arbitrarily large ranges.
        self.globals['range'] = safe_range

    def is_safe_attribute(self, obj, attr, value):
        """The sandboxed environment will call this method to check if the
        attribute of an object is safe to access.  Per default all attributes
        starting with an underscore are considered private as well as the
        special attributes of internal python objects as returned by the
        :func:`is_internal_attribute` function.
        """
        return not (attr.startswith('_') or is_internal_attribute(obj, attr))

    def is_safe_callable(self, obj):
        """Check if an object is safely callable.  Per default a function is
        considered safe unless the `unsafe_callable` attribute exists and is
        True.  Override this method to alter the behavior, but this won't
        affect the `unsafe` decorator from this module.
        """
        return not (getattr(obj, 'unsafe_callable', False) or
                    getattr(obj, 'alters_data', False))

    def getitem(self, obj, argument):
        """Subscribe an object from sandboxed code."""
        try:
            return obj[argument]
        except (TypeError, LookupError):
            # Fall back to attribute access for string subscripts so that
            # foo['bar'] can reach foo.bar when that attribute is safe.
            if isinstance(argument, basestring):
                try:
                    attr = str(argument)
                except Exception:
                    # BUGFIX: narrowed from a bare ``except:`` so that
                    # SystemExit / KeyboardInterrupt are no longer swallowed.
                    pass
                else:
                    try:
                        value = getattr(obj, attr)
                    except AttributeError:
                        pass
                    else:
                        if self.is_safe_attribute(obj, argument, value):
                            return value
                        return self.unsafe_undefined(obj, argument)
        return self.undefined(obj=obj, name=argument)

    def getattr(self, obj, attribute):
        """Subscribe an object from sandboxed code and prefer the
        attribute.  The attribute passed *must* be a bytestring.
        """
        try:
            value = getattr(obj, attribute)
        except AttributeError:
            # Fall back to item lookup when the attribute is missing.
            try:
                return obj[attribute]
            except (TypeError, LookupError):
                pass
        else:
            if self.is_safe_attribute(obj, attribute, value):
                return value
            return self.unsafe_undefined(obj, attribute)
        return self.undefined(obj=obj, name=attribute)

    def unsafe_undefined(self, obj, attribute):
        """Return an undefined object for unsafe attributes."""
        return self.undefined('access to attribute %r of %r '
                              'object is unsafe.' % (
            attribute,
            obj.__class__.__name__
        ), name=attribute, obj=obj, exc=SecurityError)

    def call(__self, __context, __obj, *args, **kwargs):
        """Call an object from sandboxed code."""
        # the double prefixes are to avoid double keyword argument
        # errors when proxying the call.
        if not __self.is_safe_callable(__obj):
            raise SecurityError('%r is not safely callable' % (__obj,))
        return __context.call(__obj, *args, **kwargs)
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
    """Works exactly like the regular `SandboxedEnvironment` but does not
    permit modifications on the builtin mutable objects `list`, `set`, and
    `dict` by using the :func:`modifies_known_mutable` function.
    """

    def is_safe_attribute(self, obj, attr, value):
        # Safe only if the base sandbox allows it AND the attribute is not
        # a known mutator of a builtin container type.
        base_ok = SandboxedEnvironment.is_safe_attribute(self, obj, attr, value)
        return base_ok and not modifies_known_mutable(obj, attr)
|
GiovanniConserva/TestDeploy | refs/heads/master | venv/Lib/encodings/hex_codec.py | 58 | """ Python 'hex_codec' Codec - 2-digit hex content transfer encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg (mal@lemburg.com).
"""
import codecs, binascii
### Codec APIs
def hex_encode(input, errors='strict'):
    """Hex-encode *input* and return ``(output object, length consumed)``.

    ``errors`` defaults to 'strict', which is the only error handling
    supported by this codec.
    """
    assert errors == 'strict'
    return (binascii.b2a_hex(input), len(input))
def hex_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = binascii.a2b_hex(input)
return (output, len(input))
class Codec(codecs.Codec):
    # Stateless codec that delegates to the module-level helpers.
    def encode(self, input,errors='strict'):
        return hex_encode(input,errors)
    def decode(self, input,errors='strict'):
        return hex_decode(input,errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Hex encoding is stateless, so `final` needs no special handling.
    def encode(self, input, final=False):
        assert self.errors == 'strict'
        return binascii.b2a_hex(input)
class IncrementalDecoder(codecs.IncrementalDecoder):
    # NOTE(review): a partial chunk (odd number of hex digits) raises in
    # binascii; no state is kept between calls, so `final` is unused.
    def decode(self, input, final=False):
        assert self.errors == 'strict'
        return binascii.a2b_hex(input)
class StreamWriter(Codec,codecs.StreamWriter):
    # Behavior comes entirely from the mixins; nothing to add.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Behavior comes entirely from the mixins; nothing to add.
    pass
### encodings module API
def getregentry():
    # Registry hook for the codecs machinery.  _is_text_encoding=False
    # marks this as a bytes-to-bytes codec (so str.encode('hex') is
    # rejected on Python 3; use codecs.encode(b..., 'hex') instead).
    return codecs.CodecInfo(
        name='hex',
        encode=hex_encode,
        decode=hex_decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
        _is_text_encoding=False,
    )
|
ENjOyAbLE1991/scrapy | refs/heads/master | scrapy/spiders/crawl.py | 78 | """
This modules implements the CrawlSpider which is the recommended spider to use
for scraping typical web sites that requires crawling pages.
See documentation in docs/topics/spiders.rst
"""
import copy
import six
from scrapy.http import Request, HtmlResponse
from scrapy.utils.spider import iterate_spider_output
from scrapy.spiders import Spider
def identity(x):
    """Default ``process_request`` hook: return *x* unchanged."""
    return x
class Rule(object):
    """Crawling rule: pairs a link extractor with callback/follow policy."""

    def __init__(self, link_extractor, callback=None, cb_kwargs=None, follow=None, process_links=None, process_request=identity):
        self.link_extractor = link_extractor
        self.callback = callback
        self.cb_kwargs = cb_kwargs or {}
        self.process_links = process_links
        self.process_request = process_request
        # When `follow` is unspecified, follow links only for rules without
        # a callback (pure crawling rules).
        self.follow = follow if follow is not None else not callback
class CrawlSpider(Spider):
    """Spider that crawls a site by following links according to ``rules``.

    Each :class:`Rule` pairs a link extractor with an optional callback;
    links extracted from downloaded pages become new requests.
    """
    rules = ()
    def __init__(self, *a, **kw):
        super(CrawlSpider, self).__init__(*a, **kw)
        self._compile_rules()
    def parse(self, response):
        # Entry point for start_urls responses; always follows links.
        return self._parse_response(response, self.parse_start_url, cb_kwargs={}, follow=True)
    def parse_start_url(self, response):
        # Subclass hook: parse responses for the start URLs.
        return []
    def process_results(self, response, results):
        # Subclass hook: post-process callback results.
        return results
    def _requests_to_follow(self, response):
        # Only HTML responses can contain links worth extracting.
        if not isinstance(response, HtmlResponse):
            return
        seen = set()
        for n, rule in enumerate(self._rules):
            links = [lnk for lnk in rule.link_extractor.extract_links(response)
                     if lnk not in seen]
            if links and rule.process_links:
                links = rule.process_links(links)
            for link in links:
                seen.add(link)
                # The rule index travels in meta so _response_downloaded
                # can recover which rule produced this request.
                r = Request(url=link.url, callback=self._response_downloaded)
                r.meta.update(rule=n, link_text=link.text)
                yield rule.process_request(r)
    def _response_downloaded(self, response):
        rule = self._rules[response.meta['rule']]
        return self._parse_response(response, rule.callback, rule.cb_kwargs, rule.follow)
    def _parse_response(self, response, callback, cb_kwargs, follow=True):
        # Yield the callback's items/requests first, then follow-up requests.
        if callback:
            cb_res = callback(response, **cb_kwargs) or ()
            cb_res = self.process_results(response, cb_res)
            for requests_or_item in iterate_spider_output(cb_res):
                yield requests_or_item
        if follow and self._follow_links:
            for request_or_item in self._requests_to_follow(response):
                yield request_or_item
    def _compile_rules(self):
        # Resolve string method names against the instance, working on
        # copies so the class-level `rules` attribute stays untouched.
        def get_method(method):
            if callable(method):
                return method
            elif isinstance(method, six.string_types):
                return getattr(self, method, None)
        self._rules = [copy.copy(r) for r in self.rules]
        for rule in self._rules:
            rule.callback = get_method(rule.callback)
            rule.process_links = get_method(rule.process_links)
            rule.process_request = get_method(rule.process_request)
    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        spider = super(CrawlSpider, cls).from_crawler(crawler, *args, **kwargs)
        spider._follow_links = crawler.settings.getbool(
            'CRAWLSPIDER_FOLLOW_LINKS', True)
        return spider
    def set_crawler(self, crawler):
        # Legacy (pre-from_crawler) initialization path.
        super(CrawlSpider, self).set_crawler(crawler)
        self._follow_links = crawler.settings.getbool('CRAWLSPIDER_FOLLOW_LINKS', True)
|
simodalla/mezzanine_nowait | refs/heads/master | setup.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import nowait
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = nowait.__version__
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='Mezzanine NoWait',
version=version,
description="""Mezzanine app for manage public reservations for government
offices""",
long_description=readme + '\n\n' + history,
author='Simone Dalla',
author_email='simodalla@gmail.com',
url='https://github.com/simodalla/mezzanine_nowait',
packages=[
'nowait',
],
include_package_data=True,
install_requires=[
],
license="BSD",
zip_safe=False,
keywords='mezzanine_nowait',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
],
)
|
mustafat/odoo-1 | refs/heads/8.0 | addons/note/tests/test_note.py | 427 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
class TestNote(common.TransactionCase):
    def test_bug_lp_1156215(self):
        """ensure any users can create new users"""
        cr, uid = self.cr, self.uid
        IMD = self.registry('ir.model.data')
        Users = self.registry('res.users')
        _, demo_user = IMD.get_object_reference(cr, uid, 'base', 'user_demo')
        _, group_id = IMD.get_object_reference(cr, uid, 'base', 'group_erp_manager')
        # Grant the demo user the erp_manager group ((4, id) = "link") so
        # it has the rights needed to administer users.
        Users.write(cr, uid, [demo_user], {
            'groups_id': [(4, group_id)],
        })
        # must not fail
        Users.create(cr, demo_user, {
            'name': 'test bug lp:1156215',
            'login': 'lp_1156215',
        })
|
hellhovnd/django | refs/heads/master | tests/generic_inline_admin/urls.py | 150 | from __future__ import absolute_import
from django.conf.urls import patterns, include
from . import admin
urlpatterns = patterns('',
    # Old-style (pre-Django 1.8) tuple syntax mounting the test AdminSite.
    (r'^generic_inline_admin/admin/', include(admin.site.urls)),
)
|
litchfield/django | refs/heads/master | django/contrib/messages/storage/fallback.py | 704 | from django.contrib.messages.storage.base import BaseStorage
from django.contrib.messages.storage.cookie import CookieStorage
from django.contrib.messages.storage.session import SessionStorage
class FallbackStorage(BaseStorage):
    """
    Tries to store all messages in the first backend, storing any unstored
    messages in each subsequent backend backend.
    """
    storage_classes = (CookieStorage, SessionStorage)

    def __init__(self, *args, **kwargs):
        super(FallbackStorage, self).__init__(*args, **kwargs)
        self.storages = [storage_class(*args, **kwargs)
                         for storage_class in self.storage_classes]
        self._used_storages = set()

    def _get(self, *args, **kwargs):
        """
        Gets a single list of messages from all storage backends.
        """
        all_messages = []
        for storage in self.storages:
            messages, all_retrieved = storage._get()
            # If the backend hasn't been used, no more retrieval is necessary.
            if messages is None:
                break
            if messages:
                self._used_storages.add(storage)
            all_messages.extend(messages)
            # If this storage class contained all the messages, no further
            # retrieval is necessary
            if all_retrieved:
                break
        # NOTE(review): `all_retrieved` is only bound inside the loop; this
        # relies on `self.storages` never being empty (storage_classes is a
        # non-empty class constant).
        return all_messages, all_retrieved

    def _store(self, messages, response, *args, **kwargs):
        """
        Stores the messages, returning any unstored messages after trying all
        backends.

        For each storage backend, any messages not stored are passed on to the
        next backend.
        """
        for storage in self.storages:
            if messages:
                messages = storage._store(messages, response,
                                          remove_oldest=False)
            # Even if there are no more messages, continue iterating to ensure
            # storages which contained messages are flushed.
            elif storage in self._used_storages:
                storage._store([], response)
                self._used_storages.remove(storage)
        return messages
|
CubicERP/geraldo | refs/heads/master | site/newsite/django_1_0/django/middleware/common.py | 11 | import md5
import re
from django.conf import settings
from django import http
from django.core.mail import mail_managers
from django.utils.http import urlquote
from django.core import urlresolvers
class CommonMiddleware(object):
    """
    "Common" middleware for taking care of some basic operations:

        - Forbids access to User-Agents in settings.DISALLOWED_USER_AGENTS

        - URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings,
          this middleware appends missing slashes and/or prepends missing
          "www."s.

            - If APPEND_SLASH is set and the initial URL doesn't end with a
              slash, and it is not found in urlpatterns, a new URL is formed by
              appending a slash at the end. If this new URL is found in
              urlpatterns, then an HTTP-redirect is returned to this new URL;
              otherwise the initial URL is processed as usual.

        - ETags: If the USE_ETAGS setting is set, ETags will be calculated from
          the entire page content and Not Modified responses will be returned
          appropriately.
    """
    # NOTE(review): Python 2-era code (`md5` module, old `raise X, y`
    # syntax) from Django 1.0; kept verbatim.

    def process_request(self, request):
        """
        Check for denied User-Agents and rewrite the URL based on
        settings.APPEND_SLASH and settings.PREPEND_WWW
        """

        # Check for denied User-Agents
        if 'HTTP_USER_AGENT' in request.META:
            for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
                if user_agent_regex.search(request.META['HTTP_USER_AGENT']):
                    return http.HttpResponseForbidden('<h1>Forbidden</h1>')

        # Check for a redirect based on settings.APPEND_SLASH
        # and settings.PREPEND_WWW
        host = request.get_host()
        old_url = [host, request.path]
        new_url = old_url[:]
        if (settings.PREPEND_WWW and old_url[0] and
                not old_url[0].startswith('www.')):
            new_url[0] = 'www.' + old_url[0]
        # Append a slash if APPEND_SLASH is set and the URL doesn't have a
        # trailing slash and there is no pattern for the current path
        if settings.APPEND_SLASH and (not old_url[1].endswith('/')):
            try:
                urlresolvers.resolve(request.path)
            except urlresolvers.Resolver404:
                new_url[1] = new_url[1] + '/'
                if settings.DEBUG and request.method == 'POST':
                    raise RuntimeError, (""
                    "You called this URL via POST, but the URL doesn't end "
                    "in a slash and you have APPEND_SLASH set. Django can't "
                    "redirect to the slash URL while maintaining POST data. "
                    "Change your form to point to %s%s (note the trailing "
                    "slash), or set APPEND_SLASH=False in your Django "
                    "settings.") % (new_url[0], new_url[1])

        if new_url != old_url:
            # Redirect if the target url exists
            try:
                urlresolvers.resolve(new_url[1])
            except urlresolvers.Resolver404:
                pass
            else:
                if new_url[0]:
                    newurl = "%s://%s%s" % (
                        request.is_secure() and 'https' or 'http',
                        new_url[0], urlquote(new_url[1]))
                else:
                    newurl = urlquote(new_url[1])
                if request.GET:
                    newurl += '?' + request.GET.urlencode()
                return http.HttpResponsePermanentRedirect(newurl)

        return None

    def process_response(self, request, response):
        "Check for a flat page (for 404s) and calculate the Etag, if needed."
        if response.status_code == 404:
            if settings.SEND_BROKEN_LINK_EMAILS:
                # If the referrer was from an internal link or a non-search-engine site,
                # send a note to the managers.
                domain = request.get_host()
                referer = request.META.get('HTTP_REFERER', None)
                is_internal = _is_internal_request(domain, referer)
                path = request.get_full_path()
                if referer and not _is_ignorable_404(path) and (is_internal or '?' not in referer):
                    ua = request.META.get('HTTP_USER_AGENT', '<none>')
                    ip = request.META.get('REMOTE_ADDR', '<none>')
                    mail_managers("Broken %slink on %s" % ((is_internal and 'INTERNAL ' or ''), domain),
                        "Referrer: %s\nRequested URL: %s\nUser agent: %s\nIP address: %s\n" \
                                  % (referer, request.get_full_path(), ua, ip))
            return response

        # Use ETags, if requested.
        if settings.USE_ETAGS:
            if response.has_header('ETag'):
                etag = response['ETag']
            else:
                etag = '"%s"' % md5.new(response.content).hexdigest()
            # Preserve cookies set on the original response when swapping
            # it out for a 304 Not Modified.
            if response.status_code >= 200 and response.status_code < 300 and request.META.get('HTTP_IF_NONE_MATCH') == etag:
                cookies = response.cookies
                response = http.HttpResponseNotModified()
                response.cookies = cookies
            else:
                response['ETag'] = etag

        return response
def _is_ignorable_404(uri):
    "Returns True if a 404 at the given URL *shouldn't* notify the site managers"
    # Ignorable when the path matches any configured prefix or suffix.
    if any(uri.startswith(prefix) for prefix in settings.IGNORABLE_404_STARTS):
        return True
    return any(uri.endswith(suffix) for suffix in settings.IGNORABLE_404_ENDS)
def _is_internal_request(domain, referer):
"Return true if the referring URL is the same domain as the current request"
# Different subdomains are treated as different domains.
return referer is not None and re.match("^https?://%s/" % re.escape(domain), referer)
|
mjfarmer/scada_py | refs/heads/master | env/lib/python2.7/site-packages/twisted/internet/iocpreactor/const.py | 84 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Windows constants for IOCP
"""
# this stuff should really be gotten from Windows headers via pyrex, but it
# probably is not going to change
# Win32/Winsock error codes surfaced through IOCP completions.
ERROR_PORT_UNREACHABLE = 1234
ERROR_NETWORK_UNREACHABLE = 1231
ERROR_CONNECTION_REFUSED = 1225
ERROR_IO_PENDING = 997            # overlapped operation still in progress
ERROR_OPERATION_ABORTED = 995     # operation cancelled (e.g. handle closed)
WAIT_TIMEOUT = 258
ERROR_NETNAME_DELETED = 64        # remote endpoint reset/closed
ERROR_HANDLE_EOF = 38
INFINITE = -1                     # wait with no timeout
# setsockopt options that finalize socket state after ConnectEx/AcceptEx.
SO_UPDATE_CONNECT_CONTEXT = 0x7010
SO_UPDATE_ACCEPT_CONTEXT = 0x700B
|
cloudwatt/contrail-controller | refs/heads/master | src/config/svc-monitor/svc_monitor/scheduler/__init__.py | 118 | #
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
|
reingart/gui2py_googlecode | refs/heads/master | gui/doc/ext/autosummary/generate.py | 14 | # -*- coding: utf-8 -*-
"""
sphinx.ext.autosummary.generate
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Usable as a library or script to generate automatic RST source files for
items referred to in autosummary:: directives.
Each generated RST file contains a single auto*:: directive which
extracts the docstring of the referred item.
Example Makefile rule::
generate:
sphinx-autogen -o source/generated source/*.rst
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import sys
import pydoc
import optparse
import inspect
from jinja2 import FileSystemLoader, TemplateNotFound
from jinja2.sandbox import SandboxedEnvironment
from sphinx import package_dir
from ..autosummary import import_by_name, get_documenter
from sphinx.jinja2glue import BuiltinTemplateLoader
from sphinx.util.osutil import ensuredir
from sphinx.util.inspect import safe_getattr
def main(argv=sys.argv):
    """Command-line entry point for the ``sphinx-autogen`` script."""
    usage = """%prog [OPTIONS] SOURCEFILE ..."""
    p = optparse.OptionParser(usage.strip())
    p.add_option("-o", "--output-dir", action="store", type="string",
                 dest="output_dir", default=None,
                 help="Directory to place all output in")
    p.add_option("-s", "--suffix", action="store", type="string",
                 dest="suffix", default="rst",
                 help="Default suffix for files (default: %default)")
    p.add_option("-t", "--templates", action="store", type="string",
                 dest="templates", default=None,
                 help="Custom template directory (default: %default)")
    options, args = p.parse_args(argv[1:])
    if len(args) < 1:
        p.error('no input files given')
    generate_autosummary_docs(args, options.output_dir,
                              "." + options.suffix,
                              template_dir=options.templates)
def _simple_info(msg):
    # Default info reporter when not running under a Sphinx builder
    # (Python 2 print statement).
    print msg
def _simple_warn(msg):
    # Default warning reporter: Python 2 print-chevron to stderr.
    print >> sys.stderr, 'WARNING: ' + msg
# -- Generating output ---------------------------------------------------------
def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
                              warn=_simple_warn, info=_simple_info,
                              base_path=None, builder=None, template_dir=None):
    """Write a stub reST page for every item referenced from an
    ``autosummary::`` directive with a ``:toctree:`` option in *sources*,
    then recurse into the newly written pages so nested directives are
    expanded as well.

    Parameters:
        sources      - reST source files to scan for autosummary entries.
        output_dir   - directory to place all output in; when None each
                       page goes into its directive's :toctree: directory.
        suffix       - suffix (including the leading dot) for output files.
        warn, info   - callables used for warning / progress messages.
        base_path    - optional path prefix joined onto every source file.
        builder      - optional Sphinx builder whose template machinery is
                       reused so user templates take precedence.
        template_dir - optional custom template directory, searched first.
    """
    showed_sources = list(sorted(sources))
    if len(showed_sources) > 20:
        # Abbreviate long file lists in the progress message.
        showed_sources = showed_sources[:10] + ['...'] + showed_sources[-10:]
    info('[autosummary] generating autosummary for: %s' %
         ', '.join(showed_sources))
    if output_dir:
        info('[autosummary] writing to %s' % output_dir)
    if base_path is not None:
        sources = [os.path.join(base_path, filename) for filename in sources]
    # create our own templating environment
    template_dirs = [os.path.join(package_dir, 'ext',
                                  'autosummary', 'templates')]
    if builder is not None:
        # allow the user to override the templates
        template_loader = BuiltinTemplateLoader()
        template_loader.init(builder, dirs=template_dirs)
    else:
        if template_dir:
            template_dirs.insert(0, template_dir)
        template_loader = FileSystemLoader(template_dirs)
    template_env = SandboxedEnvironment(loader=template_loader)
    # read
    items = find_autosummary_in_files(sources)
    # remove possible duplicates (ordering is restored by sorted() below)
    items = dict([(item, True) for item in items]).keys()
    # keep track of new files
    new_files = []
    # write
    for name, path, template_name in sorted(items):
        if path is None:
            # The corresponding autosummary:: directive did not have
            # a :toctree: option
            continue
        path = output_dir or os.path.abspath(path)
        ensuredir(path)
        try:
            name, obj, parent = import_by_name(name)
        except ImportError, e:
            warn('[autosummary] failed to import %r: %s' % (name, e))
            continue
        fn = os.path.join(path, name + suffix)
        # skip it if it exists
        if os.path.isfile(fn):
            continue
        new_files.append(fn)
        f = open(fn, 'w')
        try:
            doc = get_documenter(obj, parent)
            if template_name is not None:
                template = template_env.get_template(template_name)
            else:
                # Fall back to the generic template when there is no
                # objtype-specific one.
                try:
                    template = template_env.get_template('autosummary/%s.rst'
                                                         % doc.objtype)
                except TemplateNotFound:
                    template = template_env.get_template('autosummary/base.rst')

            def get_members(obj, typ, include_public=[]):
                # Return (public, all) member names of *obj* whose
                # autodoc documenter type matches *typ*.
                items = []
                for name in dir(obj):
                    # NOTE(review): the stdlib ``sys`` module has no
                    # ``skip_member`` attribute; this looks like it should
                    # call a helper defined elsewhere -- confirm.
                    if sys.skip_member(name, obj): continue
                    if typ in ['class', 'function']:
                        c = getattr(obj, name)
                        if inspect.isclass(c) or inspect.isfunction(c):
                            # Only keep classes/functions defined in this
                            # module (or its ``.base`` submodule).
                            if (c.__module__!=obj.__name__+".base" and
                                c.__module__!=obj.__name__):
                                continue
                    try:
                        documenter = get_documenter(safe_getattr(obj, name), obj)
                    except AttributeError:
                        continue
                    if documenter.objtype == typ:
                        items.append(name)
                public = [x for x in items
                          if x in include_public or not x.startswith('_')]
                return public, items

            def def_members(obj, typ, include_public=[]):
                # Like get_members(), but only names defined directly in
                # obj.__dict__ (i.e. not inherited), iterated in sorted
                # order; returns only the public names.
                items = []
                try:
                    obj_dict = safe_getattr(obj, '__dict__')
                except AttributeError:
                    return []
                defined = obj_dict.keys()
                defined.sort()
                for name in defined:
                    # NOTE(review): same questionable ``sys.skip_member``
                    # as in get_members() above.
                    if sys.skip_member(name, obj): continue
                    try:
                        documenter = get_documenter(safe_getattr(obj, name), obj)
                    except AttributeError:
                        continue
                    if documenter.objtype == typ:
                        items.append(name)
                public = [x for x in items
                          if x in include_public or not x.startswith('_')]
                return public

            # ``ns`` is the namespace handed to the Jinja template.
            ns = {}
            if doc.objtype == 'module':
                ns['all_members'] = dir(obj)
                ns['classes'], ns['all_classes'] = \
                    get_members(obj, 'class')
                ns['functions'], ns['all_functions'] = \
                    get_members(obj, 'function')
                ns['exceptions'], ns['all_exceptions'] = \
                    get_members(obj, 'exception')
                documented = ns['classes']+ns['functions']+ns['exceptions']
                # NOTE(review): ``sys.all_submodules`` is not a stdlib
                # attribute either, and ``has_key`` is Python-2-only.
                if sys.all_submodules.has_key(obj.__name__):
                    ns['submodules'] = sys.all_submodules[obj.__name__]
                    documented += ns['submodules']
                ns['members'] = ns['all_members']
                try:
                    obj_dict = safe_getattr(obj, '__dict__')
                except AttributeError:
                    obj_dict = []
                # Public names not documented above become the plain
                # 'members' list.
                public = [x for x in obj_dict if not x.startswith('_')]
                for item in documented:
                    if item in public:
                        public.remove(item)
                public.sort()
                ns['members'] = public
                ns['constants'] = [x for x in public
                                   if not sys.skip_member(x, obj)]
            elif doc.objtype == 'class':
                ns['members'] = dir(obj)
                ns['events'], ns['all_events'] = \
                    get_members(obj, 'event')
                ns['methods'], ns['all_methods'] = \
                    get_members(obj, 'method', ['__init__'])
                ns['attributes'], ns['all_attributes'] = \
                    get_members(obj, 'attribute')
                ns['def_events'] = def_members(obj, 'event')
                ns['def_methods'] = def_members(obj, 'method', ['__init__'])
                ns['def_attributes'] = def_members(obj, 'attribute')
                # Anything visible on the class but not defined directly in
                # its __dict__ is treated as inherited.
                ns['inherited'] = []
                for t in ['events', 'methods', 'attributes']:
                    key = 'inh_' + t
                    ns[key]=[]
                    for item in ns[t]:
                        if not item in ns['def_' + t]:
                            ns['inherited'].append(item)
                            ns[key].append(item)

            # Split the dotted name into module / class / object parts for
            # the template.
            parts = name.split('.')
            if doc.objtype in ('method', 'attribute'):
                mod_name = '.'.join(parts[:-2])
                cls_name = parts[-2]
                obj_name = '.'.join(parts[-2:])
                ns['class'] = cls_name
            else:
                mod_name, obj_name = '.'.join(parts[:-1]), parts[-1]
            ns['fullname'] = name
            ns['module'] = mod_name
            ns['objname'] = obj_name
            ns['name'] = parts[-1]
            ns['objtype'] = doc.objtype
            ns['underline'] = len(name) * '='
            rendered = template.render(**ns)
            f.write(rendered)
        finally:
            f.close()
    # descend recursively to new files
    if new_files:
        generate_autosummary_docs(new_files, output_dir=output_dir,
                                  suffix=suffix, warn=warn, info=info,
                                  base_path=base_path, builder=builder,
                                  template_dir=template_dir)
# -- Finding documented entries in files ---------------------------------------
def find_autosummary_in_files(filenames):
    """Find out what items are documented in source/*.rst.

    See `find_autosummary_in_lines`.

    Fix: the file is now opened via ``with`` so the handle is closed even
    if reading or parsing raises; the previous explicit open()/close()
    pair leaked the handle on error.
    """
    documented = []
    for filename in filenames:
        with open(filename, 'r') as f:
            lines = f.read().splitlines()
        documented.extend(find_autosummary_in_lines(lines, filename=filename))
    return documented
def find_autosummary_in_docstring(name, module=None, filename=None):
    """Find out what items are documented in the given object's docstring.

    See `find_autosummary_in_lines`.

    Fix: ``except ImportError, e`` and the bare print statement were
    Python-2-only; the ``as`` form and call-style print work on both
    Python 2.6+ and Python 3.
    """
    try:
        real_name, obj, parent = import_by_name(name)
        lines = pydoc.getdoc(obj).splitlines()
        return find_autosummary_in_lines(lines, module=name, filename=filename)
    except AttributeError:
        # Object has no usable docstring -- nothing to report.
        pass
    except ImportError as e:
        print("Failed to import '%s': %s" % (name, e))
    return []
def find_autosummary_in_lines(lines, module=None, filename=None):
    """Scan *lines* for ``autosummary::`` directives and report the entries.

    Each entry is returned as a ``(name, toctree, template)`` tuple:
    *name* is the (module-qualified) object name, *toctree* the value of
    the directive's ``:toctree:`` option (joined onto *filename*'s
    directory when a filename is given) and *template* the value of its
    ``:template:`` option; either option is ``None`` when absent.
    """
    directive_re = re.compile(r'^(\s*)\.\.\s+autosummary::\s*')
    automodule_re = re.compile(
        r'^\s*\.\.\s+automodule::\s*([A-Za-z0-9_.]+)\s*$')
    module_directive_re = re.compile(
        r'^\s*\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$')
    entry_re = re.compile(r'^\s+(~?[_a-zA-Z][a-zA-Z0-9_.]*)\s*.*?')
    toctree_opt_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$')
    template_opt_re = re.compile(r'^\s+:template:\s*(.*?)\s*$')

    found = []
    toctree = None
    template = None
    current_module = module
    inside = False
    base_indent = ""

    for line in lines:
        if inside:
            match = toctree_opt_re.match(line)
            if match:
                toctree = match.group(1)
                if filename:
                    toctree = os.path.join(os.path.dirname(filename),
                                           toctree)
                continue
            match = template_opt_re.match(line)
            if match:
                template = match.group(1).strip()
                continue
            if line.strip().startswith(':'):
                # Some other directive option -- ignore it.
                continue
            match = entry_re.match(line)
            if match:
                name = match.group(1).strip()
                if name.startswith('~'):
                    name = name[1:]
                if current_module and \
                   not name.startswith(current_module + '.'):
                    name = "%s.%s" % (current_module, name)
                found.append((name, toctree, template))
                continue
            if not line.strip() or line.startswith(base_indent + " "):
                continue
            # Dedented, non-empty line: the directive body has ended.
            inside = False

        match = directive_re.match(line)
        if match:
            inside = True
            base_indent = match.group(1)
            toctree = None
            template = None
            continue

        match = automodule_re.search(line)
        if match:
            current_module = match.group(1).strip()
            # Recurse into the automodule's docstring as well.
            found.extend(find_autosummary_in_docstring(
                current_module, filename=filename))
            continue

        match = module_directive_re.match(line)
        if match:
            current_module = match.group(2)
            continue

    return found
# Allow this module to be executed directly as a command-line script.
if __name__ == '__main__':
    main()
|
murrown/cyder | refs/heads/master | vendor-local/src/django-extensions/build/lib/django_extensions/management/commands/print_settings.py | 16 | """
print_settings
==============
Django command similar to 'diffsettings' but shows all active Django settings.
"""
from django.core.management.base import NoArgsCommand
from django.conf import settings
from optparse import make_option
class Command(NoArgsCommand):
    """print_settings command: print every active Django setting in a
    selectable output format (simple, json, yaml or pprint)."""
    help = "Print the active Django settings."
    option_list = NoArgsCommand.option_list + (
        make_option('--format', default='simple', dest='format',
                    help='Specifies output format.'),
        make_option('--indent', default=4, dest='indent', type='int',
                    help='Specifies indent level for JSON and YAML'),
    )

    def handle_noargs(self, **options):
        """Collect all non-dunder settings and print them in the requested
        format."""
        a_dict = {}
        for attr in dir(settings):
            if self.include_attr(attr):
                a_dict[attr] = getattr(settings, attr)
        # NOTE(review): the fallback 'json' disagrees with the option's
        # declared default 'simple'; optparse always supplies the declared
        # default, so the fallback is effectively unreachable.
        output_format = options.get('format', 'json')
        indent = options.get('indent', 4)
        if output_format == 'json':
            json = self.import_json()
            # print(...) call form works on both Python 2 and 3.
            print(json.dumps(a_dict, indent=indent))
        elif output_format == 'yaml':
            import yaml  # requires PyYAML
            print(yaml.dump(a_dict, indent=indent))
        elif output_format == 'pprint':
            from pprint import pprint
            pprint(a_dict)
        else:
            self.print_simple(a_dict)

    @staticmethod
    def include_attr(attr):
        """Whether or not to include attribute in output (dunder/private
        ``__``-prefixed names are excluded)."""
        return not attr.startswith('__')

    @staticmethod
    def print_simple(a_dict):
        """A very simple 'KEY = value' line-per-setting output format."""
        for key, value in a_dict.items():
            print('%-40s = %r' % (key, value))

    @staticmethod
    def import_json():
        """Import and return a JSON module: stdlib ``json``, falling back
        to ``simplejson`` on very old Pythons.

        Bug fix: the previous version only returned the module from the
        try/except ``else`` clause, so the simplejson fallback path
        returned None.
        """
        try:
            import json
        except ImportError:
            import simplejson as json
        return json
|
PolicyStat/GitZen | refs/heads/master | gitzen/enhancement_tracking/views.py | 2 | from datetime import datetime, timedelta
from time import mktime
from itertools import chain
from requests.exceptions import RequestException
from requests_oauth2 import OAuth2
from django.conf import settings
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth.forms import (
AuthenticationForm,
PasswordChangeForm,
SetPasswordForm
)
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from gitzen.enhancement_tracking.cache_actions import (
build_cache_index,
update_cache_index
)
from gitzen.enhancement_tracking.forms import (
NewUserForm,
NewGroupSuperuserForm,
NewAPIAccessDataForm,
ChangeAPIAccessDataForm,
UserProfileForm,
ActiveUserSelectionForm,
InactiveUserSelectionForm
)
from gitzen.enhancement_tracking.models import UserProfile
# Constant OAuth handler and authorization URL for access to GitHub's OAuth.
OAUTH2_HANDLER = OAuth2(settings.CLIENT_ID, settings.CLIENT_SECRET, site='https://github.com/',
                        redirect_uri='%s/confirm_git_oauth' % settings.ABSOLUTE_SITE_URL,
                        authorization_url='login/oauth/authorize',
                        token_url='login/oauth/access_token')
# NOTE(review): 'repo' is the OAuth scope requested from GitHub -- confirm it
# is still the minimal scope the app needs.
GIT_AUTH_URL = OAUTH2_HANDLER.authorize_url('repo')

# Email message that is sent to new users after a group superuser has created a
# user account for them in their group. The message prompts the user to change
# the random password that was assigned to their account upon creation.
# The template is %-interpolated with a dict providing product_name, username,
# password and absolute_site_url.
# NOTE(review): "enhancments" and "bellow" are user-visible typos; they are
# preserved here because changing message text is a deliberate product change.
NEW_USER_EMAIL_MESSAGE = \
    "A user account has been created for you on GitZen for the product " \
    "%(product_name)s. This account will allow you to track the progress of " \
    "enhancments for this product as they move through different stages in " \
    "GitHub and Zendesk.\n\n" \
    "The username and password for your account are listed bellow. The " \
    "password was automatically generated during your account's creation, " \
    "so it is recommended that you change your password on the Change " \
    "Account Settings page after logging into GitZen for the first time.\n\n" \
    "Username: %(username)s\n" \
    "Password: %(password)s\n\n" \
    "You can now log into GitZen with this account at %(absolute_site_url)s " \
    "and start tracking enhancements for %(product_name)s!"
def user_login_form_handler(request):
    """Show the login page and log in users whose credentials validate.

    Parameters:
        request - The request object carrying the POST data submitted from
                  the login form, if any.
    """
    auth_form = AuthenticationForm(data=request.POST) \
        if request.method == 'POST' else AuthenticationForm()
    if auth_form.is_bound and auth_form.is_valid():
        login(request, auth_form.get_user())
        return HttpResponseRedirect(reverse('home'))
    return render_to_response('login.html', {'log_form': auth_form},
                              context_instance=RequestContext(request))
def group_creation_form_handler(request):
    """Handle the User Group Creation page.

    On GET the three creation forms are rendered unbound; on POST they are
    bound to the submitted data and, when all validate, the group
    superuser, their profile and the group's API access record are created
    and the new superuser is logged in.

    Parameters:
        request - The request object that contains the form data submitted
                  from the User Group Creation page.
    """
    post_data = request.POST if request.method == 'POST' else None
    group_superuser_form = NewGroupSuperuserForm(data=post_data)
    user_profile_form = UserProfileForm(data=post_data)
    api_access_data_form = NewAPIAccessDataForm(data=post_data)
    if post_data is not None and group_superuser_form.is_valid() \
            and user_profile_form.is_valid() \
            and api_access_data_form.is_valid():
        group_superuser = group_superuser_form.save()
        api_access_data = api_access_data_form.save()
        superuser_profile = user_profile_form.save(commit=False)
        superuser_profile.user = group_superuser
        superuser_profile.api_access_data = api_access_data
        superuser_profile.is_group_superuser = True
        superuser_profile.save()
        # Log the fresh superuser in so the following pages can attach a
        # GitHub OAuth access token to the group's API access record.
        user = authenticate(
            username=group_superuser_form.cleaned_data['username'],
            password=group_superuser_form.cleaned_data['password1']
        )
        login(request, user)
        return HttpResponseRedirect(reverse('confirm_group_creation'))
    return render_to_response('group_creation.html',
                              {'group_superuser_form': group_superuser_form,
                               'user_profile_form': user_profile_form,
                               'api_access_data_form': api_access_data_form},
                              context_instance=RequestContext(request))
@login_required
def change_form_handler(request):
    """Process the Change Account Data page (password change form and
    profile change form).

    Parameters:
        request - The request object that contains the POST data from one
                  of the change forms.

    Bug fix: the invalid-password branch previously instantiated
    ``SecuredProfileChangeForm``, a name that is not imported anywhere in
    this module and would have raised NameError at runtime; it now builds
    the ``UserProfileForm`` used everywhere else in this view.
    """
    profile = request.user.get_profile()
    if request.POST:
        # Process password change form
        if 'password_input' in request.POST:
            password_change_form = PasswordChangeForm(user=request.user,
                                                      data=request.POST)
            if password_change_form.is_valid():
                password_change_form.save()
                return HttpResponseRedirect(reverse('confirm_changes'))
            profile_change_form = UserProfileForm(instance=profile)
        # Process profile change form
        elif 'profile_input' in request.POST:
            profile_change_form = UserProfileForm(data=request.POST,
                                                  instance=profile)
            if profile_change_form.is_valid():
                profile_change_form.save()
                return HttpResponseRedirect(reverse('confirm_changes'))
            password_change_form = PasswordChangeForm(user=request.user)
        else:
            return HttpResponseRedirect(reverse('change_account_settings'))
    else:
        password_change_form = PasswordChangeForm(user=request.user)
        profile_change_form = UserProfileForm(instance=profile)
    return render_to_response('change_account_settings.html',
                              {'password_change_form': password_change_form,
                               'profile_change_form': profile_change_form},
                              context_instance=RequestContext(request))
@login_required
@user_passes_test(lambda user: user.get_profile().is_group_superuser)
def superuser_change_form_handler(request, user_id):
    """Process the group superuser's Change Account Settings page for the
    user selected on the superuser home page (profile change form and set
    password form).

    Parameters:
        request - The request object that contains the POST data from the
                  forms.
        user_id - The ID number of the user that should be represented and
                  modified by the change forms.

    Bug fix: the invalid-password branch previously instantiated
    ``FullProfileChangeForm``, which is not imported in this module and
    would have raised NameError at runtime; it now rebuilds the
    ``UserProfileForm`` used everywhere else in this view.
    """
    changing_user = User.objects.get(id=user_id)
    changing_profile = changing_user.get_profile()
    if request.POST:
        # Process profile change form
        if 'profile_input' in request.POST:
            profile_change_form = UserProfileForm(data=request.POST,
                                                  instance=changing_profile)
            if profile_change_form.is_valid():
                profile_change_form.save()
                return HttpResponseRedirect(
                    reverse('confirm_superuser_changes',
                            kwargs={'user_id': user_id})
                )
            set_password_form = SetPasswordForm(user=changing_user)
        # Process password change form
        elif 'password_input' in request.POST:
            set_password_form = SetPasswordForm(user=changing_user,
                                                data=request.POST)
            if set_password_form.is_valid():
                set_password_form.save()
                return HttpResponseRedirect(
                    reverse('confirm_superuser_changes',
                            kwargs={'user_id': user_id})
                )
            profile_change_form = UserProfileForm(instance=changing_profile)
        else:
            return HttpResponseRedirect(
                reverse('superuser_change_account_settings',
                        kwargs={'user_id': user_id})
            )
    else:
        set_password_form = SetPasswordForm(user=changing_user)
        profile_change_form = UserProfileForm(instance=changing_profile)
    return render_to_response('superuser_change_account_settings.html',
                              {'username': changing_user.username,
                               'set_password_form': set_password_form,
                               'profile_change_form': profile_change_form,
                               'auth_url': GIT_AUTH_URL},
                              context_instance=RequestContext(request))
@login_required
def user_logout(request):
    """End the current user's session and send them back to the login page.

    Parameters:
        request - The request whose authenticated session is terminated.
    """
    logout(request)
    return HttpResponseRedirect(reverse('login'))
@login_required
def confirm_changes(request):
    """Show the page confirming the current user's account-settings changes.

    Parameters:
        request - The request issued after the changes were applied.
    """
    return render_to_response('confirm_changes.html',
                              context_instance=RequestContext(request))
@login_required
@user_passes_test(lambda user: user.get_profile().is_group_superuser)
def confirm_superuser_changes(request, user_id):
    """Confirm the changes a group superuser made to another user's account.

    Parameters:
        request - The request issued after the changes were applied.
        user_id - The ID of the user that was just modified.
    """
    modified_user = User.objects.get(id=user_id)
    return render_to_response('confirm_superuser_changes.html',
                              {'username': modified_user.username},
                              context_instance=RequestContext(request))
@login_required
@user_passes_test(lambda user: user.get_profile().is_group_superuser)
def confirm_group_creation(request):
    """Confirm that a new user group was created successfully and offer the
    GitHub OAuth link so the group can authorize repository access.

    Parameters:
        request - The request issued after the group and its superuser were
                  created on the group creation page.
    """
    return render_to_response('confirm_group_creation.html',
                              {'auth_url': GIT_AUTH_URL},
                              context_instance=RequestContext(request))
@login_required
@user_passes_test(lambda user: user.get_profile().is_group_superuser)
def confirm_git_oauth(request):
    """Finish the GitHub OAuth2 web flow and store the access token.

    GitHub redirects here after the superuser visits GIT_AUTH_URL. When a
    ``code`` query parameter is present it is exchanged for an access token
    that is saved on the group's API access record; otherwise the stored
    token is cleared.

    Parameters:
        request - The request whose GET parameters may carry the ``code``
                  returned by GitHub.
    """
    api_access_data = request.user.get_profile().api_access_data
    access_granted = 'code' in request.GET
    if access_granted:
        token_response = OAUTH2_HANDLER.get_token(request.GET['code'])
        api_access_data.git_token = token_response['access_token'][0]
    else:
        api_access_data.git_token = ''
    api_access_data.save()
    return render_to_response('confirm_git_oauth.html',
                              {'access_granted': access_granted,
                               'product_name': api_access_data.product_name},
                              context_instance=RequestContext(request))
@login_required
@user_passes_test(lambda user: user.get_profile().is_group_superuser)
def confirm_cache_building(request, is_reset):
    """Build and index the cache for the superuser's group and render a
    page reporting whether the caching succeeded.

    Parameters:
        request  - The request object with a group superuser logged in.
        is_reset - Flag passed through to the template; distinguishes a
                   cache reset from an initial build.
    """
    api_access_data = request.user.get_profile().api_access_data
    context = {
        'is_reset': is_reset,
        'product_name': api_access_data.product_name
    }
    try:
        build_cache_index(api_access_data)
    except RequestException as e:
        context['caching_successful'] = False
        context['error_message'] = "There was an error connecting to the " \
            "%(API_name)s API: %(exception_message)s. Try adjusting the" \
            " group's API access settings." % \
            {'API_name': e.args[1], 'exception_message': e.args[0]}
    else:
        context['caching_successful'] = True
    return render_to_response('confirm_cache_building.html', context,
                              context_instance=RequestContext(request))
@login_required
@user_passes_test(lambda user: user.get_profile().is_group_superuser)
def confirm_user_creation(request, user_id):
    """Confirm that a new user account was created successfully.

    Parameters:
        request - The request issued after the user was created.
        user_id - The ID of the user that was just created.
    """
    created_user = User.objects.get(id=user_id)
    created_product = created_user.get_profile().api_access_data.product_name
    return render_to_response('confirm_user_creation.html',
                              {'username': created_user.username,
                               'product_name': created_product},
                              context_instance=RequestContext(request))
@login_required
@user_passes_test(lambda user: user.get_profile().is_group_superuser)
def confirm_user_deactivation(request, user_id):
    """Confirm that the superuser successfully deactivated a user.

    Parameters:
        request - The request issued after the deactivation.
        user_id - The ID of the user that was just deactivated.
    """
    deactivated_user = User.objects.get(id=user_id)
    return render_to_response(
        'confirm_user_deactivation.html',
        {'deactivated_username': deactivated_user.username},
        context_instance=RequestContext(request))
@login_required
@user_passes_test(lambda user: user.get_profile().is_group_superuser)
def confirm_user_activation(request, user_id):
    """Confirm that the superuser successfully reactivated a previously
    deactivated user.

    Parameters:
        request - The request issued after the activation.
        user_id - The ID of the user that was just activated.
    """
    activated_user = User.objects.get(id=user_id)
    return render_to_response('confirm_user_activation.html',
                              {'activated_username': activated_user.username},
                              context_instance=RequestContext(request))
@login_required
@user_passes_test(lambda user: user.get_profile().is_group_superuser)
def confirm_api_access_changes(request):
    """Confirm that the API access settings for the superuser's group were
    changed successfully.

    Parameters:
        request - The request issued after the settings were saved.
    """
    group_product = request.user.get_profile().api_access_data.product_name
    return render_to_response('confirm_api_access_changes.html',
                              {'product_name': group_product},
                              context_instance=RequestContext(request))
@login_required
def home(request):
    """Gather the enhancement tracking data for the current user's group and
    render the app home page with it. On an API error, render the page with
    an error message instead.

    Parameters:
        request - The request object that contains the current user's data.

    Improvement: ``dict(context.items() + enhancement_tables.items())`` was
    Python-2-only (dict views cannot be added in Python 3) and rebuilt the
    whole dict; ``context.update()`` is equivalent and works everywhere.
    """
    profile = request.user.get_profile()  # Current user's profile
    api_access_data = profile.api_access_data
    product_name = api_access_data.product_name
    context = {'is_group_superuser': profile.is_group_superuser}
    try:
        update_cache_index(api_access_data)
    except RequestException as e:
        context['api_requests_successful'] = False
        context['error_message'] = 'There was an error connecting to ' \
            'the %(API_name)s API: %(exception_message)s. If the error ' \
            'persists after refreshing the page, inform the superuser ' \
            'for %(product_name)s that the API access settings may need ' \
            'adjustment.' % {'API_name': e.args[1],
                             'exception_message': e.args[0],
                             'product_name': product_name}
        return render_to_response('home.html', context,
                                  context_instance=RequestContext(request))
    # Account for the time zone offset and get the enhancement data
    cache_data = cache.get(api_access_data.id)
    enhancement_tables = _time_adjust_enhancement_data(cache_data,
                                                       profile.utc_offset)
    context.update(enhancement_tables)
    # Add additional data to be used in the context of the home page
    context['api_requests_successful'] = True
    context['product_name'] = product_name
    context['zen_url'] = api_access_data.zen_url
    context['is_zendesk_user'] = (profile.view_type == 'ZEN')
    context['is_github_user'] = not context['is_zendesk_user']
    return render_to_response('home.html', context,
                              context_instance=RequestContext(request))
def _time_adjust_enhancement_data(cache_data, utc_offset):
"""Adjusts the enhancement data from the cache so that all of the dates and
times are in the passed UTC time zone.
Parameters:
cache_data - A dictionary of enhancement data stored under a group index
in the cache.
utc_offset - The numeric UTC offset for the time zone that the
enhancement data should be converted to.
Returns the four enhancement tables (need_attention, tracking,
unassociated_enhancements, and not_git_enhancements) adjusted to the passed
time zone in a dictionary with the keys being the tables' names.
"""
offset_delta = timedelta(hours=utc_offset)
for enhancement in chain(cache_data['need_attention'],
cache_data['tracking']):
zen_datetime = enhancement['zen_datetime'] + offset_delta
enhancement['zen_date'] = zen_datetime.strftime('%m/%d/%Y')
enhancement['zen_time'] = zen_datetime.strftime('%I:%M %p')
enhancement['zen_sortable_datetime'] = \
mktime(zen_datetime.timetuple())
git_datetime = enhancement['git_datetime'] + offset_delta
enhancement['git_date'] = git_datetime.strftime('%m/%d/%Y')
enhancement['git_time'] = git_datetime.strftime('%I:%M %p')
enhancement['git_sortable_datetime'] = \
mktime(git_datetime.timetuple())
for enhancement in chain(cache_data['unassociated_enhancements'],
cache_data['not_git_enhancements']):
zen_datetime = enhancement['zen_datetime'] + offset_delta
enhancement['zen_date'] = zen_datetime.strftime('%m/%d/%Y')
enhancement['zen_time'] = zen_datetime.strftime('%I:%M %p')
enhancement['zen_sortable_datetime'] = \
mktime(zen_datetime.timetuple())
enhancement_tables = {
'need_attention': cache_data['need_attention'],
'tracking': cache_data['tracking'],
'unassociated_enhancements': cache_data['unassociated_enhancements'],
'not_git_enhancements': cache_data['not_git_enhancements']
}
return enhancement_tables
@login_required
@user_passes_test(lambda user: user.get_profile().is_group_superuser)
def group_superuser_home(request):
    """Process the forms on the group superuser home page: creating a new
    user, selecting a user to modify, deactivating or reactivating a user,
    and changing the group's API access settings.

    Parameters:
        request - The request object that contains the group superuser data
                  and the POST data from the various forms.

    Refactor: every POST branch previously re-instantiated the five other
    (unbound) forms by hand. The unbound defaults are now built once and a
    branch only replaces the form(s) it bound to the POST data, so invalid
    submissions still re-render with their errors while the rest stay fresh.
    """
    api_access_data = request.user.get_profile().api_access_data
    product_name = api_access_data.product_name
    # Unbound defaults; a POST branch overrides only the form(s) it binds.
    forms = {
        'new_user_form': NewUserForm(),
        'user_profile_form': UserProfileForm(),
        'user_select_form': ActiveUserSelectionForm(api_access_data),
        'user_deactivate_form': ActiveUserSelectionForm(api_access_data),
        'user_activate_form': InactiveUserSelectionForm(api_access_data),
        'api_access_change_form':
            ChangeAPIAccessDataForm(instance=api_access_data),
    }
    if request.POST:
        # Process the new user form for getting the information needed to
        # create a new user and add them to the group
        if 'user_creation_input' in request.POST:
            new_user_form = NewUserForm(data=request.POST)
            user_profile_form = UserProfileForm(data=request.POST)
            forms['new_user_form'] = new_user_form
            forms['user_profile_form'] = user_profile_form
            if new_user_form.is_valid() and user_profile_form.is_valid():
                password = User.objects.make_random_password()
                user = User.objects.create_user(
                    new_user_form.cleaned_data['username'],
                    new_user_form.cleaned_data['email'],
                    password
                )
                user_profile = user_profile_form.save(commit=False)
                user_profile.user = user
                user_profile.api_access_data = api_access_data
                user_profile.save()
                # Email the new user to let them know an account has been
                # created for them in this group and to tell them to change
                # their temporary random password.
                user.email_user(
                    'New GitZen Account',
                    NEW_USER_EMAIL_MESSAGE % {'product_name': product_name,
                        'username': user.username,
                        'password': password,
                        'absolute_site_url': settings.ABSOLUTE_SITE_URL}
                )
                return HttpResponseRedirect(
                    reverse('confirm_user_creation',
                            kwargs={'user_id': user.id})
                )
        # Process the user selection form for selecting a user to modify
        elif 'user_select_input' in request.POST:
            user_select_form = ActiveUserSelectionForm(api_access_data,
                                                       data=request.POST)
            forms['user_select_form'] = user_select_form
            if user_select_form.is_valid():
                user = user_select_form.cleaned_data['profile'].user
                return HttpResponseRedirect(
                    reverse('superuser_change_account_settings',
                            kwargs={'user_id': user.id})
                )
        # Process the user selection form for deactivating a user
        elif 'user_deactivate_input' in request.POST:
            user_deactivate_form = ActiveUserSelectionForm(api_access_data,
                                                           data=request.POST)
            forms['user_deactivate_form'] = user_deactivate_form
            if user_deactivate_form.is_valid():
                user = user_deactivate_form.cleaned_data['profile'].user
                user.is_active = False
                user.save()
                return HttpResponseRedirect(
                    reverse('confirm_user_deactivation',
                            kwargs={'user_id': user.id})
                )
        # Process the user selection form for activating a user
        elif 'user_activate_input' in request.POST:
            user_activate_form = InactiveUserSelectionForm(api_access_data,
                                                           data=request.POST)
            forms['user_activate_form'] = user_activate_form
            if user_activate_form.is_valid():
                user = user_activate_form.cleaned_data['profile'].user
                user.is_active = True
                user.save()
                return HttpResponseRedirect(
                    reverse('confirm_user_activation',
                            kwargs={'user_id': user.id})
                )
        # Process the API access data form for changing the API access data
        # for the group.
        elif 'api_access_change_input' in request.POST:
            api_access_change_form = ChangeAPIAccessDataForm(
                data=request.POST, instance=api_access_data)
            forms['api_access_change_form'] = api_access_change_form
            if api_access_change_form.is_valid():
                api_access_change_form.save()
                return HttpResponseRedirect(
                    reverse('confirm_api_access_changes')
                )
        else:
            return HttpResponseRedirect(reverse('home'))
    context = dict(forms)
    context['product_name'] = product_name
    context['auth_url'] = GIT_AUTH_URL
    return render_to_response('superuser_home.html', context,
                              context_instance=RequestContext(request))
|
slohse/ansible | refs/heads/devel | lib/ansible/modules/cloud/openstack/os_group.py | 34 | #!/usr/bin/python
# Copyright (c) 2016 IBM
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_group
short_description: Manage OpenStack Identity Groups
extends_documentation_fragment: openstack
version_added: "2.1"
author: "Monty Taylor (@emonty), David Shrewsbury (@Shrews)"
description:
- Manage OpenStack Identity Groups. Groups can be created, deleted or
updated. Only the I(description) value can be updated.
options:
name:
description:
- Group name
required: true
description:
description:
- Group description
domain_id:
description:
- Domain id to create the group in if the cloud supports domains.
version_added: "2.3"
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
requirements:
- "python >= 2.7"
- "openstacksdk"
'''
EXAMPLES = '''
# Create a group named "demo"
- os_group:
cloud: mycloud
state: present
name: demo
description: "Demo Group"
domain_id: demoid
# Update the description on existing "demo" group
- os_group:
cloud: mycloud
state: present
name: demo
description: "Something else"
domain_id: demoid
# Delete group named "demo"
- os_group:
cloud: mycloud
state: absent
name: demo
'''
RETURN = '''
group:
description: Dictionary describing the group.
returned: On success when I(state) is 'present'.
type: complex
contains:
id:
description: Unique group ID
type: string
sample: "ee6156ff04c645f481a6738311aea0b0"
name:
description: Group name
type: string
sample: "demo"
description:
description: Group description
type: string
sample: "Demo Group"
domain_id:
description: Domain for the group
type: string
sample: "default"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _system_state_change(state, description, group):
if state == 'present' and not group:
return True
if state == 'present' and description is not None and group.description != description:
return True
if state == 'absent' and group:
return True
return False
def main():
    """Ansible entry point: ensure an OpenStack identity group is present
    (creating it or updating its description) or absent.

    Exits via module.exit_json / module.fail_json; never returns normally.
    """
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        description=dict(required=False, default=None),
        domain_id=dict(required=False, default=None),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)
    name = module.params.get('name')
    description = module.params.get('description')
    state = module.params.get('state')
    # pop() so domain_id is not forwarded as a generic cloud argument.
    domain_id = module.params.pop('domain_id')
    sdk, cloud = openstack_cloud_from_module(module)
    try:
        # Scope the lookup to the domain when one was supplied.
        if domain_id:
            group = cloud.get_group(name, filters={'domain_id': domain_id})
        else:
            group = cloud.get_group(name)
        # In check mode, only report whether anything would change.
        if module.check_mode:
            module.exit_json(changed=_system_state_change(state, description, group))
        if state == 'present':
            if group is None:
                group = cloud.create_group(
                    name=name, description=description, domain=domain_id)
                changed = True
            else:
                # Only the description can be updated in place.
                if description is not None and group.description != description:
                    group = cloud.update_group(
                        group.id, description=description)
                    changed = True
                else:
                    changed = False
            module.exit_json(changed=changed, group=group)
        elif state == 'absent':
            if group is None:
                changed = False
            else:
                cloud.delete_group(group.id)
                changed = True
            module.exit_json(changed=changed)
    except sdk.exceptions.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
# Run the module when invoked directly by Ansible.
if __name__ == '__main__':
    main()
|
gfcapalbo/website | refs/heads/8.0 | website_sale_survey/models/sale.py | 5 | # -*- coding: utf-8 -*-
# Copyright 2016 Jamotion GmbH
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# Created by angel.moya on 01.09.2016.
#
from openerp import models, api, fields
class SaleOrder(models.Model):
    """Expose the default website-sales survey on every sale order."""

    _inherit = 'sale.order'

    survey_id = fields.Many2one(
        comodel_name='survey.survey',
        string='Survey',
        compute='_compute_survey_id')

    @api.multi
    def _compute_survey_id(self):
        """Point each order at the first survey flagged as the default for
        website sales, or False when none is configured."""
        candidates = self.env['survey.survey'].search(
            [('default_for_website_sales', '=', True)])
        default_survey = candidates[0] if candidates else False
        for order in self:
            order.survey_id = default_survey
|
stephane-/project_logical_framework | refs/heads/master | project_logical_framework.py | 1 | from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from lxml import etree as ET
class project_logical_framework_project(osv.Model):
    # Extend projects with the list of logical-framework rows attached to
    # them (inverse of logical_framework.project_id).
    _inherit = 'project.project'
    _columns = {
        'logical_framework': fields.one2many(
            'project_logical_framework.logical_framework',
            'project_id',
            'Logical Framework'),
    }
class project_logical_framework_logical_framework(osv.Model):
_name = 'project_logical_framework.logical_framework'
_order = "type"
def _logic_title(self, cr, uid, ids, field_name, arg, context):
res = {}
record = self.browse(cr, uid, ids, context=context)
for data in record:
res_str = dict(
self.pool.get('project_logical_framework.logical_framework').
fields_get(cr, uid, allfields=['type'], context=context)
['type']['selection'])[data.type]
res_str += "\n" + str(data.logic)
res[data.id] = res_str
return res
_columns = {
'project_id' : fields.many2one(
'project.project',
'logical_framework',
'Project'),
'type': fields.selection((
('1','Global Objectives:'),
('2','Specific Objectives:'),
('3','Results:'),
('4','Activities:')),
'Type', required="true"),
'logic': fields.text('Logic'),
'logic_title': fields.function(_logic_title, type="text"),
'intervention': fields.text('Intervention'),
'indicators': fields.text('Indicators'),
'verification': fields.text('Verification source'),
'hypothesis': fields.text('Hypothesis'),
} |
papouso/odoo | refs/heads/8.0 | addons/purchase_requisition/__openerp__.py | 260 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Purchase Requisitions',
'version': '0.1',
'author': 'OpenERP SA',
'category': 'Purchase Management',
'website': 'https://www.odoo.com/page/purchase',
'description': """
This module allows you to manage your Purchase Requisition.
===========================================================
When a purchase order is created, you now have the opportunity to save the
related requisition. This new object will regroup and will allow you to easily
keep track and order all your purchase orders.
""",
'depends' : ['purchase'],
'demo': ['purchase_requisition_demo.xml'],
'data': ['views/purchase_requisition.xml',
'security/purchase_tender.xml',
'wizard/purchase_requisition_partner_view.xml',
'wizard/bid_line_qty_view.xml',
'purchase_requisition_data.xml',
'purchase_requisition_view.xml',
'purchase_requisition_report.xml',
'purchase_requisition_workflow.xml',
'security/ir.model.access.csv','purchase_requisition_sequence.xml',
'views/report_purchaserequisition.xml',
],
'auto_install': False,
'test': [
'test/purchase_requisition_users.yml',
'test/purchase_requisition_demo.yml',
'test/cancel_purchase_requisition.yml',
'test/purchase_requisition.yml',
],
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
pli3/enigma2-git | refs/heads/master | lib/python/Screens/SoftwareUpdate.py | 3 | from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from Screens.Screen import Screen
from Screens.Standby import TryQuitMainloop
from Components.ActionMap import ActionMap, NumberActionMap
from Components.Ipkg import IpkgComponent
from Components.Sources.StaticText import StaticText
from Components.Slider import Slider
from enigma import eTimer, getBoxType, eDVBDB
from urllib import urlopen
import socket
import os
import re
import time
class UpdatePlugin(Screen):
	"""Screen that runs an ipkg/opkg software update with progress UI.

	Flow: check the openpli.org status "traffic light", refresh the feed
	(CMD_UPDATE), list upgradable packages, then either upgrade everything
	or only the channel-list package, driven by IpkgComponent callbacks.
	"""
	skin = """
		<screen name="UpdatePlugin" position="center,center" size="550,300">
			<widget name="activityslider" position="0,0" size="550,5" />
			<widget name="slider" position="0,150" size="550,30" />
			<widget source="package" render="Label" position="10,30" size="540,20" font="Regular;18" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
			<widget source="status" render="Label" position="10,180" size="540,100" font="Regular;20" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
		</screen>"""
	def __init__(self, session, *args):
		Screen.__init__(self, session)
		# Packages whose upgrade maps to a fixed progress-slider position.
		self.sliderPackages = { "dreambox-dvb-modules": 1, "enigma2": 2, "tuxbox-image-info": 3 }
		self.setTitle(_("Software update"))
		self.slider = Slider(0, 4)
		self["slider"] = self.slider
		self.activityslider = Slider(0, 100)
		self["activityslider"] = self.activityslider
		self.status = StaticText(_("Please wait..."))
		self["status"] = self.status
		self.package = StaticText(_("Package list update"))
		self["package"] = self.package
		self.oktext = _("Press OK on your remote control to continue.")
		# Counters/state for the current ipkg run.
		self.packages = 0
		self.error = 0
		self.processed_packages = []
		self.total_packages = None
		# 0 = full update; 1..4 = stages of a channel-list-only update.
		self.channellist_only = 0
		self.channellist_name = ''
		self.updating = False
		self.ipkg = IpkgComponent()
		self.ipkg.addCallback(self.ipkgCallback)
		self.onClose.append(self.__close)
		self["actions"] = ActionMap(["WizardActions"],
		{
			"ok": self.exit,
			"back": self.exit
		}, -1)
		# The timer first runs the one-shot traffic-light check, then keeps
		# animating the activity slider while ipkg works.
		self.activity = 0
		self.activityTimer = eTimer()
		self.activityTimer.callback.append(self.checkTraficLight)
		self.activityTimer.callback.append(self.doActivityTimer)
		self.activityTimer.start(100, True)
	def checkTraficLight(self):
		"""One-shot: warn the user when the current image is flagged as
		unstable on openpli.org before starting the actual update."""
		self.activityTimer.callback.remove(self.checkTraficLight)
		self.activityTimer.start(100, False)
		# Use a short socket timeout so an unreachable server cannot hang
		# the UI; the previous default is restored afterwards.
		currentTimeoutDefault = socket.getdefaulttimeout()
		socket.setdefaulttimeout(3)
		message = ""
		picon = None
		default = True
		try:
			# TODO: Use Twisted's URL fetcher, urlopen is evil. And it can
			# run in parallel to the package update.
			if getBoxType() in urlopen("http://openpli.org/status").read().split(','):
				message = _("The current beta image might not be stable.\nFor more information see %s.") % ("www.openpli.org")
				picon = MessageBox.TYPE_ERROR
				default = False
		except:
			message = _("The status of the current beta image could not be checked because %s can not be reached.") % ("www.openpli.org")
			picon = MessageBox.TYPE_ERROR
			default = False
		socket.setdefaulttimeout(currentTimeoutDefault)
		if default:
			self.startActualUpdate(True)
		else:
			message += "\n" + _("Do you want to update your receiver?")
			self.session.openWithCallback(self.startActualUpdate, MessageBox, message, default = default, picon = picon)
	def getLatestImageTimestamp(self):
		"""Return the build timestamp of the newest image for this box,
		scraped from openpli.org, or '' when it cannot be determined."""
		currentTimeoutDefault = socket.getdefaulttimeout()
		socket.setdefaulttimeout(3)
		latestImageTimestamp = ""
		try:
			# TODO: Use Twisted's URL fetcher, urlopen is evil. And it can
			# run in parallel to the package update.
			latestImageTimestamp = re.findall('<dd>(.*?)</dd>', urlopen("http://openpli.org/download/"+getBoxType()+"/").read())[0][:16]
			latestImageTimestamp = time.strftime(_("%d-%b-%Y %-H:%M"), time.strptime(latestImageTimestamp, "%Y/%m/%d %H:%M"))
		except:
			pass
		socket.setdefaulttimeout(currentTimeoutDefault)
		return latestImageTimestamp
	def startActualUpdate(self,answer):
		# Kick off the feed refresh, or bail out if the user declined.
		if answer:
			self.updating = True
			self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
		else:
			self.close()
	def doActivityTimer(self):
		# Advance the looping activity slider (0..99).
		self.activity += 1
		if self.activity == 100:
			self.activity = 0
		self.activityslider.setValue(self.activity)
	def showUpdateCompletedMessage(self):
		self.setEndMessage(ngettext("Update completed, %d package was installed.", "Update completed, %d packages were installed.", self.packages) % self.packages)
	def ipkgCallback(self, event, param):
		"""Dispatch IpkgComponent events: update the UI, count processed
		packages and drive the update state machine on EVENT_DONE."""
		if event == IpkgComponent.EVENT_DOWNLOAD:
			self.status.setText(_("Downloading"))
		elif event == IpkgComponent.EVENT_UPGRADE:
			if self.sliderPackages.has_key(param):
				self.slider.setValue(self.sliderPackages[param])
			self.package.setText(param)
			self.status.setText(_("Upgrading") + ": %s/%s" % (self.packages, self.total_packages))
			if not param in self.processed_packages:
				self.processed_packages.append(param)
				self.packages += 1
		elif event == IpkgComponent.EVENT_INSTALL:
			self.package.setText(param)
			self.status.setText(_("Installing"))
			if not param in self.processed_packages:
				self.processed_packages.append(param)
				self.packages += 1
		elif event == IpkgComponent.EVENT_REMOVE:
			self.package.setText(param)
			self.status.setText(_("Removing"))
			if not param in self.processed_packages:
				self.processed_packages.append(param)
				self.packages += 1
		elif event == IpkgComponent.EVENT_CONFIGURING:
			self.package.setText(param)
			self.status.setText(_("Configuring"))
		elif event == IpkgComponent.EVENT_MODIFIED:
			# NOTE(review): 'config' is not imported in this module; this
			# branch looks like it would raise NameError when a modified
			# config file is reported -- confirm and add
			# 'from Components.config import config' at the top of the file.
			if config.plugins.softwaremanager.overwriteConfigFiles.value in ("N", "Y"):
				self.ipkg.write(True and config.plugins.softwaremanager.overwriteConfigFiles.value)
			else:
				self.session.openWithCallback(
					self.modificationCallback,
					MessageBox,
					_("A configuration file (%s) has been modified since it was installed.\nDo you want to keep your modifications?") % (param)
				)
		elif event == IpkgComponent.EVENT_ERROR:
			self.error += 1
		elif event == IpkgComponent.EVENT_DONE:
			if self.updating:
				# Feed refresh finished; now fetch the upgradable list.
				self.updating = False
				self.ipkg.startCmd(IpkgComponent.CMD_UPGRADE_LIST)
			elif self.ipkg.currentCommand == IpkgComponent.CMD_UPGRADE_LIST:
				self.total_packages = len(self.ipkg.getFetchedList())
				if self.total_packages:
					latestImageTimestamp = self.getLatestImageTimestamp()
					if latestImageTimestamp:
						message = _("Do you want to update your receiver to %s?") % self.getLatestImageTimestamp() + "\n"
					else:
						message = _("Do you want to update your receiver?") + "\n"
					message = message + "(" + (ngettext("%s updated package available", "%s updated packages available", self.total_packages) % self.total_packages) + ")"
					choices = [(_("Update and reboot (recommended)"), "cold"),
						(_("Update and ask to reboot"), "hot"),
						(_("Update channel list only"), "channels"),
						(_("Cancel"), "")]
					self.session.openWithCallback(self.startActualUpgrade, ChoiceBox, title=message, list=choices)
				else:
					self.session.openWithCallback(self.close, MessageBox, _("No updates available"), type=MessageBox.TYPE_INFO, timeout=10, close_on_any_key=True)
			elif self.channellist_only > 0:
				# Channel-list-only staging: 1 = package not found, 2 =
				# remove it, 3 = reinstall it, 4 = reload service lists.
				if self.channellist_only == 1:
					self.setEndMessage(_("Could not find installed channel list."))
				elif self.channellist_only == 2:
					self.slider.setValue(2)
					self.ipkg.startCmd(IpkgComponent.CMD_REMOVE, {'package': self.channellist_name})
					self.channellist_only += 1
				elif self.channellist_only == 3:
					self.slider.setValue(3)
					self.ipkg.startCmd(IpkgComponent.CMD_INSTALL, {'package': self.channellist_name})
					self.channellist_only += 1
				elif self.channellist_only == 4:
					self.showUpdateCompletedMessage()
					eDVBDB.getInstance().reloadBouquets()
					eDVBDB.getInstance().reloadServicelist()
			elif self.error == 0:
				self.showUpdateCompletedMessage()
			else:
				self.activityTimer.stop()
				self.activityslider.setValue(0)
				error = _("Your receiver might be unusable now. Please consult the manual for further assistance before rebooting your receiver.")
				if self.packages == 0:
					error = _("No updates available. Please try again later.")
				if self.updating:
					error = _("Update failed. Your receiver does not have a working internet connection.")
				self.status.setText(_("Error") + " - " + error)
		elif event == IpkgComponent.EVENT_LISTITEM:
			# Remember the installed settings (channel list) package name.
			if 'enigma2-plugin-settings-' in param[0] and self.channellist_only > 0:
				self.channellist_name = param[0]
				self.channellist_only = 2
		#print event, "-", param
		pass
	def setEndMessage(self, txt):
		# Final UI state: full slider, stopped activity, OK-to-continue.
		self.slider.setValue(4)
		self.activityTimer.stop()
		self.activityslider.setValue(0)
		self.package.setText(txt)
		self.status.setText(self.oktext)
	def startActualUpgrade(self, answer):
		"""Handle the user's choice from the update ChoiceBox."""
		if not answer or not answer[1]:
			self.close()
			return
		if answer[1] == "cold":
			# Reboot into the update (retvalue 42 = restart and upgrade).
			self.session.open(TryQuitMainloop,retvalue=42)
			self.close()
		elif answer[1] == "channels":
			self.channellist_only = 1
			self.slider.setValue(1)
			self.ipkg.startCmd(IpkgComponent.CMD_LIST, args = {'installed_only': True})
		else:
			self.ipkg.startCmd(IpkgComponent.CMD_UPGRADE, args = {'test_only': False})
	def modificationCallback(self, res):
		# Keep the user's config file ("N") or overwrite it ("Y").
		self.ipkg.write(res and "N" or "Y")
	def exit(self):
		if not self.ipkg.isRunning():
			if self.packages != 0 and self.error == 0 and self.channellist_only == 0:
				self.session.openWithCallback(self.exitAnswer, MessageBox, _("Update completed. Do you want to reboot your receiver?"))
			else:
				self.close()
		else:
			# ipkg is still running: only allow leaving once the feed
			# refresh phase has finished.
			if not self.updating:
				self.close()
	def exitAnswer(self, result):
		if result is not None and result:
			self.session.open(TryQuitMainloop,retvalue=2)
		self.close()
	def __close(self):
		self.ipkg.removeCallback(self.ipkgCallback)
|
IONISx/XBlock | refs/heads/master | xblock/__init__.py | 2 | """
XBlock Courseware Components
"""
|
kalefranz/auxlib | refs/heads/develop | setup.py | 1 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import os
from setuptools import setup, find_packages
import sys
# When executing the setup.py, we need to be able to import ourselves, this
# means that we need to add the src directory to the sys.path.
here = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.join(here, "auxlib")
sys.path.insert(0, src_dir)
import auxlib # NOQA
import auxlib.packaging # NOQA
# Conditional backports: enum34 provides 'enum' on Python < 3.4 and
# ordereddict provides 'OrderedDict' on Python < 2.7.
requirements = []
if sys.version_info < (3, 4):
    requirements.append("enum34")
if sys.version_info < (2, 7):
    requirements.append("ordereddict")
# The README doubles as the PyPI long description.
with open(os.path.join(here, "README.rst")) as f:
    long_description = f.read()
setup(
    # Project metadata is sourced from the auxlib package itself.
    name=auxlib.__name__,
    version=auxlib.__version__,
    author=auxlib.__author__,
    author_email=auxlib.__email__,
    url=auxlib.__url__,
    license=auxlib.__license__,
    description=auxlib.__summary__,
    long_description=long_description,
    packages=find_packages(exclude=['tests', 'tests.*']),
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "License :: OSI Approved :: BSD License",
        "Natural Language :: English",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX",
        "Operating System :: POSIX :: BSD",
        "Operating System :: POSIX :: Linux",
        "Operating System :: Microsoft :: Windows",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
    ],
    install_requires=requirements,
    # tests_require=["tox"],
    extras_require={
        'crypt': ["pycrypto"],
        'yaml': ["pyyaml"],
    },
    # Custom packaging hooks provided by auxlib itself.
    cmdclass={
        'build_py': auxlib.packaging.BuildPyCommand,
        'sdist': auxlib.packaging.SDistCommand,
        'test': auxlib.packaging.Tox,
    },
)
|
shujaatak/UAV_MissionPlanner | refs/heads/master | Lib/unittest/result.py | 223 | """Test result object"""
import os
import sys
import traceback
from StringIO import StringIO
from . import util
from functools import wraps
__unittest = True
def failfast(method):
    """Decorate a TestResult reporting method so that, when the result
    object has a true ``failfast`` attribute, the run is stopped before
    the wrapped method records the event.
    """
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        should_stop = getattr(self, 'failfast', False)
        if should_stop:
            self.stop()
        return method(self, *args, **kwargs)
    return wrapper
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
class TestResult(object):
    """Holder for test result information.
    Test results are automatically managed by the TestCase and TestSuite
    classes, and do not need to be explicitly manipulated by writers of tests.
    Each instance holds the total number of tests run, and collections of
    failures and errors that occurred among those test runs. The collections
    contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
    formatted traceback of the error that occurred.
    """
    # Class-level defaults used by the suite machinery for module/class
    # level fixture bookkeeping.
    _previousTestClass = None
    _testRunEntered = False
    _moduleSetUpFailed = False
    def __init__(self, stream=None, descriptions=None, verbosity=None):
        # stream/descriptions/verbosity are accepted (and ignored) so any
        # result class can be constructed uniformly by test runners.
        self.failfast = False
        self.failures = []
        self.errors = []
        self.testsRun = 0
        self.skipped = []
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        self.shouldStop = False
        # When buffer is true, stdout/stderr are captured per test and only
        # echoed back for failing/erroring tests.
        self.buffer = False
        self._stdout_buffer = None
        self._stderr_buffer = None
        self._original_stdout = sys.stdout
        self._original_stderr = sys.stderr
        self._mirrorOutput = False
    def printErrors(self):
        "Called by TestRunner after test run"
    def startTest(self, test):
        "Called when the given test is about to be run"
        self.testsRun += 1
        self._mirrorOutput = False
        self._setupStdout()
    def _setupStdout(self):
        # Redirect stdout/stderr into StringIO buffers when buffering.
        if self.buffer:
            if self._stderr_buffer is None:
                self._stderr_buffer = StringIO()
                self._stdout_buffer = StringIO()
            sys.stdout = self._stdout_buffer
            sys.stderr = self._stderr_buffer
    def startTestRun(self):
        """Called once before any tests are executed.
        See startTest for a method called before each test.
        """
    def stopTest(self, test):
        """Called when the given test has been run"""
        self._restoreStdout()
        self._mirrorOutput = False
    def _restoreStdout(self):
        # Undo the buffering: echo captured output when the test failed or
        # errored (_mirrorOutput), then reset both buffers for reuse.
        if self.buffer:
            if self._mirrorOutput:
                output = sys.stdout.getvalue()
                error = sys.stderr.getvalue()
                if output:
                    if not output.endswith('\n'):
                        output += '\n'
                    self._original_stdout.write(STDOUT_LINE % output)
                if error:
                    if not error.endswith('\n'):
                        error += '\n'
                    self._original_stderr.write(STDERR_LINE % error)
            sys.stdout = self._original_stdout
            sys.stderr = self._original_stderr
            self._stdout_buffer.seek(0)
            self._stdout_buffer.truncate()
            self._stderr_buffer.seek(0)
            self._stderr_buffer.truncate()
    def stopTestRun(self):
        """Called once after all tests are executed.
        See stopTest for a method called after each test.
        """
    @failfast
    def addError(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info().
        """
        self.errors.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True
    @failfast
    def addFailure(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info()."""
        self.failures.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True
    def addSuccess(self, test):
        "Called when a test has completed successfully"
        pass
    def addSkip(self, test, reason):
        """Called when a test is skipped."""
        self.skipped.append((test, reason))
    def addExpectedFailure(self, test, err):
        """Called when an expected failure/error occured."""
        self.expectedFailures.append(
            (test, self._exc_info_to_string(err, test)))
    @failfast
    def addUnexpectedSuccess(self, test):
        """Called when a test was expected to fail, but succeed."""
        self.unexpectedSuccesses.append(test)
    def wasSuccessful(self):
        "Tells whether or not this result was a success"
        return len(self.failures) == len(self.errors) == 0
    def stop(self):
        "Indicates that the tests should be aborted"
        self.shouldStop = True
    def _exc_info_to_string(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a string."""
        exctype, value, tb = err
        # Skip test runner traceback levels
        while tb and self._is_relevant_tb_level(tb):
            tb = tb.tb_next
        if exctype is test.failureException:
            # Skip assert*() traceback levels
            length = self._count_relevant_tb_levels(tb)
            msgLines = traceback.format_exception(exctype, value, tb, length)
        else:
            msgLines = traceback.format_exception(exctype, value, tb)
        if self.buffer:
            # Append any buffered stdout/stderr to the formatted traceback.
            output = sys.stdout.getvalue()
            error = sys.stderr.getvalue()
            if output:
                if not output.endswith('\n'):
                    output += '\n'
                msgLines.append(STDOUT_LINE % output)
            if error:
                if not error.endswith('\n'):
                    error += '\n'
                msgLines.append(STDERR_LINE % error)
        return ''.join(msgLines)
    def _is_relevant_tb_level(self, tb):
        # Frames belonging to unittest itself carry a __unittest global.
        return '__unittest' in tb.tb_frame.f_globals
    def _count_relevant_tb_levels(self, tb):
        # Number of frames up to (not including) the first unittest frame.
        length = 0
        while tb and not self._is_relevant_tb_level(tb):
            length += 1
            tb = tb.tb_next
        return length
    def __repr__(self):
        return ("<%s run=%i errors=%i failures=%i>" %
                (util.strclass(self.__class__), self.testsRun, len(self.errors),
                 len(self.failures)))
|
edx/edx-platform | refs/heads/master | openedx/core/djangoapps/external_user_ids/migrations/0002_mb_coaching_20200210_1754.py | 4 | # Generated by Django 1.11.28 on 2020-02-10 17:54
from django.db import migrations
class Migration(migrations.Migration):
    # Adds (and, on reverse, removes) the MicroBachelors coaching
    # ExternalIdType row.
    dependencies = [
        ('external_user_ids', '0001_initial'),
    ]
    # Shared type name keeps forward and reverse operations in sync.
    coaching_name = 'mb_coaching'
    def create_mb_coaching_type(apps, schema_editor):
        """
        Add a MicroBachelors (MB) coaching type
        """
        ExternalIdType = apps.get_model('external_user_ids', 'ExternalIdType')
        ExternalIdType.objects.update_or_create(name=Migration.coaching_name, description='MicroBachelors Coaching')
    def delete_mb_coaching_type(apps, schema_editor):
        """
        Delete the MicroBachelors (MB) coaching type
        """
        ExternalIdType = apps.get_model('external_user_ids', 'ExternalIdType')
        ExternalIdType.objects.filter(
            name=Migration.coaching_name
        ).delete()
    # The two functions above are plain RunPython callables (no 'self');
    # they are referenced here at class-definition time.
    operations = [
        migrations.RunPython(create_mb_coaching_type, reverse_code=delete_mb_coaching_type),
    ]
|
Fokko/incubator-airflow | refs/heads/master | airflow/contrib/hooks/aws_datasync_hook.py | 2 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.amazon.aws.hooks.datasync`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.amazon.aws.hooks.datasync import AWSDataSyncHook # noqa
# Emit a deprecation warning at import time; stacklevel=2 attributes the
# warning to the importing module rather than to this shim.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.amazon.aws.hooks.datasync`.",
    DeprecationWarning,
    stacklevel=2,
)
|
juanpflores94/Hodor | refs/heads/master | static/node_modules/node-gyp/gyp/pylib/gyp/generator/msvs.py | 886 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import ntpath
import os
import posixpath
import re
import subprocess
import sys
import gyp.common
import gyp.easy_xml as easy_xml
import gyp.generator.ninja as ninja_generator
import gyp.MSVSNew as MSVSNew
import gyp.MSVSProject as MSVSProject
import gyp.MSVSSettings as MSVSSettings
import gyp.MSVSToolFile as MSVSToolFile
import gyp.MSVSUserFile as MSVSUserFile
import gyp.MSVSUtil as MSVSUtil
import gyp.MSVSVersion as MSVSVersion
from gyp.common import GypError
from gyp.common import OrderedSet
# TODO: Remove once bots are on 2.7, http://crbug.com/241769
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import gyp.ordered_dict
return gyp.ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
# Regular expression for validating Visual Studio GUIDs. If the GUID
# contains lowercase hex letters, MSVS will be fine. However,
# IncrediBuild BuildConsole will parse the solution file, but then
# silently skip building the target causing hard to track down errors.
# Note that this only happens with the BuildConsole, and does not occur
# if IncrediBuild is executed from inside Visual Studio. This regex
# validates that the string looks like a GUID with all uppercase hex
# letters.
VALID_MSVS_GUID_CHARS = re.compile(r'^[A-F0-9\-]+$')
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '.exe',
'STATIC_LIB_PREFIX': '',
'SHARED_LIB_PREFIX': '',
'STATIC_LIB_SUFFIX': '.lib',
'SHARED_LIB_SUFFIX': '.dll',
'INTERMEDIATE_DIR': '$(IntDir)',
'SHARED_INTERMEDIATE_DIR': '$(OutDir)obj/global_intermediate',
'OS': 'win',
'PRODUCT_DIR': '$(OutDir)',
'LIB_DIR': '$(OutDir)lib',
'RULE_INPUT_ROOT': '$(InputName)',
'RULE_INPUT_DIRNAME': '$(InputDir)',
'RULE_INPUT_EXT': '$(InputExt)',
'RULE_INPUT_NAME': '$(InputFileName)',
'RULE_INPUT_PATH': '$(InputPath)',
'CONFIGURATION_NAME': '$(ConfigurationName)',
}
# The msvs specific sections that hold paths
generator_additional_path_sections = [
'msvs_cygwin_dirs',
'msvs_props',
]
generator_additional_non_configuration_keys = [
'msvs_cygwin_dirs',
'msvs_cygwin_shell',
'msvs_large_pdb',
'msvs_shard',
'msvs_external_builder',
'msvs_external_builder_out_dir',
'msvs_external_builder_build_cmd',
'msvs_external_builder_clean_cmd',
'msvs_external_builder_clcompile_cmd',
'msvs_enable_winrt',
'msvs_requires_importlibrary',
'msvs_enable_winphone',
'msvs_application_type_revision',
'msvs_target_platform_version',
'msvs_target_platform_minversion',
]
# List of precompiled header related keys.
precomp_keys = [
'msvs_precompiled_header',
'msvs_precompiled_source',
]
cached_username = None
cached_domain = None
# TODO(gspencer): Switch the os.environ calls to be
# win32api.GetDomainName() and win32api.GetUserName() once the
# python version in depot_tools has been updated to work on Vista
# 64-bit.
def _GetDomainAndUserName():
  """Return the (domain, username) pair for the current user.

  On non-Windows platforms a placeholder pair is returned.  Results are
  memoized in module-level globals because 'net config Workstation' is
  slow to invoke.
  """
  if sys.platform not in ('win32', 'cygwin'):
    return ('DOMAIN', 'USERNAME')
  global cached_username
  global cached_domain
  if not cached_domain or not cached_username:
    domain = os.environ.get('USERDOMAIN')
    username = os.environ.get('USERNAME')
    if not domain or not username:
      # Environment may lack the variables (e.g. under some shells); fall
      # back to parsing the output of 'net config Workstation'.
      call = subprocess.Popen(['net', 'config', 'Workstation'],
                              stdout=subprocess.PIPE)
      config = call.communicate()[0]
      username_re = re.compile(r'^User name\s+(\S+)', re.MULTILINE)
      username_match = username_re.search(config)
      if username_match:
        username = username_match.group(1)
      domain_re = re.compile(r'^Logon domain\s+(\S+)', re.MULTILINE)
      domain_match = domain_re.search(config)
      if domain_match:
        domain = domain_match.group(1)
    cached_domain = domain
    cached_username = username
  return (cached_domain, cached_username)
fixpath_prefix = None
def _NormalizedSource(source):
"""Normalize the path.
But not if that gets rid of a variable, as this may expand to something
larger than one directory.
Arguments:
source: The path to be normalize.d
Returns:
The normalized path.
"""
normalized = os.path.normpath(source)
if source.count('$') == normalized.count('$'):
source = normalized
return source
def _FixPath(path):
  """Convert a path to the form expected inside a vcproj file.

  Arguments:
    path: The path to convert, may contain / etc.
  Returns:
    The path with all slashes made into backslashes.
  """
  # Paths that are absolute or start with a '$' variable are never
  # re-rooted under fixpath_prefix.
  needs_no_prefix = not path or os.path.isabs(path) or path[0] == '$'
  if fixpath_prefix and not needs_no_prefix:
    path = os.path.join(fixpath_prefix, path)
  path = _NormalizedSource(path.replace('/', '\\'))
  return path[:-1] if path and path[-1] == '\\' else path
def _FixPaths(paths):
  """Apply _FixPath to every path in the list, returning a new list."""
  return list(map(_FixPath, paths))
def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None,
                                     list_excluded=True, msvs_version=None):
  """Converts a list split source file paths into a vcproj folder hierarchy.

  Arguments:
    sources: A list of source file paths split.
    prefix: A list of source file path layers meant to apply to each of
        sources.
    excluded: A set of excluded files.  NOTE(review): there is no None-guard
        below, so callers appear to always pass a set -- confirm before
        relying on the default.
    list_excluded: Whether excluded files are gathered into an
        '_excluded_files' filter.
    msvs_version: A MSVSVersion object.
  Returns:
    A hierarchy of filenames and MSVSProject.Filter objects that matches the
    layout of the source tree.
    For example:
    _ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
                                     prefix=['joe'])
    -->
    [MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
     MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
  """
  if not prefix: prefix = []
  result = []
  excluded_result = []
  folders = OrderedDict()
  # Gather files into the final result, excluded, or folders.
  for s in sources:
    if len(s) == 1:
      # Leaf: a bare filename at this level of the tree.
      filename = _NormalizedSource('\\'.join(prefix + s))
      if filename in excluded:
        excluded_result.append(filename)
      else:
        result.append(filename)
    elif msvs_version and not msvs_version.UsesVcxproj():
      # For MSVS 2008 and earlier, we need to process all files before walking
      # the sub folders.
      if not folders.get(s[0]):
        folders[s[0]] = []
      folders[s[0]].append(s[1:])
    else:
      # MSBuild path: recurse into the subfolder immediately.
      contents = _ConvertSourcesToFilterHierarchy([s[1:]], prefix + [s[0]],
                                                  excluded=excluded,
                                                  list_excluded=list_excluded,
                                                  msvs_version=msvs_version)
      contents = MSVSProject.Filter(s[0], contents=contents)
      result.append(contents)
  # Add a folder for excluded files.
  if excluded_result and list_excluded:
    excluded_folder = MSVSProject.Filter('_excluded_files',
                                         contents=excluded_result)
    result.append(excluded_folder)
  if msvs_version and msvs_version.UsesVcxproj():
    return result
  # Populate all the folders (deferred recursion, MSVS 2008 and earlier).
  for f in folders:
    contents = _ConvertSourcesToFilterHierarchy(folders[f], prefix=prefix + [f],
                                                excluded=excluded,
                                                list_excluded=list_excluded,
                                                msvs_version=msvs_version)
    contents = MSVSProject.Filter(f, contents=contents)
    result.append(contents)
  return result
def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False):
  """Append |value| to |setting| of |tool_name|; falsy values are ignored."""
  if value:
    _ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset)
def _ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset=False):
# TODO(bradnelson): ugly hack, fix this more generally!!!
if 'Directories' in setting or 'Dependencies' in setting:
if type(value) == str:
value = value.replace('/', '\\')
else:
value = [i.replace('/', '\\') for i in value]
if not tools.get(tool_name):
tools[tool_name] = dict()
tool = tools[tool_name]
if tool.get(setting):
if only_if_unset: return
if type(tool[setting]) == list and type(value) == list:
tool[setting] += value
else:
raise TypeError(
'Appending "%s" to a non-list setting "%s" for tool "%s" is '
'not allowed, previous value: %s' % (
value, setting, tool_name, str(tool[setting])))
else:
tool[setting] = value
def _ConfigPlatform(config_data):
return config_data.get('msvs_configuration_platform', 'Win32')
def _ConfigBaseName(config_name, platform_name):
if config_name.endswith('_' + platform_name):
return config_name[0:-len(platform_name) - 1]
else:
return config_name
def _ConfigFullName(config_name, config_data):
  """Return the MSVS 'name|platform' string for a configuration."""
  platform = _ConfigPlatform(config_data)
  base = _ConfigBaseName(config_name, platform)
  return '%s|%s' % (base, platform)
def _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell, has_input_path,
                                quote_cmd, do_setup_env):
  """Turn a rule's command list into a single MSVS command line string.

  Arguments:
    spec: the target dict (consulted for msvs_cygwin_dirs).
    cmd: the command, as a list of arguments.
    cygwin_shell: if true, wrap the command to run under cygwin's bash.
    has_input_path: whether $(InputPath) substitution should be applied.
    quote_cmd: whether to quote each argument (non-cygwin branch only).
    do_setup_env: whether to call setup_env.bat first (cygwin branch only).
  Returns:
    The command line string to embed in the project file.
  """
  # %INPUTDIR% is $(InputDir) with its trailing separator stripped; set it up
  # only when the command actually references $(InputDir).
  if [x for x in cmd if '$(InputDir)' in x]:
    input_dir_preamble = (
        'set INPUTDIR=$(InputDir)\n'
        'if NOT DEFINED INPUTDIR set INPUTDIR=.\\\n'
        'set INPUTDIR=%INPUTDIR:~0,-1%\n'
        )
  else:
    input_dir_preamble = ''
  if cygwin_shell:
    # Find path to cygwin.
    cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
    # Prepare command: translate MSVS dir macros into cygpath invocations so
    # bash sees POSIX-ish paths.
    direct_cmd = cmd
    direct_cmd = [i.replace('$(IntDir)',
                            '`cygpath -m "${INTDIR}"`') for i in direct_cmd]
    direct_cmd = [i.replace('$(OutDir)',
                            '`cygpath -m "${OUTDIR}"`') for i in direct_cmd]
    direct_cmd = [i.replace('$(InputDir)',
                            '`cygpath -m "${INPUTDIR}"`') for i in direct_cmd]
    if has_input_path:
      direct_cmd = [i.replace('$(InputPath)',
                              '`cygpath -m "${INPUTPATH}"`')
                    for i in direct_cmd]
    # Escape embedded quotes and wrap each argument so it survives the extra
    # layer of quoting added by `bash -c "..."` below.
    direct_cmd = ['\\"%s\\"' % i.replace('"', '\\\\\\"') for i in direct_cmd]
    # direct_cmd = gyp.common.EncodePOSIXShellList(direct_cmd)
    direct_cmd = ' '.join(direct_cmd)
    # TODO(quote): regularize quoting path names throughout the module
    cmd = ''
    if do_setup_env:
      cmd += 'call "$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && '
    cmd += 'set CYGWIN=nontsec&& '
    # Export, as environment variables, only the MSVS macro values the
    # command actually references.
    if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0:
      cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& '
    if direct_cmd.find('INTDIR') >= 0:
      cmd += 'set INTDIR=$(IntDir)&& '
    if direct_cmd.find('OUTDIR') >= 0:
      cmd += 'set OUTDIR=$(OutDir)&& '
    if has_input_path and direct_cmd.find('INPUTPATH') >= 0:
      cmd += 'set INPUTPATH=$(InputPath) && '
    cmd += 'bash -c "%(cmd)s"'
    cmd = cmd % {'cygwin_dir': cygwin_dir,
                 'cmd': direct_cmd}
    return input_dir_preamble + cmd
  else:
    # Convert cat --> type to mimic unix.
    if cmd[0] == 'cat':
      command = ['type']
    else:
      command = [cmd[0].replace('/', '\\')]
    # Add call before command to ensure that commands can be tied together one
    # after the other without aborting in Incredibuild, since IB makes a bat
    # file out of the raw command string, and some commands (like python) are
    # actually batch files themselves.
    command.insert(0, 'call')
    # Fix the paths
    # TODO(quote): This is a really ugly heuristic, and will miss path fixing
    # for arguments like "--arg=path" or "/opt:path".
    # If the argument starts with a slash or dash, it's probably a command line
    # switch
    arguments = [i if (i[:1] in "/-") else _FixPath(i) for i in cmd[1:]]
    arguments = [i.replace('$(InputDir)', '%INPUTDIR%') for i in arguments]
    arguments = [MSVSSettings.FixVCMacroSlashes(i) for i in arguments]
    if quote_cmd:
      # Support a mode for using cmd directly.
      # Convert any paths to native form (first element is used directly).
      # TODO(quote): regularize quoting path names throughout the module
      arguments = ['"%s"' % i for i in arguments]
    # Collapse into a single command.
    return input_dir_preamble + ' '.join(command + arguments)
def _BuildCommandLineForRule(spec, rule, has_input_path, do_setup_env):
  """Build the command line string for one rule's action.

  The cygwin-shell flag comes from the rule when present, falling back to
  the target-level msvs_cygwin_shell (default on); string values are
  coerced to int.
  """
  # Currently this weird argument munging is used to duplicate the way a
  # python script would need to be run as part of the chrome tree.
  # Eventually we should add some sort of rule_default option to set this
  # per project. For now the behavior chrome needs is the default.
  cygwin_shell = rule.get('msvs_cygwin_shell')
  if cygwin_shell is None:
    cygwin_shell = int(spec.get('msvs_cygwin_shell', 1))
  elif isinstance(cygwin_shell, str):
    cygwin_shell = int(cygwin_shell)
  quote_cmd = int(rule.get('msvs_quote_cmd', 1))
  return _BuildCommandLineForRuleRaw(spec, rule['action'], cygwin_shell,
                                     has_input_path, quote_cmd,
                                     do_setup_env=do_setup_env)
def _AddActionStep(actions_dict, inputs, outputs, description, command):
"""Merge action into an existing list of actions.
Care must be taken so that actions which have overlapping inputs either don't
get assigned to the same input, or get collapsed into one.
Arguments:
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
inputs: list of inputs
outputs: list of outputs
description: description of the action
command: command line to execute
"""
# Require there to be at least one input (call sites will ensure this).
assert inputs
action = {
'inputs': inputs,
'outputs': outputs,
'description': description,
'command': command,
}
# Pick where to stick this action.
# While less than optimal in terms of build time, attach them to the first
# input for now.
chosen_input = inputs[0]
# Add it there.
if chosen_input not in actions_dict:
actions_dict[chosen_input] = []
actions_dict[chosen_input].append(action)
def _AddCustomBuildToolForMSVS(p, spec, primary_input,
                               inputs, outputs, description, cmd):
  """Add a custom build tool to execute something.

  Arguments:
    p: the target project
    spec: the target project dict
    primary_input: input file to attach the build tool to
    inputs: list of inputs
    outputs: list of outputs
    description: description of the action
    cmd: command line to execute
  """
  fixed_inputs = _FixPaths(inputs)
  fixed_outputs = _FixPaths(outputs)
  tool = MSVSProject.Tool(
      'VCCustomBuildTool',
      {'Description': description,
       'AdditionalDependencies': ';'.join(fixed_inputs),
       'Outputs': ';'.join(fixed_outputs),
       'CommandLine': cmd,
      })
  primary = _FixPath(primary_input)
  # Attach the tool to the primary input under every configuration.
  for config_name, c_data in spec['configurations'].iteritems():
    p.AddFileConfig(primary, _ConfigFullName(config_name, c_data),
                    tools=[tool])
def _AddAccumulatedActionsToMSVS(p, spec, actions_dict):
  """Add actions accumulated into an actions_dict, merging as needed.

  All actions attached to one primary input are collapsed into a single
  custom build step whose descriptions and commands are concatenated.

  Arguments:
    p: the target project
    spec: the target project dict
    actions_dict: dictionary keyed on input name, which maps to a list of
        dicts describing the actions attached to that input file.
  """
  for primary_input in actions_dict:
    # Union the inputs/outputs of every action on this file, preserving
    # first-seen order.
    inputs = OrderedSet()
    outputs = OrderedSet()
    descriptions = []
    commands = []
    for action in actions_dict[primary_input]:
      inputs.update(OrderedSet(action['inputs']))
      outputs.update(OrderedSet(action['outputs']))
      descriptions.append(action['description'])
      commands.append(action['command'])
    # Add the custom build step for one input file.
    description = ', and also '.join(descriptions)
    # Commands run one per line of the generated build step.
    command = '\r\n'.join(commands)
    _AddCustomBuildToolForMSVS(p, spec,
                               primary_input=primary_input,
                               inputs=inputs,
                               outputs=outputs,
                               description=description,
                               cmd=command)
def _RuleExpandPath(path, input_file):
"""Given the input file to which a rule applied, string substitute a path.
Arguments:
path: a path to string expand
input_file: the file to which the rule applied.
Returns:
The string substituted path.
"""
path = path.replace('$(InputName)',
os.path.splitext(os.path.split(input_file)[1])[0])
path = path.replace('$(InputDir)', os.path.dirname(input_file))
path = path.replace('$(InputExt)',
os.path.splitext(os.path.split(input_file)[1])[1])
path = path.replace('$(InputFileName)', os.path.split(input_file)[1])
path = path.replace('$(InputPath)', input_file)
return path
def _FindRuleTriggerFiles(rule, sources):
"""Find the list of files which a particular rule applies to.
Arguments:
rule: the rule in question
sources: the set of all known source files for this project
Returns:
The list of sources that trigger a particular rule.
"""
return rule.get('rule_sources', [])
def _RuleInputsAndOutputs(rule, trigger_file):
  """Find the inputs and outputs generated by a rule.

  Arguments:
    rule: the rule in question.
    trigger_file: the main trigger for this rule.
  Returns:
    The pair of (inputs, outputs) involved in this rule.
  """
  inputs = OrderedSet()
  outputs = OrderedSet()
  # The trigger file itself always counts as an input.
  inputs.add(trigger_file)
  for raw in _FixPaths(rule.get('inputs', [])):
    inputs.add(_RuleExpandPath(raw, trigger_file))
  for raw in _FixPaths(rule.get('outputs', [])):
    outputs.add(_RuleExpandPath(raw, trigger_file))
  return (inputs, outputs)
def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options):
  """Generate a native rules file.

  Arguments:
    p: the target project
    rules: the set of rules to include
    output_dir: the directory in which the project/gyp resides
    spec: the project dict
    options: global generator options
  """
  rules_filename = '%s%s.rules' % (spec['target_name'],
                                   options.suffix)
  rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename),
                                   spec['target_name'])
  # Add each rule.
  for r in rules:
    rule_name = r['rule_name']
    rule_ext = r['extension']
    # NOTE(review): inputs/outputs are computed before the skip check below;
    # harmless, but the two lines could move past the `continue`.
    inputs = _FixPaths(r.get('inputs', []))
    outputs = _FixPaths(r.get('outputs', []))
    # Skip a rule with no action and no inputs.
    if 'action' not in r and not r.get('rule_sources', []):
      continue
    cmd = _BuildCommandLineForRule(spec, r, has_input_path=True,
                                   do_setup_env=True)
    rules_file.AddCustomBuildRule(name=rule_name,
                                  description=r.get('message', rule_name),
                                  extensions=[rule_ext],
                                  additional_dependencies=inputs,
                                  outputs=outputs,
                                  cmd=cmd)
  # Write out rules file.
  rules_file.WriteIfChanged()
  # Add rules file to project.
  p.AddToolFile(rules_filename)
def _Cygwinify(path):
path = path.replace('$(OutDir)', '$(OutDirCygwin)')
path = path.replace('$(IntDir)', '$(IntDirCygwin)')
return path
def _GenerateExternalRules(rules, output_dir, spec,
                           sources, options, actions_to_add):
  """Generate an external makefile to do a set of rules.

  Arguments:
    rules: the list of rules to include
    output_dir: path containing project and gyp files
    spec: project specification data
    sources: set of sources known
    options: global generator options
    actions_to_add: The list of actions we will add to.
  """
  filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix)
  mk_file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename))
  # Find cygwin style versions of some paths.
  mk_file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n')
  mk_file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n')
  # Gather stuff needed to emit all: target.
  all_inputs = OrderedSet()
  all_outputs = OrderedSet()
  all_output_dirs = OrderedSet()
  first_outputs = []
  for rule in rules:
    trigger_files = _FindRuleTriggerFiles(rule, sources)
    for tf in trigger_files:
      inputs, outputs = _RuleInputsAndOutputs(rule, tf)
      all_inputs.update(OrderedSet(inputs))
      all_outputs.update(OrderedSet(outputs))
      # Only use one target from each rule as the dependency for
      # 'all' so we don't try to build each rule multiple times.
      # NOTE(review): assumes every rule produces at least one output;
      # list(outputs)[0] raises IndexError otherwise.
      first_outputs.append(list(outputs)[0])
      # Get the unique output directories for this rule.
      output_dirs = [os.path.split(i)[0] for i in outputs]
      for od in output_dirs:
        all_output_dirs.add(od)
  first_outputs_cyg = [_Cygwinify(i) for i in first_outputs]
  # Write out all: target, including mkdir for each output directory.
  mk_file.write('all: %s\n' % ' '.join(first_outputs_cyg))
  for od in all_output_dirs:
    if od:
      mk_file.write('\tmkdir -p `cygpath -u "%s"`\n' % od)
  mk_file.write('\n')
  # Define how each output is generated.
  for rule in rules:
    trigger_files = _FindRuleTriggerFiles(rule, sources)
    for tf in trigger_files:
      # Get all the inputs and outputs for this rule for this trigger file.
      inputs, outputs = _RuleInputsAndOutputs(rule, tf)
      inputs = [_Cygwinify(i) for i in inputs]
      outputs = [_Cygwinify(i) for i in outputs]
      # Prepare the command line for this rule.
      cmd = [_RuleExpandPath(c, tf) for c in rule['action']]
      cmd = ['"%s"' % i for i in cmd]
      cmd = ' '.join(cmd)
      # Add it to the makefile.
      mk_file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs)))
      mk_file.write('\t%s\n\n' % cmd)
  # Close up the file.
  mk_file.close()
  # Add makefile to list of sources.
  sources.add(filename)
  # Add a build action to call makefile.
  cmd = ['make',
         'OutDir=$(OutDir)',
         'IntDir=$(IntDir)',
         '-j', '${NUMBER_OF_PROCESSORS_PLUS_1}',
         '-f', filename]
  cmd = _BuildCommandLineForRuleRaw(spec, cmd, True, False, True, True)
  # Insert makefile as 0'th input, so it gets the action attached there,
  # as this is easier to understand from in the IDE.
  all_inputs = list(all_inputs)
  all_inputs.insert(0, filename)
  _AddActionStep(actions_to_add,
                 inputs=_FixPaths(all_inputs),
                 outputs=_FixPaths(all_outputs),
                 description='Running external rules for %s' %
                             spec['target_name'],
                 command=cmd)
def _EscapeEnvironmentVariableExpansion(s):
"""Escapes % characters.
Escapes any % characters so that Windows-style environment variable
expansions will leave them alone.
See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
to understand why we have to do this.
Args:
s: The string to be escaped.
Returns:
The escaped string.
"""
s = s.replace('%', '%%')
return s
quote_replacer_regex = re.compile(r'(\\*)"')
def _EscapeCommandLineArgumentForMSVS(s):
"""Escapes a Windows command-line argument.
So that the Win32 CommandLineToArgv function will turn the escaped result back
into the original string.
See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
("Parsing C++ Command-Line Arguments") to understand why we have to do
this.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a literal quote, CommandLineToArgv requires an odd number of
# backslashes preceding it, and it produces half as many literal backslashes
# (rounded down). So we need to produce 2n+1 backslashes.
return 2 * match.group(1) + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex.sub(_Replace, s)
# Now add unescaped quotes so that any whitespace is interpreted literally.
s = '"' + s + '"'
return s
# A run of backslashes (possibly empty) followed by one or more VCProj list
# delimiters (',' or ';').
delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)')
def _EscapeVCProjCommandLineArgListItem(s):
  """Escapes command line arguments for MSVS.

  The VCProj format stores string lists in a single string using commas and
  semi-colons as separators, which must be quoted if they are to be
  interpreted literally. However, command-line arguments may already have
  quotes, and the VCProj parser is ignorant of the backslash escaping
  convention used by CommandLineToArgv, so the command-line quotes and the
  VCProj quotes may not be the same quotes. So to store a general
  command-line argument in a VCProj list, we need to parse the existing
  quoting according to VCProj's convention and quote any delimiters that are
  not already quoted by that convention. The quotes that we add will also be
  seen by CommandLineToArgv, so if backslashes precede them then we also have
  to escape those backslashes according to the CommandLineToArgv
  convention.

  Args:
    s: the string to be escaped.
  Returns:
    the escaped string.
  """
  def _Replace(match):
    # For a non-literal quote, CommandLineToArgv requires an even number of
    # backslashes preceding it, and it produces half as many literal
    # backslashes. So we need to produce 2n backslashes.
    return 2 * match.group(1) + '"' + match.group(2) + '"'
  # Split on VCProj quotes: the unquoted segments are at the even-numbered
  # indices, and only those need their delimiters quoted.
  segments = s.split('"')
  # The unquoted segments are at the even-numbered indices.
  for i in range(0, len(segments), 2):
    segments[i] = delimiters_replacer_regex.sub(_Replace, segments[i])
  # Concatenate back into a single string
  s = '"'.join(segments)
  if len(segments) % 2 == 0:
    # String ends while still quoted according to VCProj's convention. This
    # means the delimiter and the next list item that follow this one in the
    # .vcproj file will be misinterpreted as part of this item. There is nothing
    # we can do about this. Adding an extra quote would correct the problem in
    # the VCProj but cause the same problem on the final command-line. Moving
    # the item to the end of the list does works, but that's only possible if
    # there's only one such item. Let's just warn the user.
    # NOTE: Python 2 print-to-stderr statement syntax.
    print >> sys.stderr, ('Warning: MSVS may misinterpret the odd number of ' +
                          'quotes in ' + s)
  return s
def _EscapeCppDefineForMSVS(s):
  """Escapes a CPP define so that it will reach the compiler unaltered."""
  escaped = _EscapeEnvironmentVariableExpansion(s)
  escaped = _EscapeCommandLineArgumentForMSVS(escaped)
  escaped = _EscapeVCProjCommandLineArgListItem(escaped)
  # cl.exe replaces literal # characters with = in preprocessor definitions
  # for some reason. Octal-encode to work around that.
  return escaped.replace('#', '\\%03o' % ord('#'))
quote_replacer_regex2 = re.compile(r'(\\+)"')
def _EscapeCommandLineArgumentForMSBuild(s):
"""Escapes a Windows command-line argument for use by MSBuild."""
def _Replace(match):
return (len(match.group(1)) / 2 * 4) * '\\' + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex2.sub(_Replace, s)
return s
def _EscapeMSBuildSpecialCharacters(s):
escape_dictionary = {
'%': '%25',
'$': '%24',
'@': '%40',
"'": '%27',
';': '%3B',
'?': '%3F',
'*': '%2A'
}
result = ''.join([escape_dictionary.get(c, c) for c in s])
return result
def _EscapeCppDefineForMSBuild(s):
  """Escapes a CPP define so that it will reach the compiler unaltered."""
  escaped = _EscapeEnvironmentVariableExpansion(s)
  escaped = _EscapeCommandLineArgumentForMSBuild(escaped)
  escaped = _EscapeMSBuildSpecialCharacters(escaped)
  # cl.exe replaces literal # characters with = in preprocessor definitions
  # for some reason. Octal-encode to work around that.
  return escaped.replace('#', '\\%03o' % ord('#'))
def _GenerateRulesForMSVS(p, output_dir, options, spec,
                          sources, excluded_sources,
                          actions_to_add):
  """Generate all the rules for a particular project.

  Arguments:
    p: the project
    output_dir: directory to emit rules to
    options: global options passed to the generator
    spec: the specification for this project
    sources: the set of all known source files in this project
    excluded_sources: the set of sources excluded from normal processing
    actions_to_add: deferred list of actions to add in
  """
  rules = spec.get('rules', [])
  # Partition rules into those MSVS handles natively and those run through
  # an external makefile.
  rules_native = []
  rules_external = []
  for rule in rules:
    if int(rule.get('msvs_external_rule', 0)):
      rules_external.append(rule)
    else:
      rules_native.append(rule)
  if rules_native:
    _GenerateNativeRulesForMSVS(p, rules_native, output_dir, spec, options)
  if rules_external:
    _GenerateExternalRules(rules_external, output_dir, spec,
                           sources, options, actions_to_add)
  _AdjustSourcesForRules(rules, sources, excluded_sources, False)
def _AdjustSourcesForRules(rules, sources, excluded_sources, is_msbuild):
  """Fold rule outputs into sources and keep rule triggers buildable.

  Each rule trigger is removed from excluded_sources so the rule can fire;
  when process_outputs_as_sources is set, the rule's expanded inputs and
  outputs are added to the source list too.
  """
  for rule in rules:
    process_outputs = int(rule.get('process_outputs_as_sources', False))
    for trigger_file in _FindRuleTriggerFiles(rule, sources):
      fixed_trigger = _FixPath(trigger_file)
      # Let the rule be triggered even when its trigger was excluded (e.g.
      # the trigger is also listed in an action's inputs in the same
      # project).
      excluded_sources.discard(fixed_trigger)
      # Done if not processing outputs as sources.
      if not process_outputs:
        continue
      inputs, outputs = _RuleInputsAndOutputs(rule, trigger_file)
      inputs = OrderedSet(_FixPaths(inputs))
      outputs = OrderedSet(_FixPaths(outputs))
      inputs.remove(fixed_trigger)
      sources.update(inputs)
      if not is_msbuild:
        # Non-msbuild projects additionally exclude the generated inputs
        # from the normal build.
        excluded_sources.update(inputs)
      sources.update(outputs)
def _FilterActionsFromExcluded(excluded_sources, actions_to_add):
  """Take inputs with actions attached out of the list of exclusions.

  Arguments:
    excluded_sources: list of source files not to be built.
    actions_to_add: dict of actions keyed on source file they're attached to.
  Returns:
    excluded_sources with files that have actions attached removed.
  """
  have_actions = OrderedSet(_FixPaths(actions_to_add.keys()))
  return [source for source in excluded_sources
          if source not in have_actions]
def _GetDefaultConfiguration(spec):
return spec['configurations'][spec['default_configuration']]
def _GetGuidOfProject(proj_path, spec):
  """Get the guid for the project.

  Arguments:
    proj_path: Path of the vcproj or vcxproj file to generate.
    spec: The target dictionary containing the properties of the target.
  Returns:
    the guid.
  Raises:
    ValueError: if the specified GUID is invalid.
  """
  # An explicit guid in the default configuration wins; otherwise derive a
  # deterministic one from the project path.
  default_config = _GetDefaultConfiguration(spec)
  guid = default_config.get('msvs_guid')
  if guid:
    if VALID_MSVS_GUID_CHARS.match(guid) is None:
      raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' %
                       (guid, VALID_MSVS_GUID_CHARS.pattern))
    return '{%s}' % guid
  return MSVSNew.MakeGuid(proj_path)
def _GetMsbuildToolsetOfProject(proj_path, spec, version):
  """Get the platform toolset for the project.

  Arguments:
    proj_path: Path of the vcproj or vcxproj file to generate (unused).
    spec: The target dictionary containing the properties of the target.
    version: The MSVSVersion object.
  Returns:
    the platform toolset string or None.
  """
  default_config = _GetDefaultConfiguration(spec)
  toolset = default_config.get('msbuild_toolset')
  if not toolset:
    # Fall back to the version's default, but only when it is non-empty so
    # an explicit falsy config value is preserved.
    default_toolset = version.DefaultToolset()
    if default_toolset:
      toolset = default_toolset
  return toolset
def _GenerateProject(project, options, version, generator_flags):
  """Generates a vcproj file.

  Arguments:
    project: the MSVSProject object.
    options: global generator options.
    version: the MSVSVersion object.
    generator_flags: dict of generator-specific flags.
  Returns:
    A list of source files that cannot be found on disk.
  """
  default_config = _GetDefaultConfiguration(project.spec)
  # Skip emitting anything if told to with msvs_existing_vcproj option.
  if default_config.get('msvs_existing_vcproj'):
    return []
  if version.UsesVcxproj():
    generate = _GenerateMSBuildProject
  else:
    generate = _GenerateMSVSProject
  return generate(project, options, version, generator_flags)
# TODO: Avoid code duplication with _ValidateSourcesForOSX in make.py.
def _ValidateSourcesForMSVSProject(spec, version):
  """Makes sure if duplicate basenames are not specified in the source list.

  Arguments:
    spec: The target dictionary containing the properties of the target.
    version: The VisualStudioVersion object.
  """
  # This validation should not be applied to MSVC2010 and later.
  assert not version.UsesVcxproj()
  # TODO: Check if MSVC allows this for loadable_module targets.
  if spec.get('type', None) not in ('static_library', 'shared_library'):
    return
  # Group compiled sources by basename (extension stripped); only compiled
  # files can collide in the MSVC08 object-file namespace.
  basenames = {}
  for source in spec.get('sources', []):
    name, ext = os.path.splitext(source)
    if ext not in ('.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S'):
      continue
    basenames.setdefault(os.path.basename(name), []).append(source)
  error = ''
  for basename, files in basenames.iteritems():
    if len(files) > 1:
      error += ' %s: %s\n' % (basename, ' '.join(files))
  if error:
    print('static library %s has several files with the same basename:\n' %
          spec['target_name'] + error + 'MSVC08 cannot handle that.')
    raise GypError('Duplicate basenames in sources section, see list above')
def _GenerateMSVSProject(project, options, version, generator_flags):
  """Generates a .vcproj file.  It may create .rules and .user files too.

  Arguments:
    project: The project object we will generate the file for.
    options: Global options passed to the generator.
    version: The VisualStudioVersion object.
    generator_flags: dict of generator-specific flags.
  Returns:
    A list of source files that cannot be found on disk.
  """
  spec = project.spec
  gyp.common.EnsureDirExists(project.path)
  platforms = _GetUniquePlatforms(spec)
  p = MSVSProject.Writer(project.path, version, spec['target_name'],
                         project.guid, platforms)
  # Get directory project file is in.
  project_dir = os.path.split(project.path)[0]
  gyp_path = _NormalizedSource(project.build_file)
  relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
  config_type = _GetMSVSConfigurationType(spec, project.build_file)
  for config_name, config in spec['configurations'].iteritems():
    _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config)
  # MSVC08 and prior version cannot handle duplicate basenames in the same
  # target.
  # TODO: Take excluded sources into consideration if possible.
  _ValidateSourcesForMSVSProject(spec, version)
  # Prepare list of sources and excluded sources.
  gyp_file = os.path.split(project.build_file)[1]
  sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
                                                    gyp_file)
  # Add rules.
  actions_to_add = {}
  _GenerateRulesForMSVS(p, project_dir, options, spec,
                        sources, excluded_sources,
                        actions_to_add)
  list_excluded = generator_flags.get('msvs_list_excluded_files', True)
  sources, excluded_sources, excluded_idl = (
      _AdjustSourcesAndConvertToFilterHierarchy(spec, options, project_dir,
                                                sources, excluded_sources,
                                                list_excluded, version))
  # Add in files.
  missing_sources = _VerifySourcesExist(sources, project_dir)
  p.AddFiles(sources)
  _AddToolFilesToMSVS(p, spec)
  _HandlePreCompiledHeaders(p, sources, spec)
  _AddActions(actions_to_add, spec, relative_path_of_gyp_file)
  _AddCopies(actions_to_add, spec)
  _WriteMSVSUserFile(project.path, version, spec)
  # NOTE: this stanza must appear after all actions have been decided.
  # Don't excluded sources with actions attached, or they won't run.
  excluded_sources = _FilterActionsFromExcluded(
      excluded_sources, actions_to_add)
  _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
                              list_excluded)
  _AddAccumulatedActionsToMSVS(p, spec, actions_to_add)
  # Write it out.
  p.WriteIfChanged()
  return missing_sources
def _GetUniquePlatforms(spec):
  """Returns the list of unique platforms for this spec, e.g ['win32', ...].

  Arguments:
    spec: The target dictionary containing the properties of the target.
  Returns:
    The list of unique platform names across all configurations, in
    first-seen order.
  """
  platforms = OrderedSet()
  configs = spec['configurations']
  for config_name in configs:
    platforms.add(_ConfigPlatform(configs[config_name]))
  return list(platforms)
def _CreateMSVSUserFile(proj_path, version, spec):
  """Generates a .user file for the user running this Gyp program.

  Arguments:
    proj_path: The path of the project file being created.  The .user file
        shares the same path (with an appropriate suffix).
    version: The VisualStudioVersion object.
    spec: The target dictionary containing the properties of the target.
  Returns:
    The MSVSUserFile object created.
  """
  domain, username = _GetDomainAndUserName()
  vcuser_filename = '.'.join([proj_path, domain, username, 'user'])
  return MSVSUserFile.Writer(vcuser_filename, version, spec['target_name'])
def _GetMSVSConfigurationType(spec, build_file):
"""Returns the configuration type for this project.
It's a number defined by Microsoft. May raise an exception.
Args:
spec: The target dictionary containing the properties of the target.
build_file: The path of the gyp file.
Returns:
An integer, the configuration type.
"""
try:
config_type = {
'executable': '1', # .exe
'shared_library': '2', # .dll
'loadable_module': '2', # .dll
'static_library': '4', # .lib
'none': '10', # Utility type
}[spec['type']]
except KeyError:
if spec.get('type'):
raise GypError('Target type %s is not a valid target type for '
'target %s in %s.' %
(spec['type'], spec['target_name'], build_file))
else:
raise GypError('Missing type field for target %s in %s.' %
(spec['target_name'], build_file))
return config_type
def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config):
  """Adds a configuration to the MSVS project.

  Many settings in a vcproj file are specific to a configuration.  This
  function adds the main part of the vcproj file that's configuration
  specific.

  Arguments:
    p: The target project being generated.
    spec: The target dictionary containing the properties of the target.
    config_type: The configuration type, a number as defined by Microsoft.
    config_name: The name of the configuration.
    config: The dictionary that defines the special processing to be done
        for this configuration.
  """
  # Get the information for this configuration
  include_dirs, midl_include_dirs, resource_include_dirs = \
      _GetIncludeDirs(config)
  libraries = _GetLibraries(spec)
  library_dirs = _GetLibraryDirs(config)
  out_file, vc_tool, _ = _GetOutputFilePathAndTool(spec, msbuild=False)
  defines = _GetDefines(config)
  defines = [_EscapeCppDefineForMSVS(d) for d in defines]
  disabled_warnings = _GetDisabledWarnings(config)
  prebuild = config.get('msvs_prebuild')
  postbuild = config.get('msvs_postbuild')
  def_file = _GetModuleDefinition(spec)
  precompiled_header = config.get('msvs_precompiled_header')
  # Prepare the list of tools as a dictionary.
  tools = dict()
  # Add in user specified msvs_settings.
  msvs_settings = config.get('msvs_settings', {})
  MSVSSettings.ValidateMSVSSettings(msvs_settings)
  # Prevent default library inheritance from the environment.
  _ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', ['$(NOINHERIT)'])
  # User settings go in first; the computed values below append to them.
  for tool in msvs_settings:
    settings = config['msvs_settings'][tool]
    for setting in settings:
      _ToolAppend(tools, tool, setting, settings[setting])
  # Add the information to the appropriate tool
  _ToolAppend(tools, 'VCCLCompilerTool',
              'AdditionalIncludeDirectories', include_dirs)
  _ToolAppend(tools, 'VCMIDLTool',
              'AdditionalIncludeDirectories', midl_include_dirs)
  _ToolAppend(tools, 'VCResourceCompilerTool',
              'AdditionalIncludeDirectories', resource_include_dirs)
  # Add in libraries.
  _ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries)
  _ToolAppend(tools, 'VCLinkerTool', 'AdditionalLibraryDirectories',
              library_dirs)
  if out_file:
    # only_if_unset: respect an OutputFile the user already configured.
    _ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True)
  # Add defines.
  _ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines)
  _ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions',
              defines)
  # Change program database directory to prevent collisions.
  _ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName',
              '$(IntDir)$(ProjectName)\\vc80.pdb', only_if_unset=True)
  # Add disabled warnings.
  _ToolAppend(tools, 'VCCLCompilerTool',
              'DisableSpecificWarnings', disabled_warnings)
  # Add Pre-build.
  _ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild)
  # Add Post-build.
  _ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild)
  # Turn on precompiled headers if appropriate.
  if precompiled_header:
    precompiled_header = os.path.split(precompiled_header)[1]
    _ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2')
    _ToolAppend(tools, 'VCCLCompilerTool',
                'PrecompiledHeaderThrough', precompiled_header)
    _ToolAppend(tools, 'VCCLCompilerTool',
                'ForcedIncludeFiles', precompiled_header)
  # Loadable modules don't generate import libraries;
  # tell dependent projects to not expect one.
  if spec['type'] == 'loadable_module':
    _ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true')
  # Set the module definition file if any.
  if def_file:
    _ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file)
  _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name)
def _GetIncludeDirs(config):
  """Compute the #include search paths for a configuration.

  Arguments:
    config: The dictionary that defines the special processing to be done
        for this configuration.
  Returns:
    A triple of (compiler include dirs, MIDL include dirs,
    resource-compiler include dirs), each already fixed up.
  """
  # TODO(bradnelson): include_dirs should really be flexible enough not to
  # require this sort of thing.
  system_dirs = config.get('msvs_system_include_dirs', [])
  compiler_dirs = config.get('include_dirs', []) + system_dirs
  midl_dirs = config.get('midl_include_dirs', []) + system_dirs
  # The resource compiler falls back to the compiler's include path unless
  # explicitly overridden.
  resource_dirs = config.get('resource_include_dirs', compiler_dirs)
  return (_FixPaths(compiler_dirs),
          _FixPaths(midl_dirs),
          _FixPaths(resource_dirs))
def _GetLibraryDirs(config):
  """Returns the list of directories to be used for library search paths.

  Arguments:
    config: The dictionary that defines the special processing to be done
        for this configuration.
  Returns:
    The list of fixed-up directory paths.
  """
  return _FixPaths(config.get('library_dirs', []))
def _GetLibraries(spec):
"""Returns the list of libraries for this configuration.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The list of directory paths.
"""
libraries = spec.get('libraries', [])
# Strip out -l, as it is not used on windows (but is needed so we can pass
# in libraries that are assumed to be in the default library path).
# Also remove duplicate entries, leaving only the last duplicate, while
# preserving order.
found = OrderedSet()
unique_libraries_list = []
for entry in reversed(libraries):
library = re.sub(r'^\-l', '', entry)
if not os.path.splitext(library)[1]:
library += '.lib'
if library not in found:
found.add(library)
unique_libraries_list.append(library)
unique_libraries_list.reverse()
return unique_libraries_list
def _GetOutputFilePathAndTool(spec, msbuild):
"""Returns the path and tool to use for this target.
Figures out the path of the file this spec will create and the name of
the VC tool that will create it.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A triple of (file path, name of the vc tool, name of the msbuild tool)
"""
# Select a name for the output file.
out_file = ''
vc_tool = ''
msbuild_tool = ''
output_file_map = {
'executable': ('VCLinkerTool', 'Link', '$(OutDir)', '.exe'),
'shared_library': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'loadable_module': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'static_library': ('VCLibrarianTool', 'Lib', '$(OutDir)lib\\', '.lib'),
}
output_file_props = output_file_map.get(spec['type'])
if output_file_props and int(spec.get('msvs_auto_output_file', 1)):
vc_tool, msbuild_tool, out_dir, suffix = output_file_props
if spec.get('standalone_static_library', 0):
out_dir = '$(OutDir)'
out_dir = spec.get('product_dir', out_dir)
product_extension = spec.get('product_extension')
if product_extension:
suffix = '.' + product_extension
elif msbuild:
suffix = '$(TargetExt)'
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
out_file = ntpath.join(out_dir, prefix + product_name + suffix)
return out_file, vc_tool, msbuild_tool
def _GetOutputTargetExt(spec):
"""Returns the extension for this target, including the dot
If product_extension is specified, set target_extension to this to avoid
MSB8012, returns None otherwise. Ignores any target_extension settings in
the input files.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A string with the extension, or None
"""
target_extension = spec.get('product_extension')
if target_extension:
return '.' + target_extension
return None
def _GetDefines(config):
"""Returns the list of preprocessor definitions for this configuation.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of preprocessor definitions.
"""
defines = []
for d in config.get('defines', []):
if type(d) == list:
fd = '='.join([str(dpart) for dpart in d])
else:
fd = str(d)
defines.append(fd)
return defines
def _GetDisabledWarnings(config):
return [str(i) for i in config.get('msvs_disabled_warnings', [])]
def _GetModuleDefinition(spec):
def_file = ''
if spec['type'] in ['shared_library', 'loadable_module', 'executable']:
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
def_file = _FixPath(def_files[0])
elif def_files:
raise ValueError(
'Multiple module definition files in one target, target %s lists '
'multiple .def files: %s' % (
spec['target_name'], ' '.join(def_files)))
return def_file
def _ConvertToolsToExpectedForm(tools):
  """Convert tools to a form expected by Visual Studio.

  Arguments:
    tools: A dictionary of settings; the tool name is the key.
  Returns:
    A list of Tool objects.
  """
  tool_list = []
  for tool_name, settings in tools.items():
    flattened = {}
    for setting, value in settings.items():
      if type(value) == list:
        # Linker AdditionalDependencies and any AdditionalOptions are
        # space-separated; every other list-valued setting uses semicolons.
        if (setting == 'AdditionalOptions' or
            (tool_name == 'VCLinkerTool' and
             setting == 'AdditionalDependencies')):
          flattened[setting] = ' '.join(value)
        else:
          flattened[setting] = ';'.join(value)
      else:
        flattened[setting] = value
    tool_list.append(MSVSProject.Tool(tool_name, flattened))
  return tool_list
def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name):
  """Add to the project file the configuration specified by config.

  Arguments:
    p: The target project being generated.
    spec: the target project dict.
    tools: A dictionary of settings; the tool name is the key.
    config: The dictionary that defines the special processing to be done
        for this configuration.
    config_type: The configuration type, a number as defined by Microsoft.
    config_name: The name of the configuration.
  """
  p.AddConfig(_ConfigFullName(config_name, config),
              attrs=_GetMSVSAttributes(spec, config, config_type),
              tools=_ConvertToolsToExpectedForm(tools))
def _GetMSVSAttributes(spec, config, config_type):
  """Build the attribute dict for a project configuration element."""
  # Start from the attributes declared in the gyp file.
  prepared_attrs = dict(config.get('msvs_configuration_attributes', {}))
  # Inherit any .vsprops property sheets.
  vsprops = _FixPaths(config.get('msvs_props', []))
  if vsprops:
    prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops)
  prepared_attrs['ConfigurationType'] = config_type
  # Both directories must end with a backslash for Visual Studio.
  output_dir = prepared_attrs.get('OutputDirectory',
                                  '$(SolutionDir)$(ConfigurationName)')
  prepared_attrs['OutputDirectory'] = _FixPath(output_dir) + '\\'
  if 'IntermediateDirectory' in prepared_attrs:
    intermediate = _FixPath(prepared_attrs['IntermediateDirectory']) + '\\'
    intermediate = MSVSSettings.FixVCMacroSlashes(intermediate)
  else:
    # Default the intermediate directory so collisions between targets are
    # avoided.
    intermediate = _FixPath('$(ConfigurationName)\\obj\\$(ProjectName)') + '\\'
  prepared_attrs['IntermediateDirectory'] = intermediate
  return prepared_attrs
def _AddNormalizedSources(sources_set, sources_array):
  """Normalize each path in sources_array and add it to sources_set."""
  for source in sources_array:
    sources_set.add(_NormalizedSource(source))
def _PrepareListOfSources(spec, generator_flags, gyp_file):
  """Prepare list of sources and excluded sources.

  Besides the sources specified directly in the spec, adds the gyp file so
  that a change to it will cause a re-compile. Also adds appropriate sources
  for actions and copies. Assumes later stage will un-exclude files which
  have custom build steps attached.

  Arguments:
    spec: The target dictionary containing the properties of the target.
    generator_flags: Dict of generator-specific flags.
    gyp_file: The name of the gyp file.
  Returns:
    A pair of (list of sources, list of excluded sources).
    The sources will be relative to the gyp file.
  """
  sources = OrderedSet()
  excluded_sources = OrderedSet()
  _AddNormalizedSources(sources, spec.get('sources', []))
  # Include the gyp file itself so editing it triggers a re-build (skipped
  # for standalone projects).
  if not generator_flags.get('standalone'):
    sources.add(gyp_file)
  # Actions contribute their inputs (excluded from compilation unless an
  # external builder is in use) and, optionally, their outputs.
  for action in spec.get('actions', []):
    action_inputs = OrderedSet(_NormalizedSource(i) for i in action['inputs'])
    sources.update(action_inputs)
    if not spec.get('msvs_external_builder'):
      excluded_sources.update(action_inputs)
    if int(action.get('process_outputs_as_sources', False)):
      _AddNormalizedSources(sources, action.get('outputs', []))
  # Copies contribute their input files.
  for cpy in spec.get('copies', []):
    _AddNormalizedSources(sources, cpy.get('files', []))
  return (sources, excluded_sources)
def _AdjustSourcesAndConvertToFilterHierarchy(
    spec, options, gyp_dir, sources, excluded_sources, list_excluded, version):
  """Adjusts the list of sources and excluded sources.

  Also converts the sets to lists.

  Arguments:
    spec: The target dictionary containing the properties of the target.
    options: Global generator options.
    gyp_dir: The path to the gyp file being processed.
    sources: A set of sources to be included for this project.
    excluded_sources: A set of sources to be excluded for this project.
    list_excluded: Whether excluded files should still appear in the project.
    version: A MSVSVersion object.
  Returns:
    A trio of (list of sources, list of excluded sources,
               path of excluded IDL file)
  """
  # Exclude excluded sources coming into the generator.
  excluded_sources.update(OrderedSet(spec.get('sources_excluded', [])))
  # Add excluded sources into sources for good measure: they still need to
  # be listed in the project, just marked as excluded from the build.
  sources.update(excluded_sources)
  # Convert to proper windows form.
  # NOTE: sources goes from being a set to a list here.
  sources = _FixPaths(sources)
  # Convert to proper windows form.
  # NOTE: excluded_sources goes from being a set to a list here.
  excluded_sources = _FixPaths(excluded_sources)
  # idl files handled by an external (non-native) rule must not be fed to
  # the native MIDL tool.
  excluded_idl = _IdlFilesHandledNonNatively(spec, sources)
  precompiled_related = _GetPrecompileRelatedFiles(spec)
  # Find the excluded ones, minus the precompiled header related ones.
  fully_excluded = [i for i in excluded_sources if i not in precompiled_related]
  # Convert to folders and the right slashes.
  sources = [i.split('\\') for i in sources]
  sources = _ConvertSourcesToFilterHierarchy(sources, excluded=fully_excluded,
                                             list_excluded=list_excluded,
                                             msvs_version=version)
  # Prune filters with a single child to flatten ugly directory structures
  # such as ../../src/modules/module1 etc.
  if version.UsesVcxproj():
    # MSBuild flavor: repeatedly strip a single filter name shared by ALL
    # top-level entries.
    while all([isinstance(s, MSVSProject.Filter) for s in sources]) \
        and len(set([s.name for s in sources])) == 1:
      assert all([len(s.contents) == 1 for s in sources])
      sources = [s.contents[0] for s in sources]
  else:
    # Old-style projects: unwrap while there is exactly one root filter.
    while len(sources) == 1 and isinstance(sources[0], MSVSProject.Filter):
      sources = sources[0].contents
  return sources, excluded_sources, excluded_idl
def _IdlFilesHandledNonNatively(spec, sources):
# If any non-native rules use 'idl' as an extension exclude idl files.
# Gather a list here to use later.
using_idl = False
for rule in spec.get('rules', []):
if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
using_idl = True
break
if using_idl:
excluded_idl = [i for i in sources if i.endswith('.idl')]
else:
excluded_idl = []
return excluded_idl
def _GetPrecompileRelatedFiles(spec):
  """Gather precompiled-header-related sources across all configurations."""
  related = []
  for config in spec['configurations'].values():
    for key in precomp_keys:
      value = config.get(key)
      if value:
        related.append(_FixPath(value))
  return related
def _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
                                list_excluded):
  """Mark the excluded files as ExcludedFromBuild in the project."""
  exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
  total_configs = len(spec['configurations'])
  for file_name, excluded_configs in exclusions.items():
    # When excluded files aren't listed in the project, a file excluded in
    # every configuration never appears at all, so there is nothing to
    # configure for it.
    if not list_excluded and len(excluded_configs) == total_configs:
      continue
    for config_name, config in excluded_configs:
      p.AddFileConfig(file_name, _ConfigFullName(config_name, config),
                      {'ExcludedFromBuild': 'true'})
def _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl):
  """Map each excluded file to the (config_name, config) pairs excluding it.

  A file is not excluded for a configuration where it is precompiled-header
  related; externally-handled idl files are excluded for every configuration.
  """
  exclusions = {}
  configurations = spec['configurations']
  for f in excluded_sources:
    excluded_configs = []
    for config_name, config in configurations.items():
      # Skip configurations in which this file is PCH-related.
      precomped = [_FixPath(config.get(key, '')) for key in precomp_keys]
      if f not in precomped:
        excluded_configs.append((config_name, config))
    exclusions[f] = excluded_configs
  for f in excluded_idl:
    exclusions[f] = [(config_name, config)
                     for config_name, config in configurations.items()]
  return exclusions
def _AddToolFilesToMSVS(p, spec):
  """Register every msvs_tool_files entry (rule files) with the project."""
  tool_files = OrderedSet()
  for config in spec['configurations'].values():
    tool_files.update(config.get('msvs_tool_files', []))
  for tool_file in tool_files:
    p.AddToolFile(tool_file)
def _HandlePreCompiledHeaders(p, sources, spec):
  """Configure precompiled-header usage for the project's source files.

  Pre-compiled header source stubs need a different compiler flag
  (generate precompiled header) and any source file not of the same
  kind (i.e. C vs. C++) as the precompiled header source stub needs
  to have use of precompiled headers disabled.

  Arguments:
    p: The target project being generated.
    sources: The hierarchy of filters/files for the target.
    spec: The target dictionary containing the properties of the target.
  """
  extensions_excluded_from_precompile = []
  for config_name, config in spec['configurations'].iteritems():
    source = config.get('msvs_precompiled_source')
    if source:
      source = _FixPath(source)
      # UsePrecompiledHeader=1 for if using precompiled headers.
      tool = MSVSProject.Tool('VCCLCompilerTool',
                              {'UsePrecompiledHeader': '1'})
      p.AddFileConfig(source, _ConfigFullName(config_name, config),
                      {}, tools=[tool])
      basename, extension = os.path.splitext(source)
      # A C stub means C++ sources must not use the PCH, and vice versa.
      # NOTE(review): when configurations disagree on the stub's language,
      # the last configuration iterated wins here — confirm that's intended.
      if extension == '.c':
        extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
      else:
        extensions_excluded_from_precompile = ['.c']
  def DisableForSourceTree(source_tree):
    # Walk the filter hierarchy, turning PCH off (and suppressing inherited
    # forced includes) for every file of the excluded language kind, in
    # every configuration.
    for source in source_tree:
      if isinstance(source, MSVSProject.Filter):
        DisableForSourceTree(source.contents)
      else:
        basename, extension = os.path.splitext(source)
        if extension in extensions_excluded_from_precompile:
          for config_name, config in spec['configurations'].iteritems():
            tool = MSVSProject.Tool('VCCLCompilerTool',
                                    {'UsePrecompiledHeader': '0',
                                     'ForcedIncludeFiles': '$(NOINHERIT)'})
            p.AddFileConfig(_FixPath(source),
                            _ConfigFullName(config_name, config),
                            {}, tools=[tool])
  # Do nothing if there was no precompiled source.
  if extensions_excluded_from_precompile:
    DisableForSourceTree(sources)
def _AddActions(actions_to_add, spec, relative_path_of_gyp_file):
  """Queue up the spec's actions as custom build steps.

  Arguments:
    actions_to_add: Accumulator of action steps, keyed by primary input.
    spec: The target dictionary containing the properties of the target.
    relative_path_of_gyp_file: Fallback input when an action declares none.
  """
  # Don't setup_env every time. When all the actions are run together in one
  # batch file in VS, the PATH will grow too long. Membership in this set
  # means the cygwin environment has already been set up for that input.
  have_setup_env = set()
  for action in spec.get('actions', []):
    # Attach actions to the gyp file if nothing else is there.
    inputs = action.get('inputs') or [relative_path_of_gyp_file]
    attached_to = inputs[0]
    cmd = _BuildCommandLineForRule(
        spec, action, has_input_path=False,
        do_setup_env=attached_to not in have_setup_env)
    have_setup_env.add(attached_to)
    _AddActionStep(actions_to_add,
                   inputs=inputs,
                   outputs=action.get('outputs', []),
                   description=action.get('message', action['action_name']),
                   command=cmd)
def _WriteMSVSUserFile(project_path, version, spec):
  """Write the .user file carrying debugger/run settings, when needed.

  A 'run_as' entry in the spec takes precedence; otherwise targets marked
  with 'test': 1 get a default gtest invocation. Targets with neither get
  no user file at all.
  """
  if 'run_as' in spec:
    run_as = spec['run_as']
    action = run_as.get('action', [])
    environment = run_as.get('environment', [])
    working_directory = run_as.get('working_directory', '.')
  elif int(spec.get('test', 0)):
    action = ['$(TargetPath)', '--gtest_print_time']
    environment = []
    working_directory = '.'
  else:
    return  # Nothing to add
  # Write out the user file.
  user_file = _CreateMSVSUserFile(project_path, version, spec)
  for config_name, config in spec['configurations'].items():
    user_file.AddDebugSettings(_ConfigFullName(config_name, config),
                               action, environment, working_directory)
  user_file.WriteIfChanged()
def _AddCopies(actions_to_add, spec):
  """Queue an action step for every copy declared in the spec."""
  for inputs, outputs, cmd, description in _GetCopies(spec):
    _AddActionStep(actions_to_add, inputs=inputs, outputs=outputs,
                   description=description, command=cmd)
def _GetCopies(spec):
  """Build (inputs, outputs, command, description) tuples for 'copies'.

  _AddCustomBuildToolForMSVS() will call _FixPath() on the inputs and
  outputs, so the generated command lines do the same.
  """
  copies = []
  for cpy in spec.get('copies', []):
    destination = cpy['destination']
    for src in cpy.get('files', []):
      dst = os.path.join(destination, os.path.basename(src))
      if src.endswith('/'):
        # Directory copy: xcopy the tree from inside its parent directory.
        src_bare = src[:-1]
        base_dir, outer_dir = posixpath.split(src_bare)
        cmd = 'cd "%s" && xcopy /e /f /y "%s" "%s\\%s\\"' % (
            _FixPath(base_dir), outer_dir, _FixPath(dst), outer_dir)
        copies.append(([src], ['dummy_copies', dst], cmd,
                       'Copying %s to %s' % (src, dst)))
      else:
        # Single-file copy: make sure the destination directory exists first.
        cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % (
            _FixPath(destination), _FixPath(src), _FixPath(dst))
        copies.append(([src], [dst], cmd, 'Copying %s to %s' % (src, dst)))
  return copies
def _GetPathDict(root, path):
# |path| will eventually be empty (in the recursive calls) if it was initially
# relative; otherwise it will eventually end up as '\', 'D:\', etc.
if not path or path.endswith(os.sep):
return root
parent, folder = os.path.split(path)
parent_dict = _GetPathDict(root, parent)
if folder not in parent_dict:
parent_dict[folder] = dict()
return parent_dict[folder]
def _DictsToFolders(base_path, bucket, flat):
  """Recursively convert the path-dict tree into solution folder entries."""
  children = []
  for folder, contents in bucket.items():
    if type(contents) == dict:
      subfolder_path = os.path.join(base_path, folder)
      folder_children = _DictsToFolders(subfolder_path, contents, flat)
      if flat:
        # Flat solutions hoist everything to the top level.
        children.extend(folder_children)
      else:
        children.append(MSVSNew.MSVSFolder(subfolder_path,
                                           name='(' + folder + ')',
                                           entries=folder_children))
    else:
      # Leaf: already a project object.
      children.append(contents)
  return children
def _CollapseSingles(parent, node):
# Recursively explorer the tree of dicts looking for projects which are
# the sole item in a folder which has the same name as the project. Bring
# such projects up one level.
if (type(node) == dict and
len(node) == 1 and
node.keys()[0] == parent + '.vcproj'):
return node[node.keys()[0]]
if type(node) != dict:
return node
for child in node:
node[child] = _CollapseSingles(child, node[child])
return node
def _GatherSolutionFolders(sln_projects, project_objects, flat):
  """Arrange the solution's projects into a folder hierarchy."""
  root = {}
  # Build a tree of dicts keyed on path components.
  for qualified in sln_projects:
    gyp_file, target = gyp.common.ParseQualifiedTarget(qualified)[0:2]
    path_dict = _GetPathDict(root, os.path.dirname(gyp_file))
    path_dict[target + '.vcproj'] = project_objects[qualified]
  # Walk down from the top until we hit a folder that has more than one
  # entry. In practice, this strips the top-level "src/" dir from the
  # hierarchy in the solution.
  while len(root) == 1:
    only_child = root[next(iter(root))]
    if type(only_child) != dict:
      break
    root = only_child
  # Collapse singles.
  root = _CollapseSingles('', root)
  # Merge buckets until everything is a root entry.
  return _DictsToFolders('', root, flat)
def _GetPathOfProject(qualified_target, spec, options, msvs_version):
  """Compute the project file path and the path fix-up prefix.

  Returns:
    A pair (proj_path, fix_prefix) where fix_prefix is the relative path
    from the redirected project directory back to the original one, or None
    when generator_output redirection is not in effect.
  """
  default_config = _GetDefaultConfiguration(spec)
  filename = default_config.get('msvs_existing_vcproj')
  if not filename:
    filename = '%s%s%s' % (spec['target_name'], options.suffix,
                           msvs_version.ProjectExtension())
  build_file = gyp.common.BuildFile(qualified_target)
  proj_path = os.path.join(os.path.dirname(build_file), filename)
  fix_prefix = None
  if options.generator_output:
    # The project moves under generator_output; remember how to get back.
    original_dir = os.path.dirname(os.path.abspath(proj_path))
    proj_path = os.path.join(options.generator_output, proj_path)
    fix_prefix = gyp.common.RelativePath(original_dir,
                                         os.path.dirname(proj_path))
  return proj_path, fix_prefix
def _GetPlatformOverridesOfProject(spec):
  """Map each solution configuration to the project configuration used.

  Returns a dict keyed on the full configuration name whose values carry
  the msvs_target_platform override (when present) in place of the default
  platform.
  """
  overrides = {}
  for config_name, config in spec['configurations'].items():
    default_platform = _ConfigPlatform(config)
    platform = config.get('msvs_target_platform', default_platform)
    fixed_name = '%s|%s' % (_ConfigBaseName(config_name, default_platform),
                            platform)
    overrides[_ConfigFullName(config_name, config)] = fixed_name
  return overrides
def _CreateProjectObjects(target_list, target_dicts, options, msvs_version):
  """Create a MSVSProject object for the targets found in target list.

  Arguments:
    target_list: the list of targets to generate project objects for.
    target_dicts: the dictionary of specifications.
    options: global generator options.
    msvs_version: the MSVSVersion object.
  Returns:
    A set of created projects, keyed by target.
  Raises:
    GypError: if a target uses a toolset other than 'target'.
  """
  # fixpath_prefix is a module-level global; it is set per-project here and
  # presumably consumed by path-fixing helpers while the project's GUID and
  # object are created — keep the assignment ordering as-is.
  global fixpath_prefix
  # Generate each project.
  projects = {}
  for qualified_target in target_list:
    spec = target_dicts[qualified_target]
    if spec['toolset'] != 'target':
      raise GypError(
          'Multiple toolsets not supported in msvs build (target %s)' %
          qualified_target)
    proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec,
                                                  options, msvs_version)
    guid = _GetGuidOfProject(proj_path, spec)
    overrides = _GetPlatformOverridesOfProject(spec)
    build_file = gyp.common.BuildFile(qualified_target)
    # Create object for this project.
    obj = MSVSNew.MSVSProject(
        proj_path,
        name=spec['target_name'],
        guid=guid,
        spec=spec,
        build_file=build_file,
        config_platform_overrides=overrides,
        fixpath_prefix=fixpath_prefix)
    # Set project toolset if any (MS build only)
    if msvs_version.UsesVcxproj():
      obj.set_msbuild_toolset(
          _GetMsbuildToolsetOfProject(proj_path, spec, msvs_version))
    projects[qualified_target] = obj
  # Set all the dependencies, but not if we are using an external builder like
  # ninja
  for project in projects.values():
    if not project.spec.get('msvs_external_builder'):
      deps = project.spec.get('dependencies', [])
      deps = [projects[d] for d in deps]
      project.set_dependencies(deps)
  return projects
def _InitNinjaFlavor(params, target_list, target_dicts):
  """Initialize targets for the ninja flavor.

  This sets up the necessary variables in the targets to generate msvs
  projects that use ninja as an external builder. The variables in the spec
  are only set if they have not been set. This allows individual specs to
  override the default values initialized here.

  Arguments:
    params: Params provided to the generator.
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
  """
  for qualified_target in target_list:
    spec = target_dicts[qualified_target]
    if spec.get('msvs_external_builder'):
      # The spec explicitly defined an external builder, so don't change it.
      continue
    spec['msvs_external_builder'] = 'ninja'
    path_to_ninja = spec.get('msvs_path_to_ninja', 'ninja.exe')
    if not spec.get('msvs_external_builder_out_dir'):
      gyp_file = gyp.common.ParseQualifiedTarget(qualified_target)[0]
      gyp_dir = os.path.dirname(gyp_file)
      configuration = '$(Configuration)'
      if params.get('target_arch') == 'x64':
        configuration += '_x64'
      spec['msvs_external_builder_out_dir'] = os.path.join(
          gyp.common.RelativePath(params['options'].toplevel_dir, gyp_dir),
          ninja_generator.ComputeOutputDir(params),
          configuration)
    if not spec.get('msvs_external_builder_build_cmd'):
      spec['msvs_external_builder_build_cmd'] = [
          path_to_ninja, '-C', '$(OutDir)', '$(ProjectName)']
    if not spec.get('msvs_external_builder_clean_cmd'):
      spec['msvs_external_builder_clean_cmd'] = [
          path_to_ninja, '-C', '$(OutDir)', '-tclean', '$(ProjectName)']
def CalculateVariables(default_variables, params):
  """Generated variables that require params to be known."""
  generator_flags = params.get('generator_flags', {})
  # Select project file format version (if unset, default to auto detecting).
  msvs_version = MSVSVersion.SelectVisualStudioVersion(
      generator_flags.get('msvs_version', 'auto'))
  # Stash msvs_version for later (so we don't have to probe the system twice).
  params['msvs_version'] = msvs_version
  # Set a variable so conditions can be based on msvs_version.
  default_variables['MSVS_VERSION'] = msvs_version.ShortName()
  # To determine processor word size on Windows, in addition to checking
  # PROCESSOR_ARCHITECTURE (which reflects the word size of the current
  # process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
  # contains the actual word size of the system when running through WOW64).
  archs = (os.environ.get('PROCESSOR_ARCHITECTURE', ''),
           os.environ.get('PROCESSOR_ARCHITEW6432', ''))
  is_64bit = any('64' in arch for arch in archs)
  default_variables['MSVS_OS_BITS'] = 64 if is_64bit else 32
  if gyp.common.GetFlavor(params) == 'ninja':
    default_variables['SHARED_INTERMEDIATE_DIR'] = '$(OutDir)gen'
def PerformBuild(data, configurations, params):
  """Build the given configurations of every generated .sln with devenv.

  Arguments:
    data: Dictionary containing per .gyp data.
    configurations: The list of configuration names to build.
    params: Generator params (options plus the stashed msvs_version).
  """
  options = params['options']
  msvs_version = params['msvs_version']
  # Drive the build through devenv.com so output appears on the console.
  devenv = os.path.join(msvs_version.path, 'Common7', 'IDE', 'devenv.com')
  for build_file, build_file_dict in data.iteritems():
    # Only .gyp files produce solutions; skip includes and the like.
    (build_file_root, build_file_ext) = os.path.splitext(build_file)
    if build_file_ext != '.gyp':
      continue
    sln_path = build_file_root + options.suffix + '.sln'
    if options.generator_output:
      sln_path = os.path.join(options.generator_output, sln_path)
    for config in configurations:
      arguments = [devenv, sln_path, '/Build', config]
      print 'Building [%s]: %s' % (config, arguments)
      # check_call raises CalledProcessError on a non-zero exit, which
      # aborts the build loop; the return value itself is unused.
      rtn = subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
  """Generate .sln and .vcproj files.

  This is the entry point for this generator.

  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
    data: Dictionary containing per .gyp data.
    params: Generator params (options, generator_flags, stashed
        msvs_version).
  """
  # fixpath_prefix is a module-level global consumed while generating each
  # project; it is set per-project below and reset afterwards.
  global fixpath_prefix
  options = params['options']
  # Get the project file format version back out of where we stashed it in
  # GeneratorCalculatedVariables.
  msvs_version = params['msvs_version']
  generator_flags = params.get('generator_flags', {})
  # Optionally shard targets marked with 'msvs_shard': SHARD_COUNT.
  (target_list, target_dicts) = MSVSUtil.ShardTargets(target_list, target_dicts)
  # Optionally use the large PDB workaround for targets marked with
  # 'msvs_large_pdb': 1.
  (target_list, target_dicts) = MSVSUtil.InsertLargePdbShims(
      target_list, target_dicts, generator_default_variables)
  # Optionally configure each spec to use ninja as the external builder.
  if params.get('flavor') == 'ninja':
    _InitNinjaFlavor(params, target_list, target_dicts)
  # Prepare the set of configurations.
  configs = set()
  for qualified_target in target_list:
    spec = target_dicts[qualified_target]
    for config_name, config in spec['configurations'].iteritems():
      configs.add(_ConfigFullName(config_name, config))
  configs = list(configs)
  # Figure out all the projects that will be generated and their guids
  project_objects = _CreateProjectObjects(target_list, target_dicts, options,
                                          msvs_version)
  # Generate each project.
  missing_sources = []
  for project in project_objects.values():
    fixpath_prefix = project.fixpath_prefix
    missing_sources.extend(_GenerateProject(project, options, msvs_version,
                                            generator_flags))
  fixpath_prefix = None
  # Emit one solution per .gyp build file.
  for build_file in data:
    # Validate build_file extension
    if not build_file.endswith('.gyp'):
      continue
    sln_path = os.path.splitext(build_file)[0] + options.suffix + '.sln'
    if options.generator_output:
      sln_path = os.path.join(options.generator_output, sln_path)
    # Get projects in the solution, and their dependents.
    sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
    sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects)
    # Create folder hierarchy.
    root_entries = _GatherSolutionFolders(
        sln_projects, project_objects, flat=msvs_version.FlatSolution())
    # Create solution.
    sln = MSVSNew.MSVSSolution(sln_path,
                               entries=root_entries,
                               variants=configs,
                               websiteProperties=False,
                               version=msvs_version)
    sln.Write()
  # Report any inputs that never materialized; fatal only when the
  # msvs_error_on_missing_sources generator flag is set.
  if missing_sources:
    error_message = "Missing input files:\n" + \
                    '\n'.join(set(missing_sources))
    if generator_flags.get('msvs_error_on_missing_sources', False):
      raise GypError(error_message)
    else:
      print >> sys.stdout, "Warning: " + error_message
def _GenerateMSBuildFiltersFile(filters_path, source_files,
                                rule_dependencies, extension_to_rule_name):
  """Generate the filters file.

  This file is used by Visual Studio to organize the presentation of source
  files into folders.

  Arguments:
    filters_path: The path of the file to be created.
    source_files: The hierarchical structure of all the sources.
    rule_dependencies: Dependencies of the rules.
    extension_to_rule_name: A dictionary mapping file extensions to rules.
  """
  filter_group = []
  source_group = []
  _AppendFiltersForMSBuild('', source_files, rule_dependencies,
                           extension_to_rule_name, filter_group, source_group)
  if not filter_group:
    # No filters are needed; remove any stale filter file left behind.
    if os.path.exists(filters_path):
      os.unlink(filters_path)
    return
  content = ['Project',
             {'ToolsVersion': '4.0',
              'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
             },
             ['ItemGroup'] + filter_group,
             ['ItemGroup'] + source_group
            ]
  easy_xml.WriteXmlIfChanged(content, filters_path, pretty=True, win32=True)
def _AppendFiltersForMSBuild(parent_filter_name, sources, rule_dependencies,
                             extension_to_rule_name,
                             filter_group, source_group):
  """Creates the list of filters and sources to be added in the filter file.

  Args:
    parent_filter_name: The name of the filter under which the sources are
        found.
    sources: The hierarchy of filters and sources to process.
    rule_dependencies: Dependencies of the rules.
    extension_to_rule_name: A dictionary mapping file extensions to rules.
    filter_group: The list to which filter entries will be appended.
    source_group: The list to which source entries will be appended.
  """
  for item in sources:
    if isinstance(item, MSVSProject.Filter):
      # A sub-filter: qualify its name with the parent's, emit it, recurse.
      if parent_filter_name:
        filter_name = '%s\\%s' % (parent_filter_name, item.name)
      else:
        filter_name = item.name
      filter_group.append(
          ['Filter', {'Include': filter_name},
           ['UniqueIdentifier', MSVSNew.MakeGuid(item.name)]])
      _AppendFiltersForMSBuild(filter_name, item.contents,
                               rule_dependencies, extension_to_rule_name,
                               filter_group, source_group)
    else:
      # A plain source: emit an entry tagged with its MSBuild element type.
      _, element = _MapFileToMsBuildSourceType(item, rule_dependencies,
                                               extension_to_rule_name)
      source_entry = [element, {'Include': item}]
      # Specify the filter it is part of, if any.
      if parent_filter_name:
        source_entry.append(['Filter', parent_filter_name])
      source_group.append(source_entry)
def _MapFileToMsBuildSourceType(source, rule_dependencies,
extension_to_rule_name):
"""Returns the group and element type of the source file.
Arguments:
source: The source file name.
extension_to_rule_name: A dictionary mapping file extensions to rules.
Returns:
A pair of (group this file should be part of, the label of element)
"""
_, ext = os.path.splitext(source)
if ext in extension_to_rule_name:
group = 'rule'
element = extension_to_rule_name[ext]
elif ext in ['.cc', '.cpp', '.c', '.cxx']:
group = 'compile'
element = 'ClCompile'
elif ext in ['.h', '.hxx']:
group = 'include'
element = 'ClInclude'
elif ext == '.rc':
group = 'resource'
element = 'ResourceCompile'
elif ext == '.asm':
group = 'masm'
element = 'MASM'
elif ext == '.idl':
group = 'midl'
element = 'Midl'
elif source in rule_dependencies:
group = 'rule_dependency'
element = 'CustomBuild'
else:
group = 'none'
element = 'None'
return (group, element)
def _GenerateRulesForMSBuild(output_dir, options, spec,
                             sources, excluded_sources,
                             props_files_of_rules, targets_files_of_rules,
                             actions_to_add, rule_dependencies,
                             extension_to_rule_name):
  """Generates the MSBuild rules for a target and writes the rule files.

  Native rules produce a .props/.targets/.xml file triple next to the
  project; external rules are handled via _GenerateExternalRules.  The
  collections props_files_of_rules, targets_files_of_rules,
  rule_dependencies, extension_to_rule_name and actions_to_add are
  updated in place.
  """
  # MSBuild rules are implemented using three files: an XML file, a .targets
  # file and a .props file.
  # See http://blogs.msdn.com/b/vcblog/archive/2010/04/21/quick-help-on-vs2010-custom-build-rule.aspx
  # for more details.
  rules = spec.get('rules', [])
  rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
  rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
  msbuild_rules = []
  for rule in rules_native:
    # Skip a rule with no action and no inputs.
    if 'action' not in rule and not rule.get('rule_sources', []):
      continue
    msbuild_rule = MSBuildRule(rule, spec)
    msbuild_rules.append(msbuild_rule)
    # Record the rule's inputs and extension so sources can later be routed
    # to the correct MSBuild element (see _MapFileToMsBuildSourceType).
    rule_dependencies.update(msbuild_rule.additional_dependencies.split(';'))
    extension_to_rule_name[msbuild_rule.extension] = msbuild_rule.rule_name
  if msbuild_rules:
    # The three generated rule files live next to the project and share its
    # base name.
    base = spec['target_name'] + options.suffix
    props_name = base + '.props'
    targets_name = base + '.targets'
    xml_name = base + '.xml'
    props_files_of_rules.add(props_name)
    targets_files_of_rules.add(targets_name)
    props_path = os.path.join(output_dir, props_name)
    targets_path = os.path.join(output_dir, targets_name)
    xml_path = os.path.join(output_dir, xml_name)
    _GenerateMSBuildRulePropsFile(props_path, msbuild_rules)
    _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules)
    _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules)
  if rules_external:
    _GenerateExternalRules(rules_external, output_dir, spec,
                           sources, options, actions_to_add)
  _AdjustSourcesForRules(rules, sources, excluded_sources, True)
class MSBuildRule(object):
  """Used to store information used to generate an MSBuild rule.

  Attributes:
    rule_name: The rule name, sanitized to use in XML.
    target_name: The name of the target.
    after_targets: The name of the AfterTargets element.
    before_targets: The name of the BeforeTargets element.
    depends_on: The name of the DependsOn element.
    compute_output: The name of the ComputeOutput element.
    dirs_to_make: The name of the DirsToMake element.
    inputs: The name of the _inputs element.
    tlog: The name of the _tlog element.
    extension: The extension this rule applies to.
    description: The message displayed when this rule is invoked.
    additional_dependencies: A string listing additional dependencies.
    outputs: The outputs of this rule.
    command: The command used to run the rule.
  """

  def __init__(self, rule, spec):
    """Initializes the rule from the 'rules' entry `rule` of target `spec`."""
    self.display_name = rule['rule_name']
    # Assure that the rule name is only characters and numbers
    self.rule_name = re.sub(r'\W', '_', self.display_name)
    # Create the various element names, following the example set by the
    # Visual Studio 2008 to 2010 conversion.  I don't know if VS2010
    # is sensitive to the exact names.
    self.target_name = '_' + self.rule_name
    self.after_targets = self.rule_name + 'AfterTargets'
    self.before_targets = self.rule_name + 'BeforeTargets'
    self.depends_on = self.rule_name + 'DependsOn'
    self.compute_output = 'Compute%sOutput' % self.rule_name
    self.dirs_to_make = self.rule_name + 'DirsToMake'
    self.inputs = self.rule_name + '_inputs'
    self.tlog = self.rule_name + '_tlog'
    self.extension = rule['extension']
    if not self.extension.startswith('.'):
      # Normalize so extensions always carry a leading dot.
      self.extension = '.' + self.extension
    self.description = MSVSSettings.ConvertVCMacrosToMSBuild(
        rule.get('message', self.rule_name))
    old_additional_dependencies = _FixPaths(rule.get('inputs', []))
    self.additional_dependencies = (
        ';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
                  for i in old_additional_dependencies]))
    old_outputs = _FixPaths(rule.get('outputs', []))
    self.outputs = ';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
                             for i in old_outputs])
    old_command = _BuildCommandLineForRule(spec, rule, has_input_path=True,
                                           do_setup_env=True)
    self.command = MSVSSettings.ConvertVCMacrosToMSBuild(old_command)
def _GenerateMSBuildRulePropsFile(props_path, msbuild_rules):
  """Generate the .props file."""
  content = ['Project',
             {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'}]
  for rule in msbuild_rules:
    # Each rule contributes: default Before/After target hooks (defaulted to
    # Midl/CustomBuild when unset), a DependsOn property, and the item
    # definition carrying its command line, outputs, description and deps.
    content.extend([
        ['PropertyGroup',
         {'Condition': "'$(%s)' == '' and '$(%s)' == '' and "
          "'$(ConfigurationType)' != 'Makefile'" % (rule.before_targets,
                                                    rule.after_targets)
         },
         [rule.before_targets, 'Midl'],
         [rule.after_targets, 'CustomBuild'],
        ],
        ['PropertyGroup',
         [rule.depends_on,
          {'Condition': "'$(ConfigurationType)' != 'Makefile'"},
          '_SelectedFiles;$(%s)' % rule.depends_on
         ],
        ],
        ['ItemDefinitionGroup',
         [rule.rule_name,
          ['CommandLineTemplate', rule.command],
          ['Outputs', rule.outputs],
          ['ExecutionDescription', rule.description],
          ['AdditionalDependencies', rule.additional_dependencies],
         ],
        ]
    ])
  easy_xml.WriteXmlIfChanged(content, props_path, pretty=True, win32=True)
def _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules):
  """Generate the .targets file."""
  content = ['Project',
             {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
             }
            ]
  # Register the property-page schema (the generated .xml file) and one
  # AvailableItemName per rule.
  item_group = [
      'ItemGroup',
      ['PropertyPageSchema',
       {'Include': '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'}
      ]
     ]
  for rule in msbuild_rules:
    item_group.append(
        ['AvailableItemName',
         {'Include': rule.rule_name},
         ['Targets', rule.target_name],
        ])
  content.append(item_group)
  # Each rule's task is instantiated from the .xml schema via the
  # XamlTaskFactory.
  for rule in msbuild_rules:
    content.append(
        ['UsingTask',
         {'TaskName': rule.rule_name,
          'TaskFactory': 'XamlTaskFactory',
          'AssemblyName': 'Microsoft.Build.Tasks.v4.0'
         },
         ['Task', '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'],
        ])
  for rule in msbuild_rules:
    rule_name = rule.rule_name
    # Sections of the per-rule Target and ComputeOutput Target, assembled
    # into `content` below.
    target_outputs = '%%(%s.Outputs)' % rule_name
    target_inputs = ('%%(%s.Identity);%%(%s.AdditionalDependencies);'
                     '$(MSBuildProjectFile)') % (rule_name, rule_name)
    rule_inputs = '%%(%s.Identity)' % rule_name
    extension_condition = ("'%(Extension)'=='.obj' or "
                           "'%(Extension)'=='.res' or "
                           "'%(Extension)'=='.rsc' or "
                           "'%(Extension)'=='.lib'")
    remove_section = [
        'ItemGroup',
        {'Condition': "'@(SelectedFiles)' != ''"},
        [rule_name,
         {'Remove': '@(%s)' % rule_name,
          'Condition': "'%(Identity)' != '@(SelectedFiles)'"
         }
        ]
    ]
    inputs_section = [
        'ItemGroup',
        [rule.inputs, {'Include': '%%(%s.AdditionalDependencies)' % rule_name}]
    ]
    # Tracker-log (.tlog) items enable incremental builds for the rule.
    logging_section = [
        'ItemGroup',
        [rule.tlog,
         {'Include': '%%(%s.Outputs)' % rule_name,
          'Condition': ("'%%(%s.Outputs)' != '' and "
                        "'%%(%s.ExcludedFromBuild)' != 'true'" %
                        (rule_name, rule_name))
         },
         ['Source', "@(%s, '|')" % rule_name],
         ['Inputs', "@(%s -> '%%(Fullpath)', ';')" % rule.inputs],
        ],
    ]
    message_section = [
        'Message',
        {'Importance': 'High',
         'Text': '%%(%s.ExecutionDescription)' % rule_name
        }
    ]
    write_tlog_section = [
        'WriteLinesToFile',
        {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
                      "'true'" % (rule.tlog, rule.tlog),
         'File': '$(IntDir)$(ProjectName).write.1.tlog',
         'Lines': "^%%(%s.Source);@(%s->'%%(Fullpath)')" % (rule.tlog,
                                                            rule.tlog)
        }
    ]
    read_tlog_section = [
        'WriteLinesToFile',
        {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
                      "'true'" % (rule.tlog, rule.tlog),
         'File': '$(IntDir)$(ProjectName).read.1.tlog',
         'Lines': "^%%(%s.Source);%%(%s.Inputs)" % (rule.tlog, rule.tlog)
        }
    ]
    command_and_input_section = [
        rule_name,
        {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
                      "'true'" % (rule_name, rule_name),
         'EchoOff': 'true',
         'StandardOutputImportance': 'High',
         'StandardErrorImportance': 'High',
         'CommandLineTemplate': '%%(%s.CommandLineTemplate)' % rule_name,
         'AdditionalOptions': '%%(%s.AdditionalOptions)' % rule_name,
         'Inputs': rule_inputs
        }
    ]
    content.extend([
        ['Target',
         {'Name': rule.target_name,
          'BeforeTargets': '$(%s)' % rule.before_targets,
          'AfterTargets': '$(%s)' % rule.after_targets,
          'Condition': "'@(%s)' != ''" % rule_name,
          'DependsOnTargets': '$(%s);%s' % (rule.depends_on,
                                            rule.compute_output),
          'Outputs': target_outputs,
          'Inputs': target_inputs
         },
         remove_section,
         inputs_section,
         logging_section,
         message_section,
         write_tlog_section,
         read_tlog_section,
         command_and_input_section,
        ],
        ['PropertyGroup',
         ['ComputeLinkInputsTargets',
          '$(ComputeLinkInputsTargets);',
          '%s;' % rule.compute_output
         ],
         ['ComputeLibInputsTargets',
          '$(ComputeLibInputsTargets);',
          '%s;' % rule.compute_output
         ],
        ],
        ['Target',
         {'Name': rule.compute_output,
          'Condition': "'@(%s)' != ''" % rule_name
         },
         ['ItemGroup',
          [rule.dirs_to_make,
           {'Condition': "'@(%s)' != '' and "
            "'%%(%s.ExcludedFromBuild)' != 'true'" % (rule_name, rule_name),
            'Include': '%%(%s.Outputs)' % rule_name
           }
          ],
          ['Link',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
          ['Lib',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
          ['ImpLib',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
         ],
         ['MakeDir',
          {'Directories': ("@(%s->'%%(RootDir)%%(Directory)')" %
                           rule.dirs_to_make)
          }
         ]
        ],
    ])
  easy_xml.WriteXmlIfChanged(content, targets_path, pretty=True, win32=True)
def _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules):
  """Generate the .xml file (the XAML property-page schema for the rules)."""
  content = [
      'ProjectSchemaDefinitions',
      {'xmlns': ('clr-namespace:Microsoft.Build.Framework.XamlTypes;'
                 'assembly=Microsoft.Build.Framework'),
       'xmlns:x': 'http://schemas.microsoft.com/winfx/2006/xaml',
       'xmlns:sys': 'clr-namespace:System;assembly=mscorlib',
       'xmlns:transformCallback':
       'Microsoft.Cpp.Dev10.ConvertPropertyCallback'
      }
  ]
  for rule in msbuild_rules:
    # One Rule page plus ItemType/FileExtension/ContentType registrations
    # per rule.
    content.extend([
        ['Rule',
         {'Name': rule.rule_name,
          'PageTemplate': 'tool',
          'DisplayName': rule.display_name,
          'Order': '200'
         },
         ['Rule.DataSource',
          ['DataSource',
           {'Persistence': 'ProjectFile',
            'ItemType': rule.rule_name
           }
          ]
         ],
         ['Rule.Categories',
          ['Category',
           {'Name': 'General'},
           ['Category.DisplayName',
            ['sys:String', 'General'],
           ],
          ],
          ['Category',
           {'Name': 'Command Line',
            'Subtype': 'CommandLine'
           },
           ['Category.DisplayName',
            ['sys:String', 'Command Line'],
           ],
          ],
         ],
         ['StringListProperty',
          {'Name': 'Inputs',
           'Category': 'Command Line',
           'IsRequired': 'true',
           'Switch': ' '
          },
          ['StringListProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'ItemType': rule.rule_name,
             'SourceType': 'Item'
            }
           ]
          ],
         ],
         ['StringProperty',
          {'Name': 'CommandLineTemplate',
           'DisplayName': 'Command Line',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         ['DynamicEnumProperty',
          {'Name': rule.before_targets,
           'Category': 'General',
           'EnumProvider': 'Targets',
           'IncludeInCommandLine': 'False'
          },
          ['DynamicEnumProperty.DisplayName',
           ['sys:String', 'Execute Before'],
          ],
          ['DynamicEnumProperty.Description',
           ['sys:String', 'Specifies the targets for the build customization'
            ' to run before.'
           ],
          ],
          ['DynamicEnumProperty.ProviderSettings',
           ['NameValuePair',
            {'Name': 'Exclude',
             'Value': '^%s|^Compute' % rule.before_targets
            }
           ]
          ],
          ['DynamicEnumProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'HasConfigurationCondition': 'true'
            }
           ]
          ],
         ],
         ['DynamicEnumProperty',
          {'Name': rule.after_targets,
           'Category': 'General',
           'EnumProvider': 'Targets',
           'IncludeInCommandLine': 'False'
          },
          ['DynamicEnumProperty.DisplayName',
           ['sys:String', 'Execute After'],
          ],
          ['DynamicEnumProperty.Description',
           ['sys:String', ('Specifies the targets for the build customization'
                           ' to run after.')
           ],
          ],
          ['DynamicEnumProperty.ProviderSettings',
           ['NameValuePair',
            {'Name': 'Exclude',
             'Value': '^%s|^Compute' % rule.after_targets
            }
           ]
          ],
          ['DynamicEnumProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'ItemType': '',
             'HasConfigurationCondition': 'true'
            }
           ]
          ],
         ],
         ['StringListProperty',
          {'Name': 'Outputs',
           'DisplayName': 'Outputs',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         ['StringProperty',
          {'Name': 'ExecutionDescription',
           'DisplayName': 'Execution Description',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         ['StringListProperty',
          {'Name': 'AdditionalDependencies',
           'DisplayName': 'Additional Dependencies',
           'IncludeInCommandLine': 'False',
           'Visible': 'false'
          }
         ],
         ['StringProperty',
          {'Subtype': 'AdditionalOptions',
           'Name': 'AdditionalOptions',
           'Category': 'Command Line'
          },
          ['StringProperty.DisplayName',
           ['sys:String', 'Additional Options'],
          ],
          ['StringProperty.Description',
           ['sys:String', 'Additional Options'],
          ],
         ],
        ],
        ['ItemType',
         {'Name': rule.rule_name,
          'DisplayName': rule.display_name
         }
        ],
        ['FileExtension',
         {'Name': '*' + rule.extension,
          'ContentType': rule.rule_name
         }
        ],
        ['ContentType',
         {'Name': rule.rule_name,
          'DisplayName': '',
          'ItemType': rule.rule_name
         }
        ]
    ])
  easy_xml.WriteXmlIfChanged(content, xml_path, pretty=True, win32=True)
def _GetConfigurationAndPlatform(name, settings):
configuration = name.rsplit('_', 1)[0]
platform = settings.get('msvs_configuration_platform', 'Win32')
return (configuration, platform)
def _GetConfigurationCondition(name, settings):
  """Returns the MSBuild Condition string selecting configuration `name`."""
  return (r"'$(Configuration)|$(Platform)'=='%s|%s'" %
          _GetConfigurationAndPlatform(name, settings))
def _GetMSBuildProjectConfigurations(configurations):
  """Returns the ProjectConfigurations ItemGroup listing every config."""
  group = ['ItemGroup', {'Label': 'ProjectConfigurations'}]
  for (name, settings) in sorted(configurations.iteritems()):
    configuration, platform = _GetConfigurationAndPlatform(name, settings)
    designation = '%s|%s' % (configuration, platform)
    group.append(
        ['ProjectConfiguration', {'Include': designation},
         ['Configuration', configuration],
         ['Platform', platform]])
  return [group]
def _GetMSBuildGlobalProperties(spec, guid, gyp_file_name):
  """Returns the Globals PropertyGroup for the project.

  Arguments:
    spec: The target dict.
    guid: The project GUID string.
    gyp_file_name: The gyp file name; its stem becomes the RootNamespace.
  """
  namespace = os.path.splitext(gyp_file_name)[0]
  properties = [
      ['PropertyGroup', {'Label': 'Globals'},
        ['ProjectGuid', guid],
        ['Keyword', 'Win32Proj'],
        ['RootNamespace', namespace],
        ['IgnoreWarnCompileDuplicatedFilename', 'true'],
      ]
    ]
  # Prefer 64-bit tools when the host is 64-bit.
  if os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or \
     os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64':
    properties[0].append(['PreferredToolArchitecture', 'x64'])
  # WinRT (Windows Store / Windows Phone) targets need additional
  # application-type properties.
  if spec.get('msvs_enable_winrt'):
    properties[0].append(['DefaultLanguage', 'en-US'])
    properties[0].append(['AppContainerApplication', 'true'])
    if spec.get('msvs_application_type_revision'):
      app_type_revision = spec.get('msvs_application_type_revision')
      properties[0].append(['ApplicationTypeRevision', app_type_revision])
    else:
      properties[0].append(['ApplicationTypeRevision', '8.1'])
    if spec.get('msvs_target_platform_version'):
      target_platform_version = spec.get('msvs_target_platform_version')
      properties[0].append(['WindowsTargetPlatformVersion',
                            target_platform_version])
      # The min version falls back to the target version when unspecified.
      if spec.get('msvs_target_platform_minversion'):
        target_platform_minversion = spec.get('msvs_target_platform_minversion')
        properties[0].append(['WindowsTargetPlatformMinVersion',
                              target_platform_minversion])
      else:
        properties[0].append(['WindowsTargetPlatformMinVersion',
                              target_platform_version])
    if spec.get('msvs_enable_winphone'):
      properties[0].append(['ApplicationType', 'Windows Phone'])
    else:
      properties[0].append(['ApplicationType', 'Windows Store'])
  return properties
def _GetMSBuildConfigurationDetails(spec, build_file):
  """Builds the 'Configuration'-labeled PropertyGroup for each config.

  Emits ConfigurationType for every configuration, and CharacterSet when
  present — except for WinRT targets, which must not set CharacterSet.
  """
  properties = {}
  for name, settings in spec['configurations'].iteritems():
    attributes = _GetMSBuildAttributes(spec, settings, build_file)
    condition = _GetConfigurationCondition(name, settings)
    _AddConditionalProperty(properties, condition, 'ConfigurationType',
                            attributes['ConfigurationType'])
    character_set = attributes.get('CharacterSet')
    if character_set and 'msvs_enable_winrt' not in spec:
      _AddConditionalProperty(properties, condition, 'CharacterSet',
                              character_set)
  return _GetMSBuildPropertyGroup(spec, 'Configuration', properties)
def _GetMSBuildLocalProperties(msbuild_toolset):
# Currently the only local property we support is PlatformToolset
properties = {}
if msbuild_toolset:
properties = [
['PropertyGroup', {'Label': 'Locals'},
['PlatformToolset', msbuild_toolset],
]
]
return properties
def _GetMSBuildPropertySheets(configurations):
  """Returns the ImportGroup sections that import property sheets.

  Every configuration imports the per-user props sheet; configurations
  that specify 'msbuild_props' additionally import those files under a
  per-configuration condition.

  Arguments:
    configurations: The 'configurations' dict from the target spec.
  Returns:
    A list of ImportGroup specifications in easy_xml format.
  """
  user_props = r'$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props'
  additional_props = {}
  props_specified = False
  for name, settings in sorted(configurations.iteritems()):
    configuration = _GetConfigurationCondition(name, settings)
    # dict.has_key() is deprecated (and removed in Python 3); use 'in'.
    if 'msbuild_props' in settings:
      additional_props[configuration] = _FixPaths(settings['msbuild_props'])
      props_specified = True
    else:
      additional_props[configuration] = ''

  if not props_specified:
    # No extra sheets anywhere: a single unconditioned ImportGroup suffices.
    return [
        ['ImportGroup',
         {'Label': 'PropertySheets'},
         ['Import',
          {'Project': user_props,
           'Condition': "exists('%s')" % user_props,
           'Label': 'LocalAppDataPlatform'
          }
         ]
        ]
    ]
  else:
    # Emit one conditioned ImportGroup per configuration, appending that
    # configuration's extra sheets after the user props import.
    sheets = []
    for condition, props in additional_props.iteritems():
      import_group = [
          'ImportGroup',
          {'Label': 'PropertySheets',
           'Condition': condition
          },
          ['Import',
           {'Project': user_props,
            'Condition': "exists('%s')" % user_props,
            'Label': 'LocalAppDataPlatform'
           }
          ]
      ]
      for props_file in props:
        import_group.append(['Import', {'Project': props_file}])
      sheets.append(import_group)
    return sheets
def _ConvertMSVSBuildAttributes(spec, config, build_file):
  """Converts MSVS project attributes to their MSBuild equivalents.

  Returns a dict mapping MSBuild attribute names to converted values;
  unrecognized attributes are dropped with a warning.
  """
  config_type = _GetMSVSConfigurationType(spec, build_file)
  msvs_attributes = _GetMSVSAttributes(spec, config, config_type)
  msbuild_attributes = {}
  for a in msvs_attributes:
    if a in ['IntermediateDirectory', 'OutputDirectory']:
      # MSBuild expects directory values to carry a trailing backslash.
      directory = MSVSSettings.ConvertVCMacrosToMSBuild(msvs_attributes[a])
      if not directory.endswith('\\'):
        directory += '\\'
      msbuild_attributes[a] = directory
    elif a == 'CharacterSet':
      msbuild_attributes[a] = _ConvertMSVSCharacterSet(msvs_attributes[a])
    elif a == 'ConfigurationType':
      msbuild_attributes[a] = _ConvertMSVSConfigurationType(msvs_attributes[a])
    else:
      print 'Warning: Do not know how to convert MSVS attribute ' + a
  return msbuild_attributes
def _ConvertMSVSCharacterSet(char_set):
if char_set.isdigit():
char_set = {
'0': 'MultiByte',
'1': 'Unicode',
'2': 'MultiByte',
}[char_set]
return char_set
def _ConvertMSVSConfigurationType(config_type):
if config_type.isdigit():
config_type = {
'1': 'Application',
'2': 'DynamicLibrary',
'4': 'StaticLibrary',
'10': 'Utility'
}[config_type]
return config_type
def _GetMSBuildAttributes(spec, config, build_file):
  """Computes the MSBuild project-level attributes for a configuration.

  Returns a dict with keys such as ConfigurationType, OutputDirectory,
  IntermediateDirectory, CharacterSet, TargetName, TargetExt and
  TargetPath.
  """
  if 'msbuild_configuration_attributes' not in config:
    # No native MSBuild attributes given: derive them from MSVS ones.
    msbuild_attributes = _ConvertMSVSBuildAttributes(spec, config, build_file)
  else:
    config_type = _GetMSVSConfigurationType(spec, build_file)
    config_type = _ConvertMSVSConfigurationType(config_type)
    msbuild_attributes = config.get('msbuild_configuration_attributes', {})
    msbuild_attributes.setdefault('ConfigurationType', config_type)
    output_dir = msbuild_attributes.get('OutputDirectory',
                                      '$(SolutionDir)$(Configuration)')
    msbuild_attributes['OutputDirectory'] = _FixPath(output_dir) + '\\'
    if 'IntermediateDirectory' not in msbuild_attributes:
      intermediate = _FixPath('$(Configuration)') + '\\'
      msbuild_attributes['IntermediateDirectory'] = intermediate
    if 'CharacterSet' in msbuild_attributes:
      msbuild_attributes['CharacterSet'] = _ConvertMSVSCharacterSet(
          msbuild_attributes['CharacterSet'])
  if 'TargetName' not in msbuild_attributes:
    prefix = spec.get('product_prefix', '')
    product_name = spec.get('product_name', '$(ProjectName)')
    target_name = prefix + product_name
    msbuild_attributes['TargetName'] = target_name
  if 'TargetExt' not in msbuild_attributes and 'product_extension' in spec:
    ext = spec.get('product_extension')
    msbuild_attributes['TargetExt'] = '.' + ext
  if spec.get('msvs_external_builder'):
    external_out_dir = spec.get('msvs_external_builder_out_dir', '.')
    msbuild_attributes['OutputDirectory'] = _FixPath(external_out_dir) + '\\'
  # Make sure that 'TargetPath' matches 'Lib.OutputFile' or 'Link.OutputFile'
  # (depending on the tool used) to avoid MSB8012 warning.
  msbuild_tool_map = {
      'executable': 'Link',
      'shared_library': 'Link',
      'loadable_module': 'Link',
      'static_library': 'Lib',
  }
  msbuild_tool = msbuild_tool_map.get(spec['type'])
  if msbuild_tool:
    msbuild_settings = config['finalized_msbuild_settings']
    out_file = msbuild_settings[msbuild_tool].get('OutputFile')
    if out_file:
      msbuild_attributes['TargetPath'] = _FixPath(out_file)
    target_ext = msbuild_settings[msbuild_tool].get('TargetExt')
    if target_ext:
      msbuild_attributes['TargetExt'] = target_ext
  return msbuild_attributes
def _GetMSBuildConfigurationGlobalProperties(spec, configurations, build_file):
  """Returns the per-configuration global PropertyGroup entries.

  Covers IntDir/OutDir/TargetName/TargetExt/TargetPath, the ExecutablePath
  (extended with cygwin/python locations for action commands), and any
  global ('' tool) finalized MSBuild settings.
  """
  # TODO(jeanluc) We could optimize out the following and do it only if
  # there are actions.
  # TODO(jeanluc) Handle the equivalent of setting 'CYGWIN=nontsec'.
  new_paths = []
  cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])[0]
  if cygwin_dirs:
    cyg_path = '$(MSBuildProjectDirectory)\\%s\\bin\\' % _FixPath(cygwin_dirs)
    new_paths.append(cyg_path)
    # TODO(jeanluc) Change the convention to have both a cygwin_dir and a
    # python_dir.
    python_path = cyg_path.replace('cygwin\\bin', 'python_26')
    new_paths.append(python_path)
    if new_paths:
      # new_paths is rebound from list to a single ';'-joined string here.
      new_paths = '$(ExecutablePath);' + ';'.join(new_paths)
  properties = {}
  for (name, configuration) in sorted(configurations.iteritems()):
    condition = _GetConfigurationCondition(name, configuration)
    attributes = _GetMSBuildAttributes(spec, configuration, build_file)
    msbuild_settings = configuration['finalized_msbuild_settings']
    _AddConditionalProperty(properties, condition, 'IntDir',
                            attributes['IntermediateDirectory'])
    _AddConditionalProperty(properties, condition, 'OutDir',
                            attributes['OutputDirectory'])
    _AddConditionalProperty(properties, condition, 'TargetName',
                            attributes['TargetName'])
    if 'TargetExt' in attributes:
      _AddConditionalProperty(properties, condition, 'TargetExt',
                              attributes['TargetExt'])
    if attributes.get('TargetPath'):
      _AddConditionalProperty(properties, condition, 'TargetPath',
                              attributes['TargetPath'])
    # NOTE(review): TargetExt appears to be added twice (here and above),
    # which duplicates its condition entries — confirm whether one of the
    # two blocks should be removed.
    if attributes.get('TargetExt'):
      _AddConditionalProperty(properties, condition, 'TargetExt',
                              attributes['TargetExt'])
    if new_paths:
      _AddConditionalProperty(properties, condition, 'ExecutablePath',
                              new_paths)
    tool_settings = msbuild_settings.get('', {})
    for name, value in sorted(tool_settings.iteritems()):
      formatted_value = _GetValueFormattedForMSBuild('', name, value)
      _AddConditionalProperty(properties, condition, name, formatted_value)
  return _GetMSBuildPropertyGroup(spec, None, properties)
def _AddConditionalProperty(properties, condition, name, value):
"""Adds a property / conditional value pair to a dictionary.
Arguments:
properties: The dictionary to be modified. The key is the name of the
property. The value is itself a dictionary; its key is the value and
the value a list of condition for which this value is true.
condition: The condition under which the named property has the value.
name: The name of the property.
value: The value of the property.
"""
if name not in properties:
properties[name] = {}
values = properties[name]
if value not in values:
values[value] = []
conditions = values[value]
conditions.append(condition)
# Regex matching msvs variable references, i.e. $(FOO); group 1 captures the
# bare variable name.
MSVS_VARIABLE_REFERENCE = re.compile(r'\$\(([a-zA-Z_][a-zA-Z0-9_]*)\)')
def _GetMSBuildPropertyGroup(spec, label, properties):
  """Returns a PropertyGroup definition for the specified properties.

  Arguments:
    spec: The target project dict.
    label: An optional label for the PropertyGroup.
    properties: The dictionary to be converted.  The key is the name of the
        property.  The value is itself a dictionary; its key is the value and
        the value a list of condition for which this value is true.
  """
  group = ['PropertyGroup']
  if label:
    group.append({'Label': label})
  num_configurations = len(spec['configurations'])
  def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_variable.
    # This happens to be easier in this case, since a variable's
    # definition contains all variables it references in a single string.
    edges = set()
    for value in sorted(properties[node].keys()):
      # Add to edges all $(...) references to variables.
      #
      # Variable references that refer to names not in properties are excluded
      # These can exist for instance to refer built in definitions like
      # $(SolutionDir).
      #
      # Self references are ignored. Self reference is used in a few places to
      # append to the default value. I.e. PATH=$(PATH);other_path
      edges.update(set([v for v in MSVS_VARIABLE_REFERENCE.findall(value)
                        if v in properties and v != node]))
    return edges
  properties_ordered = gyp.common.TopologicallySorted(
      properties.keys(), GetEdges)
  # Walk properties in the reverse of a topological sort on
  # user_of_variable -> used_variable as this ensures variables are
  # defined before they are used.
  # NOTE: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
  for name in reversed(properties_ordered):
    values = properties[name]
    for value, conditions in sorted(values.iteritems()):
      if len(conditions) == num_configurations:
        # If the value is the same for all configurations,
        # just add one unconditional entry.
        group.append([name, value])
      else:
        # Otherwise emit one conditioned entry per configuration condition.
        for condition in conditions:
          group.append([name, {'Condition': condition}, value])
  return [group]
def _GetMSBuildToolSettingsSections(spec, configurations):
  """Returns ItemDefinitionGroup sections holding per-tool settings.

  One conditioned ItemDefinitionGroup is emitted per configuration,
  containing one element per tool that actually has settings.
  """
  groups = []
  for (name, configuration) in sorted(configurations.iteritems()):
    msbuild_settings = configuration['finalized_msbuild_settings']
    group = ['ItemDefinitionGroup',
             {'Condition': _GetConfigurationCondition(name, configuration)}
            ]
    for tool_name, tool_settings in sorted(msbuild_settings.iteritems()):
      # Skip the tool named '' which is a holder of global settings handled
      # by _GetMSBuildConfigurationGlobalProperties.
      if tool_name:
        if tool_settings:
          tool = [tool_name]
          # NOTE: this inner 'name' shadows the configuration name above;
          # it is not used again after this loop.
          for name, value in sorted(tool_settings.iteritems()):
            formatted_value = _GetValueFormattedForMSBuild(tool_name, name,
                                                           value)
            tool.append([name, formatted_value])
          group.append(tool)
    groups.append(group)
  return groups
def _FinalizeMSBuildSettings(spec, configuration):
  """Computes and stores the finalized MSBuild settings for a configuration.

  Converts MSVS settings when no native MSBuild settings were given, then
  merges in include dirs, libraries, defines, precompiled-header setup and
  pre/post build steps, storing the result in
  configuration['finalized_msbuild_settings'].
  """
  if 'msbuild_settings' in configuration:
    converted = False
    msbuild_settings = configuration['msbuild_settings']
    MSVSSettings.ValidateMSBuildSettings(msbuild_settings)
  else:
    converted = True
    msvs_settings = configuration.get('msvs_settings', {})
    msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(msvs_settings)
  include_dirs, midl_include_dirs, resource_include_dirs = \
      _GetIncludeDirs(configuration)
  libraries = _GetLibraries(spec)
  library_dirs = _GetLibraryDirs(configuration)
  out_file, _, msbuild_tool = _GetOutputFilePathAndTool(spec, msbuild=True)
  target_ext = _GetOutputTargetExt(spec)
  defines = _GetDefines(configuration)
  if converted:
    # Visual Studio 2010 has TR1
    defines = [d for d in defines if d != '_HAS_TR1=0']
    # Warn of ignored settings
    ignored_settings = ['msvs_tool_files']
    for ignored_setting in ignored_settings:
      value = configuration.get(ignored_setting)
      if value:
        print ('Warning: The automatic conversion to MSBuild does not handle '
               '%s. Ignoring setting of %s' % (ignored_setting, str(value)))
  defines = [_EscapeCppDefineForMSBuild(d) for d in defines]
  disabled_warnings = _GetDisabledWarnings(configuration)
  prebuild = configuration.get('msvs_prebuild')
  postbuild = configuration.get('msvs_postbuild')
  def_file = _GetModuleDefinition(spec)
  precompiled_header = configuration.get('msvs_precompiled_header')
  # Add the information to the appropriate tool
  # TODO(jeanluc) We could optimize and generate these settings only if
  # the corresponding files are found, e.g. don't generate ResourceCompile
  # if you don't have any resources.
  _ToolAppend(msbuild_settings, 'ClCompile',
              'AdditionalIncludeDirectories', include_dirs)
  _ToolAppend(msbuild_settings, 'Midl',
              'AdditionalIncludeDirectories', midl_include_dirs)
  _ToolAppend(msbuild_settings, 'ResourceCompile',
              'AdditionalIncludeDirectories', resource_include_dirs)
  # Add in libraries, note that even for empty libraries, we want this
  # set, to prevent inheriting default libraries from the environment.
  _ToolSetOrAppend(msbuild_settings, 'Link', 'AdditionalDependencies',
                  libraries)
  _ToolAppend(msbuild_settings, 'Link', 'AdditionalLibraryDirectories',
              library_dirs)
  if out_file:
    _ToolAppend(msbuild_settings, msbuild_tool, 'OutputFile', out_file,
                only_if_unset=True)
  if target_ext:
    _ToolAppend(msbuild_settings, msbuild_tool, 'TargetExt', target_ext,
                only_if_unset=True)
  # Add defines.
  _ToolAppend(msbuild_settings, 'ClCompile',
              'PreprocessorDefinitions', defines)
  _ToolAppend(msbuild_settings, 'ResourceCompile',
              'PreprocessorDefinitions', defines)
  # Add disabled warnings.
  _ToolAppend(msbuild_settings, 'ClCompile',
              'DisableSpecificWarnings', disabled_warnings)
  # Turn on precompiled headers if appropriate.
  if precompiled_header:
    precompiled_header = os.path.split(precompiled_header)[1]
    _ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'Use')
    _ToolAppend(msbuild_settings, 'ClCompile',
                'PrecompiledHeaderFile', precompiled_header)
    _ToolAppend(msbuild_settings, 'ClCompile',
                'ForcedIncludeFiles', [precompiled_header])
  else:
    _ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'NotUsing')
  # Turn off WinRT compilation
  _ToolAppend(msbuild_settings, 'ClCompile', 'CompileAsWinRT', 'false')
  # Turn on import libraries if appropriate
  if spec.get('msvs_requires_importlibrary'):
    _ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'false')
  # Loadable modules don't generate import libraries;
  # tell dependent projects to not expect one.
  if spec['type'] == 'loadable_module':
    _ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'true')
  # Set the module definition file if any.
  if def_file:
    _ToolAppend(msbuild_settings, 'Link', 'ModuleDefinitionFile', def_file)
  # msbuild_settings is stored by reference, so the pre/post build events
  # appended below are still picked up by readers of the stored dict.
  configuration['finalized_msbuild_settings'] = msbuild_settings
  if prebuild:
    _ToolAppend(msbuild_settings, 'PreBuildEvent', 'Command', prebuild)
  if postbuild:
    _ToolAppend(msbuild_settings, 'PostBuildEvent', 'Command', postbuild)
def _GetValueFormattedForMSBuild(tool_name, name, value):
  """Formats a setting value for emission into an MSBuild project file.

  List values are joined into a single string (';'-separated, or
  ' '-separated for the few options that expect spaces) and VS macro syntax
  is converted to MSBuild syntax.

  Arguments:
    tool_name: Name of the tool the setting belongs to ('' for globals).
    name: The name of the setting.
    value: A string, or a list of strings, holding the setting's value.
  Returns:
    The formatted string value.
  """
  # isinstance() is preferred over type(...) == list; it also accepts
  # list subclasses.
  if isinstance(value, list):
    # For some settings, VS2010 does not automatically extend the settings
    # with the inherited values, so append the inheritance macro explicitly.
    # TODO(jeanluc) Is this what we want?
    if name in ['AdditionalIncludeDirectories',
                'AdditionalLibraryDirectories',
                'AdditionalOptions',
                'DelayLoadDLLs',
                'DisableSpecificWarnings',
                'PreprocessorDefinitions']:
      # Copy rather than append in place so the caller's settings list is
      # not polluted with '%(...)' markers on repeated calls.
      value = value + ['%%(%s)' % name]
    # For most tools, entries in a list should be separated with ';' but some
    # settings use a space. Check for those first.
    exceptions = {
        'ClCompile': ['AdditionalOptions'],
        'Link': ['AdditionalOptions'],
        'Lib': ['AdditionalOptions']}
    if tool_name in exceptions and name in exceptions[tool_name]:
      char = ' '
    else:
      char = ';'
    formatted_value = char.join(
        [MSVSSettings.ConvertVCMacrosToMSBuild(i) for i in value])
  else:
    formatted_value = MSVSSettings.ConvertVCMacrosToMSBuild(value)
  return formatted_value
def _VerifySourcesExist(sources, root_dir):
  """Verifies that all source files exist on disk.

  Checks that all regular source files, i.e. not created at run time,
  exist on disk.  Missing files cause needless recompilation but no
  otherwise visible errors.

  Arguments:
    sources: A recursive list of Filter/file names.
    root_dir: The root directory for the relative path names.
  Returns:
    A list of source files that cannot be found on disk.
  """
  missing_sources = []
  for item in sources:
    if isinstance(item, MSVSProject.Filter):
      # Recurse into filters, accumulating their missing entries.
      missing_sources += _VerifySourcesExist(item.contents, root_dir)
    elif '$' not in item:
      # Names containing '$' refer to generated files; skip the disk check.
      full_path = os.path.join(root_dir, item)
      if not os.path.exists(full_path):
        missing_sources.append(full_path)
  return missing_sources
def _GetMSBuildSources(spec, sources, exclusions, rule_dependencies,
                       extension_to_rule_name, actions_spec,
                       sources_handled_by_action, list_excluded):
  """Returns the ItemGroup sections describing the target's sources.

  Sources are bucketed by the group returned from
  _MapFileToMsBuildSourceType; one ItemGroup is emitted per non-empty
  bucket, plus one for the actions spec when present.
  """
  groups = ['none', 'masm', 'midl', 'include', 'compile', 'resource', 'rule',
            'rule_dependency']
  grouped_sources = dict((group, []) for group in groups)
  _AddSources2(spec, sources, exclusions, grouped_sources,
               rule_dependencies, extension_to_rule_name,
               sources_handled_by_action, list_excluded)
  result = []
  for group in groups:
    if grouped_sources[group]:
      result.append(['ItemGroup'] + grouped_sources[group])
  if actions_spec:
    result.append(['ItemGroup'] + actions_spec)
  return result
def _AddSources2(spec, sources, exclusions, grouped_sources,
                 rule_dependencies, extension_to_rule_name,
                 sources_handled_by_action,
                 list_excluded):
  """Recursively sorts sources into grouped_sources by MSBuild source type.

  For each plain file not consumed by an action, attaches per-configuration
  ExcludedFromBuild and PrecompiledHeader details and appends the resulting
  element spec to the matching group list.

  Arguments:
    spec: The target dictionary containing the properties of the target.
    sources: A recursive list of Filter/file names.
    exclusions: Maps a source name to the list of (config name, config)
        pairs in which it is excluded from the build.
    grouped_sources: Output dict mapping a group name ('compile', 'include',
        'rule', ...) to the list of MSBuild element specs for that group.
    rule_dependencies: The set of rule dependency filenames.
    extension_to_rule_name: A dictionary mapping file extensions to rules.
    sources_handled_by_action: Files consumed by actions; skipped here.
    list_excluded: Whether excluded files are listed in the project file.
  """
  # Lazily initialized the first time a precompiled source is seen; shared
  # across all iterations of the loop below (but not across recursion).
  extensions_excluded_from_precompile = []
  for source in sources:
    if isinstance(source, MSVSProject.Filter):
      # Filter nodes are folders; recurse into their contents.
      _AddSources2(spec, source.contents, exclusions, grouped_sources,
                   rule_dependencies, extension_to_rule_name,
                   sources_handled_by_action,
                   list_excluded)
    else:
      if not source in sources_handled_by_action:
        detail = []
        excluded_configurations = exclusions.get(source, [])
        if len(excluded_configurations) == len(spec['configurations']):
          # Excluded from every configuration: one unconditional element.
          detail.append(['ExcludedFromBuild', 'true'])
        else:
          # Excluded from only some configurations: emit one conditional
          # element per excluded configuration.
          for config_name, configuration in sorted(excluded_configurations):
            condition = _GetConfigurationCondition(config_name, configuration)
            detail.append(['ExcludedFromBuild',
                           {'Condition': condition},
                           'true'])
        # Add precompile if needed
        for config_name, configuration in spec['configurations'].iteritems():
          precompiled_source = configuration.get('msvs_precompiled_source', '')
          if precompiled_source != '':
            precompiled_source = _FixPath(precompiled_source)
            if not extensions_excluded_from_precompile:
              # If the precompiled header is generated by a C source, we must
              # not try to use it for C++ sources, and vice versa.
              basename, extension = os.path.splitext(precompiled_source)
              if extension == '.c':
                extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
              else:
                extensions_excluded_from_precompile = ['.c']
            if precompiled_source == source:
              # This file creates the precompiled header for this config.
              condition = _GetConfigurationCondition(config_name, configuration)
              detail.append(['PrecompiledHeader',
                             {'Condition': condition},
                             'Create'
                            ])
            else:
              # Turn off precompiled header usage for source files of a
              # different type than the file that generated the
              # precompiled header.
              for extension in extensions_excluded_from_precompile:
                if source.endswith(extension):
                  detail.append(['PrecompiledHeader', ''])
                  detail.append(['ForcedIncludeFiles', ''])
        group, element = _MapFileToMsBuildSourceType(source, rule_dependencies,
                                                     extension_to_rule_name)
        grouped_sources[group].append([element, {'Include': source}] + detail)
def _GetMSBuildProjectReferences(project):
  """Returns the 'ItemGroup' spec holding the project's ProjectReferences.

  Returns an empty list when the project has no dependencies.
  """
  if not project.dependencies:
    return []
  project_dir = os.path.split(project.path)[0]
  group = ['ItemGroup']
  for dep in project.dependencies:
    ref = ['ProjectReference',
           {'Include': gyp.common.RelativePath(dep.path, project_dir)},
           ['Project', dep.guid],
           ['ReferenceOutputAssembly', 'false']
          ]
    # If ULDI is disabled in any of the dependency's configurations, turn
    # it off in the reference as well.
    configs = dep.spec.get('configurations', {}).itervalues()
    if any(c.get('msvs_2010_disable_uldi_when_referenced', 0)
           for c in configs):
      ref.append(['UseLibraryDependencyInputs', 'false'])
    group.append(ref)
  return [group]
def _GenerateMSBuildProject(project, options, version, generator_flags):
  """Generates the .vcxproj (and companion .filters) file for one project.

  Arguments:
    project: the MSVSProject object whose spec drives generation.
    options: global generator options.
    version: the MSVS version object (provides ProjectVersion()).
    generator_flags: dict of generator-specific flags.
  Returns:
    A list of source files that could not be found on disk
    (from _VerifySourcesExist).
  """
  spec = project.spec
  configurations = spec['configurations']
  project_dir, project_file_name = os.path.split(project.path)
  gyp.common.EnsureDirExists(project.path)
  # Prepare list of sources and excluded sources.
  gyp_path = _NormalizedSource(project.build_file)
  # NOTE(review): relative_path_of_gyp_file appears unused in this function;
  # presumably a leftover from an earlier revision -- confirm before removing.
  relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
  gyp_file = os.path.split(project.build_file)[1]
  sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
                                                    gyp_file)
  # Add rules.
  actions_to_add = {}
  props_files_of_rules = set()
  targets_files_of_rules = set()
  rule_dependencies = set()
  extension_to_rule_name = {}
  list_excluded = generator_flags.get('msvs_list_excluded_files', True)
  # Don't generate rules if we are using an external builder like ninja.
  if not spec.get('msvs_external_builder'):
    _GenerateRulesForMSBuild(project_dir, options, spec,
                             sources, excluded_sources,
                             props_files_of_rules, targets_files_of_rules,
                             actions_to_add, rule_dependencies,
                             extension_to_rule_name)
  else:
    rules = spec.get('rules', [])
    _AdjustSourcesForRules(rules, sources, excluded_sources, True)
  sources, excluded_sources, excluded_idl = (
      _AdjustSourcesAndConvertToFilterHierarchy(spec, options,
                                                project_dir, sources,
                                                excluded_sources,
                                                list_excluded, version))
  # Don't add actions if we are using an external builder like ninja.
  if not spec.get('msvs_external_builder'):
    _AddActions(actions_to_add, spec, project.build_file)
    _AddCopies(actions_to_add, spec)
    # NOTE: this stanza must appear after all actions have been decided.
    # Don't excluded sources with actions attached, or they won't run.
    excluded_sources = _FilterActionsFromExcluded(
        excluded_sources, actions_to_add)
  exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
  actions_spec, sources_handled_by_action = _GenerateActionsForMSBuild(
      spec, actions_to_add)
  # The .filters companion file mirrors the source tree for the VS UI.
  _GenerateMSBuildFiltersFile(project.path + '.filters', sources,
                              rule_dependencies,
                              extension_to_rule_name)
  missing_sources = _VerifySourcesExist(sources, project_dir)
  for configuration in configurations.itervalues():
    _FinalizeMSBuildSettings(spec, configuration)
  # Add attributes to root element
  import_default_section = [
      ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.Default.props'}]]
  import_cpp_props_section = [
      ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.props'}]]
  import_cpp_targets_section = [
      ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.targets'}]]
  import_masm_props_section = [
      ['Import',
        {'Project': r'$(VCTargetsPath)\BuildCustomizations\masm.props'}]]
  import_masm_targets_section = [
      ['Import',
        {'Project': r'$(VCTargetsPath)\BuildCustomizations\masm.targets'}]]
  macro_section = [['PropertyGroup', {'Label': 'UserMacros'}]]
  content = [
      'Project',
      {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003',
       'ToolsVersion': version.ProjectVersion(),
       'DefaultTargets': 'Build'
      }]
  # The section order below matters: it mirrors the layout MSBuild expects
  # (configurations, globals, default props, toolsets, C++/MASM props,
  # extensions, property sheets, macros, settings, sources, references,
  # targets).
  content += _GetMSBuildProjectConfigurations(configurations)
  content += _GetMSBuildGlobalProperties(spec, project.guid, project_file_name)
  content += import_default_section
  content += _GetMSBuildConfigurationDetails(spec, project.build_file)
  if spec.get('msvs_enable_winphone'):
    content += _GetMSBuildLocalProperties('v120_wp81')
  else:
    content += _GetMSBuildLocalProperties(project.msbuild_toolset)
  content += import_cpp_props_section
  content += import_masm_props_section
  content += _GetMSBuildExtensions(props_files_of_rules)
  content += _GetMSBuildPropertySheets(configurations)
  content += macro_section
  content += _GetMSBuildConfigurationGlobalProperties(spec, configurations,
                                                      project.build_file)
  content += _GetMSBuildToolSettingsSections(spec, configurations)
  content += _GetMSBuildSources(
      spec, sources, exclusions, rule_dependencies, extension_to_rule_name,
      actions_spec, sources_handled_by_action, list_excluded)
  content += _GetMSBuildProjectReferences(project)
  content += import_cpp_targets_section
  content += import_masm_targets_section
  content += _GetMSBuildExtensionTargets(targets_files_of_rules)
  if spec.get('msvs_external_builder'):
    content += _GetMSBuildExternalBuilderTargets(spec)
  # TODO(jeanluc) File a bug to get rid of runas.  We had in MSVS:
  # has_run_as = _WriteMSVSUserFile(project.path, version, spec)
  easy_xml.WriteXmlIfChanged(content, project.path, pretty=True, win32=True)
  return missing_sources
def _GetMSBuildExternalBuilderTargets(spec):
  """Return a list of MSBuild targets for external builders.

  The "Build" and "Clean" targets are always generated.  If the spec contains
  'msvs_external_builder_clcompile_cmd', then the "ClCompile" target will also
  be generated, to support building selected C/C++ files.

  Arguments:
    spec: The gyp target spec.
  Returns:
    List of MSBuild 'Target' specs.
  """
  def _MakeTarget(target_name, cmd_key):
    # Each external-builder target is a single Exec of the raw command line.
    command = _BuildCommandLineForRuleRaw(
        spec, spec[cmd_key], False, False, False, False)
    return ['Target', {'Name': target_name},
            ['Exec', {'Command': command}]]
  targets = [_MakeTarget('Build', 'msvs_external_builder_build_cmd'),
             _MakeTarget('Clean', 'msvs_external_builder_clean_cmd')]
  if spec.get('msvs_external_builder_clcompile_cmd'):
    targets.append(_MakeTarget('ClCompile',
                               'msvs_external_builder_clcompile_cmd'))
  return targets
def _GetMSBuildExtensions(props_files_of_rules):
extensions = ['ImportGroup', {'Label': 'ExtensionSettings'}]
for props_file in props_files_of_rules:
extensions.append(['Import', {'Project': props_file}])
return [extensions]
def _GetMSBuildExtensionTargets(targets_files_of_rules):
targets_node = ['ImportGroup', {'Label': 'ExtensionTargets'}]
for targets_file in sorted(targets_files_of_rules):
targets_node.append(['Import', {'Project': targets_file}])
return [targets_node]
def _GenerateActionsForMSBuild(spec, actions_to_add):
  """Add actions accumulated into an actions_to_add, merging as needed.

  All actions that share a primary input file are merged into a single
  CustomBuild element (commands chained, descriptions joined).

  Arguments:
    spec: the target project dict
    actions_to_add: dictionary keyed on input name, which maps to a list of
        dicts describing the actions attached to that input file.

  Returns:
    A pair of (action specification, the sources handled by this action).
  """
  sources_handled_by_action = OrderedSet()
  actions_spec = []
  for primary_input, actions in actions_to_add.iteritems():
    # Merge every action attached to this primary input into one command.
    inputs = OrderedSet()
    outputs = OrderedSet()
    descriptions = []
    commands = []
    for action in actions:
      inputs.update(OrderedSet(action['inputs']))
      outputs.update(OrderedSet(action['outputs']))
      descriptions.append(action['description'])
      cmd = action['command']
      # For most actions, add 'call' so that actions that invoke batch files
      # return and continue executing.  msbuild_use_call provides a way to
      # disable this but I have not seen any adverse effect from doing that
      # for everything.
      if action.get('msbuild_use_call', True):
        cmd = 'call ' + cmd
      commands.append(cmd)
    # Add the custom build action for one input file.
    description = ', and also '.join(descriptions)
    # We can't join the commands simply with && because the command line will
    # get too long.  See also _AddActions: cygwin's setup_env mustn't be called
    # for every invocation or the command that sets the PATH will grow too
    # long.  Instead, chain with CRLF and bail out on the first failure.
    command = '\r\n'.join([c + '\r\nif %errorlevel% neq 0 exit /b %errorlevel%'
                           for c in commands])
    _AddMSBuildAction(spec,
                      primary_input,
                      inputs,
                      outputs,
                      command,
                      description,
                      sources_handled_by_action,
                      actions_spec)
  return actions_spec, sources_handled_by_action
def _AddMSBuildAction(spec, primary_input, inputs, outputs, cmd, description,
                      sources_handled_by_action, actions_spec):
  """Appends one 'CustomBuild' element spec for an action to actions_spec.

  The (path-fixed) primary input is also recorded in
  sources_handled_by_action so it is not emitted again as a normal source.
  """
  fixed_primary = _FixPath(primary_input)
  sources_handled_by_action.add(fixed_primary)
  # Inputs other than the primary one go into AdditionalInputs.
  extra_inputs = ';'.join(path for path in _FixPaths(inputs)
                          if path != fixed_primary)
  action_spec = ['CustomBuild', {'Include': fixed_primary},
                 # TODO(jeanluc) 'Document' for all or just if as_sources?
                 ['FileType', 'Document'],
                 ['Command', MSVSSettings.ConvertVCMacrosToMSBuild(cmd)],
                 ['Message', description],
                 ['Outputs', ';'.join(_FixPaths(outputs))]]
  if extra_inputs:
    action_spec.append(['AdditionalInputs', extra_inputs])
  actions_spec.append(action_spec)
|
tndatacommons/tndata_backend | refs/heads/master | tndata_backend/goals/migrations/0127_auto_20160310_2145.py | 2 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import jsonfield.fields
class Migration(migrations.Migration):
    # Auto-generated schema migration: updates UserCompletedAction's Meta
    # options (ordering / verbose names / get_latest_by) and allows
    # DailyProgress.behaviors_status (a JSONField) to be left blank.
    dependencies = [
        ('goals', '0126_convert_core_actions_to_showing'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='usercompletedaction',
            options={'verbose_name': 'User Completed Action', 'get_latest_by': 'updated_on', 'ordering': ['-updated_on', 'user', 'action'], 'verbose_name_plural': 'User Completed Action'},
        ),
        migrations.AlterField(
            model_name='dailyprogress',
            name='behaviors_status',
            field=jsonfield.fields.JSONField(default=dict, blank=True, help_text="Describes the user's status on work toward this behavior; i.e. From which bucket should Actions be delivered."),
        ),
    ]
|
neuroneuro15/ratcave | refs/heads/master | examples/shadow_demo.py | 1 | import pyglet
import ratcave as rc
import itertools as it
import numpy as np
try:
from contextlib import ExitStack
except ImportError:
from contextlib2 import ExitStack
# Create Window
window = pyglet.window.Window(resizable=True, vsync=False)
# Create Mesh
obj_filename = rc.resources.obj_primitives
obj_reader = rc.WavefrontReader(obj_filename)
# Monkey mesh is re-drawn in a 5x5 grid every frame (see on_draw).
monkey = obj_reader.get_mesh("Monkey")
monkey.uniforms['flat_shading'] = False
monkey.position.xyz = 0, 0, -4
monkey.scale.xyz = .25
monkey.point_size = .1  # used when drawmode is GL_POINTS
# Large backdrop plane that receives the cast shadows.
plane = obj_reader.get_mesh('Plane')
plane.position.xyz = 0, 0, -5
plane.rotation.x = 0
plane.scale.xyz = 8
plane.uniforms['spec_weight'] = 0
plane.uniforms['flat_shading'] = True
fps_display = pyglet.window.FPSDisplay(window)
# Light position is animated in update(); .time is a custom attribute
# driving the orbit (not part of the rc.Light API).
light = rc.Light()
light.projection.fov_y = 75.
light.position.z = 3
light.time = 0.
# Depth-only framebuffer: the shadow pass in on_draw renders into this
# texture, which is then attached to the meshes for shadow sampling.
fbo_shadow = rc.FBO(texture=rc.DepthTexture(width=2048, height=2048))
plane.textures.append(fbo_shadow.texture)
plane.textures.append(rc.Texture.from_image(rc.resources.img_colorgrid))
monkey.textures.append(fbo_shadow.texture)
@window.event
def on_draw():
    """Render two passes: scene depth into the shadow FBO, then the lit scene."""
    window.clear()
    with ExitStack() as stack:
        # First iteration: shadow pass (depth only, drawn into fbo_shadow).
        # Second iteration: normal pass, sampling the depth texture.
        for shader in [rc.resources.shadow_shader, rc.default_shader]:
            with shader, rc.default_states, light, rc.default_camera:
                if shader == rc.resources.shadow_shader:
                    # Redirect rendering into the depth texture for this pass.
                    stack.enter_context(fbo_shadow)
                    window.clear()
                else:
                    # Unbind the FBO (closes the context entered above) so the
                    # visible pass renders to the window again.
                    stack.close()
                for x, y in it.product([-2, -1, 0, 1, 2], [-2, -1, 0, 1, 2]):
                    monkey.position.x = x
                    monkey.position.y = y
                    # Checkerboard: odd grid cells render as point clouds.
                    monkey.drawmode = rc.GL_POINTS if x % 2 and y % 2 else rc.GL_TRIANGLES
                    monkey.uniforms['diffuse'][0] = (x + 1) / 4.
                    monkey.uniforms['diffuse'][1:] = (y + 1) / 4.
                    # Depth-scale grows with distance from the grid center.
                    monkey.scale.z = np.linalg.norm((x, y)) / 10. + .03
                    monkey.draw()
                plane.draw()
    fps_display.draw()
def update(dt):
    """Per-frame animation: spin the monkeys and orbit the light."""
    monkey.rotation.y += dt * 40.
    light.time += dt
    # Circular orbit of radius 3 in the XY plane, half a radian per second.
    light.position.x = np.sin(light.time * .5) * 3
    light.position.y = np.cos(light.time * .5) * 3
pyglet.clock.schedule(update)
pyglet.app.run() |
thatotherguy/cryptsftp | refs/heads/master | lib/paramiko/sftp_si.py | 27 | # Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
L{SFTPServerInterface} is an interface to override for SFTP server support.
"""
import os
from paramiko.common import *
from paramiko.sftp import *
class SFTPServerInterface (object):
    """
    This class defines an interface for controlling the behavior of paramiko
    when using the L{SFTPServer} subsystem to provide an SFTP server.

    Methods on this class are called from the SFTP session's thread, so you can
    block as long as necessary without affecting other sessions (even other
    SFTP sessions). However, raising an exception will usually cause the SFTP
    session to abruptly end, so you will usually want to catch exceptions and
    return an appropriate error code.

    All paths are in string form instead of unicode because not all SFTP
    clients & servers obey the requirement that paths be encoded in UTF-8.
    """
    def __init__ (self, server, *largs, **kwargs):
        """
        Create a new SFTPServerInterface object.  This method does nothing by
        default and is meant to be overridden by subclasses.

        @param server: the server object associated with this channel and
            SFTP subsystem
        @type server: L{ServerInterface}
        """
        super(SFTPServerInterface, self).__init__(*largs, **kwargs)
    def session_started(self):
        """
        The SFTP server session has just started.  This method is meant to be
        overridden to perform any necessary setup before handling callbacks
        from SFTP operations.
        """
        pass
    def session_ended(self):
        """
        The SFTP server session has just ended, either cleanly or via an
        exception.  This method is meant to be overridden to perform any
        necessary cleanup before this C{SFTPServerInterface} object is
        destroyed.
        """
        pass
    def open(self, path, flags, attr):
        """
        Open a file on the server and create a handle for future operations
        on that file.  On success, a new object subclassed from L{SFTPHandle}
        should be returned.  This handle will be used for future operations
        on the file (read, write, etc).  On failure, an error code such as
        L{SFTP_PERMISSION_DENIED} should be returned.

        C{flags} contains the requested mode for opening (read-only,
        write-append, etc) as a bitset of flags from the C{os} module:
            - C{os.O_RDONLY}
            - C{os.O_WRONLY}
            - C{os.O_RDWR}
            - C{os.O_APPEND}
            - C{os.O_CREAT}
            - C{os.O_TRUNC}
            - C{os.O_EXCL}
        (One of C{os.O_RDONLY}, C{os.O_WRONLY}, or C{os.O_RDWR} will always
        be set.)

        The C{attr} object contains requested attributes of the file if it
        has to be created.  Some or all attribute fields may be missing if
        the client didn't specify them.

        @note: The SFTP protocol defines all files to be in "binary" mode.
            There is no equivalent to python's "text" mode.

        @param path: the requested path (relative or absolute) of the file
            to be opened.
        @type path: str
        @param flags: flags or'd together from the C{os} module indicating the
            requested mode for opening the file.
        @type flags: int
        @param attr: requested attributes of the file if it is newly created.
        @type attr: L{SFTPAttributes}
        @return: a new L{SFTPHandle} I{or error code}.
        @rtype: L{SFTPHandle}
        """
        return SFTP_OP_UNSUPPORTED
    def list_folder(self, path):
        """
        Return a list of files within a given folder.  The C{path} will use
        posix notation (C{"/"} separates folder names) and may be an absolute
        or relative path.

        The list of files is expected to be a list of L{SFTPAttributes}
        objects, which are similar in structure to the objects returned by
        C{os.stat}.  In addition, each object should have its C{filename}
        field filled in, since this is important to a directory listing and
        not normally present in C{os.stat} results.  The method
        L{SFTPAttributes.from_stat} will usually do what you want.

        In case of an error, you should return one of the C{SFTP_*} error
        codes, such as L{SFTP_PERMISSION_DENIED}.

        @param path: the requested path (relative or absolute) to be listed.
        @type path: str
        @return: a list of the files in the given folder, using
            L{SFTPAttributes} objects.
        @rtype: list of L{SFTPAttributes} I{or error code}

        @note: You should normalize the given C{path} first (see the
        C{os.path} module) and check appropriate permissions before returning
        the list of files.  Be careful of malicious clients attempting to use
        relative paths to escape restricted folders, if you're doing a direct
        translation from the SFTP server path to your local filesystem.
        """
        return SFTP_OP_UNSUPPORTED
    def stat(self, path):
        """
        Return an L{SFTPAttributes} object for a path on the server, or an
        error code.  If your server supports symbolic links (also known as
        "aliases"), you should follow them.  (L{lstat} is the corresponding
        call that doesn't follow symlinks/aliases.)

        @param path: the requested path (relative or absolute) to fetch
            file statistics for.
        @type path: str
        @return: an attributes object for the given file, or an SFTP error
            code (like L{SFTP_PERMISSION_DENIED}).
        @rtype: L{SFTPAttributes} I{or error code}
        """
        return SFTP_OP_UNSUPPORTED
    def lstat(self, path):
        """
        Return an L{SFTPAttributes} object for a path on the server, or an
        error code.  If your server supports symbolic links (also known as
        "aliases"), you should I{not} follow them -- instead, you should
        return data on the symlink or alias itself.  (L{stat} is the
        corresponding call that follows symlinks/aliases.)

        @param path: the requested path (relative or absolute) to fetch
            file statistics for.
        @type path: str
        @return: an attributes object for the given file, or an SFTP error
            code (like L{SFTP_PERMISSION_DENIED}).
        @rtype: L{SFTPAttributes} I{or error code}
        """
        return SFTP_OP_UNSUPPORTED
    def remove(self, path):
        """
        Delete a file, if possible.

        @param path: the requested path (relative or absolute) of the file
            to delete.
        @type path: str
        @return: an SFTP error code like L{SFTP_OK}.
        @rtype: int
        """
        return SFTP_OP_UNSUPPORTED
    def rename(self, oldpath, newpath):
        """
        Rename (or move) a file.  The SFTP specification implies that this
        method can be used to move an existing file into a different folder,
        and since there's no other (easy) way to move files via SFTP, it's
        probably a good idea to implement "move" in this method too, even for
        files that cross disk partition boundaries, if at all possible.

        @note: You should return an error if a file with the same name as
            C{newpath} already exists.  (The rename operation should be
            non-destructive.)

        @param oldpath: the requested path (relative or absolute) of the
            existing file.
        @type oldpath: str
        @param newpath: the requested new path of the file.
        @type newpath: str
        @return: an SFTP error code like L{SFTP_OK}.
        @rtype: int
        """
        return SFTP_OP_UNSUPPORTED
    def mkdir(self, path, attr):
        """
        Create a new directory with the given attributes.  The C{attr}
        object may be considered a "hint" and ignored.

        The C{attr} object will contain only those fields provided by the
        client in its request, so you should use C{hasattr} to check for
        the presence of fields before using them.  In some cases, the C{attr}
        object may be completely empty.

        @param path: requested path (relative or absolute) of the new
            folder.
        @type path: str
        @param attr: requested attributes of the new folder.
        @type attr: L{SFTPAttributes}
        @return: an SFTP error code like L{SFTP_OK}.
        @rtype: int
        """
        return SFTP_OP_UNSUPPORTED
    def rmdir(self, path):
        """
        Remove a directory if it exists.  The C{path} should refer to an
        existing, empty folder -- otherwise this method should return an
        error.

        @param path: requested path (relative or absolute) of the folder
            to remove.
        @type path: str
        @return: an SFTP error code like L{SFTP_OK}.
        @rtype: int
        """
        return SFTP_OP_UNSUPPORTED
    def chattr(self, path, attr):
        """
        Change the attributes of a file.  The C{attr} object will contain
        only those fields provided by the client in its request, so you
        should check for the presence of fields before using them.

        @param path: requested path (relative or absolute) of the file to
            change.
        @type path: str
        @param attr: requested attributes to change on the file.
        @type attr: L{SFTPAttributes}
        @return: an error code like L{SFTP_OK}.
        @rtype: int
        """
        return SFTP_OP_UNSUPPORTED
    def canonicalize(self, path):
        """
        Return the canonical form of a path on the server.  For example,
        if the server's home folder is C{/home/foo}, the path
        C{"../betty"} would be canonicalized to C{"/home/betty"}.  Note
        the obvious security issues: if you're serving files only from a
        specific folder, you probably don't want this method to reveal path
        names outside that folder.

        You may find the python methods in C{os.path} useful, especially
        C{os.path.normpath} and C{os.path.realpath}.

        The default implementation returns C{os.path.normpath('/' + path)}.
        """
        if os.path.isabs(path):
            out = os.path.normpath(path)
        else:
            out = os.path.normpath('/' + path)
        # NOTE(review): 'sys' is not imported in this module directly;
        # presumably it arrives via 'from paramiko.common import *' -- verify.
        if sys.platform == 'win32':
            # on windows, normalize backslashes to sftp/posix format
            out = out.replace('\\', '/')
        return out
    def readlink(self, path):
        """
        Return the target of a symbolic link (or shortcut) on the server.
        If the specified path doesn't refer to a symbolic link, an error
        should be returned.

        @param path: path (relative or absolute) of the symbolic link.
        @type path: str
        @return: the target path of the symbolic link, or an error code like
            L{SFTP_NO_SUCH_FILE}.
        @rtype: str I{or error code}
        """
        return SFTP_OP_UNSUPPORTED
    def symlink(self, target_path, path):
        """
        Create a symbolic link on the server, as new pathname C{path},
        with C{target_path} as the target of the link.

        @param target_path: path (relative or absolute) of the target for
            this new symbolic link.
        @type target_path: str
        @param path: path (relative or absolute) of the symbolic link to
            create.
        @type path: str
        @return: an error code like C{SFTP_OK}.
        @rtype: int
        """
        return SFTP_OP_UNSUPPORTED
|
johnkit/vtk-dev | refs/heads/master | IO/EnSight/Testing/Python/EnSightBlow2ASCII.py | 20 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create a rendering window and renderer
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.StereoCapableWindowOn()
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Read an ASCII EnSight case file at time value 1.
reader = vtk.vtkGenericEnSightReader()
# Make sure all algorithms use the composite data pipeline
cdp = vtk.vtkCompositeDataPipeline()
reader.SetDefaultExecutivePrototype(cdp)
reader.SetCaseFileName("" + str(VTK_DATA_ROOT) + "/Data/EnSight/blow2_ascii.case")
reader.SetTimeValue(1)
# Extract surface geometry from the (composite) dataset for rendering.
geom = vtk.vtkGeometryFilter()
geom.SetInputConnection(reader.GetOutputPort())
# Color the surface by component 0 of the "displacement" point-data array.
mapper = vtk.vtkHierarchicalPolyDataMapper()
mapper.SetInputConnection(geom.GetOutputPort())
mapper.SetColorModeToMapScalars()
mapper.SetScalarModeToUsePointFieldData()
mapper.ColorByArrayComponent("displacement",0)
mapper.SetScalarRange(0,2.08)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# assign our actor to the renderer
ren1.AddActor(actor)
# enable user interface interactor
iren.Initialize()
# Fixed camera so the regression image is reproducible.
ren1.GetActiveCamera().SetPosition(99.3932,17.6571,-22.6071)
ren1.GetActiveCamera().SetFocalPoint(3.5,12,1.5)
ren1.GetActiveCamera().SetViewAngle(30)
ren1.GetActiveCamera().SetViewUp(0.239617,-0.01054,0.97081)
ren1.ResetCameraClippingRange()
renWin.Render()
# prevent the tk window from showing up then start the event loop
reader.SetDefaultExecutivePrototype(None)
# --- end of script --
|
jucimarjr/IPC_2017-1 | refs/heads/master | lista04/lista04_lista02_questao20.py | 1 | #-----------------------------------------------------------------------------------------------------------------------
# Introdução a Programação de Computadores - IPC
# Universidade do Estado do Amazonas - UEA
#
# Adham Lucas da Silva Oliveira 1715310059
# Alexandre Marques Uchôa 1715310028
# André Luís Laborda Neves 1515070006
# Carlos Eduardo Tapudima de Oliveira 1715310030
# Diego Reis Figueira 1515070169
#
#Leia um valor inteiro correspondente à idade de uma pessoa em dias e informe-a em anos, meses e dias
#-----------------------------------------------------------------------------------------------------------------------
# Read an age in days and report it as years, months and days, using the
# exercise's convention of 365-day years and 30-day months.
days = int(input())
years, days = divmod(days, 365)
months, days = divmod(days, 30)
print(years,'ano(s)')
print(months,'mes(es)')
print(days,'dia(s)')
|
charbeljc/account-invoicing | refs/heads/8.0 | account_invoice_partner/model/__init__.py | 46 | from . import account_invoice
|
taaviteska/django | refs/heads/master | tests/view_tests/tests/test_defaults.py | 60 | import datetime
from django.contrib.sites.models import Site
from django.http import Http404
from django.template import TemplateDoesNotExist
from django.test import RequestFactory, TestCase
from django.test.utils import override_settings
from django.views.defaults import (
bad_request, page_not_found, permission_denied, server_error,
)
from ..models import Article, Author, UrlArticle
@override_settings(ROOT_URLCONF='view_tests.urls')
class DefaultsTests(TestCase):
    """Test django views in django/views/defaults.py"""
    # Both URLs must 404: the first resolves to a view that raises Http404,
    # the second does not resolve at all.
    nonexistent_urls = [
        '/nonexistent_url/', # this is in urls.py
        '/other_nonexistent_url/', # this NOT in urls.py
    ]
    @classmethod
    def setUpTestData(cls):
        # Fixture data shared by all tests; the future-dated article and the
        # Site row support views exercised through the test URLconf.
        Author.objects.create(name='Boris')
        Article.objects.create(
            title='Old Article', slug='old_article', author_id=1,
            date_created=datetime.datetime(2001, 1, 1, 21, 22, 23)
        )
        Article.objects.create(
            title='Current Article', slug='current_article', author_id=1,
            date_created=datetime.datetime(2007, 9, 17, 21, 22, 23)
        )
        Article.objects.create(
            title='Future Article', slug='future_article', author_id=1,
            date_created=datetime.datetime(3000, 1, 1, 21, 22, 23)
        )
        UrlArticle.objects.create(
            title='Old Article', slug='old_article', author_id=1,
            date_created=datetime.datetime(2001, 1, 1, 21, 22, 23)
        )
        Site(id=1, domain='testserver', name='testserver').save()
    def test_page_not_found(self):
        "A 404 status is returned by the page_not_found view"
        for url in self.nonexistent_urls:
            response = self.client.get(url)
            self.assertEqual(response.status_code, 404)
    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'OPTIONS': {
            'loaders': [
                ('django.template.loaders.locmem.Loader', {
                    '404.html': '{{ csrf_token }}',
                }),
            ],
        },
    }])
    def test_csrf_token_in_404(self):
        """
        The 404 page should have the csrf_token available in the context
        """
        # See ticket #14565
        for url in self.nonexistent_urls:
            response = self.client.get(url)
            # 'NOTPROVIDED' is what {{ csrf_token }} renders when the token
            # is absent from the template context.
            self.assertNotEqual(response.content, b'NOTPROVIDED')
            self.assertNotEqual(response.content, b'')
    def test_server_error(self):
        "The server_error view raises a 500 status"
        response = self.client.get('/server_error/')
        self.assertEqual(response.status_code, 500)
    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'OPTIONS': {
            'loaders': [
                ('django.template.loaders.locmem.Loader', {
                    '404.html': 'This is a test template for a 404 error '
                                '(path: {{ request_path }}, exception: {{ exception }}).',
                    '500.html': 'This is a test template for a 500 error.',
                }),
            ],
        },
    }])
    def test_custom_templates(self):
        """
        404.html and 500.html templates are picked by their respective handler.
        """
        response = self.client.get('/server_error/')
        self.assertContains(response, "test template for a 500 error", status_code=500)
        response = self.client.get('/no_such_url/')
        self.assertContains(response, 'path: /no_such_url/', status_code=404)
        self.assertContains(response, 'exception: Resolver404', status_code=404)
        response = self.client.get('/technical404/')
        self.assertContains(response, 'exception: Testing technical 404.', status_code=404)
    def test_get_absolute_url_attributes(self):
        "A model can set attributes on the get_absolute_url method"
        self.assertTrue(getattr(UrlArticle.get_absolute_url, 'purge', False),
                        'The attributes of the original get_absolute_url must be added.')
        article = UrlArticle.objects.get(pk=1)
        self.assertTrue(getattr(article.get_absolute_url, 'purge', False),
                        'The attributes of the original get_absolute_url must be added.')
    def test_custom_templates_wrong(self):
        """
        Default error views should raise TemplateDoesNotExist when passed a
        template that doesn't exist.
        """
        rf = RequestFactory()
        request = rf.get('/')
        with self.assertRaises(TemplateDoesNotExist):
            bad_request(request, Exception(), template_name='nonexistent')
        with self.assertRaises(TemplateDoesNotExist):
            permission_denied(request, Exception(), template_name='nonexistent')
        with self.assertRaises(TemplateDoesNotExist):
            page_not_found(request, Http404(), template_name='nonexistent')
        with self.assertRaises(TemplateDoesNotExist):
            server_error(request, template_name='nonexistent')
|
elena/django | refs/heads/master | tests/ordering/models.py | 22 | """
Specifying ordering
Specify default ordering for a model using the ``ordering`` attribute, which
should be a list or tuple of field names. This tells Django how to order
``QuerySet`` results.
If a field name in ``ordering`` starts with a hyphen, that field will be
ordered in descending order. Otherwise, it'll be ordered in ascending order.
The special-case field name ``"?"`` specifies random order.
The ordering attribute is not required. If you leave it off, ordering will be
undefined -- not random, just undefined.
"""
from django.db import models
class Author(models.Model):
    """Author with a default descending-pk ordering."""
    # Nullable so ordering across NULL values can be exercised.
    name = models.CharField(max_length=63, null=True, blank=True)
    # Self-referential FK: an author may have another author as editor.
    editor = models.ForeignKey('self', models.CASCADE, null=True)

    class Meta:
        # Default ordering: newest primary key first.
        ordering = ('-pk',)
class Article(models.Model):
    """Article whose Meta.ordering mixes every supported ordering spec."""
    author = models.ForeignKey(Author, models.SET_NULL, null=True)
    # Second FK to the same model; related_name='+' disables the reverse
    # accessor so the two relations don't clash.
    second_author = models.ForeignKey(Author, models.SET_NULL, null=True, related_name='+')
    headline = models.CharField(max_length=100)
    pub_date = models.DateTimeField()

    class Meta:
        # One of each ordering form: a '-'-prefixed field name, a bare F()
        # expression, an F() with an explicit direction, and a fully
        # explicit OrderBy expression.
        ordering = (
            '-pub_date',
            models.F('headline'),
            models.F('author__name').asc(),
            models.OrderBy(models.F('second_author__name')),
        )
class OrderedByAuthorArticle(Article):
    """Proxy of Article that overrides ordering with relation fields."""
    class Meta:
        proxy = True
        ordering = ('author', 'second_author')
class OrderedByFArticle(Article):
    """Proxy of Article ordered by an F() expression with NULLs first."""
    class Meta:
        proxy = True
        ordering = (models.F('author').asc(nulls_first=True), 'id')
class ChildArticle(Article):
    # Concrete (multi-table) subclass; inherits Article's Meta.ordering.
    pass
class Reference(models.Model):
    """Model ordered by a relation, pulling in the related model's ordering."""
    article = models.ForeignKey(OrderedByAuthorArticle, models.CASCADE)

    class Meta:
        ordering = ('article',)
|
GiladE/birde | refs/heads/master | venv/lib/python2.7/site-packages/django/contrib/auth/tests/test_views.py | 35 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from importlib import import_module
import itertools
import os
import re
from django.apps import apps
from django.conf import global_settings, settings
from django.contrib.sites.requests import RequestSite
from django.contrib.admin.models import LogEntry
from django.contrib.auth.models import User
from django.core import mail
from django.core.urlresolvers import NoReverseMatch, reverse, reverse_lazy
from django.http import QueryDict, HttpRequest
from django.utils.encoding import force_text
from django.utils.http import urlquote
from django.utils.six.moves.urllib.parse import urlparse, ParseResult
from django.utils.translation import LANGUAGE_SESSION_KEY
from django.utils._os import upath
from django.test import TestCase, override_settings
from django.test.utils import patch_logger
from django.middleware.csrf import CsrfViewMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.auth import SESSION_KEY, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import (AuthenticationForm, PasswordChangeForm,
SetPasswordForm)
# Needed so model is installed when tests are run independently:
from django.contrib.auth.tests.custom_user import CustomUser # NOQA
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.auth.views import login as login_view, redirect_to_login
@override_settings(
    LANGUAGES=(
        ('en', 'English'),
    ),
    LANGUAGE_CODE='en',
    TEMPLATE_LOADERS=global_settings.TEMPLATE_LOADERS,
    TEMPLATE_DIRS=(
        os.path.join(os.path.dirname(upath(__file__)), 'templates'),
    ),
    USE_TZ=False,
    PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
)
class AuthViewsTestCase(TestCase):
    """
    Helper base class for all the follow test cases.
    """
    fixtures = ['authtestdata.json']
    urls = 'django.contrib.auth.tests.urls'

    def login(self, username='testclient', password='password'):
        """Log in via the test client, assert a session key was set and
        return the login response for further inspection."""
        response = self.client.post('/login/', {
            'username': username,
            'password': password,
        })
        # assertIn/assertNotIn give far more informative failure messages
        # than assertTrue(x in y) / assertTrue(x not in y).
        self.assertIn(SESSION_KEY, self.client.session)
        return response

    def logout(self):
        """Log out through the admin logout view and assert the session
        no longer carries an authenticated user."""
        response = self.client.get('/admin/logout/')
        self.assertEqual(response.status_code, 200)
        self.assertNotIn(SESSION_KEY, self.client.session)

    def assertFormError(self, response, error):
        """Assert that error is found in response.context['form'] errors"""
        # Flatten the per-field error lists into one list of messages.
        form_errors = list(itertools.chain(*response.context['form'].errors.values()))
        self.assertIn(force_text(error), form_errors)

    def assertURLEqual(self, url, expected, parse_qs=False):
        """
        Given two URLs, make sure all their components (the ones given by
        urlparse) are equal, only comparing components that are present in both
        URLs.
        If `parse_qs` is True, then the querystrings are parsed with QueryDict.
        This is useful if you don't want the order of parameters to matter.
        Otherwise, the query strings are compared as-is.
        """
        fields = ParseResult._fields
        for attr, x, y in zip(fields, urlparse(url), urlparse(expected)):
            if parse_qs and attr == 'query':
                x, y = QueryDict(x), QueryDict(y)
            # Components missing from either URL (falsy) are ignored.
            if x and y and x != y:
                self.fail("%r != %r (%s doesn't match)" % (url, expected, attr))
@skipIfCustomUser
class AuthViewNamedURLTests(AuthViewsTestCase):
    """Sanity checks for the named URL patterns in django.contrib.auth.urls."""
    urls = 'django.contrib.auth.urls'

    def test_named_urls(self):
        "Named URLs should be reversible"
        # (name, args, kwargs) triples covering every URL pattern the
        # auth app ships; reverse() must succeed for each.
        expected_named_urls = [
            ('login', [], {}),
            ('logout', [], {}),
            ('password_change', [], {}),
            ('password_change_done', [], {}),
            ('password_reset', [], {}),
            ('password_reset_done', [], {}),
            ('password_reset_confirm', [], {
                'uidb64': 'aaaaaaa',
                'token': '1111-aaaaa',
            }),
            ('password_reset_complete', [], {}),
        ]
        for name, args, kwargs in expected_named_urls:
            try:
                reverse(name, args=args, kwargs=kwargs)
            except NoReverseMatch:
                self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name)
@skipIfCustomUser
class PasswordResetTest(AuthViewsTestCase):
    """End-to-end tests of the password-reset flow: request form, reset
    email, confirm link handling and the various redirect targets."""

    def test_email_not_found(self):
        """If the provided email is not registered, don't raise any error but
        also don't send any email."""
        response = self.client.get('/password_reset/')
        self.assertEqual(response.status_code, 200)
        response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'})
        # Still redirects to the "done" page, so the form doesn't leak
        # which addresses exist.
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 0)

    def test_email_found(self):
        "Email is sent if a valid email address is provided for password reset"
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertTrue("http://" in mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
        # optional multipart text/html email has been added. Make sure original,
        # default functionality is 100% the same
        self.assertFalse(mail.outbox[0].message().is_multipart())

    def test_html_mail_template(self):
        """
        A multipart email with text/plain and text/html is sent
        if the html_email_template parameter is passed to the view
        """
        response = self.client.post('/password_reset/html_email_template/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0].message()
        # Exactly two parts: plain text first, HTML alternative second.
        self.assertEqual(len(message.get_payload()), 2)
        self.assertTrue(message.is_multipart())
        self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
        self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
        self.assertTrue('<html>' not in message.get_payload(0).get_payload())
        self.assertTrue('<html>' in message.get_payload(1).get_payload())

    def test_email_found_custom_from(self):
        "Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
        response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual("staffmember@example.com", mail.outbox[0].from_email)

    @override_settings(ALLOWED_HOSTS=['adminsite.com'])
    def test_admin_reset(self):
        "If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override."
        response = self.client.post('/admin_password_reset/',
            {'email': 'staffmember@example.com'},
            HTTP_HOST='adminsite.com'
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        # The reset link in the email must use the admin site's host.
        self.assertTrue("http://adminsite.com" in mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)

    # Skip any 500 handler action (like sending more mail...)
    @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
    def test_poisoned_http_host(self):
        "Poisoned HTTP_HOST headers can't be used for reset emails"
        # This attack is based on the way browsers handle URLs. The colon
        # should be used to separate the port, but if the URL contains an @,
        # the colon is interpreted as part of a username for login purposes,
        # making 'evil.com' the request domain. Since HTTP_HOST is used to
        # produce a meaningful reset URL, we need to be certain that the
        # HTTP_HOST header isn't poisoned. This is done as a check when get_host()
        # is invoked, but we check here as a practical consequence.
        with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
            response = self.client.post(
                '/password_reset/',
                {'email': 'staffmember@example.com'},
                HTTP_HOST='www.example:dr.frankenstein@evil.tld'
            )
            # Bad host -> 400, no email, and exactly one security log entry.
            self.assertEqual(response.status_code, 400)
            self.assertEqual(len(mail.outbox), 0)
            self.assertEqual(len(logger_calls), 1)

    # Skip any 500 handler action (like sending more mail...)
    @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
    def test_poisoned_http_host_admin_site(self):
        "Poisoned HTTP_HOST headers can't be used for reset emails on admin views"
        with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
            response = self.client.post(
                '/admin_password_reset/',
                {'email': 'staffmember@example.com'},
                HTTP_HOST='www.example:dr.frankenstein@evil.tld'
            )
            self.assertEqual(response.status_code, 400)
            self.assertEqual(len(mail.outbox), 0)
            self.assertEqual(len(logger_calls), 1)

    def _test_confirm_start(self):
        # Start by creating the email
        self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(len(mail.outbox), 1)
        return self._read_signup_email(mail.outbox[0])

    def _read_signup_email(self, email):
        # Returns (full URL, path-only part) of the confirm link in the email.
        urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
        self.assertTrue(urlmatch is not None, "No URL found in sent email")
        return urlmatch.group(), urlmatch.groups()[0]

    def test_confirm_valid(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # redirect to a 'complete' page:
        self.assertContains(response, "Please enter your new password")

    def test_confirm_invalid(self):
        url, path = self._test_confirm_start()
        # Let's munge the token in the path, but keep the same length,
        # in case the URLconf will reject a different length.
        path = path[:-5] + ("0" * 4) + path[-1]
        response = self.client.get(path)
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_invalid_user(self):
        # Ensure that we get a 200 response for a non-existent user, not a 404
        response = self.client.get('/reset/123456/1-1/')
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_overflow_user(self):
        # Ensure that we get a 200 response for a base36 user id that overflows int
        response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_invalid_post(self):
        # Same as test_confirm_invalid, but trying
        # to do a POST instead.
        url, path = self._test_confirm_start()
        path = path[:-5] + ("0" * 4) + path[-1]
        self.client.post(path, {
            'new_password1': 'anewpassword',
            'new_password2': ' anewpassword',
        })
        # Check the password has not been changed
        u = User.objects.get(email='staffmember@example.com')
        self.assertTrue(not u.check_password("anewpassword"))

    def test_confirm_complete(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        # Check the password has been changed
        u = User.objects.get(email='staffmember@example.com')
        self.assertTrue(u.check_password("anewpassword"))
        # Check we can't use the link again
        response = self.client.get(path)
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_different_passwords(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'x'})
        self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])

    def test_reset_redirect_default(self):
        response = self.client.post('/password_reset/',
            {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/done/')

    def test_reset_custom_redirect(self):
        response = self.client.post('/password_reset/custom_redirect/',
            {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')

    def test_reset_custom_redirect_named(self):
        # post_reset_redirect given as a URL *name* must be resolved.
        response = self.client.post('/password_reset/custom_redirect/named/',
            {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')

    def test_confirm_redirect_default(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/reset/done/')

    def test_confirm_redirect_custom(self):
        url, path = self._test_confirm_start()
        path = path.replace('/reset/', '/reset/custom/')
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')

    def test_confirm_redirect_custom_named(self):
        url, path = self._test_confirm_start()
        path = path.replace('/reset/', '/reset/custom/named/')
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')

    def test_confirm_display_user_from_form(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # #16919 -- The ``password_reset_confirm`` view should pass the user
        # object to the ``SetPasswordForm``, even on GET requests.
        # For this test, we render ``{{ form.user }}`` in the template
        # ``registration/password_reset_confirm.html`` so that we can test this.
        username = User.objects.get(email='staffmember@example.com').username
        self.assertContains(response, "Hello, %s." % username)
        # However, the view should NOT pass any user object on a form if the
        # password reset link was invalid.
        response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
        self.assertContains(response, "Hello, .")
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
class CustomUserPasswordResetTest(AuthViewsTestCase):
    """The password-reset confirm flow, exercised against a custom user model."""

    fixtures = ['custom_user.json']

    def _test_confirm_start(self):
        # Trigger the reset email, then hand back the confirm link it contains.
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        return self._read_signup_email(mail.outbox[0])

    def _read_signup_email(self, email):
        # Extract (full URL, path-only part) of the reset link from the body.
        match = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
        self.assertTrue(match is not None, "No URL found in sent email")
        return match.group(0), match.group(1)

    def test_confirm_valid_custom_user(self):
        """A valid confirm link renders the new-password form."""
        _, path = self._test_confirm_start()
        response = self.client.get(path)
        # redirect to a 'complete' page:
        self.assertContains(response, "Please enter your new password")
@skipIfCustomUser
class ChangePasswordTest(AuthViewsTestCase):
    """Tests for the password_change view: validation, success and redirects."""

    def fail_login(self, password='password'):
        """Attempt a login expected to fail and assert the invalid-login
        form error is shown."""
        response = self.client.post('/login/', {
            'username': 'testclient',
            'password': password,
        })
        self.assertFormError(response, AuthenticationForm.error_messages['invalid_login'] % {
            'username': User._meta.get_field('username').verbose_name
        })

    def logout(self):
        # Simpler than the base class version: just hit the logout view.
        self.client.get('/logout/')

    def test_password_change_fails_with_invalid_old_password(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'donuts',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertFormError(response, PasswordChangeForm.error_messages['password_incorrect'])

    def test_password_change_fails_with_mismatched_passwords(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'donuts',
        })
        self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])

    def test_password_change_succeeds(self):
        self.login()
        self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        # The old password must no longer work; the new one must.
        self.fail_login()
        self.login(password='password1')

    def test_password_change_done_succeeds(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_change/done/')

    @override_settings(LOGIN_URL='/login/')
    def test_password_change_done_fails(self):
        # The "done" page requires authentication: anonymous users are
        # bounced to the login view with a ?next= back-reference.
        response = self.client.get('/password_change/done/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/login/?next=/password_change/done/')

    def test_password_change_redirect_default(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_change/done/')

    def test_password_change_redirect_custom(self):
        self.login()
        response = self.client.post('/password_change/custom/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')

    def test_password_change_redirect_custom_named(self):
        # post_change_redirect given as a URL *name* must be resolved.
        self.login()
        response = self.client.post('/password_change/custom/named/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')
@override_settings(MIDDLEWARE_CLASSES=list(settings.MIDDLEWARE_CLASSES) + [
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware'
])
class SessionAuthenticationTests(AuthViewsTestCase):
    """Interaction of password change with SessionAuthenticationMiddleware."""

    def test_user_password_change_updates_session(self):
        """
        #21649 - Ensure contrib.auth.views.password_change updates the user's
        session auth hash after a password change so the session isn't logged out.
        """
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        # if the hash isn't updated, retrieving the redirection page will fail.
        self.assertRedirects(response, '/password_change/done/')
@skipIfCustomUser
class LoginTest(AuthViewsTestCase):
    """Tests for the login view: template context, redirect safety,
    CSRF token rotation and session-key handling."""

    def test_current_site_in_context_after_login(self):
        response = self.client.get(reverse('login'))
        self.assertEqual(response.status_code, 200)
        if apps.is_installed('django.contrib.sites'):
            Site = apps.get_model('sites.Site')
            site = Site.objects.get_current()
            self.assertEqual(response.context['site'], site)
            self.assertEqual(response.context['site_name'], site.name)
        else:
            # Without the sites framework a RequestSite is synthesized.
            self.assertIsInstance(response.context['site'], RequestSite)
        self.assertTrue(isinstance(response.context['form'], AuthenticationForm),
                        'Login form is not an AuthenticationForm')

    def test_security_check(self, password='password'):
        login_url = reverse('login')
        # Those URLs should not pass the security check
        for bad_url in ('http://example.com',
                        'http:///example.com',
                        'https://example.com',
                        'ftp://exampel.com',
                        '///example.com',
                        '//example.com',
                        'javascript:alert("XSS")'):
            nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'bad_url': urlquote(bad_url),
            }
            response = self.client.post(nasty_url, {
                'username': 'testclient',
                'password': password,
            })
            self.assertEqual(response.status_code, 302)
            # The unsafe target must not appear in the redirect.
            self.assertFalse(bad_url in response.url,
                             "%s should be blocked" % bad_url)
        # These URLs *should* still pass the security check
        for good_url in ('/view/?param=http://example.com',
                         '/view/?param=https://example.com',
                         '/view?param=ftp://exampel.com',
                         'view/?param=//example.com',
                         'https://testserver/',
                         'HTTPS://testserver/',
                         '//testserver/',
                         '/url%20with%20spaces/'):  # see ticket #12534
            safe_url = '%(url)s?%(next)s=%(good_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'good_url': urlquote(good_url),
            }
            response = self.client.post(safe_url, {
                'username': 'testclient',
                'password': password,
            })
            self.assertEqual(response.status_code, 302)
            self.assertTrue(good_url in response.url,
                            "%s should be allowed" % good_url)

    def test_login_form_contains_request(self):
        # 15198
        self.client.post('/custom_requestauth_login/', {
            'username': 'testclient',
            'password': 'password',
        }, follow=True)
        # the custom authentication form used by this login asserts
        # that a request is passed to the form successfully.

    def test_login_csrf_rotate(self, password='password'):
        """
        Makes sure that a login rotates the currently-used CSRF token.
        """
        # Do a GET to establish a CSRF token
        # TestClient isn't used here as we're testing middleware, essentially.
        req = HttpRequest()
        CsrfViewMiddleware().process_view(req, login_view, (), {})
        req.META["CSRF_COOKIE_USED"] = True
        resp = login_view(req)
        resp2 = CsrfViewMiddleware().process_response(req, resp)
        csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
        token1 = csrf_cookie.coded_value
        # Prepare the POST request
        req = HttpRequest()
        req.COOKIES[settings.CSRF_COOKIE_NAME] = token1
        req.method = "POST"
        req.POST = {'username': 'testclient', 'password': password, 'csrfmiddlewaretoken': token1}
        # Use POST request to log in
        SessionMiddleware().process_request(req)
        CsrfViewMiddleware().process_view(req, login_view, (), {})
        req.META["SERVER_NAME"] = "testserver"  # Required to have redirect work in login view
        req.META["SERVER_PORT"] = 80
        resp = login_view(req)
        resp2 = CsrfViewMiddleware().process_response(req, resp)
        csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
        token2 = csrf_cookie.coded_value
        # Check the CSRF token switched
        self.assertNotEqual(token1, token2)

    def test_session_key_flushed_on_login(self):
        """
        To avoid reusing another user's session, ensure a new, empty session is
        created if the existing session corresponds to a different authenticated
        user.
        """
        self.login()
        original_session_key = self.client.session.session_key
        self.login(username='staff')
        self.assertNotEqual(original_session_key, self.client.session.session_key)

    def test_session_key_flushed_on_login_after_password_change(self):
        """
        As above, but same user logging in after a password change.
        """
        self.login()
        original_session_key = self.client.session.session_key
        # If no password change, session key should not be flushed.
        self.login()
        self.assertEqual(original_session_key, self.client.session.session_key)
        user = User.objects.get(username='testclient')
        user.set_password('foobar')
        user.save()
        self.login(password='foobar')
        self.assertNotEqual(original_session_key, self.client.session.session_key)

    def test_login_session_without_hash_session_key(self):
        """
        Session without django.contrib.auth.HASH_SESSION_KEY should login
        without an exception.
        """
        user = User.objects.get(username='testclient')
        engine = import_module(settings.SESSION_ENGINE)
        # Hand-build a pre-existing session that lacks the auth hash key.
        session = engine.SessionStore()
        session[SESSION_KEY] = user.id
        session.save()
        original_session_key = session.session_key
        self.client.cookies[settings.SESSION_COOKIE_NAME] = original_session_key
        self.login()
        self.assertNotEqual(original_session_key, self.client.session.session_key)
@skipIfCustomUser
class LoginURLSettings(AuthViewsTestCase):
    """Tests for settings.LOGIN_URL."""

    def assertLoginURLEquals(self, url, parse_qs=False):
        """Hit a login_required view anonymously and assert the redirect
        target matches ``url``."""
        response = self.client.get('/login_required/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, url, parse_qs=parse_qs)

    @override_settings(LOGIN_URL='/login/')
    def test_standard_login_url(self):
        self.assertLoginURLEquals('/login/?next=/login_required/')

    @override_settings(LOGIN_URL='login')
    def test_named_login_url(self):
        # A URL pattern name is resolved before redirecting.
        self.assertLoginURLEquals('/login/?next=/login_required/')

    @override_settings(LOGIN_URL='http://remote.example.com/login')
    def test_remote_login_url(self):
        # With an absolute LOGIN_URL, next must carry the full original URL.
        quoted_next = urlquote('http://testserver/login_required/')
        expected = 'http://remote.example.com/login?next=%s' % quoted_next
        self.assertLoginURLEquals(expected)

    @override_settings(LOGIN_URL='https:///login/')
    def test_https_login_url(self):
        quoted_next = urlquote('http://testserver/login_required/')
        expected = 'https:///login/?next=%s' % quoted_next
        self.assertLoginURLEquals(expected)

    @override_settings(LOGIN_URL='/login/?pretty=1')
    def test_login_url_with_querystring(self):
        # parse_qs=True: parameter order in the querystring is irrelevant.
        self.assertLoginURLEquals('/login/?pretty=1&next=/login_required/', parse_qs=True)

    @override_settings(LOGIN_URL='http://remote.example.com/login/?next=/default/')
    def test_remote_login_url_with_next_querystring(self):
        # An existing ?next= in LOGIN_URL is replaced with the real target.
        quoted_next = urlquote('http://testserver/login_required/')
        expected = 'http://remote.example.com/login/?next=%s' % quoted_next
        self.assertLoginURLEquals(expected)

    @override_settings(LOGIN_URL=reverse_lazy('login'))
    def test_lazy_login_url(self):
        self.assertLoginURLEquals('/login/?next=/login_required/')
@skipIfCustomUser
class LoginRedirectUrlTest(AuthViewsTestCase):
    """Tests for settings.LOGIN_REDIRECT_URL."""

    def assertLoginRedirectURLEqual(self, url):
        """Log in and assert the post-login redirect target is ``url``."""
        response = self.login()
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, url)

    def test_default(self):
        self.assertLoginRedirectURLEqual('/accounts/profile/')

    @override_settings(LOGIN_REDIRECT_URL='/custom/')
    def test_custom(self):
        self.assertLoginRedirectURLEqual('/custom/')

    @override_settings(LOGIN_REDIRECT_URL='password_reset')
    def test_named(self):
        # A URL pattern name is resolved before redirecting.
        self.assertLoginRedirectURLEqual('/password_reset/')

    @override_settings(LOGIN_REDIRECT_URL='http://remote.example.com/welcome/')
    def test_remote(self):
        self.assertLoginRedirectURLEqual('http://remote.example.com/welcome/')
class RedirectToLoginTests(AuthViewsTestCase):
    """Tests for the redirect_to_login view"""

    @override_settings(LOGIN_URL=reverse_lazy('login'))
    def test_redirect_to_login_with_lazy(self):
        # A lazily reversed LOGIN_URL must be evaluated when the redirect
        # response is built.
        response = redirect_to_login(next='/else/where/')
        self.assertEqual(response.url, '/login/?next=/else/where/')

    @override_settings(LOGIN_URL=reverse_lazy('login'))
    def test_redirect_to_login_with_lazy_and_unicode(self):
        # Non-ASCII characters in the target URL end up percent-encoded.
        response = redirect_to_login(next='/else/where/झ/')
        self.assertEqual(response.url, '/login/?next=/else/where/%E0%A4%9D/')
@skipIfCustomUser
class LogoutTest(AuthViewsTestCase):
    """Tests for the logout view: templates, redirects and URL safety."""

    def confirm_logged_out(self):
        # The session must no longer carry an authenticated user.
        self.assertTrue(SESSION_KEY not in self.client.session)

    def test_logout_default(self):
        "Logout without next_page option renders the default template"
        self.login()
        response = self.client.get('/logout/')
        self.assertContains(response, 'Logged out')
        self.confirm_logged_out()

    def test_14377(self):
        # Bug 14377
        self.login()
        response = self.client.get('/logout/')
        # The logged-out template must still receive the 'site' variable.
        self.assertTrue('site' in response.context)

    def test_logout_with_overridden_redirect_url(self):
        # Bug 11223
        self.login()
        response = self.client.get('/logout/next_page/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/somewhere/')
        # A ?next= parameter overrides the view's next_page argument.
        response = self.client.get('/logout/next_page/?next=/login/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/login/')
        self.confirm_logged_out()

    def test_logout_with_next_page_specified(self):
        "Logout with next_page option given redirects to specified resource"
        self.login()
        response = self.client.get('/logout/next_page/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/somewhere/')
        self.confirm_logged_out()

    def test_logout_with_redirect_argument(self):
        "Logout with query string redirects to specified resource"
        self.login()
        response = self.client.get('/logout/?next=/login/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/login/')
        self.confirm_logged_out()

    def test_logout_with_custom_redirect_argument(self):
        "Logout with custom query string redirects to specified resource"
        self.login()
        response = self.client.get('/logout/custom_query/?follow=/somewhere/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/somewhere/')
        self.confirm_logged_out()

    def test_logout_with_named_redirect(self):
        "Logout resolves names or URLs passed as next_page."
        self.login()
        response = self.client.get('/logout/next_page/named/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')
        self.confirm_logged_out()

    def test_security_check(self, password='password'):
        logout_url = reverse('logout')
        # Those URLs should not pass the security check
        for bad_url in ('http://example.com',
                        'http:///example.com',
                        'https://example.com',
                        'ftp://exampel.com',
                        '///example.com',
                        '//example.com',
                        'javascript:alert("XSS")'):
            nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
                'url': logout_url,
                'next': REDIRECT_FIELD_NAME,
                'bad_url': urlquote(bad_url),
            }
            # Must log in again each iteration; logout ends the session.
            self.login()
            response = self.client.get(nasty_url)
            self.assertEqual(response.status_code, 302)
            self.assertFalse(bad_url in response.url,
                             "%s should be blocked" % bad_url)
            self.confirm_logged_out()
        # These URLs *should* still pass the security check
        for good_url in ('/view/?param=http://example.com',
                         '/view/?param=https://example.com',
                         '/view?param=ftp://exampel.com',
                         'view/?param=//example.com',
                         'https://testserver/',
                         'HTTPS://testserver/',
                         '//testserver/',
                         '/url%20with%20spaces/'):  # see ticket #12534
            safe_url = '%(url)s?%(next)s=%(good_url)s' % {
                'url': logout_url,
                'next': REDIRECT_FIELD_NAME,
                'good_url': urlquote(good_url),
            }
            self.login()
            response = self.client.get(safe_url)
            self.assertEqual(response.status_code, 302)
            self.assertTrue(good_url in response.url,
                            "%s should be allowed" % good_url)
            self.confirm_logged_out()

    def test_logout_preserve_language(self):
        """Check that language stored in session is preserved after logout"""
        # Create a new session with language
        engine = import_module(settings.SESSION_ENGINE)
        session = engine.SessionStore()
        session[LANGUAGE_SESSION_KEY] = 'pl'
        session.save()
        self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
        self.client.get('/logout/')
        # Logout flushes auth data but must keep the language preference.
        self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], 'pl')
@skipIfCustomUser
@override_settings(
    # Redirect in test_user_change_password will fail if session auth hash
    # isn't updated after password change (#21649)
    MIDDLEWARE_CLASSES=list(settings.MIDDLEWARE_CLASSES) + [
        'django.contrib.auth.middleware.SessionAuthenticationMiddleware'
    ],
    PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
)
class ChangelistTests(AuthViewsTestCase):
    """Tests of the admin user changelist/change views and their LogEntry
    audit records."""
    urls = 'django.contrib.auth.tests.urls_admin'

    def setUp(self):
        # Make me a superuser before logging in.
        User.objects.filter(username='testclient').update(is_staff=True, is_superuser=True)
        self.login()
        self.admin = User.objects.get(pk=1)

    def get_user_data(self, user):
        """Return a POST dict mirroring the admin change form for ``user``;
        the split date/time and initial-* fields match the admin widgets."""
        return {
            'username': user.username,
            'password': user.password,
            'email': user.email,
            'is_active': user.is_active,
            'is_staff': user.is_staff,
            'is_superuser': user.is_superuser,
            'last_login_0': user.last_login.strftime('%Y-%m-%d'),
            'last_login_1': user.last_login.strftime('%H:%M:%S'),
            'initial-last_login_0': user.last_login.strftime('%Y-%m-%d'),
            'initial-last_login_1': user.last_login.strftime('%H:%M:%S'),
            'date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
            'date_joined_1': user.date_joined.strftime('%H:%M:%S'),
            'initial-date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
            'initial-date_joined_1': user.date_joined.strftime('%H:%M:%S'),
            'first_name': user.first_name,
            'last_name': user.last_name,
        }

    # #20078 - users shouldn't be allowed to guess password hashes via
    # repeated password__startswith queries.
    def test_changelist_disallows_password_lookups(self):
        # A lookup that tries to filter on password isn't OK
        with patch_logger('django.security.DisallowedModelAdminLookup', 'error') as logger_calls:
            response = self.client.get('/admin/auth/user/?password__startswith=sha1$')
            self.assertEqual(response.status_code, 400)
            self.assertEqual(len(logger_calls), 1)

    def test_user_change_email(self):
        data = self.get_user_data(self.admin)
        data['email'] = 'new_' + data['email']
        response = self.client.post('/admin/auth/user/%s/' % self.admin.pk, data)
        self.assertRedirects(response, '/admin/auth/user/')
        # The audit log records exactly which field changed.
        row = LogEntry.objects.latest('id')
        self.assertEqual(row.change_message, 'Changed email.')

    def test_user_not_change(self):
        response = self.client.post('/admin/auth/user/%s/' % self.admin.pk,
            self.get_user_data(self.admin)
        )
        self.assertRedirects(response, '/admin/auth/user/')
        row = LogEntry.objects.latest('id')
        self.assertEqual(row.change_message, 'No fields changed.')

    def test_user_change_password(self):
        response = self.client.post('/admin/auth/user/%s/password/' % self.admin.pk, {
            'password1': 'password1',
            'password2': 'password1',
        })
        self.assertRedirects(response, '/admin/auth/user/%s/' % self.admin.pk)
        row = LogEntry.objects.latest('id')
        self.assertEqual(row.change_message, 'Changed password.')
        # The new password must actually work for a fresh login.
        self.logout()
        self.login(password='password1')

    def test_user_change_different_user_password(self):
        u = User.objects.get(email='staffmember@example.com')
        response = self.client.post('/admin/auth/user/%s/password/' % u.pk, {
            'password1': 'password1',
            'password2': 'password1',
        })
        self.assertRedirects(response, '/admin/auth/user/%s/' % u.pk)
        row = LogEntry.objects.latest('id')
        # The log entry is attributed to the acting admin, not the target.
        self.assertEqual(row.user_id, self.admin.pk)
        self.assertEqual(row.object_id, str(u.pk))
        self.assertEqual(row.change_message, 'Changed password.')
|
mrknow/filmkodi | refs/heads/master | plugin.video.fanfilm/resources/lib/resolvers/cloudyvideos.py | 2 | # -*- coding: utf-8 -*-
'''
FanFilm Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,time
from resources.lib.libraries import client
from resources.lib.libraries import jsunpack
def resolve(url):
    """Resolve a cloudyvideos.com embed/page URL to a direct media URL.

    Returns the direct http:// stream URL on success, or None when the file
    is missing or the page cannot be parsed after all retries.
    """
    try:
        # Normalise embed URLs to the plain file page and pull out the file id.
        url = url.replace('/embed-', '/')
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        page = 'http://cloudyvideos.com/%s' % url
        result = client.request(page, close=False)
        if '>File Not Found<' in result: raise Exception()
        # Re-submit the page's hidden form fields to reach the download page.
        post = {}
        f = client.parseDOM(result, 'Form', attrs = {'action': ''})
        k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
        for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
        post = urllib.urlencode(post)
        # The site sometimes needs a moment before serving the link:
        # retry up to 5 times, pausing between attempts.
        for i in range(0, 5):
            try:
                result = client.request(page, post=post, close=False)
                url = re.compile("file *: *'(.+?)'").findall(result)
                if len(url) == 0:
                    # No plain link: it is hidden inside packed JavaScript.
                    result = re.compile('(eval.*?\)\)\))').findall(result)
                    result = [i for i in result if '|download|' in i][0]
                    result = jsunpack.unpack(result)
                    url = client.parseDOM(result, 'embed', ret='src')
                    url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
                # Drop subtitle files and force a plain http scheme.
                url = [i for i in url if not i.endswith('.srt')]
                url = 'http://' + url[0].split('://', 1)[-1]
                return url
            except Exception:
                # Was a bare `except:`, which also swallowed SystemExit and
                # KeyboardInterrupt; catch Exception and retry after a pause.
                time.sleep(1)
    except Exception:
        # Best-effort resolver: any failure means "could not resolve".
        return
|
DemocracyClub/yournextrepresentative | refs/heads/master | ynr/apps/twitterbot/tests/test_twitter_update_usernames_command.py | 1 | from django.core.management import call_command
from django.test import TestCase, override_settings
from mock import Mock, patch
from candidates.tests.auth import TestUserMixin
from candidates.tests.output import capture_output, split_output
from people.tests.factories import PersonFactory
from twitterbot.helpers import TwitterBot
from twitterbot.management.twitter import TwitterAPIData
def fake_post_for_username_updater(*args, **kwargs):
    """Stand-in for ``requests.post`` used by the username-updater tests.

    Recognises exactly the two Twitter API payloads the tests issue — the
    bulk screen-name lookup and the single user-ID lookup — and returns a
    Mock whose ``.json()`` yields the canned user records. Any other
    payload raises, so unexpected API calls fail loudly.
    """
    payload = kwargs["data"]
    response = Mock()
    if payload.get("screen_name") == (
        "notatwitteraccounteither,notreallyatwitteraccount"
    ):
        response.json.return_value = [
            {
                "id": 321,
                "screen_name": "notreallyatwitteraccount",
                "profile_image_url_https": "https://example.com/a.jpg",
            },
            {
                "id": 765,
                "screen_name": "notatwitteraccounteither",
                "profile_image_url_https": "https://example.com/b.jpg",
            },
        ]
        return response
    if payload.get("user_id") == "987":
        response.json.return_value = [
            {
                "id": 987,
                "screen_name": "ascreennamewewereunawareof",
                "profile_image_url_https": "https://example.com/c.jpg",
            }
        ]
        return response
    raise Exception("No Twitter API stub for {} {}".format(args, kwargs))
@patch("twitterbot.management.twitter.requests")
class TestUpdateTwitterUsernamesCommand(TestUserMixin, TestCase):
    def setUp(self):
        """Create four people covering each combination of stored Twitter
        data: screen name only, user ID only, neither, and both."""
        for person_details in [
            {
                "attr": "just_screen_name",
                "name": "Person with just a Twitter screen name",
                # We'll get the API to return 321 for their user_id
                "screen_name": "notreallyatwitteraccount",
            },
            {
                "attr": "just_userid",
                "name": "Person with just a Twitter user ID",
                "user_id": "987",
            },
            {"attr": "no_twitter", "name": "Person with no Twitter details"},
            {
                "attr": "screen_name_and_user_id",
                "name": "Someone with a Twitter screen name and user ID",
                "user_id": "765",
                "screen_name": "notatwitteraccounteither",
            },
        ]:
            person = PersonFactory.create(name=person_details["name"])
            # Expose each person as an attribute, e.g. self.just_userid.
            setattr(self, person_details["attr"], person)
            person.tmp_person_identifiers.create(
                internal_identifier=person_details.get("user_id", None),
                value_type="twitter_username",
                value=person_details.get("screen_name", ""),
            )
    @override_settings(TWITTER_APP_ONLY_BEARER_TOKEN="madeuptoken")
    def test_commmand_verbose_output(self, mock_requests):
        """At verbosity 3 the command narrates every person it inspects."""
        mock_requests.post.side_effect = fake_post_for_username_updater
        with capture_output() as (out, err):
            call_command("twitterbot_update_usernames", verbosity=3)
        self.assertEqual(
            split_output(out),
            [
                "Person with just a Twitter screen name has Twitter screen name (notreallyatwitteraccount) but no user ID",
                "Adding the user ID 321",
                "Person with just a Twitter user ID has a Twitter user ID: 987",
                "Correcting the screen name from None to ascreennamewewereunawareof",
                "Person with no Twitter details had no Twitter account information",
                "Someone with a Twitter screen name and user ID has a Twitter user ID: 765",
                "The screen name (notatwitteraccounteither) was already correct",
            ],
        )
    @override_settings(TWITTER_APP_ONLY_BEARER_TOKEN="madeuptoken")
    def test_commmand_adds_screen_name(self, mock_requests):
        """A person known only by user ID gets their screen name filled in."""
        mock_requests.post.side_effect = fake_post_for_username_updater
        with capture_output() as (out, err):
            call_command("twitterbot_update_usernames")
        self.assertEqual(
            self.just_userid.get_single_identifier_value(
                value_type="twitter_username"
            ),
            "ascreennamewewereunawareof",
        )
        self.assertEqual(
            split_output(out),
            [
                "Adding the user ID 321",
                "Correcting the screen name from None to ascreennamewewereunawareof",
            ],
        )
    @override_settings(TWITTER_APP_ONLY_BEARER_TOKEN="madeuptoken")
    def test_commmand_adds_user_id(self, mock_requests):
        """A person known only by screen name gets their user ID filled in."""
        mock_requests.post.side_effect = fake_post_for_username_updater
        with capture_output() as (out, err):
            call_command("twitterbot_update_usernames")
        self.assertEqual(
            self.just_screen_name.get_single_identifier_of_type(
                "twitter_username"
            ).internal_identifier,
            "321",
        )
        self.assertEqual(
            split_output(out),
            [
                "Adding the user ID 321",
                "Correcting the screen name from None to ascreennamewewereunawareof",
            ],
        )
    @override_settings(TWITTER_APP_ONLY_BEARER_TOKEN="madeuptoken")
    def test_commmand_screen_name_was_wrong(self, mock_requests):
        """If Twitter reports a different screen name for a stored user ID,
        the stored screen name is corrected."""
        def fake_post_screen_name_wrong(*args, **kwargs):
            # Like fake_post_for_username_updater, but the user-ID lookup
            # reports a changed screen name for ID 765.
            data = kwargs["data"]
            mock_result = Mock()
            if "screen_name" in data:
                if (
                    data["screen_name"]
                    == "notatwitteraccounteither,notreallyatwitteraccount"
                ):
                    mock_result.json.return_value = [
                        {
                            "id": 321,
                            "screen_name": "notreallyatwitteraccount",
                            "profile_image_url_https": "https://example.com/a.jpg",
                        }
                    ]
                    return mock_result
            if "user_id" in data:
                if data["user_id"] == "765,987":
                    mock_result.json.return_value = [
                        {
                            "id": 987,
                            "screen_name": "ascreennamewewereunawareof",
                            "profile_image_url_https": "https://example.com/c.jpg",
                        },
                        {
                            "id": 765,
                            "screen_name": "changedscreenname",
                            "profile_image_url_https": "https://example.com/b.jpg",
                        },
                    ]
                    return mock_result
            raise Exception(
                "No Twitter API stub for {} {}".format(args, kwargs)
            )
        mock_requests.post.side_effect = fake_post_screen_name_wrong
        with capture_output() as (out, err):
            call_command("twitterbot_update_usernames")
        self.assertEqual(
            self.screen_name_and_user_id.get_single_identifier_value(
                "twitter_username"
            ),
            "changedscreenname",
        )
        self.assertEqual(
            split_output(out),
            [
                "Adding the user ID 321",
                "Correcting the screen name from None to ascreennamewewereunawareof",
                "Correcting the screen name from notatwitteraccounteither to changedscreenname",
            ],
        )
    @override_settings(TWITTER_APP_ONLY_BEARER_TOKEN="madeuptoken")
    def test_commmand_screen_name_disappeared(self, mock_requests):
        """A stored screen name Twitter no longer recognises is removed."""
        def fake_post_screen_name_disappeared(*args, **kwargs):
            # The bulk screen-name lookup now returns only one of the two
            # requested names; the other has vanished from Twitter.
            data = kwargs["data"]
            mock_result = Mock()
            if "screen_name" in data:
                if (
                    data["screen_name"]
                    == "notatwitteraccounteither,notreallyatwitteraccount"
                ):
                    mock_result.json.return_value = [
                        {
                            "id": 765,
                            "screen_name": "notatwitteraccounteither",
                            "profile_image_url_https": "https://example.com/b.jpg",
                        }
                    ]
                    return mock_result
            if "user_id" in data:
                if data["user_id"] == "987":
                    mock_result.json.return_value = [
                        {
                            "id": 987,
                            "screen_name": "ascreennamewewereunawareof",
                            "profile_image_url_https": "https://example.com/c.jpg",
                        }
                    ]
                    return mock_result
            raise Exception(
                "No Twitter API stub for {} {}".format(args, kwargs)
            )
        mock_requests.post.side_effect = fake_post_screen_name_disappeared
        with capture_output() as (out, err):
            call_command("twitterbot_update_usernames")
        self.assertEqual(self.just_screen_name.get_twitter_username, "")
        self.assertEqual(self.just_screen_name.get_twitter_username, "")
        self.assertEqual(
            split_output(out),
            [
                "Removing screen name notreallyatwitteraccount for Person "
                "with just a Twitter screen name as it is not a valid "
                "Twitter screen name. "
                "/person/{}/person-with-just-a-twitter-screen-name".format(
                    self.just_screen_name.id
                ),
                "Correcting the screen name from None to ascreennamewewereunawareof",
            ],
        )
    @override_settings(TWITTER_APP_ONLY_BEARER_TOKEN="madeuptoken")
    def test_commmand_user_id_disappeared(self, mock_requests):
        """Stored user IDs Twitter reports as nonexistent are removed."""
        def fake_post_user_id_disappeared(*args, **kwargs):
            # The user-ID lookup returns Twitter's "no matches" error body
            # instead of a list of users.
            data = kwargs["data"]
            mock_result = Mock()
            if "screen_name" in data:
                if (
                    data["screen_name"]
                    == "notatwitteraccounteither,notreallyatwitteraccount"
                ):
                    mock_result.json.return_value = [
                        {
                            "id": 321,
                            "screen_name": "notreallyatwitteraccount",
                            "profile_image_url_https": "https://example.com/a.jpg",
                        }
                    ]
                    return mock_result
            if "user_id" in data:
                if data["user_id"] == "765,987":
                    mock_result.json.return_value = {
                        "errors": [
                            {
                                "code": 17,
                                "message": "No user matches for specified terms.",
                            }
                        ]
                    }
                    return mock_result
            raise Exception(
                "No Twitter API stub for {} {}".format(args, kwargs)
            )
        mock_requests.post.side_effect = fake_post_user_id_disappeared
        self.assertEqual(
            self.screen_name_and_user_id.get_twitter_username,
            "notatwitteraccounteither",
        )
        with capture_output() as (out, err):
            call_command("twitterbot_update_usernames")
        self.assertIsNone(self.just_userid.get_twitter_username, None)
        # Clear the cached_property for this object
        del self.screen_name_and_user_id.get_all_idenfitiers
        self.assertIsNone(self.screen_name_and_user_id.get_twitter_username)
        self.assertEqual(
            self.screen_name_and_user_id.get_twitter_username, None
        )
        self.assertEqual(
            split_output(out),
            [
                "Adding the user ID 321",
                "Removing user ID 987 for Person with just a Twitter user ID "
                "as it is not a valid Twitter user ID. "
                "/person/{}/person-with-just-a-twitter-user-id".format(
                    self.just_userid.id
                ),
                "Removing user ID 765 for Someone with a Twitter screen name "
                "and user ID as it is not a valid Twitter user ID. "
                "/person/{}/someone-with-a-twitter-screen-name-and-user-id".format(
                    self.screen_name_and_user_id.id
                ),
            ],
        )
@override_settings(TWITTER_APP_ONLY_BEARER_TOKEN="madeuptoken")
class TestTwitterSuspendedUser(TestUserMixin, TestCase):
    """How the update command treats suspended vs. deactivated accounts."""
    @patch.object(TwitterAPIData, "update_from_api")
    @patch.object(TwitterBot, "is_user_suspended")
    def test_marked_suspended(self, is_user_suspended, update_from_api):
        """Suspended accounts keep their value but are flagged in extra_data."""
        suspended_person = PersonFactory(name="Suspended User")
        suspended_person.tmp_person_identifiers.create(
            value_type="twitter_username", value="suspendeduser"
        )
        is_user_suspended.return_value = True
        call_command("twitterbot_update_usernames", verbosity=3)
        identifier = suspended_person.tmp_person_identifiers.get(
            value_type="twitter_username"
        )
        update_from_api.assert_called_once()
        is_user_suspended.assert_called_once()
        assert identifier.extra_data["status"] == "suspended"
        assert identifier.value == "suspendeduser"
    @patch.object(TwitterAPIData, "update_from_api")
    @patch.object(TwitterBot, "is_user_suspended")
    def test_identifier_value_removed(self, is_user_suspended, update_from_api):
        """Unknown-but-not-suspended accounts get their value blanked."""
        person = PersonFactory(name="Suspended User")
        person.tmp_person_identifiers.create(
            value_type="twitter_username", value="deactivated"
        )
        is_user_suspended.return_value = False
        call_command("twitterbot_update_usernames", verbosity=3)
        identifier = person.tmp_person_identifiers.get(
            value_type="twitter_username"
        )
        update_from_api.assert_called_once()
        is_user_suspended.assert_called_once()
        assert identifier.value == ""
    @patch.object(TwitterAPIData, "update_from_api")
    @patch.object(TwitterBot, "is_user_suspended")
    def test_identifier_deleted_if_we_have_id(
        self, is_user_suspended, update_from_api
    ):
        """With a stored user ID too, the whole identifier row is deleted."""
        person = PersonFactory(name="Suspended User")
        person.tmp_person_identifiers.create(
            value_type="twitter_username",
            value="deactivated",
            internal_identifier="403",
        )
        is_user_suspended.return_value = False
        call_command("twitterbot_update_usernames", verbosity=3)
        update_from_api.assert_called_once()
        is_user_suspended.assert_called_once()
        assert person.tmp_person_identifiers.count() == 0
    @patch.object(TwitterAPIData, "update_from_api")
    @patch.object(TwitterBot, "is_user_suspended")
    def test_identifier_not_deleted_person_with_id(
        self, is_user_suspended, update_from_api
    ):
        """Suspended accounts with a stored user ID are kept intact."""
        person = PersonFactory(name="Suspended User")
        person.tmp_person_identifiers.create(
            value_type="twitter_username",
            value="suspended",
            internal_identifier="403",
        )
        is_user_suspended.return_value = True
        call_command("twitterbot_update_usernames", verbosity=3)
        identifier = person.tmp_person_identifiers.get(
            value_type="twitter_username"
        )
        update_from_api.assert_called_once()
        is_user_suspended.assert_called_once()
        assert identifier.internal_identifier == "403"
        assert identifier.value == "suspended"
        assert identifier.extra_data["status"] == "suspended"
|
ScreamingUdder/mantid | refs/heads/master | qt/widgets/sliceviewer/doc/sliceviewer_wiki_screenshots.py | 3 | """ Generate screenshots for the wiki docs
"""
# Basic parameters
filename = "TOPAZ_3131_event.nxs"
ws = "TOPAZ"
# Load the raw events, convert to reciprocal space, find and index peaks,
# then produce an HKL-space workspace for the screenshots below.
LoadEventNexus(Filename=filename,OutputWorkspace=ws+"_nxs")
ConvertToDiffractionMDWorkspace(InputWorkspace=ws+"_nxs",OutputWorkspace=ws, LorentzCorrection='1', SplitInto='2',SplitThreshold='150')
FindPeaksMD(InputWorkspace=ws,MaxPeaks='500',OutputWorkspace=ws+'_peaks')
FindUBUsingLatticeParameters(PeaksWorkspace=ws+'_peaks',a='10.3522',b='6.0768',c='4.7276', alpha='90',beta='90',gamma='90', NumInitial='20', Tolerance='0.12')
IndexPeaks(PeaksWorkspace=ws+'_peaks', Tolerance='0.12')
CopySample(InputWorkspace=ws+'_peaks',OutputWorkspace=ws+"_nxs", CopyName='0',CopyMaterial='0',CopyEnvironment='0',CopyShape='0')
ConvertToDiffractionMDWorkspace(InputWorkspace=ws+"_nxs", OutputWorkspace='HKL', OutputDimensions='HKL',LorentzCorrection='1', SplitInto='2',SplitThreshold='150')
#================ Start the Screenshots ==========================
import numpy
import numpy as np
from PyQt4 import Qt
import os
# Sweep the slice point along L, grabbing one PNG frame per step, for the
# animated slice-point GIF.
svw = plotSlice("hkl", slicepoint=[0,0, -5], colorscalelog=True, limits=[-6.5,-3.5, -2.5,0.5])
svw.setColorScaleAutoSlice()
n = 0
for L in numpy.arange(-5.07, -4.9, 0.01):
    svw.setSlicePoint(2, L)
    # Let the widget repaint before grabbing it.
    Qt.QApplication.processEvents()
    pix = Qt.QPixmap.grabWidget(svw._getHeldObject())
    pix.save("/home/8oz/Code/Mantid/Code/Mantid/MantidQt/SliceViewer/doc/anim%02d.png" % n)
    n = n + 1
# This requires imagemagick. Converts to a nifty animated gif.
os.system("convert /home/8oz/Code/Mantid/Code/Mantid/MantidQt/SliceViewer/doc/anim*.png /home/8oz/Code/Mantid/Code/Mantid/MantidQt/SliceViewer/doc/SliceViewer_SlicePoint_Animation.gif")
# ============= Line Viewer Animation ===========================
BinMD(InputWorkspace='TOPAZ',AlignedDim0='Q_lab_x, 0, 6, 120',AlignedDim1='Q_lab_y, -3, 3, 120',AlignedDim2='Q_lab_z, 0, 6, 120', OutputWorkspace='bin_q')
sv = plotSlice('bin_q', slicepoint=[0,0, 4.15], colorscalelog=True, limits=[2,4,-1,1])
n = 0
# Sweep the integration line across y, one captured frame per position.
for y in np.linspace(-0.2, 0.2, 21):
    lv = sv.showLine(start=[2.5,y], end=[3.5,y], width=0.1)
    n += 1
    pix = QtGui.QPixmap.grabWidget(sv._getHeldObject())
    pix.save("/home/8oz/Code/Mantid/Code/Mantid/MantidQt/SliceViewer/doc/lvanim%02d.png" % n)
os.system("convert /home/8oz/Code/Mantid/Code/Mantid/MantidQt/SliceViewer/doc/lvanim*.png /home/8oz/Code/Mantid/Code/Mantid/MantidQt/SliceViewer/doc/LineViewerAnimation.gif")
# ============= Other LineViewer Screenshots ==============
sv = plotSlice('bin_q', slicepoint=[0,0, 4.15], colorscalelog=True, limits=[2,4,-1,1])
# Disable fast rendering for a higher-quality still screenshot.
sv.setFastRender(False)
sv.resize( 800, 550)
Qt.QCoreApplication.processEvents()
lv = sv.showLine(start=[2.5,-0.05], end=[3.5,-0.05], width=0.1)
pix = QtGui.QPixmap.grabWidget(sv._getHeldObject())
pix.save("/home/8oz/Code/Mantid/Code/Mantid/MantidQt/SliceViewer/doc/SliceViewer_and_LineViewer.png")
print dir(sv)
# ============= LineViewer Plot Screenshots ==============
# Fake 4D data with a single peak so the line-viewer plot has structure.
CreateMDWorkspace(Dimensions='4',Extents='0,10,0,10,0,10,0,10',Names='Q_x,Q_y,Q_z,E',Units='Angstrom^-1,Angstrom^-1,Angstrom^-1,meV',SplitInto='2',SplitThreshold='100', MaxRecursionDepth='20',OutputWorkspace='ws_4d')
FakeMDEventData(InputWorkspace='ws_4d',UniformParams='1e+06')
FakeMDEventData(InputWorkspace='ws_4d',PeakParams='30000,6,7,5,5,1')
sv = plotSlice('ws_4d', xydim=[2,3], slicepoint=[6,7, 0,0], colorscalelog=True)
sv.setFastRender(False)
lv = sv.showLine(start=[8,2], end=[2,8], width=1)
closeAllSliceViewers()
|
ahmed-mahran/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/django/contrib/gis/gdal/error.py | 466 | """
This module houses the OGR & SRS Exception objects, and the
check_err() routine which checks the status code returned by
OGR methods.
"""
#### OGR & SRS Exceptions ####
class GDALException(Exception):
    "General GDAL-related exception."
    pass

class OGRException(Exception):
    "Exception raised for errors reported by the OGR C API."
    pass

class SRSException(Exception):
    "Exception raised for spatial reference system (SRS) errors."
    pass

class OGRIndexError(OGRException, KeyError):
    """
    This exception is raised when an invalid index is encountered, and has
    the 'silent_variable_failure' attribute set to true. This ensures that
    django's templates proceed to use the next lookup type gracefully when
    an Exception is raised. Fixes ticket #4740.
    """
    # NOTE: the docstring previously named a nonexistent
    # 'silent_variable_feature' attribute; the real attribute is below.
    silent_variable_failure = True

#### OGR error checking codes and routine ####

# OGR Error Codes: maps each OGRERR status to the exception class raised
# for it and the human-readable message used.
OGRERR_DICT = { 1 : (OGRException, 'Not enough data.'),
                2 : (OGRException, 'Not enough memory.'),
                3 : (OGRException, 'Unsupported geometry type.'),
                4 : (OGRException, 'Unsupported operation.'),
                5 : (OGRException, 'Corrupt data.'),
                6 : (OGRException, 'OGR failure.'),
                7 : (SRSException, 'Unsupported SRS.'),
                8 : (OGRException, 'Invalid handle.'),
                }
# Status code meaning "no error"; check_err() returns silently for it.
OGRERR_NONE = 0

def check_err(code):
    """Check the given OGRERR status, raising the mapped exception (with its
    descriptive message) for any non-success code; unknown codes raise a
    generic OGRException."""
    if code == OGRERR_NONE:
        return
    elif code in OGRERR_DICT:
        e, msg = OGRERR_DICT[code]
        raise e(msg)
    else:
        raise OGRException('Unknown error code: "%s"' % code)
|
InfinitiveOS-Devices/android_kernel_sony_msm8930 | refs/heads/io-1.0 | tools/perf/scripts/python/sctop.py | 11180 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
# Optional CLI args: [comm] restricts counting to one process name,
# [interval] sets the display refresh period in seconds (default 3).
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
	sys.exit(usage)
if len(sys.argv) > 2:
	for_comm = sys.argv[1]
	interval = int(sys.argv[2])
elif len(sys.argv) > 1:
	try:
		interval = int(sys.argv[1])
	except ValueError:
		# Sole argument wasn't a number, so treat it as the comm filter.
		for_comm = sys.argv[1]
		interval = default_interval
# Per-syscall-id hit counters, filled in by the sys_enter hook below.
syscalls = autodict()
def trace_begin():
	# Refresh the totals display from a background thread so that event
	# processing is never blocked by terminal output.
	thread.start_new_thread(print_syscall_totals, (interval,))
	pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# perf calls this for every syscall entry; bump the counter for this
	# syscall id, optionally only for the filtered comm.
	if for_comm is not None:
		if common_comm != for_comm:
			return
	try:
		syscalls[id] += 1
	except TypeError:
		# First hit for this id: the autodict leaf isn't an int yet.
		syscalls[id] = 1
def print_syscall_totals(interval):
	# Loop forever: clear the screen, print the counters sorted by count
	# (descending), reset them, then sleep for the refresh interval.
	while 1:
		clear_term()
		if for_comm is not None:
			print "\nsyscall events for %s:\n\n" % (for_comm),
		else:
			print "\nsyscall events:\n\n",
		print "%-40s %10s\n" % ("event", "count"),
		print "%-40s %10s\n" % ("----------------------------------------", \
			"----------"),
		for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
			reverse = True):
			try:
				print "%-40s %10d\n" % (syscall_name(id), val),
			except TypeError:
				pass
		syscalls.clear()
		time.sleep(interval)
|
esplinr/foodcheck | refs/heads/master | wsgi/foodcheck_proj/south/tests/deps_b/migrations/0002_b.py | 348 | from south.db import db
from django.db import models
class Migration:
    """No-op South migration whose only purpose is a dependency edge.

    Declaring ``depends_on`` forces the deps_a app's 0002_a migration to
    be applied before this one.
    """

    # Run after migration 0002_a of the deps_a app.
    depends_on = [('deps_a', '0002_a')]

    def forwards(self):
        """Apply the migration -- intentionally does nothing."""

    def backwards(self):
        """Reverse the migration -- intentionally does nothing."""
|
AngelkPetkov/titanium_mobile | refs/heads/master | support/module/mobileweb/mobileweb.py | 36 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Mobile Web Module Project Create Script
#
import os,sys,shutil
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
module_dir = os.path.dirname(template_dir)
sys.path.append(module_dir)
import module
def rmdir(path):
    """Delete the directory tree at *path* if it exists, swallowing errors."""
    if not os.path.exists(path):
        return
    # Second argument of rmtree is ignore_errors; keep the tolerant delete.
    shutil.rmtree(path, ignore_errors=True)
class mobileweb(module.ModulePlatform):
    """Mobile Web platform hook for the module-creation script."""
    def finished(self):
        """After project creation, strip folders Mobile Web modules don't use."""
        for subdir in ('assets', 'hooks', 'platform'):
            rmdir(os.path.join(self.project_dir, subdir))
|
ryano144/intellij-community | refs/heads/master | python/testData/inspections/PyUnboundLocalVariableInspection/DefaultArgument.py | 83 | def f():
z = 2
def g(z=z): #pass
return z
return g
|
ge0rgi/cinder | refs/heads/stable/ocata | doc/ext/cinder_autodoc.py | 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import gettext
import os
gettext.install('cinder')
from cinder import utils
def setup(app):
    """Sphinx extension entry point: regenerate cinder's autodoc index.

    Runs doc/generate_autodoc_index.sh from the current working directory
    and echoes its stdout into the build log.
    """
    here = os.path.abspath(os.curdir)
    print("**Autodocumenting from %s" % here)
    output = utils.execute('./doc/generate_autodoc_index.sh')
    print(output[0])
|
alex/gunicorn | refs/heads/master | examples/frameworks/pylonstest/pylonstest/controllers/error.py | 29 | import cgi
from paste.urlparser import PkgResourcesParser
from pylons import request
from pylons.controllers.util import forward
from pylons.middleware import error_document_template
from webhelpers.html.builder import literal
from pylonstest.lib.base import BaseController
class ErrorController(BaseController):
    """Generates error documents as and when they are required.
    The ErrorDocuments middleware forwards to ErrorController when error
    related status codes are returned from the application.
    This behaviour can be altered by changing the parameters to the
    ErrorDocuments middleware in your config/middleware.py file.
    """
    def document(self):
        """Render the error document"""
        resp = request.environ.get('pylons.original_response')
        # Prefer the original response body (already HTML); otherwise fall
        # back to the escaped 'message' query parameter.
        content = literal(resp.body) or cgi.escape(request.GET.get('message', ''))
        page = error_document_template % \
            dict(prefix=request.environ.get('SCRIPT_NAME', ''),
                code=cgi.escape(request.GET.get('code', str(resp.status_int))),
                message=content)
        return page
    def img(self, id):
        """Serve Pylons' stock images"""
        return self._serve_file('/'.join(['media/img', id]))
    def style(self, id):
        """Serve Pylons' stock stylesheets"""
        return self._serve_file('/'.join(['media/style', id]))
    def _serve_file(self, path):
        """Call Paste's FileApp (a WSGI application) to serve the file
        at the specified path
        """
        # Rewrite PATH_INFO so PkgResourcesParser resolves *path* inside
        # the installed pylons package.
        request.environ['PATH_INFO'] = '/%s' % path
        return forward(PkgResourcesParser('pylons', 'pylons'))
|
AnimeshSinha1309/Website-Edunet | refs/heads/master | WebsiteEdunet/env/Lib/site-packages/django/contrib/messages/storage/fallback.py | 704 | from django.contrib.messages.storage.base import BaseStorage
from django.contrib.messages.storage.cookie import CookieStorage
from django.contrib.messages.storage.session import SessionStorage
class FallbackStorage(BaseStorage):
    """
    Tries to store all messages in the first backend, storing any unstored
    messages in each subsequent backend.
    """
    # Backends in preference order; the cookie is tried before the session.
    storage_classes = (CookieStorage, SessionStorage)
    def __init__(self, *args, **kwargs):
        super(FallbackStorage, self).__init__(*args, **kwargs)
        # One backend instance per class, in preference order.
        self.storages = [storage_class(*args, **kwargs)
            for storage_class in self.storage_classes]
        # Backends that actually held messages during _get(); these must be
        # flushed on store even when nothing remains to be written.
        self._used_storages = set()
    def _get(self, *args, **kwargs):
        """
        Gets a single list of messages from all storage backends.
        """
        all_messages = []
        for storage in self.storages:
            messages, all_retrieved = storage._get()
            # If the backend hasn't been used, no more retrieval is necessary.
            if messages is None:
                break
            if messages:
                self._used_storages.add(storage)
            all_messages.extend(messages)
            # If this storage class contained all the messages, no further
            # retrieval is necessary
            if all_retrieved:
                break
        return all_messages, all_retrieved
    def _store(self, messages, response, *args, **kwargs):
        """
        Stores the messages, returning any unstored messages after trying all
        backends.
        For each storage backend, any messages not stored are passed on to the
        next backend.
        """
        for storage in self.storages:
            if messages:
                messages = storage._store(messages, response,
                    remove_oldest=False)
            # Even if there are no more messages, continue iterating to ensure
            # storages which contained messages are flushed.
            elif storage in self._used_storages:
                storage._store([], response)
                self._used_storages.remove(storage)
        return messages
|
pjdelport/django-crispy-forms | refs/heads/dev | crispy_forms/helper.py | 1 | from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.safestring import mark_safe
from layout import Layout, LayoutSlice
from utils import render_field
class FormHelpersException(Exception):
    """
    Raised when building a form via helpers goes wrong.

    Helper misconfiguration is surfaced eagerly through this exception
    because debugging failures inside templatetags is never fun.
    """
    pass
class DynamicLayoutHandler(object):
    """Mixin exposing slicing/filtering helpers over ``self.layout``."""
    def all(self):
        """
        Returns all layout objects of first level of depth
        """
        return LayoutSlice(self.layout, slice(0, len(self.layout.fields), 1))
    def filter(self, LayoutClass):
        """
        Returns a LayoutSlice pointing to layout objects of type `LayoutClass`
        """
        # Collect the indices of first-level layout objects of that type.
        filtered_fields = []
        for i in range(len(self.layout.fields)):
            if isinstance(self.layout.fields[i], LayoutClass):
                filtered_fields.append(i)
        return LayoutSlice(self.layout, filtered_fields)
    def filter_by_widget(self, widget_type):
        """
        Returns a LayoutSlice pointing to fields with widgets of `widget_type`
        """
        # Needs both a bound form and a layout (provided by the subclass).
        assert(self.layout is not None and self.form is not None)
        layout_field_names = self.layout.get_field_names()
        # Let's filter all fields with widgets like widget_type
        filtered_fields = []
        for pointer in layout_field_names:
            # pointer[1] holds the field name used to look the widget up.
            if isinstance(self.form.fields[pointer[1]].widget, widget_type):
                filtered_fields.append(pointer)
        return LayoutSlice(self.layout, filtered_fields)
    def __getitem__(self, key):
        """
        Return a LayoutSlice that makes changes affect the current instance of the layout
        and not a copy.
        """
        assert(self.layout is not None)
        # when key is a string containing the field name
        if isinstance(key, basestring):
            layout_field_names = self.layout.get_field_names()
            filtered_field = []
            for pointer in layout_field_names:
                if pointer[1] == key:
                    filtered_field.append(pointer)
            return LayoutSlice(self.layout, filtered_field)
        return LayoutSlice(self.layout, key)
class FormHelper(DynamicLayoutHandler):
    """
    This class controls the form rendering behavior of the form passed to
    the `{% crispy %}` tag. For doing so you will need to set its attributes
    and pass the corresponding helper object to the tag::
        {% crispy form form.helper %}
    Let's see what attributes you can set and what form behaviors they apply to:
    **form_method**: Specifies form method attribute.
    You can set it to 'POST' or 'GET'. Defaults to 'POST'
    **form_action**: Applied to the form action attribute:
    - Can be a named url in your URLconf that can be executed via the `{% url %}` template tag. \
    Example: 'show_my_profile'. In your URLconf you could have something like::
        url(r'^show/profile/$', 'show_my_profile_view', name = 'show_my_profile')
    - It can simply point to a URL '/whatever/blabla/'.
    **form_id**: Generates a form id for dom identification.
    If no id provided then no id attribute is created on the form.
    **form_class**: String containing separated CSS classes to be applied
    to form class attribute. The form will always have by default
    'uniForm' class.
    **form_tag**: It specifies if <form></form> tags should be rendered when using a Layout.
    If set to False it renders the form without the <form></form> tags. Defaults to True.
    **form_error_title**: If a form has `non_field_errors` to display, they
    are rendered in a div. You can set title's div with this attribute.
    Example: "Oooops!" or "Form Errors"
    **formset_error_title**: If a formset has `non_form_errors` to display, they
    are rendered in a div. You can set title's div with this attribute.
    **form_style**: Uni-form has two built in different form styles. You can choose
    your favorite. This can be set to "default" or "inline". Defaults to "default".
    Public Methods:
    **add_input(input)**: You can add input buttons using this method. Inputs
    added using this method will be rendered at the end of the form/formset.
    **add_layout(layout)**: You can add a `Layout` object to `FormHelper`. The Layout
    specifies in a simple, clean and DRY way how the form fields should be rendered.
    You can wrap fields, order them, customize pretty much anything in the form.
    Best way to add a helper to a form is adding a property named helper to the form
    that returns customized `FormHelper` object::
        from crispy_forms.helper import FormHelper
        from crispy_forms.layout import Submit
        class MyForm(forms.Form):
            title = forms.CharField(_("Title"))
            @property
            def helper(self):
                helper = FormHelper()
                helper.form_id = 'this-form-rocks'
                helper.form_class = 'search'
                helper.add_input(Submit('save', 'save'))
                [...]
                return helper
    You can use it in a template doing::
        {% load crispy_forms_tags %}
        {% crispy form %}
    """
    # Backing values for the form_method/form_action/form_style properties
    # defined further down the class.
    _form_method = 'post'
    _form_action = ''
    _form_style = 'default'
    form = None
    form_id = ''
    form_class = ''
    layout = None
    # Whether the surrounding <form> tag itself is rendered.
    form_tag = True
    form_error_title = None
    formset_error_title = None
    form_show_errors = True
    # When True, fields missing from the layout are rendered anyway.
    render_unmentioned_fields = False
    help_text_inline = False
    def __init__(self, form=None):
        """Optionally bind to *form*, building a default layout from its fields."""
        self.inputs = []
        if form is not None:
            self.form = form
            self.layout = self.build_default_layout(form)
    def build_default_layout(self, form):
        """Return a Layout listing every field of *form*, in declaration order."""
        return Layout(*form.fields.keys())
    def get_form_method(self):
        """Getter backing the ``form_method`` property."""
        return self._form_method
    def set_form_method(self, method):
        # Only GET and POST make sense for an HTML form; normalise to lowercase.
        if method.lower() not in ('get', 'post'):
            raise FormHelpersException('Only GET and POST are valid in the \
                    form_method helper attribute')
        self._form_method = method.lower()
    # we set properties the old way because we want to support pre-2.6 python
    form_method = property(get_form_method, set_form_method)
def get_form_action(self):
    """Resolve the stored action as a named URL where possible.

    Falls back to the raw stored value when it is not a reversible
    URL name.
    """
    action = self._form_action
    try:
        return reverse(action)
    except NoReverseMatch:
        return action
def set_form_action(self, action):
    """Store the raw action; resolution to a URL happens in the getter."""
    self._form_action = action
# we set properties the old way because we want to support pre-2.6 python
# (the @x.setter decorator syntax did not exist before 2.6)
form_action = property(get_form_action, set_form_action)
def get_form_style(self):
    """Map the stored uni-form style onto the CSS class uni-form expects."""
    css_by_style = {'default': '', 'inline': 'inlineLabels'}
    return css_by_style.get(self._form_style)
def set_form_style(self, style):
    """Set the uni-form rendering style.

    :param style: "default" or "inline" (case-insensitive).
    :raises FormHelpersException: for any other value.
    """
    # Normalize once; the old error message was a backslash-continued
    # string literal that embedded indentation whitespace mid-sentence.
    style = style.lower()
    if style not in ('default', 'inline'):
        raise FormHelpersException(
            'Only default and inline are valid in the form_style helper attribute')
    self._form_style = style
# Old-style property for pre-2.6 compatibility (no @x.setter syntax there).
form_style = property(get_form_style, set_form_style)
def add_input(self, input_object):
    """Queue an input button to be rendered at the end of the form/formset."""
    self.inputs.append(input_object)
def add_layout(self, layout):
    """Attach *layout*, replacing any layout previously set on the helper."""
    self.layout = layout
def render_layout(self, form, context):
    """
    Returns safe html of the rendering of the layout

    Side effect: initializes ``form.rendered_fields`` (a set) which the
    layout/render_field machinery fills in as each field is emitted; the
    set is then used below to find fields still left to render.
    """
    form.rendered_fields = set()

    # This renders the specified Layout
    html = self.layout.render(form, self.form_style, context)

    # Append any form field the layout did not mention, when asked to.
    if self.render_unmentioned_fields:
        fields = set(form.fields.keys())
        left_fields_to_render = fields - form.rendered_fields
        for field in left_fields_to_render:
            html += render_field(field, form, self.form_style, context)

    # If the user has meta fields defined, not included in the layout,
    # we suppose they need to be rendered. Otherwise we render the
    # layout fields strictly.
    if getattr(form, 'Meta', None):
        fields = set(getattr(form.Meta, 'fields', []))
        # Take the fields from the instance since the user might have deleted some
        current_fields = set(getattr(form, 'fields', []))
        exclude = set(getattr(form.Meta, 'exclude', []))
        # Left-assoc set arithmetic: (fields & current) - exclude - rendered.
        left_fields_to_render = fields & current_fields - exclude - form.rendered_fields
        for field in left_fields_to_render:
            html += render_field(field, form, self.form_style, context)

    return mark_safe(html)
def get_attributes(self):
    """
    Used by crispy_forms_tags to get helper attributes
    """
    # Always-present attributes first.
    items = {
        'form_method': self.form_method.strip(),
        'form_tag': self.form_tag,
        'form_style': self.form_style.strip(),
        'form_show_errors': self.form_show_errors,
        'help_text_inline': self.help_text_inline,
    }

    # Optional attributes, only emitted when truthy.
    if self.form_action:
        items['form_action'] = self.form_action.strip()
    if self.form_id:
        items['id'] = self.form_id.strip()
    if self.form_class:
        items['class'] = self.form_class.strip()
    if self.inputs:
        items['inputs'] = self.inputs
    if self.form_error_title:
        items['form_error_title'] = self.form_error_title.strip()
    if self.formset_error_title:
        items['formset_error_title'] = self.formset_error_title.strip()

    # Expose any extra public attribute set directly on the instance,
    # without clobbering the keys collected above.
    reserved = ('layout', 'inputs')
    for attribute_name, value in self.__dict__.items():
        if attribute_name.startswith('_'):
            continue
        if attribute_name in reserved or attribute_name in items:
            continue
        items[attribute_name] = value
    return items
|
sloria/osf.io | refs/heads/develop | website/closed_challenges/views.py | 25 | # -*- coding: utf-8 -*-
def erpc_landing_page(**kwargs):
    """View stub for the ERPC landing page: the template needs no context."""
    context = dict()
    return context
|
ajaali/django | refs/heads/master | tests/custom_methods/models.py | 343 | """
Giving models custom methods
Any method you add to a model will be available to instances.
"""
import datetime
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Article(models.Model):
    # Demonstration model: instances gain the custom methods defined below.
    headline = models.CharField(max_length=100)
    pub_date = models.DateField()

    def __str__(self):
        return self.headline

    def was_published_today(self):
        # True when the article's publication date equals today's date.
        return self.pub_date == datetime.date.today()

    def articles_from_same_day_1(self):
        # ORM version: every other article sharing this pub_date.
        return Article.objects.filter(pub_date=self.pub_date).exclude(id=self.id)

    def articles_from_same_day_2(self):
        """
        Verbose version of get_articles_from_same_day_1, which does a custom
        database query for the sake of demonstration.
        """
        from django.db import connection
        with connection.cursor() as cursor:
            # Raw SQL equivalent of articles_from_same_day_1; the date is
            # adapted to the backend's expected literal form.
            cursor.execute("""
SELECT id, headline, pub_date
FROM custom_methods_article
WHERE pub_date = %s
AND id != %s""", [connection.ops.adapt_datefield_value(self.pub_date),
                  self.id])
            # Rebuild model instances positionally from (id, headline, pub_date).
            return [self.__class__(*row) for row in cursor.fetchall()]
|
xiandiancloud/edx-platform-Y | refs/heads/master | common/lib/xmodule/xmodule/timeinfo.py | 63 | import logging
from xmodule.fields import Timedelta
log = logging.getLogger(__name__)
class TimeInfo(object):
    """
    This is a simple object that calculates and stores datetime information for an XModule
    based on the due date and the grace period string

    So far it parses out three different pieces of time information:
        self.display_due_date - the 'official' due date that gets displayed to students
        self.grace_period - the length of the grace period
        self.close_date - the real due date
    """
    # Shared Timedelta field instance, used only for its from_json()
    # string parser.
    _delta_standin = Timedelta()

    def __init__(self, due_date, grace_period_string_or_timedelta):
        """
        :param due_date: datetime (or None for "no deadline").
        :param grace_period_string_or_timedelta: either a timedelta, or a
            string parseable by Timedelta.from_json(), or None.
        :raises: whatever from_json() raises for an unparseable string
            (logged before re-raising).
        """
        # The old code branched on `due_date is not None` only to assign
        # either due_date or None -- a plain assignment is equivalent.
        self.display_due_date = due_date

        if grace_period_string_or_timedelta is not None and self.display_due_date:
            if isinstance(grace_period_string_or_timedelta, basestring):
                try:
                    self.grace_period = TimeInfo._delta_standin.from_json(grace_period_string_or_timedelta)
                except Exception:
                    # Narrowed from a bare `except:` so KeyboardInterrupt /
                    # SystemExit are no longer logged as parse errors; the
                    # original exception is still re-raised.
                    log.error("Error parsing the grace period {0}".format(grace_period_string_or_timedelta))
                    raise
            else:
                self.grace_period = grace_period_string_or_timedelta

            self.close_date = self.display_due_date + self.grace_period
        else:
            # No grace period or no due date: the close date is simply the
            # (possibly None) display due date.
            self.grace_period = None
            self.close_date = self.display_due_date
|
bucricket/projectMASlst | refs/heads/master | processlst/processlst.py | 1 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 12 11:51:03 2017
@author: mschull
"""
import os
import glob
import subprocess
import sys
import pyrttov
import argparse
import pycurl
import keyring
import getpass
import ftplib
from .processData import Landsat,RTTOV
from .utils import folders,untar,getFile
from .lndlst_dms import getSharpenedLST
# Resolve the working-directory-relative folder layout once at import time.
base = os.getcwd()
Folders = folders(base)
landsat_SR = Folders['landsat_SR']      # surface-reflectance inputs
landsat_LST = Folders['landsat_LST']    # final sharpened LST products
landsat_temp = Folders['landsat_Temp']  # scratch space for intermediates
def runRTTOV(profileDict):
    """Run the RTTOV direct model for the Landsat-8 TIRS instrument.

    :param profileDict: dict of numpy arrays holding the RTTOV profile
        fields ('P', 'T', 'Q', 'Angles', 'S2m', 'Skin', 'SurfType',
        'SurfGeom', 'Datetimes'); axis 0 indexes profiles and, for the
        atmospheric fields, axis 1 indexes pressure levels.
    :returns: the configured pyrttov.Rttov instance after a successful
        direct-model run.

    NOTE(review): on RTTOV errors this writes to stderr and calls
    sys.exit(1) instead of raising -- confirm callers rely on that.
    """
    nlevels = profileDict['P'].shape[1]
    nprofiles = profileDict['P'].shape[0]
    myProfiles = pyrttov.Profiles(nprofiles, nlevels)
    # Gas-unit code passed straight through to RTTOV; see the RTTOV
    # wrapper documentation for the meaning of code 2 -- TODO confirm.
    myProfiles.GasUnits = 2
    myProfiles.P = profileDict['P']
    myProfiles.T = profileDict['T']
    myProfiles.Q = profileDict['Q']
    myProfiles.Angles = profileDict['Angles']
    myProfiles.S2m = profileDict['S2m']
    myProfiles.Skin = profileDict['Skin']
    myProfiles.SurfType = profileDict['SurfType']
    myProfiles.SurfGeom =profileDict['SurfGeom']
    myProfiles.DateTimes = profileDict['Datetimes']
    # Month of the first profile; used below to load the IR emissivity atlas.
    month = profileDict['Datetimes'][0,1]

    # ------------------------------------------------------------------------
    # Set up Rttov instance
    # ------------------------------------------------------------------------
    # Create Rttov object for the TIRS instrument
    tirsRttov = pyrttov.Rttov()
    # nchan_tirs = 1
    # Set the options for each Rttov instance:
    # - the path to the coefficient file must always be specified
    # - specify paths to the emissivity and BRDF atlas data in order to use
    #   the atlases (the BRDF atlas is only used for VIS/NIR channels so here
    #   it is unnecessary for HIRS or MHS)
    # - turn RTTOV interpolation on (because input pressure levels differ from
    #   coefficient file levels)
    # - set the verbose_wrapper flag to true so the wrapper provides more
    #   information
    # - enable solar simulations for SEVIRI
    # - enable CO2 simulations for HIRS (the CO2 profiles are ignored for
    #   the SEVIRI and MHS simulations)
    # - enable the store_trans wrapper option for MHS to provide access to
    #   RTTOV transmission structure

    # Locate the environment's 'share' tree relative to the installed
    # pyrttov package (six path components up) -- assumes a conda-style
    # layout; presumably fragile if pyrttov is installed elsewhere.
    s = pyrttov.__file__
    envPath = os.sep.join(s.split(os.sep)[:-6])
    rttovPath = os.path.join(envPath,'share')
    rttovCoeffPath = os.path.join(rttovPath,'rttov')
    rttovEmisPath = os.path.join(rttovCoeffPath,'emis_data')
    rttovBRDFPath = os.path.join(rttovCoeffPath,'brdf_data')
    # First run only: fetch and unpack the emissivity/BRDF atlases by
    # anonymous FTP, then make the HDF5 files readable.
    if not os.path.exists(rttovBRDFPath):
        print("downloading atlases.....")
        ftp = ftplib.FTP("ftp.star.nesdis.noaa.gov")
        ftp.login("anonymous", "")
        ftp.cwd('/pub/smcd/emb/mschull/') # change directory to /pub/
        getFile(ftp,'rttov_atlas.tar')
        ftp.quit()
        untar('rttov_atlas.tar',rttovPath)
        subprocess.check_output("chmod 755 %s%s*.H5" % (rttovEmisPath,os.sep), shell=True)
        subprocess.check_output("chmod 755 %s%s*.H5" % (rttovBRDFPath,os.sep), shell=True)

    tirsRttov.FileCoef = '{}/{}'.format(rttovCoeffPath,"rtcoef_landsat_8_tirs.dat")
    tirsRttov.EmisAtlasPath = rttovEmisPath
    tirsRttov.BrdfAtlasPath = rttovBRDFPath

    tirsRttov.Options.AddInterp = True
    tirsRttov.Options.StoreTrans = True
    tirsRttov.Options.StoreRad2 = True
    tirsRttov.Options.VerboseWrapper = True

    # Load the instruments:
    try:
        tirsRttov.loadInst()
    except pyrttov.RttovError as e:
        sys.stderr.write("Error loading instrument(s): {!s}".format(e))
        sys.exit(1)

    # Associate the profiles with each Rttov instance
    tirsRttov.Profiles = myProfiles

    # ------------------------------------------------------------------------
    # Load the emissivity and BRDF atlases
    # ------------------------------------------------------------------------
    # Load the emissivity and BRDF atlases:
    # - load data for August (month=8)
    # - note that we only need to load the IR emissivity once and it is
    #   available for both SEVIRI and HIRS: we could use either the seviriRttov
    #   or hirsRttov object to do this
    # - for the BRDF atlas, since SEVIRI is the only VIS/NIR instrument we can
    #   use the single-instrument initialisation
    tirsRttov.irEmisAtlasSetup(month)

    # ------------------------------------------------------------------------
    # Call RTTOV
    # ------------------------------------------------------------------------
    # Since we want the emissivity/reflectance to be calculated, the
    # SurfEmisRefl attribute of the Rttov objects are left uninitialised:
    # That way they will be automatically initialise to -1 by the wrapper

    # Call the RTTOV direct model for each instrument:
    # no arguments are supplied to runDirect so all loaded channels are
    # simulated
    try:
        tirsRttov.runDirect()
    except pyrttov.RttovError as e:
        sys.stderr.write("Error running RTTOV direct model: {!s}".format(e))
        sys.exit(1)

    return tirsRttov
def get_lst(earth_user,earth_pass):
    """Process every Landsat scene staged in landsat_temp to sharpened LST.

    For each *_MTL.txt scene: run RTTOV-based atmospheric correction to
    produce an LST GeoTIFF (skipped if it already exists), sharpen it,
    and move the result into the landsat_LST output folder.

    :param earth_user: NASA Earthdata login username.
    :param earth_pass: NASA Earthdata login password.
    """
    sceneIDlist = glob.glob(os.path.join(landsat_temp,'*_MTL.txt'))
    # ------------------------------------------------------------------------
    # Set up the profile data
    # ------------------------------------------------------------------------
    for i in xrange(len(sceneIDlist)):
        inFN = sceneIDlist[i]
        landsat = Landsat(inFN,username = earth_user,
                          password = earth_pass)
        rttov = RTTOV(inFN,username = earth_user,
                      password = earth_pass)
        tifFile = os.path.join(landsat_temp,'%s_lst.tiff'% landsat.sceneID)
        binFile = os.path.join(landsat_temp,"lndsr."+landsat.sceneID+".cband6.bin")
        # NOTE(review): block structure reconstructed -- the RTTOV run and
        # ENVI conversion are assumed to be skipped when the LST tiff
        # already exists; confirm against the original file's indentation.
        if not os.path.exists(tifFile):
            profileDict = rttov.preparePROFILEdata()
            tiirsRttov = runRTTOV(profileDict)
            landsat.processLandsatLST(tiirsRttov,profileDict)
            subprocess.call(["gdal_translate","-of", "ENVI", "%s" % tifFile, "%s" % binFile])
        #=====sharpen the corrected LST==========================================
        getSharpenedLST(inFN)
        #=====move files to their respective directories and remove temp
        binFN = os.path.join(landsat_temp,'%s.sharpened_band6.bin' % landsat.sceneID)
        tifFN = os.path.join(landsat_LST,'%s_lstSharp.tiff' % landsat.sceneID)
        subprocess.call(["gdal_translate", "-of","GTiff","%s" % binFN,"%s" % tifFN])
def main():
    """Parse Earthdata credentials and run the LST processing chain.

    The credentials are optional positionals: when omitted, the user is
    prompted and the password is cached in (or read back from) the system
    keyring.  Previously both were *required* positionals, so the
    getpass/keyring fallback below was unreachable dead code.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("earth_user", type=str, nargs='?', default=None,
                        help="earth Login Username")
    parser.add_argument("earth_pass", type=str, nargs='?', default=None,
                        help="earth Login Password")
    args = parser.parse_args()
    earth_user = args.earth_user
    earth_pass = args.earth_pass

    # =====earthData credentials===============
    if earth_user is None:
        earth_user = str(getpass.getpass(prompt="earth login username:"))
    if earth_pass is None:
        # Fall back to the keyring; prompt and cache on a cache miss.
        if keyring.get_password("nasa", earth_user) is None:
            earth_pass = str(getpass.getpass(prompt="earth login password:"))
            keyring.set_password("nasa", earth_user, earth_pass)
        else:
            earth_pass = str(keyring.get_password("nasa", earth_user))

    get_lst(earth_user, earth_pass)
if __name__ == "__main__":
    try:
        main()
    except (KeyboardInterrupt, pycurl.error):
        # The old call was exit('msg', 1): exit() takes a single argument,
        # so it raised TypeError and the message was never shown.
        # sys.exit with a string prints it to stderr and exits with status 1.
        sys.exit('Received Ctrl + C... Exiting! Bye.')
wildfireone/ghost | refs/heads/master | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/styles/__init__.py | 364 | # -*- coding: utf-8 -*-
"""
pygments.styles
~~~~~~~~~~~~~~~
Contains built-in styles.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.plugin import find_plugin_styles
from pygments.util import ClassNotFound
#: Maps style names to 'submodule::classname'.
#: Maps style names to 'submodule::classname'.
#: Keys are the public names accepted by get_style_by_name(); each value
#: names the module under pygments.styles and the class to import from it.
STYLE_MAP = {
    'default':  'default::DefaultStyle',
    'emacs':    'emacs::EmacsStyle',
    'friendly': 'friendly::FriendlyStyle',
    'colorful': 'colorful::ColorfulStyle',
    'autumn':   'autumn::AutumnStyle',
    'murphy':   'murphy::MurphyStyle',
    'manni':    'manni::ManniStyle',
    'monokai':  'monokai::MonokaiStyle',
    'perldoc':  'perldoc::PerldocStyle',
    'pastie':   'pastie::PastieStyle',
    'borland':  'borland::BorlandStyle',
    'trac':     'trac::TracStyle',
    'native':   'native::NativeStyle',
    'fruity':   'fruity::FruityStyle',
    'bw':       'bw::BlackWhiteStyle',
    'vim':      'vim::VimStyle',
    'vs':       'vs::VisualStudioStyle',
    'tango':    'tango::TangoStyle',
    'rrt':      'rrt::RrtStyle',
}
def get_style_by_name(name):
    """Return the style class registered under *name*.

    Builtin styles (STYLE_MAP) and plugin-provided styles are checked
    first; unknown names are then tried as ad-hoc modules dropped into
    the pygments.styles package.

    :raises ClassNotFound: when no matching module or class exists.
    """
    builtin = ""
    if name in STYLE_MAP:
        mod, cls = STYLE_MAP[name].split('::')
        builtin = "yes"
    else:
        for found_name, style in find_plugin_styles():
            if name == found_name:
                return style
        # perhaps it got dropped into our styles package
        mod = name
        cls = name.title() + "Style"

    try:
        mod = __import__('pygments.styles.' + mod, None, None, [cls])
    except ImportError:
        raise ClassNotFound("Could not find style module %r" % mod +
                            (builtin and ", though it should be builtin") + ".")
    try:
        return getattr(mod, cls)
    except AttributeError:
        raise ClassNotFound("Could not find style class %r in style module." % cls)
def get_all_styles():
    """Return an generator for all styles by name,
    both builtin and plugin."""
    for builtin_name in STYLE_MAP:
        yield builtin_name
    for plugin_name, _style in find_plugin_styles():
        yield plugin_name
|
vauxoo-dev/e-commerce | refs/heads/8.0 | website_sale_product_brand/__openerp__.py | 7 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today Serpent Consulting Services Pvt. Ltd.
# (<http://www.serpentcs.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
# Odoo/OpenERP addon manifest: declares the website product-brand
# filtering module, its dependencies and data files.
{
    'name': 'Product Brand Filtering in Website',
    'category': 'e-commerce',
    'author': "Serpent Consulting Services Pvt. Ltd,"
              "Odoo Community Association (OCA)",
    'website': 'http://www.serpentcs.com',
    'version': '8.0.1.0.0',
    # Requires the base brand model plus the website shop it extends.
    'depends': [
        'product_brand',
        'website_sale'
    ],
    # Access rules and QWeb/view definitions loaded at install time.
    'data': [
        "security/ir.model.access.csv",
        "views/product_brand.xml",
    ],
    'installable': True,
    'auto_install': False,
}
|
iut-ibk/DynaMind-UrbanSim | refs/heads/master | 3rdparty/opus/src/urbansim_parcel/household/tests/__init__.py | 538 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
|
FluidityProject/multifluids | refs/heads/master | tests/channel-flow-dg/channel_viscous.py | 2 | import os
from fluidity_tools import stat_parser
from sympy import *
from numpy import array,max,abs
# Gmsh .geo template for the unit-square channel mesh.  The <dx> and
# <layers> placeholders are substituted by generate_meshfile() below:
# two vertical edges are extruded with <layers> structured points and the
# interior is filled with an unstructured plane surface of size <dx>.
meshtemplate='''
Point(1) = {0, 0, 0, <dx>};
Extrude {0, 1, 0} {
Point{1};Layers{<layers>};
}
Point(3) = {1, 0, 0, <dx>};
Extrude {0, 1, 0} {
Point{3};Layers{<layers>};
}
Line(3)={1,3};
Line(4)={2,4};
Line Loop(5) = {4, -2, -3, 1};
Plane Surface(6) = {5};
Physical Line(1) = {1};
Physical Line(2) = {2};
Physical Line(3) = {4, 3};
Physical Surface(1) = {6};
'''
def generate_meshfile(name, layers):
    """Write <name>.geo from the template and convert it to a triangle mesh.

    :param name: basename for the generated .geo/.msh files.
    :param layers: number of mesh points across the channel (dx = 1/layers).
    """
    geo = meshtemplate.replace('<dx>', str(1. / layers)) \
                      .replace('<layers>', str(layers))
    # Open/close explicitly: the old py2 file(...).write(...) idiom leaked
    # the handle, leaving the flush to garbage collection.
    geo_file = open(name + ".geo", 'w')
    try:
        geo_file.write(geo)
    finally:
        geo_file.close()
    os.system("gmsh -2 " + name + ".geo")
    os.system("../../scripts/gmsh2triangle --2d " + name + ".msh")
def run_test(layers, binary):
    '''run_test(layers, binary)

    Run a single test of the channel problem. Layers is the number of mesh
    points in the cross-channel direction. The mesh is unstructured and
    isotropic. binary is a string containing the fluidity command to run.

    The return value is the error in u and p at the end of the simulation.'''
    generate_meshfile("channel",layers)
    os.system(binary+" channel_viscous.flml")
    # Pull the final-timestep L2 error norms out of the fluidity .stat file.
    s=stat_parser("channel-flow-dg.stat")
    return (s["Water"]['AnalyticUVelocitySolutionError']['l2norm'][-1],
            s["Water"]['AnalyticPressureSolutionError']['l2norm'][-1])
def forcing(X):
    '''Forcing function. Must be an analytic function of X[1] only'''
    y = X[1]
    return (y ** 3, 0)
# Numeric verision of the forcing function, for efficiency.
def numeric_forcing(X):
'''Forcing function. Must be an analytic function of X[1] only'''
from math import sin, pi
return (X[1]**3,0)
# Viscosity used by the analytic solutions below.
mu=1.0
# Note that because Coriolis can't be set from Python, the user has to ensure
# that this matches what is in the flml.
coriolis=1.0
#coriolis=0.0
def analytic_solution(forcing):
    '''Solve the ode d^2u/dx^2 = F/mu subject to u(0)=0, u(1)=0'''
    x=Symbol('x')
    # Constants of integration.
    c1=Symbol('c_1')
    c2=Symbol('c_2')
    # General solution: integrate -F/mu twice, carrying c1 and c2.
    general=integrate(integrate(-forcing((0,x))[0]/mu,x)+c1,x)+c2
    # Fix c1, c2 from the boundary conditions u(0)=0 and u(1)=0.
    constants = solve((Eq(general.subs(x,0),0),
                       Eq(general.subs(x,1),0)), c1,c2)
    specific=general.subs(constants)
    return specific
def solution(forcing):
    '''Return a function which is the solution to:
    d^2u/dx^2 = F/mu subject to u(0)=0, u(1)=0'''
    def sol(point):
        # Evaluate the symbolic solution at the cross-channel coordinate.
        return analytic_solution(forcing).subs(Symbol('x'), point[1])
    return sol
# absorption=0.5
# def analytic_solution(forcing):
# '''Return the steady state of the ode du/dt = F - Au'''
# x=Symbol('x')
# u=forcing((0.0,x))[0]/absorption
# return u
# def solution(forcing):
# '''Return a function which is the solution to:
# ode du/dt = F - Au'''
# def sol(sx):
# return analytic_solution(forcing).subs(Symbol('x'),sx[1])
# return sol
def analytic_pressure_solution(forcing):
    """Symbolic pressure: the x-integral of (forcing_y - coriolis * u)."""
    x = Symbol('x')
    u = analytic_solution(forcing)
    return integrate(forcing((0, x))[1] - coriolis * u, x)
def pressure_solution(forcing):
    '''Return a function which is the solution to:
    dp/dx = f x u The constant of integration is set to 0.'''
    def sol(point):
        # Evaluate the symbolic pressure at the cross-channel coordinate.
        return analytic_pressure_solution(forcing).subs(Symbol('x'), point[1])
    return sol
def plot_theory():
    '''Produce a plot showing the forcing, analytic velocity solution and
    analytic pressure solution

    Returns the sampled (forcing, velocity, pressure) arrays.'''
    from pylab import \
        plot,figure,quiver,frange,subplot,xticks,yticks,axis,xlabel,ylabel, \
        subplots_adjust
    figure()
    # Sample heights across the channel.
    y=frange(0.0,1,0.05)
    psol=pressure_solution(forcing)
    usol=solution(forcing)
    v=0*y
    x=0*y
    # Evaluate the analytic solutions and the forcing at each height.
    us=array([float(usol(pos)) for pos in zip(x,y)])
    ps=array([float(psol(pos)) for pos in zip(x,y)])
    uf=array([forcing(pos) for pos in zip(x,y)])[:,0]
    subplots_adjust(wspace=0.25)
    # Panel 1: the forcing profile.
    subplot(1,3,1)
    quiver(x[1:-1],y[1:-1],uf[1:-1],v[1:-1], scale=1)
    plot(uf,y)
    xticks([0,0.5,1],map(str,[0,0.5,1]))
    yticks([ 0 , 0.2, 0.4, 0.6, 0.8, 1 ],map(str,[ 0 , 0.2, 0.4, 0.6, 0.8, 1 ]))
    ylabel("y")
    xlabel("u source")
    # Panel 2: the velocity solution.
    subplot(1,3,2)
    plot(us,y)
    quiver(x[1:-1],y[1:-1],us[1:-1],v[1:-1], scale=.03)
    xticks([0,0.01,0.02,0.03],map(str,[0,0.01,0.02,0.03]))
    yticks([])
    xlabel("u solution")
    # Panel 3: the pressure solution.
    subplot(1,3,3)
    plot(ps,y)
    xticks([-0.02,-0.01,0],map(str,[-0.02,-0.01,0]))
    yticks([])
    xlabel("p solution")
    return uf,us,ps
def plot_stored_results():
    """Load the (dx, error) pair pickled in 'error_results' and plot it."""
    import pickle
    # Use a context manager: the old file("error_results") handle was
    # never closed. Binary mode is correct for pickle on py2 and py3.
    with open("error_results", "rb") as results_file:
        (dx, error) = pickle.load(results_file)
    plot_results(dx, error)
def plot_results(dx, error):
    '''plot_results(dx, error)

    Produce a plot of the actual errors provided in the argument
    "error". Error should be a two column matrix with the first column being
    the velocity error and the second column the pressure error; dx gives
    the corresponding mesh spacings.
    '''
    from pylab import \
        plot,figure,quiver,frange,subplot,xticks,yticks,axis,xlabel,ylabel, \
        subplots_adjust,loglog,legend
    figure()
    loglog(dx,error)
    # Reference second-order convergence line for comparison.
    loglog(dx,0.03*dx**2)
    yticks(yticks()[0], map(lambda x: "%3.1e"%x, yticks()[0]))
    xticks(xticks()[0], map(lambda x: "%3.1e"%x, xticks()[0]))
    xlabel("dx")
    legend(("u error","p error","O(dx^2)"))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.