| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, ⌀ = null) |
|---|---|---|---|---|
fabtrompet/bomberman
|
refs/heads/master
|
testando.py
|
1
|
from Tkinter import *

root = Tk()

def find_in_grid(frame, row, column):
    for children in frame.children.values():
        info = children.grid_info()
        # note that row and column numbers are stored as strings
        if info['row'] == str(row) and info['column'] == str(column):
            return children
    return None

# create an array of buttons
width = 10
for i in range(width):
    for j in range(width):
        b = Button(root, text=str(i*width+j))
        b.grid(row=i, column=j)

# Create two entries to set the row and column to find. Changing an entry prints
# the text of the matching button (and flashes it on compatible platforms).
def update(var, value, op):
    r = row.get()
    c = col.get()
    b = find_in_grid(root, r, c)
    if b:
        print "button ({0},{1}) : {2}".format(r, c, b["text"])
        b.flash()

Label(root, text="row:").grid(row=width, column=0)
row = StringVar()
row.trace('w', update)
Entry(root, textvar=row, width=3).grid(row=width, column=1)

Label(root, text="col:").grid(row=width, column=2)
col = StringVar()
col.trace('w', update)
Entry(root, textvar=col, width=3).grid(row=width, column=3)

row.set('3')
col.set('2')

mainloop()
|
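The snippet above targets Python 2 (Tkinter module, print statement). For Python 3, a minimal sketch of the same grid_info()-based lookup might look like the following; it keeps the str() comparison since some Tk versions report grid coordinates as strings.

import tkinter as tk

def find_in_grid(frame, row, column):
    # grid_info() reports 'row'/'column' as strings on some Tk versions,
    # so compare both sides as strings to stay version-agnostic.
    for child in frame.children.values():
        info = child.grid_info()
        if str(info['row']) == str(row) and str(info['column']) == str(column):
            return child
    return None

if __name__ == '__main__':
    root = tk.Tk()
    tk.Button(root, text='hello').grid(row=0, column=0)
    print(find_in_grid(root, 0, 0)['text'])  # -> hello
    root.destroy()
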
t794104/ansible
|
refs/heads/devel
|
lib/ansible/modules/notification/irc.py
|
32
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Jan-Piet Mens <jpmens () gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: irc
version_added: "1.2"
short_description: Send a message to an IRC channel
description:
- Send a message to an IRC channel. This is a very simplistic implementation.
options:
server:
description:
- IRC server name/address
default: localhost
port:
description:
- IRC server port number
default: 6667
nick:
description:
- Nickname to send the message from. May be shortened, depending on server's NICKLEN setting.
default: ansible
msg:
description:
- The message body.
required: true
topic:
description:
- Set the channel topic
version_added: "2.0"
color:
description:
- Text color for the message. ("none" is a valid option in 1.6 or later; in 1.6 and prior, the default color is black, not "none").
Added 11 more colors in version 2.0.
default: "none"
choices: [ "none", "white", "black", "blue", "green", "red", "brown", "purple", "orange", "yellow", "light_green", "teal", "light_cyan",
"light_blue", "pink", "gray", "light_gray"]
channel:
description:
- Channel name. One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them.
required: true
nick_to:
description:
- A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the message will be sent to both of them.
version_added: "2.0"
key:
description:
- Channel key
version_added: "1.7"
passwd:
description:
- Server password
timeout:
description:
- Timeout to use while waiting for successful registration and join messages; this is to prevent an endless loop.
default: 30
version_added: "1.5"
use_ssl:
description:
- Designates whether TLS/SSL should be used when connecting to the IRC server
type: bool
default: 'no'
version_added: "1.8"
part:
description:
- Designates whether user should part from channel after sending message or not.
Useful for when using a faux bot and not wanting join/parts between messages.
type: bool
default: 'yes'
version_added: "2.0"
style:
description:
- Text style for the message. Note italic does not work on some clients
choices: [ "bold", "underline", "reverse", "italic" ]
version_added: "2.0"
# informational: requirements for nodes
requirements: [ socket ]
author:
- "Jan-Piet Mens (@jpmens)"
- "Matt Martz (@sivel)"
'''
EXAMPLES = '''
- irc:
server: irc.example.net
channel: '#t1'
msg: Hello world
- local_action:
module: irc
port: 6669
server: irc.example.net
channel: '#t1'
msg: 'All finished at {{ ansible_date_time.iso8601 }}'
color: red
nick: ansibleIRC
- local_action:
module: irc
port: 6669
server: irc.example.net
channel: '#t1'
nick_to:
- nick1
- nick2
msg: 'All finished at {{ ansible_date_time.iso8601 }}'
color: red
nick: ansibleIRC
'''
# ===========================================
# IRC module support methods.
#
import re
import socket
import ssl
import time
import traceback
from ansible.module_utils._text import to_native, to_bytes
from ansible.module_utils.basic import AnsibleModule
def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, key=None, topic=None,
nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False, part=True, style=None):
'''send message to IRC'''
nick_to = [] if nick_to is None else nick_to
colornumbers = {
'white': "00",
'black': "01",
'blue': "02",
'green': "03",
'red': "04",
'brown': "05",
'purple': "06",
'orange': "07",
'yellow': "08",
'light_green': "09",
'teal': "10",
'light_cyan': "11",
'light_blue': "12",
'pink': "13",
'gray': "14",
'light_gray': "15",
}
stylechoices = {
'bold': "\x02",
'underline': "\x1F",
'reverse': "\x16",
'italic': "\x1D",
}
try:
styletext = stylechoices[style]
except Exception:
styletext = ""
try:
colornumber = colornumbers[color]
colortext = "\x03" + colornumber
except Exception:
colortext = ""
message = styletext + colortext + msg
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if use_ssl:
irc = ssl.wrap_socket(irc)
irc.connect((server, int(port)))
if passwd:
irc.send(to_bytes('PASS %s\r\n' % passwd))
irc.send(to_bytes('NICK %s\r\n' % nick))
irc.send(to_bytes('USER %s %s %s :ansible IRC\r\n' % (nick, nick, nick)))
motd = ''
start = time.time()
while 1:
motd += to_native(irc.recv(1024))
# The server might send back a shorter nick than we specified (due to NICKLEN),
# so grab that and use it from now on (assuming we find the 00[1-4] response).
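# (001-004 are the IRC registration numerics, RPL_WELCOME through RPL_MYINFO.)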
match = re.search(r'^:\S+ 00[1-4] (?P<nick>\S+) :', motd, flags=re.M)
if match:
nick = match.group('nick')
break
elif time.time() - start > timeout:
raise Exception('Timeout waiting for IRC server welcome response')
time.sleep(0.5)
if key:
irc.send(to_bytes('JOIN %s %s\r\n' % (channel, key)))
else:
irc.send(to_bytes('JOIN %s\r\n' % channel))
join = ''
start = time.time()
while 1:
join += to_native(irc.recv(1024))
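# 366 is RPL_ENDOFNAMES, sent once the channel's NAMES list is complete, i.e. the JOIN succeeded.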
if re.search(r'^:\S+ 366 %s %s :' % (nick, channel), join, flags=re.M | re.I):
break
elif time.time() - start > timeout:
raise Exception('Timeout waiting for IRC JOIN response')
time.sleep(0.5)
if topic is not None:
irc.send(to_bytes('TOPIC %s :%s\r\n' % (channel, topic)))
time.sleep(1)
if nick_to:
for nick in nick_to:
irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (nick, message)))
if channel:
irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (channel, message)))
time.sleep(1)
if part:
irc.send(to_bytes('PART %s\r\n' % channel))
irc.send(to_bytes('QUIT\r\n'))
time.sleep(1)
irc.close()
# ===========================================
# Main
#
def main():
module = AnsibleModule(
argument_spec=dict(
server=dict(default='localhost'),
port=dict(type='int', default=6667),
nick=dict(default='ansible'),
nick_to=dict(required=False, type='list'),
msg=dict(required=True),
color=dict(default="none", aliases=['colour'], choices=["white", "black", "blue",
"green", "red", "brown",
"purple", "orange", "yellow",
"light_green", "teal", "light_cyan",
"light_blue", "pink", "gray",
"light_gray", "none"]),
style=dict(default="none", choices=["underline", "reverse", "bold", "italic", "none"]),
channel=dict(required=False),
key=dict(no_log=True),
topic=dict(),
passwd=dict(no_log=True),
timeout=dict(type='int', default=30),
part=dict(type='bool', default=True),
use_ssl=dict(type='bool', default=False)
),
supports_check_mode=True,
required_one_of=[['channel', 'nick_to']]
)
server = module.params["server"]
port = module.params["port"]
nick = module.params["nick"]
nick_to = module.params["nick_to"]
msg = module.params["msg"]
color = module.params["color"]
channel = module.params["channel"]
topic = module.params["topic"]
if topic and not channel:
module.fail_json(msg="When topic is specified, a channel is required.")
key = module.params["key"]
passwd = module.params["passwd"]
timeout = module.params["timeout"]
use_ssl = module.params["use_ssl"]
part = module.params["part"]
style = module.params["style"]
try:
send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl, part, style)
except Exception as e:
module.fail_json(msg="unable to send to IRC: %s" % to_native(e), exception=traceback.format_exc())
module.exit_json(changed=False, channel=channel, nick=nick,
msg=msg)
if __name__ == '__main__':
main()
|
wzyuliyang/scrapy
|
refs/heads/master
|
extras/qps-bench-server.py
|
178
|
#!/usr/bin/env python
from __future__ import print_function
from time import time
from collections import deque
from twisted.web.server import Site, NOT_DONE_YET
from twisted.web.resource import Resource
from twisted.internet import reactor
class Root(Resource):
def __init__(self):
Resource.__init__(self)
self.concurrent = 0
self.tail = deque(maxlen=100)
self._reset_stats()
def _reset_stats(self):
self.tail.clear()
self.start = self.lastmark = self.lasttime = time()
def getChild(self, request, name):
return self
def render(self, request):
now = time()
delta = now - self.lasttime
# reset stats on high inter-request times caused by client restarts
if delta > 3: # seconds
self._reset_stats()
return ''
self.tail.appendleft(delta)
self.lasttime = now
self.concurrent += 1
if now - self.lastmark >= 3:
self.lastmark = now
qps = len(self.tail) / sum(self.tail)
print('samplesize={0} concurrent={1} qps={2:0.2f}'.format(len(self.tail), self.concurrent, qps))
if 'latency' in request.args:
latency = float(request.args['latency'][0])
reactor.callLater(latency, self._finish, request)
return NOT_DONE_YET
self.concurrent -= 1
return ''
def _finish(self, request):
self.concurrent -= 1
if not request.finished and not request._disconnected:
request.finish()
root = Root()
factory = Site(root)
reactor.listenTCP(8880, factory)
reactor.run()
|
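The bench server above is meant to be hammered by an external client: it prints sample size, concurrency and QPS roughly every three seconds, and an optional latency=<seconds> query argument delays each response via reactor.callLater. A minimal client loop to drive it might look like the following sketch (assuming the default port 8880 configured above):

import urllib.request

URL = 'http://localhost:8880/?latency=0.05'  # port taken from reactor.listenTCP(8880, factory)

while True:
    # Each completed request contributes one inter-request delta to the server's stats.
    urllib.request.urlopen(URL).read()
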
vwc/agita
|
refs/heads/master
|
src/agita.sitetheme/setup.py
|
1
|
from setuptools import setup, find_packages
import os
version = '1.0'
setup(name='agita.sitetheme',
version=version,
description="agita Diazo Plone Theme",
long_description=open("README.txt").read() + "\n" +
open(os.path.join("docs", "HISTORY.txt")).read(),
# Get more strings from
# http://pypi.python.org/pypi?:action=list_classifiers
classifiers=[
"Framework :: Plone",
"Programming Language :: Python",
],
keywords='',
author='Vorwaerts Werbung GbR',
author_email='hallo@vorwaerts-werbung.de',
url='http://dist.vorwaerts-werbung.de',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['agita'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'plone.app.theming',
'plone.app.themingplugins',
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
""",
)
|
sdarji/lpthw
|
refs/heads/master
|
Lib/site-packages/pip/index.py
|
343
|
"""Routines related to PyPI, indexes"""
import sys
import os
import re
import mimetypes
import posixpath
from pip.log import logger
from pip.util import Inf, normalize_name, splitext, is_prerelease
from pip.exceptions import (DistributionNotFound, BestVersionAlreadyInstalled,
InstallationError, InvalidWheelFilename, UnsupportedWheel)
from pip.backwardcompat import urlparse, url2pathname
from pip.download import PipSession, url_to_path, path_to_url
from pip.wheel import Wheel, wheel_ext
from pip.pep425tags import supported_tags, supported_tags_noarch, get_platform
from pip._vendor import html5lib, requests, pkg_resources
from pip._vendor.requests.exceptions import SSLError
__all__ = ['PackageFinder']
DEFAULT_MIRROR_HOSTNAME = "last.pypi.python.org"
INSECURE_SCHEMES = {
"http": ["https"],
}
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links
"""
def __init__(self, find_links, index_urls,
use_wheel=True, allow_external=[], allow_unverified=[],
allow_all_external=False, allow_all_prereleases=False,
process_dependency_links=False, session=None):
self.find_links = find_links
self.index_urls = index_urls
self.dependency_links = []
self.cache = PageCache()
# These are boring links that have already been logged somehow:
self.logged_links = set()
self.use_wheel = use_wheel
# Do we allow (safe and verifiable) externally hosted files?
self.allow_external = set(normalize_name(n) for n in allow_external)
# Which names are allowed to install insecure and unverifiable files?
self.allow_unverified = set(
normalize_name(n) for n in allow_unverified
)
# Anything that is allowed unverified is also allowed external
self.allow_external |= self.allow_unverified
# Do we allow all (safe and verifiable) externally hosted files?
self.allow_all_external = allow_all_external
# Stores if we ignored any external links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_external = False
# Stores if we ignored any unsafe links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_unverified = False
# Do we want to allow _all_ pre-releases?
self.allow_all_prereleases = allow_all_prereleases
# Do we process dependency links?
self.process_dependency_links = process_dependency_links
self._have_warned_dependency_links = False
# The Session we'll use to make requests
self.session = session or PipSession()
def add_dependency_links(self, links):
## FIXME: this shouldn't be global list this, it should only
## apply to requirements of the package that specifies the
## dependency_links value
## FIXME: also, we should track comes_from (i.e., use Link)
if self.process_dependency_links:
if not self._have_warned_dependency_links:
logger.deprecated(
"1.6",
"Dependency Links processing has been deprecated with an "
"accelerated time schedule and will be removed in pip 1.6",
)
self._have_warned_dependency_links = True
self.dependency_links.extend(links)
def _sort_locations(self, locations):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
is_find_link = url in self.find_links
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if is_find_link and os.path.isdir(path):
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url and os.path.isdir(path):
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
urls.append(url)
return files, urls
def _link_sort_key(self, link_tuple):
"""
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min()
3. source archives
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version, would have to be considered equal
"""
parsed_version, link, _ = link_tuple
if self.use_wheel:
support_num = len(supported_tags)
if link == INSTALLED_VERSION:
pri = 1
elif link.ext == wheel_ext:
wheel = Wheel(link.filename) # can raise InvalidWheelFilename
if not wheel.supported():
raise UnsupportedWheel("%s is not a supported wheel for this platform. It can't be sorted." % wheel.filename)
pri = -(wheel.support_index_min())
else: # sdist
pri = -(support_num)
return (parsed_version, pri)
else:
return parsed_version
def _sort_versions(self, applicable_versions):
"""
Bring the latest version (and wheels) to the front, but maintain the existing ordering as secondary.
See the docstring for `_link_sort_key` for details.
This function is isolated for easier unit testing.
"""
return sorted(applicable_versions, key=self._link_sort_key, reverse=True)
def find_requirement(self, req, upgrade):
def mkurl_pypi_url(url):
loc = posixpath.join(url, url_name)
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
url_name = req.url_name
# Only check main index if index URL is given:
main_index_url = None
if self.index_urls:
# Check that we have the url_name correctly spelled:
main_index_url = Link(mkurl_pypi_url(self.index_urls[0]), trusted=True)
# This will also cache the page, so it's okay that we get it again later:
page = self._get_page(main_index_url, req)
if page is None:
url_name = self._find_url_name(Link(self.index_urls[0], trusted=True), url_name, req) or req.url_name
if url_name is not None:
locations = [
mkurl_pypi_url(url)
for url in self.index_urls] + self.find_links
else:
locations = list(self.find_links)
for version in req.absolute_versions:
if url_name is not None and main_index_url is not None:
locations = [
posixpath.join(main_index_url.url, version)] + locations
file_locations, url_locations = self._sort_locations(locations)
_flocations, _ulocations = self._sort_locations(self.dependency_links)
file_locations.extend(_flocations)
# We trust every url that the user has given us whether it was given
# via --index-url or --find-links
locations = [Link(url, trusted=True) for url in url_locations]
# We explicitly do not trust links that came from dependency_links
locations.extend([Link(url) for url in _ulocations])
logger.debug('URLs to search for versions for %s:' % req)
for location in locations:
logger.debug('* %s' % location)
# Determine if this url used a secure transport mechanism
parsed = urlparse.urlparse(str(location))
if parsed.scheme in INSECURE_SCHEMES:
secure_schemes = INSECURE_SCHEMES[parsed.scheme]
if len(secure_schemes) == 1:
ctx = (location, parsed.scheme, secure_schemes[0],
parsed.netloc)
logger.warn("%s uses an insecure transport scheme (%s). "
"Consider using %s if %s has it available" %
ctx)
elif len(secure_schemes) > 1:
ctx = (location, parsed.scheme, ", ".join(secure_schemes),
parsed.netloc)
logger.warn("%s uses an insecure transport scheme (%s). "
"Consider using one of %s if %s has any of "
"them available" % ctx)
else:
ctx = (location, parsed.scheme)
logger.warn("%s uses an insecure transport scheme (%s)." %
ctx)
found_versions = []
found_versions.extend(
self._package_versions(
# We trust every directly linked archive in find_links
[Link(url, '-f', trusted=True) for url in self.find_links], req.name.lower()))
page_versions = []
for page in self._get_pages(locations, req):
logger.debug('Analyzing links from page %s' % page.url)
logger.indent += 2
try:
page_versions.extend(self._package_versions(page.links, req.name.lower()))
finally:
logger.indent -= 2
dependency_versions = list(self._package_versions(
[Link(url) for url in self.dependency_links], req.name.lower()))
if dependency_versions:
logger.info('dependency_links found: %s' % ', '.join([link.url for parsed, link, version in dependency_versions]))
file_versions = list(self._package_versions(
[Link(url) for url in file_locations], req.name.lower()))
if not found_versions and not page_versions and not dependency_versions and not file_versions:
logger.fatal('Could not find any downloads that satisfy the requirement %s' % req)
if self.need_warn_external:
logger.warn("Some externally hosted files were ignored (use "
"--allow-external %s to allow)." % req.name)
if self.need_warn_unverified:
logger.warn("Some insecure and unverifiable files were ignored"
" (use --allow-unverified %s to allow)." %
req.name)
raise DistributionNotFound('No distributions at all found for %s' % req)
installed_version = []
if req.satisfied_by is not None:
installed_version = [(req.satisfied_by.parsed_version, INSTALLED_VERSION, req.satisfied_by.version)]
if file_versions:
file_versions.sort(reverse=True)
logger.info('Local files found: %s' % ', '.join([url_to_path(link.url) for parsed, link, version in file_versions]))
#this is an intentional priority ordering
all_versions = installed_version + file_versions + found_versions + page_versions + dependency_versions
applicable_versions = []
for (parsed_version, link, version) in all_versions:
if version not in req.req:
logger.info("Ignoring link %s, version %s doesn't match %s"
% (link, version, ','.join([''.join(s) for s in req.req.specs])))
continue
elif is_prerelease(version) and not (self.allow_all_prereleases or req.prereleases):
# If this version isn't the already installed one, then
# ignore it if it's a pre-release.
if link is not INSTALLED_VERSION:
logger.info("Ignoring link %s, version %s is a pre-release (use --pre to allow)." % (link, version))
continue
applicable_versions.append((parsed_version, link, version))
applicable_versions = self._sort_versions(applicable_versions)
existing_applicable = bool([link for parsed_version, link, version in applicable_versions if link is INSTALLED_VERSION])
if not upgrade and existing_applicable:
if applicable_versions[0][1] is INSTALLED_VERSION:
logger.info('Existing installed version (%s) is most up-to-date and satisfies requirement'
% req.satisfied_by.version)
else:
logger.info('Existing installed version (%s) satisfies requirement (most up-to-date version is %s)'
% (req.satisfied_by.version, applicable_versions[0][2]))
return None
if not applicable_versions:
logger.fatal('Could not find a version that satisfies the requirement %s (from versions: %s)'
% (req, ', '.join([version for parsed_version, link, version in all_versions])))
if self.need_warn_external:
logger.warn("Some externally hosted files were ignored (use "
"--allow-external to allow).")
if self.need_warn_unverified:
logger.warn("Some insecure and unverifiable files were ignored"
" (use --allow-unverified %s to allow)." %
req.name)
raise DistributionNotFound('No distributions matching the version for %s' % req)
if applicable_versions[0][1] is INSTALLED_VERSION:
# We have an existing version, and it's the best version
logger.info('Installed version (%s) is most up-to-date (past versions: %s)'
% (req.satisfied_by.version, ', '.join([version for parsed_version, link, version in applicable_versions[1:]]) or 'none'))
raise BestVersionAlreadyInstalled
if len(applicable_versions) > 1:
logger.info('Using version %s (newest of versions: %s)' %
(applicable_versions[0][2], ', '.join([version for parsed_version, link, version in applicable_versions])))
selected_version = applicable_versions[0][1]
if (selected_version.internal is not None
and not selected_version.internal):
logger.warn("%s an externally hosted file and may be "
"unreliable" % req.name)
if (selected_version.verifiable is not None
and not selected_version.verifiable):
logger.warn("%s is potentially insecure and "
"unverifiable." % req.name)
if selected_version._deprecated_regex:
logger.deprecated(
"1.7",
"%s discovered using a deprecated method of parsing, "
"in the future it will no longer be discovered" % req.name
)
return selected_version
def _find_url_name(self, index_url, url_name, req):
"""Finds the true URL name of a package, when the given name isn't quite correct.
This is usually used to implement case-insensitivity."""
if not index_url.url.endswith('/'):
# Vaguely part of the PyPI API... weird but true.
## FIXME: bad to modify this?
index_url.url += '/'
page = self._get_page(index_url, req)
if page is None:
logger.fatal('Cannot fetch index base URL %s' % index_url)
return
norm_name = normalize_name(req.url_name)
for link in page.links:
base = posixpath.basename(link.path.rstrip('/'))
if norm_name == normalize_name(base):
logger.notify('Real name of requirement %s is %s' % (url_name, base))
return base
return None
def _get_pages(self, locations, req):
"""
Yields (page, page_url) from the given locations, skipping
locations that have errors, and adding download/homepage links
"""
all_locations = list(locations)
seen = set()
while all_locations:
location = all_locations.pop(0)
if location in seen:
continue
seen.add(location)
page = self._get_page(location, req)
if page is None:
continue
yield page
for link in page.rel_links():
normalized = normalize_name(req.name).lower()
if (not normalized in self.allow_external
and not self.allow_all_external):
self.need_warn_external = True
logger.debug("Not searching %s for files because external "
"urls are disallowed." % link)
continue
if (link.trusted is not None
and not link.trusted
and not normalized in self.allow_unverified):
logger.debug("Not searching %s for urls, it is an "
"untrusted link and cannot produce safe or "
"verifiable files." % link)
self.need_warn_unverified = True
continue
all_locations.append(link)
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.-]+)', re.I)
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"Returns elements of links in order, non-egg links first, egg links second, while eliminating duplicates"
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search_name):
for link in self._sort_links(links):
for v in self._link_package_versions(link, search_name):
yield v
def _known_extensions(self):
extensions = ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip')
if self.use_wheel:
return extensions + (wheel_ext,)
return extensions
def _link_package_versions(self, link, search_name):
"""
Return an iterable of triples (pkg_resources_version_key,
link, python_version) that can be extracted from the given
link.
Meant to be overridden by subclasses, not called by clients.
"""
platform = get_platform()
version = None
if link.egg_fragment:
egg_info = link.egg_fragment
else:
egg_info, ext = link.splitext()
if not ext:
if link not in self.logged_links:
logger.debug('Skipping link %s; not a file' % link)
self.logged_links.add(link)
return []
if egg_info.endswith('.tar'):
# Special double-extension case:
egg_info = egg_info[:-4]
ext = '.tar' + ext
if ext not in self._known_extensions():
if link not in self.logged_links:
logger.debug('Skipping link %s; unknown archive format: %s' % (link, ext))
self.logged_links.add(link)
return []
if "macosx10" in link.path and ext == '.zip':
if link not in self.logged_links:
logger.debug('Skipping link %s; macosx10 one' % (link))
self.logged_links.add(link)
return []
if ext == wheel_ext:
try:
wheel = Wheel(link.filename)
except InvalidWheelFilename:
logger.debug('Skipping %s because the wheel filename is invalid' % link)
return []
if wheel.name.lower() != search_name.lower():
logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name))
return []
if not wheel.supported():
logger.debug('Skipping %s because it is not compatible with this Python' % link)
return []
# This is a dirty hack to prevent installing Binary Wheels from
# PyPI unless it is a Windows or Mac Binary Wheel. This is
# paired with a change to PyPI disabling uploads for the
# same. Once we have a mechanism for enabling support for binary
# wheels on linux that deals with the inherent problems of
# binary distribution this can be removed.
comes_from = getattr(link, "comes_from", None)
if ((
not platform.startswith('win')
and not platform.startswith('macosx')
)
and comes_from is not None
and urlparse.urlparse(comes_from.url).netloc.endswith(
"pypi.python.org")):
if not wheel.supported(tags=supported_tags_noarch):
logger.debug(
"Skipping %s because it is a pypi-hosted binary "
"Wheel on an unsupported platform" % link
)
return []
version = wheel.version
if not version:
version = self._egg_info_matches(egg_info, search_name, link)
if version is None:
logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name))
return []
if (link.internal is not None
and not link.internal
and not normalize_name(search_name).lower() in self.allow_external
and not self.allow_all_external):
# We have a link that we are sure is external, so we should skip
# it unless we are allowing externals
logger.debug("Skipping %s because it is externally hosted." % link)
self.need_warn_external = True
return []
if (link.verifiable is not None
and not link.verifiable
and not (normalize_name(search_name).lower()
in self.allow_unverified)):
# We have a link that we are sure we cannot verify its integrity,
# so we should skip it unless we are allowing unsafe installs
# for this requirement.
logger.debug("Skipping %s because it is an insecure and "
"unverifiable file." % link)
self.need_warn_unverified = True
return []
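# Strip any trailing -pyX.Y marker from the version (e.g. 'foo-1.0-py2.7')
# and make sure it matches the running interpreter before accepting the link.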
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
logger.debug('Skipping %s because Python version is incorrect' % link)
return []
logger.debug('Found link %s, version: %s' % (link, version))
return [(pkg_resources.parse_version(version),
link,
version)]
def _egg_info_matches(self, egg_info, search_name, link):
match = self._egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s' % link)
return None
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
# project name and version must be separated by a dash
look_for = search_name.lower() + "-"
if name.startswith(look_for):
return match.group(0)[len(look_for):]
else:
return None
def _get_page(self, link, req):
return HTMLPage.get_page(link, req,
cache=self.cache,
session=self.session,
)
class PageCache(object):
"""Cache of HTML pages"""
failure_limit = 3
def __init__(self):
self._failures = {}
self._pages = {}
self._archives = {}
def too_many_failures(self, url):
return self._failures.get(url, 0) >= self.failure_limit
def get_page(self, url):
return self._pages.get(url)
def is_archive(self, url):
return self._archives.get(url, False)
def set_is_archive(self, url, value=True):
self._archives[url] = value
def add_page_failure(self, url, level):
self._failures[url] = self._failures.get(url, 0)+level
def add_page(self, urls, page):
for url in urls:
self._pages[url] = page
class HTMLPage(object):
"""Represents one page, along with its URL"""
## FIXME: these regexes are horrible hacks:
_homepage_re = re.compile(r'<th>\s*home\s*page', re.I)
_download_re = re.compile(r'<th>\s*download\s+url', re.I)
_href_re = re.compile('href=(?:"([^"]*)"|\'([^\']*)\'|([^>\\s\\n]*))', re.I|re.S)
def __init__(self, content, url, headers=None, trusted=None):
self.content = content
self.parsed = html5lib.parse(self.content, namespaceHTMLElements=False)
self.url = url
self.headers = headers
self.trusted = trusted
def __str__(self):
return self.url
@classmethod
def get_page(cls, link, req, cache=None, skip_archives=True, session=None):
if session is None:
session = PipSession()
url = link.url
url = url.split('#', 1)[0]
if cache.too_many_failures(url):
return None
# Check for VCS schemes that do not support lookup as web pages.
from pip.vcs import VcsSupport
for scheme in VcsSupport.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
logger.debug('Cannot look at %(scheme)s URL %(link)s' % locals())
return None
if cache is not None:
inst = cache.get_page(url)
if inst is not None:
return inst
try:
if skip_archives:
if cache is not None:
if cache.is_archive(url):
return None
filename = link.filename
for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
if filename.endswith(bad_ext):
content_type = cls._get_content_type(url,
session=session,
)
if content_type.lower().startswith('text/html'):
break
else:
logger.debug('Skipping page %s because of Content-Type: %s' % (link, content_type))
if cache is not None:
cache.set_is_archive(url)
return None
logger.debug('Getting page %s' % url)
# Tack index.html onto file:// URLs that point to directories
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
# add trailing slash if not present so urljoin doesn't trim final segment
if not url.endswith('/'):
url += '/'
url = urlparse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s' % url)
resp = session.get(url, headers={"Accept": "text/html"})
resp.raise_for_status()
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
# requirement. For instance http://sourceforge.net/projects/docutils/files/docutils/0.8.1/docutils-0.8.1.tar.gz/download
# redirects to http://superb-dca3.dl.sourceforge.net/project/docutils/docutils/0.8.1/docutils-0.8.1.tar.gz
# Unless we issue a HEAD request on every url we cannot know
# ahead of time for sure if something is HTML or not. However we
# can check after we've downloaded it.
content_type = resp.headers.get('Content-Type', 'unknown')
if not content_type.lower().startswith("text/html"):
logger.debug('Skipping page %s because of Content-Type: %s' %
(link, content_type))
if cache is not None:
cache.set_is_archive(url)
return None
inst = cls(resp.text, resp.url, resp.headers, trusted=link.trusted)
except requests.HTTPError as exc:
level = 2 if exc.response.status_code == 404 else 1
cls._handle_fail(req, link, exc, url, cache=cache, level=level)
except requests.ConnectionError as exc:
cls._handle_fail(
req, link, "connection error: %s" % exc, url,
cache=cache,
)
except requests.Timeout:
cls._handle_fail(req, link, "timed out", url, cache=cache)
except SSLError as exc:
reason = ("There was a problem confirming the ssl certificate: "
"%s" % exc)
cls._handle_fail(req, link, reason, url,
cache=cache,
level=2,
meth=logger.notify,
)
else:
if cache is not None:
cache.add_page([url, resp.url], inst)
return inst
@staticmethod
def _handle_fail(req, link, reason, url, cache=None, level=1, meth=None):
if meth is None:
meth = logger.info
meth("Could not fetch URL %s: %s", link, reason)
meth("Will skip URL %s when looking for download links for %s" %
(link.url, req))
if cache is not None:
cache.add_page_failure(url, level)
@staticmethod
def _get_content_type(url, session=None):
"""Get the Content-Type of the given url, using a HEAD request"""
if session is None:
session = PipSession()
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
if not scheme in ('http', 'https', 'ftp', 'ftps'):
## FIXME: some warning or something?
## assertion error?
return ''
resp = session.head(url, allow_redirects=True)
resp.raise_for_status()
return resp.headers.get("Content-Type", "")
@property
def api_version(self):
if not hasattr(self, "_api_version"):
_api_version = None
metas = [x for x in self.parsed.findall(".//meta")
if x.get("name", "").lower() == "api-version"]
if metas:
try:
_api_version = int(metas[0].get("value", None))
except (TypeError, ValueError):
_api_version = None
self._api_version = _api_version
return self._api_version
@property
def base_url(self):
if not hasattr(self, "_base_url"):
base = self.parsed.find(".//base")
if base is not None and base.get("href"):
self._base_url = base.get("href")
else:
self._base_url = self.url
return self._base_url
@property
def links(self):
"""Yields all links in the page"""
for anchor in self.parsed.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = self.clean_link(urlparse.urljoin(self.base_url, href))
# Determine if this link is internal. If that distinction
# doesn't make sense in this context, then we don't make
# any distinction.
internal = None
if self.api_version and self.api_version >= 2:
# Only api_versions >= 2 have a distinction between
# external and internal links
internal = bool(anchor.get("rel")
and "internal" in anchor.get("rel").split())
yield Link(url, self, internal=internal)
def rel_links(self):
for url in self.explicit_rel_links():
yield url
for url in self.scraped_rel_links():
yield url
def explicit_rel_links(self, rels=('homepage', 'download')):
"""Yields all links with the given relations"""
rels = set(rels)
for anchor in self.parsed.findall(".//a"):
if anchor.get("rel") and anchor.get("href"):
found_rels = set(anchor.get("rel").split())
# Determine the intersection between what rels were found and
# what rels were being looked for
if found_rels & rels:
href = anchor.get("href")
url = self.clean_link(urlparse.urljoin(self.base_url, href))
yield Link(url, self, trusted=False)
def scraped_rel_links(self):
# Can we get rid of this horrible horrible method?
for regex in (self._homepage_re, self._download_re):
match = regex.search(self.content)
if not match:
continue
href_match = self._href_re.search(self.content, pos=match.end())
if not href_match:
continue
url = href_match.group(1) or href_match.group(2) or href_match.group(3)
if not url:
continue
url = self.clean_link(urlparse.urljoin(self.base_url, url))
yield Link(url, self, trusted=False, _deprecated_regex=True)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
def clean_link(self, url):
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self._clean_re.sub(
lambda match: '%%%2x' % ord(match.group(0)), url)
class Link(object):
def __init__(self, url, comes_from=None, internal=None, trusted=None,
_deprecated_regex=False):
self.url = url
self.comes_from = comes_from
self.internal = internal
self.trusted = trusted
self._deprecated_regex = _deprecated_regex
def __str__(self):
if self.comes_from:
return '%s (from %s)' % (self.url, self.comes_from)
else:
return str(self.url)
def __repr__(self):
return '<Link %s>' % self
def __eq__(self, other):
return self.url == other.url
def __ne__(self, other):
return self.url != other.url
def __lt__(self, other):
return self.url < other.url
def __le__(self, other):
return self.url <= other.url
def __gt__(self, other):
return self.url > other.url
def __ge__(self, other):
return self.url >= other.url
def __hash__(self):
return hash(self.url)
@property
def filename(self):
_, netloc, path, _, _ = urlparse.urlsplit(self.url)
name = posixpath.basename(path.rstrip('/')) or netloc
assert name, ('URL %r produced no filename' % self.url)
return name
@property
def scheme(self):
return urlparse.urlsplit(self.url)[0]
@property
def path(self):
return urlparse.urlsplit(self.url)[2]
def splitext(self):
return splitext(posixpath.basename(self.path.rstrip('/')))
@property
def ext(self):
return self.splitext()[1]
@property
def url_without_fragment(self):
scheme, netloc, path, query, fragment = urlparse.urlsplit(self.url)
return urlparse.urlunsplit((scheme, netloc, path, query, None))
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
@property
def egg_fragment(self):
match = self._egg_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_hash_re = re.compile(r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)')
@property
def hash(self):
match = self._hash_re.search(self.url)
if match:
return match.group(2)
return None
@property
def hash_name(self):
match = self._hash_re.search(self.url)
if match:
return match.group(1)
return None
@property
def show_url(self):
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
@property
def verifiable(self):
"""
Returns True if this link can be verified after download, False if it
cannot, and None if we cannot determine.
"""
trusted = self.trusted or getattr(self.comes_from, "trusted", None)
if trusted is not None and trusted:
# This link came from a trusted source. It *may* be verifiable but
# first we need to see if this page is operating under the new
# API version.
try:
api_version = getattr(self.comes_from, "api_version", None)
api_version = int(api_version)
except (ValueError, TypeError):
api_version = None
if api_version is None or api_version <= 1:
# This link is either trusted, or it came from a trusted,
# however it is not operating under the API version 2 so
# we can't make any claims about if it's safe or not
return
if self.hash:
# This link came from a trusted source and it has a hash, so we
# can consider it safe.
return True
else:
# This link came from a trusted source, using the new API
# version, and it does not have a hash. It is NOT verifiable
return False
elif trusted is not None:
# This link came from an untrusted source and we cannot trust it
return False
# An object to represent the "link" for the installed version of a requirement.
# Using Inf as the url makes it sort higher.
INSTALLED_VERSION = Link(Inf)
def get_requirement_from_url(url):
"""Get a requirement from the URL, if possible. This looks for #egg
in the URL"""
link = Link(url)
egg_info = link.egg_fragment
if not egg_info:
egg_info = splitext(link.filename)[0]
return package_to_requirement(egg_info)
def package_to_requirement(package_name):
"""Translate a name like Foo-1.2 to Foo==1.3"""
match = re.search(r'^(.*?)-(dev|\d.*)', package_name)
if match:
name = match.group(1)
version = match.group(2)
else:
name = package_name
version = ''
if version:
return '%s==%s' % (name, version)
else:
return name
|
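package_to_requirement() above splits an egg-info style name with the ^(.*?)-(dev|\d.*) pattern; the following self-contained sketch reproduces just that split, with hypothetical package names:

import re

def package_to_requirement(package_name):
    # Same regex as the pip helper above: name, then '-dev' or '-<digits...>'.
    match = re.search(r'^(.*?)-(dev|\d.*)', package_name)
    if match:
        return '%s==%s' % (match.group(1), match.group(2))
    return package_name

assert package_to_requirement('Foo-1.2') == 'Foo==1.2'
assert package_to_requirement('Foo-dev') == 'Foo==dev'
assert package_to_requirement('Foo') == 'Foo'
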
Architektor/PySnip
|
refs/heads/master
|
venv/lib/python2.7/site-packages/twisted/internet/test/test_epollreactor.py
|
39
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.epollreactor}.
"""
from __future__ import division, absolute_import
from twisted.trial.unittest import TestCase
try:
from twisted.internet.epollreactor import _ContinuousPolling
except ImportError:
_ContinuousPolling = None
from twisted.internet.task import Clock
from twisted.internet.error import ConnectionDone
class Descriptor(object):
"""
Records reads and writes, as if it were a C{FileDescriptor}.
"""
def __init__(self):
self.events = []
def fileno(self):
return 1
def doRead(self):
self.events.append("read")
def doWrite(self):
self.events.append("write")
def connectionLost(self, reason):
reason.trap(ConnectionDone)
self.events.append("lost")
class ContinuousPollingTests(TestCase):
"""
L{_ContinuousPolling} can be used to read and write from C{FileDescriptor}
objects.
"""
def test_addReader(self):
"""
Adding a reader when there was previously no reader starts up a
C{LoopingCall}.
"""
poller = _ContinuousPolling(Clock())
self.assertEqual(poller._loop, None)
reader = object()
self.assertFalse(poller.isReading(reader))
poller.addReader(reader)
self.assertNotEqual(poller._loop, None)
self.assertTrue(poller._loop.running)
self.assertIs(poller._loop.clock, poller._reactor)
self.assertTrue(poller.isReading(reader))
def test_addWriter(self):
"""
Adding a writer when there was previously no writer starts up a
C{LoopingCall}.
"""
poller = _ContinuousPolling(Clock())
self.assertEqual(poller._loop, None)
writer = object()
self.assertFalse(poller.isWriting(writer))
poller.addWriter(writer)
self.assertNotEqual(poller._loop, None)
self.assertTrue(poller._loop.running)
self.assertIs(poller._loop.clock, poller._reactor)
self.assertTrue(poller.isWriting(writer))
def test_removeReader(self):
"""
Removing a reader stops the C{LoopingCall}.
"""
poller = _ContinuousPolling(Clock())
reader = object()
poller.addReader(reader)
poller.removeReader(reader)
self.assertEqual(poller._loop, None)
self.assertEqual(poller._reactor.getDelayedCalls(), [])
self.assertFalse(poller.isReading(reader))
def test_removeWriter(self):
"""
Removing a writer stops the C{LoopingCall}.
"""
poller = _ContinuousPolling(Clock())
writer = object()
poller.addWriter(writer)
poller.removeWriter(writer)
self.assertEqual(poller._loop, None)
self.assertEqual(poller._reactor.getDelayedCalls(), [])
self.assertFalse(poller.isWriting(writer))
def test_removeUnknown(self):
"""
Removing unknown readers and writers silently does nothing.
"""
poller = _ContinuousPolling(Clock())
poller.removeWriter(object())
poller.removeReader(object())
def test_multipleReadersAndWriters(self):
"""
Adding multiple readers and writers results in a single
C{LoopingCall}.
"""
poller = _ContinuousPolling(Clock())
writer = object()
poller.addWriter(writer)
self.assertNotEqual(poller._loop, None)
poller.addWriter(object())
self.assertNotEqual(poller._loop, None)
poller.addReader(object())
self.assertNotEqual(poller._loop, None)
poller.addReader(object())
poller.removeWriter(writer)
self.assertNotEqual(poller._loop, None)
self.assertTrue(poller._loop.running)
self.assertEqual(len(poller._reactor.getDelayedCalls()), 1)
def test_readerPolling(self):
"""
Adding a reader causes its C{doRead} to be called every poll
interval.
"""
reactor = Clock()
poller = _ContinuousPolling(reactor)
desc = Descriptor()
poller.addReader(desc)
self.assertEqual(desc.events, [])
reactor.advance(0.00001)
self.assertEqual(desc.events, ["read"])
reactor.advance(0.00001)
self.assertEqual(desc.events, ["read", "read"])
reactor.advance(0.00001)
self.assertEqual(desc.events, ["read", "read", "read"])
def test_writerPolling(self):
"""
Adding a writer causes its C{doWrite} to be called every poll
interval.
"""
reactor = Clock()
poller = _ContinuousPolling(reactor)
desc = Descriptor()
poller.addWriter(desc)
self.assertEqual(desc.events, [])
reactor.advance(0.001)
self.assertEqual(desc.events, ["write"])
reactor.advance(0.001)
self.assertEqual(desc.events, ["write", "write"])
reactor.advance(0.001)
self.assertEqual(desc.events, ["write", "write", "write"])
def test_connectionLostOnRead(self):
"""
If a C{doRead} returns a value indicating disconnection,
C{connectionLost} is called on it.
"""
reactor = Clock()
poller = _ContinuousPolling(reactor)
desc = Descriptor()
desc.doRead = lambda: ConnectionDone()
poller.addReader(desc)
self.assertEqual(desc.events, [])
reactor.advance(0.001)
self.assertEqual(desc.events, ["lost"])
def test_connectionLostOnWrite(self):
"""
If a C{doWrite} returns a value indicating disconnection,
C{connectionLost} is called on it.
"""
reactor = Clock()
poller = _ContinuousPolling(reactor)
desc = Descriptor()
desc.doWrite = lambda: ConnectionDone()
poller.addWriter(desc)
self.assertEqual(desc.events, [])
reactor.advance(0.001)
self.assertEqual(desc.events, ["lost"])
def test_removeAll(self):
"""
L{_ContinuousPolling.removeAll} removes all descriptors and returns
the readers and writers.
"""
poller = _ContinuousPolling(Clock())
reader = object()
writer = object()
both = object()
poller.addReader(reader)
poller.addReader(both)
poller.addWriter(writer)
poller.addWriter(both)
removed = poller.removeAll()
self.assertEqual(poller.getReaders(), [])
self.assertEqual(poller.getWriters(), [])
self.assertEqual(len(removed), 3)
self.assertEqual(set(removed), set([reader, writer, both]))
def test_getReaders(self):
"""
L{_ContinuousPolling.getReaders} returns a list of the read
descriptors.
"""
poller = _ContinuousPolling(Clock())
reader = object()
poller.addReader(reader)
self.assertIn(reader, poller.getReaders())
def test_getWriters(self):
"""
L{_ContinuousPolling.getWriters} returns a list of the write
descriptors.
"""
poller = _ContinuousPolling(Clock())
writer = object()
poller.addWriter(writer)
self.assertIn(writer, poller.getWriters())
if _ContinuousPolling is None:
skip = "epoll not supported in this environment."
|
Zord13appdesa/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/web/test/test_soap.py
|
55
|
#
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""Test SOAP support."""
try:
import SOAPpy
except ImportError:
SOAPpy = None
class SOAPPublisher: pass
else:
from twisted.web import soap
SOAPPublisher = soap.SOAPPublisher
from twisted.trial import unittest
from twisted.web import server, error
from twisted.internet import reactor, defer
class Test(SOAPPublisher):
def soap_add(self, a, b):
return a + b
def soap_kwargs(self, a=1, b=2):
return a + b
soap_kwargs.useKeywords=True
def soap_triple(self, string, num):
return [string, num, None]
def soap_struct(self):
return SOAPpy.structType({"a": "c"})
def soap_defer(self, x):
return defer.succeed(x)
def soap_deferFail(self):
return defer.fail(ValueError())
def soap_fail(self):
raise RuntimeError
def soap_deferFault(self):
return defer.fail(ValueError())
def soap_complex(self):
return {"a": ["b", "c", 12, []], "D": "foo"}
def soap_dict(self, map, key):
return map[key]
class SOAPTestCase(unittest.TestCase):
def setUp(self):
self.publisher = Test()
self.p = reactor.listenTCP(0, server.Site(self.publisher),
interface="127.0.0.1")
self.port = self.p.getHost().port
def tearDown(self):
return self.p.stopListening()
def proxy(self):
return soap.Proxy("http://127.0.0.1:%d/" % self.port)
def testResults(self):
inputOutput = [
("add", (2, 3), 5),
("defer", ("a",), "a"),
("dict", ({"a": 1}, "a"), 1),
("triple", ("a", 1), ["a", 1, None])]
dl = []
for meth, args, outp in inputOutput:
d = self.proxy().callRemote(meth, *args)
d.addCallback(self.assertEquals, outp)
dl.append(d)
# SOAPpy kinda blows.
d = self.proxy().callRemote('complex')
d.addCallback(lambda result: result._asdict())
d.addCallback(self.assertEquals, {"a": ["b", "c", 12, []], "D": "foo"})
dl.append(d)
# We now return to our regularly scheduled program, already in progress.
return defer.DeferredList(dl, fireOnOneErrback=True)
def testMethodNotFound(self):
"""
Check that a non-existing method returns error 500.
"""
d = self.proxy().callRemote('doesntexist')
self.assertFailure(d, error.Error)
def cb(err):
self.assertEquals(int(err.status), 500)
d.addCallback(cb)
return d
def testLookupFunction(self):
"""
Test lookupFunction method on publisher, to see available remote
methods.
"""
self.assertTrue(self.publisher.lookupFunction("add"))
self.assertTrue(self.publisher.lookupFunction("fail"))
self.assertFalse(self.publisher.lookupFunction("foobar"))
if not SOAPpy:
SOAPTestCase.skip = "SOAPpy not installed"
|
KitKatXperience/platform_external_chromium_org
|
refs/heads/kk
|
chrome/test/functional/chromeos_ephemeral.py
|
42
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
import pyauto_functional # Must come before pyauto (and thus, policy_base).
import policy_base
sys.path.append('/usr/local') # Required to import autotest libs.
from autotest.cros import constants
from autotest.cros import cryptohome
class ChromeosEphemeral(policy_base.PolicyTestBase):
"""Tests a policy that makes users ephemeral.
When this policy is enabled, no persistent information in the form of
cryptohome shadow directories or local state prefs should be created for
users. Additionally, any persistent information previously accumulated should
be cleared when a user first logs in after enabling the policy."""
_usernames = ('alice@example.com', 'bob@example.com')
def _SetEphemeralUsersEnabled(self, enabled):
"""Sets the ephemeral users device policy.
The show_user_names policy is set to False to ensure that even if the local
state is not being automatically cleared, the login screen never shows user
pods. This is required by the Login browser automation call.
"""
self.SetDevicePolicy({'ephemeral_users_enabled': enabled,
'show_user_names': False})
def _DoesVaultDirectoryExist(self, user_index):
user_hash = cryptohome.get_user_hash(self._usernames[user_index])
return os.path.exists(os.path.join(constants.SHADOW_ROOT, user_hash))
def _AssertLocalStatePrefsSet(self, user_indexes):
expected = sorted([self._usernames[index] for index in user_indexes])
# The OAuthTokenStatus pref is populated asynchronously. Checking whether it
# is set would lead to an ugly race.
for pref in ['LoggedInUsers', 'UserImages', 'UserDisplayEmail', ]:
actual = sorted(self.GetLocalStatePrefsInfo().Prefs(pref))
self.assertEqual(actual, expected,
msg='Expected to find prefs in local state for users.')
def _AssertLocalStatePrefsEmpty(self):
for pref in ['LoggedInUsers',
'UserImages',
'UserDisplayEmail',
'OAuthTokenStatus']:
self.assertFalse(self.GetLocalStatePrefsInfo().Prefs(pref),
msg='Expected to not find prefs in local state for any user.')
def _AssertVaultDirectoryExists(self, user_index):
self.assertTrue(self._DoesVaultDirectoryExist(user_index=user_index),
msg='Expected vault shadow directory to exist.')
def _AssertVaultDirectoryDoesNotExist(self, user_index):
self.assertFalse(self._DoesVaultDirectoryExist(user_index=user_index),
msg='Expected vault shadow directory to not exist.')
def _AssertVaultMounted(self, user_index, ephemeral):
if ephemeral:
device_regex = constants.CRYPTOHOME_DEV_REGEX_REGULAR_USER_EPHEMERAL
fs_regex = constants.CRYPTOHOME_FS_REGEX_TMPFS
else:
device_regex = constants.CRYPTOHOME_DEV_REGEX_REGULAR_USER_SHADOW
fs_regex = constants.CRYPTOHOME_FS_REGEX_ANY
self.assertTrue(
cryptohome.is_vault_mounted(device_regex=device_regex,
fs_regex=fs_regex,
user=self._usernames[user_index],
allow_fail=True),
msg='Expected vault backed by %s to be mounted.' %
('tmpfs' if ephemeral else 'shadow directory'))
def _AssertNoVaultMounted(self):
self.assertFalse(cryptohome.is_vault_mounted(allow_fail=True),
msg='Did not expect any vault to be mounted.')
def Login(self, user_index):
"""Convenience method to login to the usr at the given index."""
self.assertFalse(self.GetLoginInfo()['is_logged_in'],
msg='Expected to be logged out.')
policy_base.PolicyTestBase.Login(self,
self._usernames[user_index],
'dummy_password')
self.assertTrue(self.GetLoginInfo()['is_logged_in'],
msg='Expected to be logged in.')
def testEnablingBeforeSession(self):
"""Checks that a new session can be made ephemeral."""
self.PrepareToWaitForLoginFormReload()
self._SetEphemeralUsersEnabled(True)
self.WaitForLoginFormReload()
self.Login(user_index=0)
self._AssertLocalStatePrefsEmpty()
self._AssertVaultMounted(user_index=0, ephemeral=True)
self.Logout()
self._AssertLocalStatePrefsEmpty()
self._AssertNoVaultMounted()
self._AssertVaultDirectoryDoesNotExist(user_index=0)
def testEnablingDuringSession(self):
"""Checks that an existing non-ephemeral session is not made ephemeral."""
self.PrepareToWaitForLoginFormReload()
self._SetEphemeralUsersEnabled(False)
self.WaitForLoginFormReload()
self.Login(user_index=0)
self._AssertLocalStatePrefsSet(user_indexes=[0])
self._AssertVaultMounted(user_index=0, ephemeral=False)
self._SetEphemeralUsersEnabled(True)
self._AssertLocalStatePrefsSet(user_indexes=[0])
self._AssertVaultMounted(user_index=0, ephemeral=False)
self.Logout()
self._AssertLocalStatePrefsEmpty()
self._AssertNoVaultMounted()
self._AssertVaultDirectoryDoesNotExist(user_index=0)
def testDisablingDuringSession(self):
"""Checks that an existing ephemeral session is not made non-ephemeral."""
self.PrepareToWaitForLoginFormReload()
self._SetEphemeralUsersEnabled(True)
self.WaitForLoginFormReload()
self.Login(user_index=0)
self._AssertVaultMounted(user_index=0, ephemeral=True)
self._SetEphemeralUsersEnabled(False)
self._AssertVaultMounted(user_index=0, ephemeral=True)
self.Logout()
self._AssertLocalStatePrefsEmpty()
self._AssertNoVaultMounted()
self._AssertVaultDirectoryDoesNotExist(user_index=0)
def testEnablingEphemeralUsersCleansUp(self):
"""Checks that persistent information is cleared."""
self.PrepareToWaitForLoginFormReload()
self._SetEphemeralUsersEnabled(False)
self.WaitForLoginFormReload()
self.Login(user_index=0)
self.Logout()
self._AssertLocalStatePrefsSet(user_indexes=[0])
self.Login(user_index=1)
self.Logout()
self._AssertLocalStatePrefsSet(user_indexes=[0, 1])
self._AssertVaultDirectoryExists(user_index=0)
self._AssertVaultDirectoryExists(user_index=1)
self._SetEphemeralUsersEnabled(True)
self.Login(user_index=0)
self._AssertVaultMounted(user_index=0, ephemeral=True)
self.Logout()
self._AssertVaultDirectoryDoesNotExist(user_index=0)
self._AssertVaultDirectoryDoesNotExist(user_index=1)
if __name__ == '__main__':
pyauto_functional.Main()
|
alxgu/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/ovirt/ovirt_instance_type.py
|
12
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_instance_type
short_description: Module to manage Instance Types in oVirt/RHV
version_added: "2.8"
author:
- Martin Necas (@mnecas)
- Ondra Machacek (@machacekondra)
description:
- This module manages whole lifecycle of the Instance Type in oVirt/RHV.
options:
name:
description:
- Name of the Instance Type to manage.
            - If the instance type doesn't exist, C(name) is required. Otherwise C(id) or C(name) can be used.
id:
description:
- ID of the Instance Type to manage.
state:
description:
- Should the Instance Type be present/absent.
            - I(present) state will create/update the instance type and won't change its state if it already exists.
choices: [ absent, present ]
default: present
memory:
description:
- Amount of memory of the Instance Type. Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
- Default value is set by engine.
memory_guaranteed:
description:
- Amount of minimal guaranteed memory of the Instance Type.
Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
- C(memory_guaranteed) parameter can't be lower than C(memory) parameter.
- Default value is set by engine.
nics:
description:
            - List of NICs which should be attached to the Virtual Machine. Each NIC is described by the following dictionary.
            - C(name) - Name of the NIC.
            - C(profile_name) - Profile name where NIC should be attached.
            - C(interface) - Type of the network interface. One of the following: I(virtio), I(e1000), I(rtl8139); the default is I(virtio).
            - C(mac_address) - Custom MAC address of the network interface, by default it's obtained from the MAC pool.
            - NOTE - This parameter is used only when C(state) is I(running) or I(present) and is only able to create NICs.
              To manage the NICs of the instance type in more depth, please use the M(ovirt_nics) module instead.
memory_max:
description:
- Upper bound of instance type memory up to which memory hot-plug can be performed.
Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
- Default value is set by engine.
cpu_cores:
description:
            - Number of virtual CPU cores of the Instance Type.
- Default value is set by oVirt/RHV engine.
cpu_sockets:
description:
            - Number of virtual CPU sockets of the Instance Type.
- Default value is set by oVirt/RHV engine.
cpu_threads:
description:
            - Number of virtual CPU threads of the Instance Type.
- Default value is set by oVirt/RHV engine.
operating_system:
description:
- Operating system of the Instance Type.
- Default value is set by oVirt/RHV engine.
- "Possible values: debian_7, freebsd, freebsdx64, other, other_linux,
other_linux_ppc64, other_ppc64, rhel_3, rhel_4, rhel_4x64, rhel_5, rhel_5x64,
rhel_6, rhel_6x64, rhel_6_ppc64, rhel_7x64, rhel_7_ppc64, sles_11, sles_11_ppc64,
ubuntu_12_04, ubuntu_12_10, ubuntu_13_04, ubuntu_13_10, ubuntu_14_04, ubuntu_14_04_ppc64,
windows_10, windows_10x64, windows_2003, windows_2003x64, windows_2008, windows_2008x64,
windows_2008r2x64, windows_2008R2x64, windows_2012x64, windows_2012R2x64, windows_7,
windows_7x64, windows_8, windows_8x64, windows_xp"
boot_devices:
description:
- List of boot devices which should be used to boot. For example C([ cdrom, hd ]).
- Default value is set by oVirt/RHV engine.
choices: [ cdrom, hd, network ]
serial_console:
description:
- "I(True) enable VirtIO serial console, I(False) to disable it. By default is chosen by oVirt/RHV engine."
type: bool
usb_support:
description:
- "I(True) enable USB support, I(False) to disable it. By default is chosen by oVirt/RHV engine."
type: bool
high_availability:
description:
- If I(yes) Instance Type will be set as highly available.
- If I(no) Instance Type won't be set as highly available.
- If no value is passed, default value is set by oVirt/RHV engine.
type: bool
high_availability_priority:
description:
- Indicates the priority of the instance type inside the run and migration queues.
Instance Type with higher priorities will be started and migrated before instance types with lower
priorities. The value is an integer between 0 and 100. The higher the value, the higher the priority.
- If no value is passed, default value is set by oVirt/RHV engine.
watchdog:
description:
- "Assign watchdog device for the instance type."
- "Watchdogs is a dictionary which can have following values:"
- "C(model) - Model of the watchdog device. For example: I(i6300esb), I(diag288) or I(null)."
- "C(action) - Watchdog action to be performed when watchdog is triggered. For example: I(none), I(reset), I(poweroff), I(pause) or I(dump)."
host:
description:
            - Specify the host where the Instance Type should be running. By default the host is chosen by the engine scheduler.
- This parameter is used only when C(state) is I(running) or I(present).
graphical_console:
description:
- "Assign graphical console to the instance type."
- "Graphical console is a dictionary which can have following values:"
- "C(headless_mode) - If I(true) disable the graphics console for this instance type."
- "C(protocol) - Graphical protocol, a list of I(spice), I(vnc), or both."
description:
description:
- "Description of the instance type."
cpu_mode:
description:
- "CPU mode of the instance type. It can be some of the following: I(host_passthrough), I(host_model) or I(custom)."
- "For I(host_passthrough) CPU type you need to set C(placement_policy) to I(pinned)."
- "If no value is passed, default value is set by oVirt/RHV engine."
rng_device:
description:
- "Random number generator (RNG). You can choose of one the following devices I(urandom), I(random) or I(hwrng)."
- "In order to select I(hwrng), you must have it enabled on cluster first."
- "/dev/urandom is used for cluster version >= 4.1, and /dev/random for cluster version <= 4.0"
rng_bytes:
description:
- "Number of bytes allowed to consume per period."
rng_period:
description:
- "Duration of one period in milliseconds."
placement_policy:
description:
- "The configuration of the instance type's placement policy."
- "Placement policy can be one of the following values:"
- "C(migratable) - Allow manual and automatic migration."
- "C(pinned) - Do not allow migration."
- "C(user_migratable) - Allow manual migration only."
- "If no value is passed, default value is set by oVirt/RHV engine."
cpu_pinning:
description:
- "CPU Pinning topology to map instance type CPU to host CPU."
- "CPU Pinning topology is a list of dictionary which can have following values:"
- "C(cpu) - Number of the host CPU."
- "C(vcpu) - Number of the instance type CPU."
soundcard_enabled:
description:
- "If I(true), the sound card is added to the instance type."
type: bool
smartcard_enabled:
description:
- "If I(true), use smart card authentication."
type: bool
virtio_scsi:
description:
- "If I(true), virtio scsi will be enabled."
type: bool
io_threads:
description:
- "Number of IO threads used by instance type. I(0) means IO threading disabled."
ballooning_enabled:
description:
- "If I(true), use memory ballooning."
- "Memory balloon is a guest device, which may be used to re-distribute / reclaim the host memory
               based on instance type needs in a dynamic way. In this way it's possible to create memory overcommitment states."
type: bool
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create instance type
- name: Create instance type
ovirt_instance_type:
state: present
name: myit
rng_device: hwrng
rng_bytes: 200
rng_period: 200
soundcard_enabled: true
virtio_scsi: true
boot_devices:
- network
# Remove instance type
- ovirt_instance_type:
state: absent
name: myit
# Create instance type with predefined memory and cpu limits.
- ovirt_instance_type:
state: present
name: myit
memory: 2GiB
cpu_cores: 2
cpu_sockets: 2
nics:
- name: nic1
# Enable USB support and serial console
- ovirt_instance_type:
name: myit
usb_support: True
serial_console: True
# Use graphical console with spice and vnc
- name: Create an instance type that has the console configured for both Spice and VNC
ovirt_instance_type:
name: myit
graphical_console:
protocol:
- spice
- vnc
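# Hypothetical example (not from the upstream module docs): attach a watchdog
# and pin the instance type to a specific host. All option names used here are
# documented above; the values are illustrative only.
- name: Create instance type with a watchdog and pinned placement
  ovirt_instance_type:
    name: myit
    watchdog:
      model: i6300esb
      action: reset
    placement_policy: pinned
    host: myhost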
'''
RETURN = '''
id:
description: ID of the instance type which is managed
returned: On success if instance type is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
instancetype:
description: "Dictionary of all the instance type attributes. instance type attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/instance_type."
returned: On success if instance type is found.
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
import traceback
from ansible.module_utils.ovirt import (
BaseModule,
check_params,
check_sdk,
convert_to_bytes,
create_connection,
equal,
get_dict_of_struct,
get_entity,
get_link_name,
get_id_by_name,
ovirt_full_argument_spec,
search_by_attributes,
search_by_name,
wait,
)
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
class InstanceTypeModule(BaseModule):
def build_entity(self):
return otypes.InstanceType(
id=self.param('id'),
name=self.param('name'),
console=(
otypes.Console(enabled=self.param('serial_console'))
) if self.param('serial_console') is not None else None,
usb=(
otypes.Usb(enabled=self.param('usb_support'))
) if self.param('usb_support') is not None else None,
high_availability=otypes.HighAvailability(
enabled=self.param('high_availability'),
priority=self.param('high_availability_priority'),
) if self.param('high_availability') is not None or self.param('high_availability_priority') else None,
cpu=otypes.Cpu(
topology=otypes.CpuTopology(
cores=self.param('cpu_cores'),
sockets=self.param('cpu_sockets'),
threads=self.param('cpu_threads'),
) if any((
self.param('cpu_cores'),
self.param('cpu_sockets'),
self.param('cpu_threads')
)) else None,
cpu_tune=otypes.CpuTune(
vcpu_pins=[
otypes.VcpuPin(vcpu=int(pin['vcpu']), cpu_set=str(pin['cpu'])) for pin in self.param('cpu_pinning')
],
) if self.param('cpu_pinning') else None,
mode=otypes.CpuMode(self.param('cpu_mode')) if self.param(
'cpu_mode') else None,
) if any((
self.param('cpu_cores'),
self.param('cpu_sockets'),
self.param('cpu_threads'),
self.param('cpu_mode'),
self.param('cpu_pinning')
)) else None,
os=otypes.OperatingSystem(
type=self.param('operating_system'),
boot=otypes.Boot(
devices=[
otypes.BootDevice(dev) for dev in self.param('boot_devices')
],
) if self.param('boot_devices') else None
),
rng_device=otypes.RngDevice(
source=otypes.RngSource(self.param('rng_device')),
rate=otypes.Rate(
bytes=self.param('rng_bytes'),
period=self.param('rng_period')
)
) if self.param('rng_device') else None,
memory=convert_to_bytes(
self.param('memory')
) if self.param('memory') else None,
virtio_scsi=otypes.VirtioScsi(
enabled=self.param('virtio_scsi')
) if self.param('virtio_scsi') else None,
memory_policy=otypes.MemoryPolicy(
guaranteed=convert_to_bytes(self.param('memory_guaranteed')),
ballooning=self.param('ballooning_enabled'),
max=convert_to_bytes(self.param('memory_max')),
) if any((
self.param('memory_guaranteed'),
self.param('ballooning_enabled') is not None,
self.param('memory_max')
)) else None,
description=self.param('description'),
placement_policy=otypes.VmPlacementPolicy(
affinity=otypes.VmAffinity(self.param('placement_policy')),
hosts=[
otypes.Host(name=self.param('host')),
] if self.param('host') else None,
) if self.param('placement_policy') else None,
soundcard_enabled=self.param('soundcard_enabled'),
display=otypes.Display(
smartcard_enabled=self.param('smartcard_enabled')
) if self.param('smartcard_enabled') is not None else None,
io=otypes.Io(
threads=self.param('io_threads'),
) if self.param('io_threads') is not None else None,
)
def __attach_watchdog(self, entity):
watchdogs_service = self._service.service(entity.id).watchdogs_service()
watchdog = self.param('watchdog')
if watchdog is not None:
current_watchdog = next(iter(watchdogs_service.list()), None)
if watchdog.get('model') is None and current_watchdog:
watchdogs_service.watchdog_service(current_watchdog.id).remove()
return True
elif watchdog.get('model') is not None and current_watchdog is None:
watchdogs_service.add(
otypes.Watchdog(
model=otypes.WatchdogModel(watchdog.get('model').lower()),
action=otypes.WatchdogAction(watchdog.get('action')),
)
)
return True
elif current_watchdog is not None:
if (
str(current_watchdog.model).lower() != watchdog.get('model').lower() or
str(current_watchdog.action).lower() != watchdog.get('action').lower()
):
watchdogs_service.watchdog_service(current_watchdog.id).update(
otypes.Watchdog(
model=otypes.WatchdogModel(watchdog.get('model')),
action=otypes.WatchdogAction(watchdog.get('action')),
)
)
return True
return False
def __get_vnic_profile_id(self, nic):
"""
        Return the VNIC profile ID looked up by its name. Because there can be
        multiple VNIC profiles with the same name, the cluster is used as an additional filter criterion.
"""
vnics_service = self._connection.system_service().vnic_profiles_service()
clusters_service = self._connection.system_service().clusters_service()
cluster = search_by_name(clusters_service, self.param('cluster'))
profiles = [
profile for profile in vnics_service.list()
if profile.name == nic.get('profile_name')
]
cluster_networks = [
net.id for net in self._connection.follow_link(cluster.networks)
]
try:
return next(
profile.id for profile in profiles
if profile.network.id in cluster_networks
)
except StopIteration:
raise Exception(
"Profile '%s' was not found in cluster '%s'" % (
nic.get('profile_name'),
self.param('cluster')
)
)
def __attach_nics(self, entity):
# Attach NICs to instance type, if specified:
nics_service = self._service.service(entity.id).nics_service()
for nic in self.param('nics'):
if search_by_name(nics_service, nic.get('name')) is None:
if not self._module.check_mode:
nics_service.add(
otypes.Nic(
name=nic.get('name'),
interface=otypes.NicInterface(
nic.get('interface', 'virtio')
),
vnic_profile=otypes.VnicProfile(
id=self.__get_vnic_profile_id(nic),
) if nic.get('profile_name') else None,
mac=otypes.Mac(
address=nic.get('mac_address')
) if nic.get('mac_address') else None,
)
)
self.changed = True
def __attach_graphical_console(self, entity):
graphical_console = self.param('graphical_console')
if not graphical_console:
return False
it_service = self._service.instance_type_service(entity.id)
gcs_service = it_service.graphics_consoles_service()
graphical_consoles = gcs_service.list()
# Remove all graphical consoles if there are any:
if bool(graphical_console.get('headless_mode')):
if not self._module.check_mode:
for gc in graphical_consoles:
gcs_service.console_service(gc.id).remove()
return len(graphical_consoles) > 0
        # If there are no graphical consoles yet, add the requested ones:
protocol = graphical_console.get('protocol')
if isinstance(protocol, str):
protocol = [protocol]
current_protocols = [str(gc.protocol) for gc in graphical_consoles]
if not current_protocols:
if not self._module.check_mode:
for p in protocol:
gcs_service.add(
otypes.GraphicsConsole(
protocol=otypes.GraphicsType(p),
)
)
return True
# Update consoles:
if sorted(protocol) != sorted(current_protocols):
if not self._module.check_mode:
for gc in graphical_consoles:
gcs_service.console_service(gc.id).remove()
for p in protocol:
gcs_service.add(
otypes.GraphicsConsole(
protocol=otypes.GraphicsType(p),
)
)
return True
def post_update(self, entity):
self.post_present(entity.id)
def post_present(self, entity_id):
entity = self._service.service(entity_id).get()
        self.changed = bool(self.__attach_nics(entity)) or self.changed
        self.changed = bool(self.__attach_watchdog(entity)) or self.changed
        self.changed = bool(self.__attach_graphical_console(entity)) or self.changed
def update_check(self, entity):
cpu_mode = getattr(entity.cpu, 'mode')
it_display = entity.display
return (
not self.param('kernel_params_persist') and
equal(convert_to_bytes(self.param('memory_guaranteed')), entity.memory_policy.guaranteed) and
equal(convert_to_bytes(self.param('memory_max')), entity.memory_policy.max) and
equal(self.param('cpu_cores'), entity.cpu.topology.cores) and
equal(self.param('cpu_sockets'), entity.cpu.topology.sockets) and
equal(self.param('cpu_threads'), entity.cpu.topology.threads) and
equal(self.param('cpu_mode'), str(cpu_mode) if cpu_mode else None) and
equal(self.param('type'), str(entity.type)) and
equal(self.param('name'), str(entity.name)) and
equal(self.param('operating_system'), str(entity.os.type)) and
equal(self.param('soundcard_enabled'), entity.soundcard_enabled) and
equal(self.param('smartcard_enabled'), getattr(it_display, 'smartcard_enabled', False)) and
equal(self.param('io_threads'), entity.io.threads) and
equal(self.param('ballooning_enabled'), entity.memory_policy.ballooning) and
equal(self.param('serial_console'), getattr(entity.console, 'enabled', None)) and
equal(self.param('usb_support'), entity.usb.enabled) and
            equal(self.param('virtio_scsi'), getattr(entity.virtio_scsi, 'enabled', None)) and
equal(self.param('high_availability'), entity.high_availability.enabled) and
equal(self.param('high_availability_priority'), entity.high_availability.priority) and
equal(self.param('boot_devices'), [str(dev) for dev in getattr(entity.os.boot, 'devices', [])]) and
equal(self.param('description'), entity.description) and
equal(self.param('rng_device'), str(entity.rng_device.source) if entity.rng_device else None) and
equal(self.param('rng_bytes'), entity.rng_device.rate.bytes if entity.rng_device else None) and
equal(self.param('rng_period'), entity.rng_device.rate.period if entity.rng_device else None) and
equal(self.param('placement_policy'), str(entity.placement_policy.affinity) if entity.placement_policy else None)
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(type='str', default='present',
choices=['absent', 'present']),
name=dict(type='str'),
id=dict(type='str'),
memory=dict(type='str'),
memory_guaranteed=dict(type='str'),
memory_max=dict(type='str'),
cpu_sockets=dict(type='int'),
cpu_cores=dict(type='int'),
cpu_threads=dict(type='int'),
operating_system=dict(type='str'),
boot_devices=dict(type='list', choices=['cdrom', 'hd', 'network']),
serial_console=dict(type='bool'),
usb_support=dict(type='bool'),
high_availability=dict(type='bool'),
high_availability_priority=dict(type='int'),
watchdog=dict(type='dict'),
host=dict(type='str'),
graphical_console=dict(type='dict'),
description=dict(type='str'),
cpu_mode=dict(type='str'),
rng_device=dict(type='str'),
rng_bytes=dict(type='int', default=None),
rng_period=dict(type='int', default=None),
placement_policy=dict(type='str'),
cpu_pinning=dict(type='list'),
soundcard_enabled=dict(type='bool', default=None),
virtio_scsi=dict(type='bool', default=None),
smartcard_enabled=dict(type='bool', default=None),
io_threads=dict(type='int', default=None),
nics=dict(type='list', default=[]),
ballooning_enabled=dict(type='bool', default=None),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=[['id', 'name']],
)
check_sdk(module)
check_params(module)
try:
state = module.params['state']
auth = module.params.pop('auth')
connection = create_connection(auth)
its_service = connection.system_service().instance_types_service()
its_module = InstanceTypeModule(
connection=connection,
module=module,
service=its_service,
)
it = its_module.search_entity()
if state == 'present':
ret = its_module.create(
entity=it
)
its_module.post_present(ret['id'])
ret['changed'] = its_module.changed
elif state == 'absent':
ret = its_module.remove()
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
|
mindbender-studio/setup
|
refs/heads/master
|
bin/windows/python36/Lib/site-packages/pip/_vendor/packaging/utils.py
|
1126
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import re
_canonicalize_regex = re.compile(r"[-_.]+")
def canonicalize_name(name):
# This is taken from PEP 503.
return _canonicalize_regex.sub("-", name).lower()
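# Illustrative usage (not part of the upstream file): PEP 503 canonicalization
# collapses runs of "-", "_" and "." into a single "-" and lowercases the name.
if __name__ == "__main__":  # pragma: no cover
    assert canonicalize_name("Django_REST--framework") == "django-rest-framework"
    assert canonicalize_name("zope.interface") == "zope-interface"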
|
azevakin/django-push-notifications
|
refs/heads/master
|
setup.py
|
2
|
#!/usr/bin/env python
import os.path
from distutils.core import setup
README = open(os.path.join(os.path.dirname(__file__), "README.rst")).read()
CLASSIFIERS = [
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Networking",
]
import push_notifications
setup(
name="django-push-notifications",
packages=[
"push_notifications",
"push_notifications/migrations",
"push_notifications/management",
"push_notifications/management/commands",
],
author=push_notifications.__author__,
author_email=push_notifications.__email__,
classifiers=CLASSIFIERS,
description="Send push notifications to mobile devices through GCM or APNS in Django.",
download_url="https://github.com/jleclanche/django-push-notifications/tarball/master",
long_description=README,
url="https://github.com/jleclanche/django-push-notifications",
version=push_notifications.__version__,
)
|
unaizalakain/django
|
refs/heads/master
|
tests/admin_checks/tests.py
|
118
|
from __future__ import unicode_literals
from django import forms
from django.contrib import admin
from django.contrib.admin import AdminSite
from django.contrib.contenttypes.admin import GenericStackedInline
from django.core import checks
from django.test import SimpleTestCase, override_settings
from .models import Album, Book, City, Influence, Song, State, TwoAlbumFKAndAnE
class SongForm(forms.ModelForm):
pass
class ValidFields(admin.ModelAdmin):
form = SongForm
fields = ['title']
class ValidFormFieldsets(admin.ModelAdmin):
def get_form(self, request, obj=None, **kwargs):
class ExtraFieldForm(SongForm):
name = forms.CharField(max_length=50)
return ExtraFieldForm
fieldsets = (
(None, {
'fields': ('name',),
}),
)
class MyAdmin(admin.ModelAdmin):
def check(self, **kwargs):
return ['error!']
@override_settings(
SILENCED_SYSTEM_CHECKS=['fields.W342'], # ForeignKey(unique=True)
INSTALLED_APPS=['django.contrib.auth', 'django.contrib.contenttypes', 'admin_checks']
)
class SystemChecksTestCase(SimpleTestCase):
@override_settings(DEBUG=True)
def test_checks_are_performed(self):
admin.site.register(Song, MyAdmin)
try:
errors = checks.run_checks()
expected = ['error!']
self.assertEqual(errors, expected)
finally:
admin.site.unregister(Song)
admin.sites.system_check_errors = []
@override_settings(DEBUG=True)
def test_custom_adminsite(self):
class CustomAdminSite(admin.AdminSite):
pass
custom_site = CustomAdminSite()
custom_site.register(Song, MyAdmin)
try:
errors = checks.run_checks()
expected = ['error!']
self.assertEqual(errors, expected)
finally:
custom_site.unregister(Song)
admin.sites.system_check_errors = []
def test_field_name_not_in_list_display(self):
class SongAdmin(admin.ModelAdmin):
list_editable = ["original_release"]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'list_editable[0]' refers to 'original_release', "
"which is not contained in 'list_display'.",
hint=None,
obj=SongAdmin,
id='admin.E122',
)
]
self.assertEqual(errors, expected)
def test_readonly_and_editable(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ["original_release"]
list_display = ["pk", "original_release"]
list_editable = ["original_release"]
fieldsets = [
(None, {
"fields": ["title", "original_release"],
}),
]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
("The value of 'list_editable[0]' refers to 'original_release', "
"which is not editable through the admin."),
hint=None,
obj=SongAdmin,
id='admin.E125',
)
]
self.assertEqual(errors, expected)
def test_editable(self):
class SongAdmin(admin.ModelAdmin):
list_display = ["pk", "title"]
list_editable = ["title"]
fieldsets = [
(None, {
"fields": ["title", "original_release"],
}),
]
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_custom_modelforms_with_fields_fieldsets(self):
"""
        Regression test for #8027: custom ModelForms with fields/fieldsets
"""
errors = ValidFields(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_custom_get_form_with_fieldsets(self):
"""
Ensure that the fieldsets checks are skipped when the ModelAdmin.get_form() method
is overridden.
Refs #19445.
"""
errors = ValidFormFieldsets(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_fieldsets_fields_non_tuple(self):
"""
Tests for a tuple/list for the first fieldset's fields.
"""
class NotATupleAdmin(admin.ModelAdmin):
list_display = ["pk", "title"]
list_editable = ["title"]
fieldsets = [
(None, {
"fields": "title" # not a tuple
}),
]
errors = NotATupleAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fieldsets[0][1]['fields']' must be a list or tuple.",
hint=None,
obj=NotATupleAdmin,
id='admin.E008',
)
]
self.assertEqual(errors, expected)
def test_nonfirst_fieldset(self):
"""
Tests for a tuple/list for the second fieldset's fields.
"""
class NotATupleAdmin(admin.ModelAdmin):
fieldsets = [
(None, {
"fields": ("title",)
}),
('foo', {
"fields": "author" # not a tuple
}),
]
errors = NotATupleAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fieldsets[1][1]['fields']' must be a list or tuple.",
hint=None,
obj=NotATupleAdmin,
id='admin.E008',
)
]
self.assertEqual(errors, expected)
def test_exclude_values(self):
"""
Tests for basic system checks of 'exclude' option values (#12689)
"""
class ExcludedFields1(admin.ModelAdmin):
exclude = 'foo'
errors = ExcludedFields1(Book, AdminSite()).check()
expected = [
checks.Error(
"The value of 'exclude' must be a list or tuple.",
hint=None,
obj=ExcludedFields1,
id='admin.E014',
)
]
self.assertEqual(errors, expected)
def test_exclude_duplicate_values(self):
class ExcludedFields2(admin.ModelAdmin):
exclude = ('name', 'name')
errors = ExcludedFields2(Book, AdminSite()).check()
expected = [
checks.Error(
"The value of 'exclude' contains duplicate field(s).",
hint=None,
obj=ExcludedFields2,
id='admin.E015',
)
]
self.assertEqual(errors, expected)
def test_exclude_in_inline(self):
class ExcludedFieldsInline(admin.TabularInline):
model = Song
exclude = 'foo'
class ExcludedFieldsAlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [ExcludedFieldsInline]
errors = ExcludedFieldsAlbumAdmin(Album, AdminSite()).check()
expected = [
checks.Error(
"The value of 'exclude' must be a list or tuple.",
hint=None,
obj=ExcludedFieldsInline,
id='admin.E014',
)
]
self.assertEqual(errors, expected)
def test_exclude_inline_model_admin(self):
"""
Regression test for #9932 - exclude in InlineModelAdmin should not
contain the ForeignKey field used in ModelAdmin.model
"""
class SongInline(admin.StackedInline):
model = Song
exclude = ['album']
class AlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [SongInline]
errors = AlbumAdmin(Album, AdminSite()).check()
expected = [
checks.Error(
("Cannot exclude the field 'album', because it is the foreign key "
"to the parent model 'admin_checks.Album'."),
hint=None,
obj=SongInline,
id='admin.E201',
)
]
self.assertEqual(errors, expected)
def test_valid_generic_inline_model_admin(self):
"""
Regression test for #22034 - check that generic inlines don't look for
normal ForeignKey relations.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_generic_inline_model_admin_non_generic_model(self):
"""
Ensure that a model without a GenericForeignKey raises problems if it's included
        in a GenericInlineModelAdmin definition.
"""
class BookInline(GenericStackedInline):
model = Book
class SongAdmin(admin.ModelAdmin):
inlines = [BookInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'admin_checks.Book' has no GenericForeignKey.",
hint=None,
obj=BookInline,
id='admin.E301',
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_bad_ct_field(self):
"A GenericInlineModelAdmin raises problems if the ct_field points to a non-existent field."
class InfluenceInline(GenericStackedInline):
model = Influence
ct_field = 'nonexistent'
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'ct_field' references 'nonexistent', which is not a field on 'admin_checks.Influence'.",
hint=None,
obj=InfluenceInline,
id='admin.E302',
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_bad_fk_field(self):
"A GenericInlineModelAdmin raises problems if the ct_fk_field points to a non-existent field."
class InfluenceInline(GenericStackedInline):
model = Influence
ct_fk_field = 'nonexistent'
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'ct_fk_field' references 'nonexistent', which is not a field on 'admin_checks.Influence'.",
hint=None,
obj=InfluenceInline,
id='admin.E303',
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_non_gfk_ct_field(self):
"""
A GenericInlineModelAdmin raises problems if the ct_field points to a
field that isn't part of a GenericForeignKey.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
ct_field = 'name'
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'admin_checks.Influence' has no GenericForeignKey using "
"content type field 'name' and object ID field 'object_id'.",
hint=None,
obj=InfluenceInline,
id='admin.E304',
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_non_gfk_fk_field(self):
"""
A GenericInlineModelAdmin raises problems if the ct_fk_field points to
a field that isn't part of a GenericForeignKey.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
ct_fk_field = 'name'
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'admin_checks.Influence' has no GenericForeignKey using "
"content type field 'content_type' and object ID field 'name'.",
hint=None,
obj=InfluenceInline,
id='admin.E304',
)
]
self.assertEqual(errors, expected)
def test_app_label_in_admin_checks(self):
"""
Regression test for #15669 - Include app label in admin system check messages
"""
class RawIdNonexistingAdmin(admin.ModelAdmin):
raw_id_fields = ('nonexisting',)
errors = RawIdNonexistingAdmin(Album, AdminSite()).check()
expected = [
checks.Error(
"The value of 'raw_id_fields[0]' refers to 'nonexisting', "
"which is not an attribute of 'admin_checks.Album'.",
hint=None,
obj=RawIdNonexistingAdmin,
id='admin.E002',
)
]
self.assertEqual(errors, expected)
def test_fk_exclusion(self):
"""
        Regression test for #11709 - when testing for fk exclusion (when exclude is
        given), make sure fk_name is honored, or things blow up when there is more
than one fk to the parent model.
"""
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
exclude = ("e",)
fk_name = "album1"
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin(Album, AdminSite()).check()
self.assertEqual(errors, [])
def test_inline_self_check(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin(Album, AdminSite()).check()
expected = [
checks.Error(
"'admin_checks.TwoAlbumFKAndAnE' has more than one ForeignKey to 'admin_checks.Album'.",
hint=None,
obj=TwoAlbumFKAndAnEInline,
id='admin.E202',
)
]
self.assertEqual(errors, expected)
def test_inline_with_specified(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
fk_name = "album1"
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin(Album, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title",)
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_on_method(self):
def my_function(obj):
pass
class SongAdmin(admin.ModelAdmin):
readonly_fields = (my_function,)
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_on_modeladmin(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_modeladmin",)
def readonly_method_on_modeladmin(self, obj):
pass
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_dynamic_attribute_on_modeladmin(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("dynamic_method",)
def __getattr__(self, item):
if item == "dynamic_method":
def method(obj):
pass
return method
raise AttributeError
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_method_on_model(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_model",)
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_nonexistent_field(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title", "nonexistent")
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
("The value of 'readonly_fields[1]' is not a callable, an attribute "
"of 'SongAdmin', or an attribute of 'admin_checks.Song'."),
hint=None,
obj=SongAdmin,
id='admin.E035',
)
]
self.assertEqual(errors, expected)
def test_nonexistent_field_on_inline(self):
class CityInline(admin.TabularInline):
model = City
readonly_fields = ['i_dont_exist'] # Missing attribute
errors = CityInline(State, AdminSite()).check()
expected = [
checks.Error(
("The value of 'readonly_fields[0]' is not a callable, an attribute "
"of 'CityInline', or an attribute of 'admin_checks.City'."),
hint=None,
obj=CityInline,
id='admin.E035',
)
]
self.assertEqual(errors, expected)
def test_extra(self):
class SongAdmin(admin.ModelAdmin):
def awesome_song(self, instance):
if instance.title == "Born to Run":
return "Best Ever!"
return "Status unknown."
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_lambda(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = (lambda obj: "test",)
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_graceful_m2m_fail(self):
"""
Regression test for #12203/#12237 - Fail more gracefully when a M2M field that
specifies the 'through' option is included in the 'fields' or the 'fieldsets'
ModelAdmin options.
"""
class BookAdmin(admin.ModelAdmin):
fields = ['authors']
errors = BookAdmin(Book, AdminSite()).check()
expected = [
checks.Error(
("The value of 'fields' cannot include the ManyToManyField 'authors', "
"because that field manually specifies a relationship model."),
hint=None,
obj=BookAdmin,
id='admin.E013',
)
]
self.assertEqual(errors, expected)
def test_cannot_include_through(self):
class FieldsetBookAdmin(admin.ModelAdmin):
fieldsets = (
('Header 1', {'fields': ('name',)}),
('Header 2', {'fields': ('authors',)}),
)
errors = FieldsetBookAdmin(Book, AdminSite()).check()
expected = [
checks.Error(
("The value of 'fieldsets[1][1][\"fields\"]' cannot include the ManyToManyField "
"'authors', because that field manually specifies a relationship model."),
hint=None,
obj=FieldsetBookAdmin,
id='admin.E013',
)
]
self.assertEqual(errors, expected)
def test_nested_fields(self):
class NestedFieldsAdmin(admin.ModelAdmin):
fields = ('price', ('name', 'subtitle'))
errors = NestedFieldsAdmin(Book, AdminSite()).check()
self.assertEqual(errors, [])
def test_nested_fieldsets(self):
class NestedFieldsetAdmin(admin.ModelAdmin):
fieldsets = (
('Main', {'fields': ('price', ('name', 'subtitle'))}),
)
errors = NestedFieldsetAdmin(Book, AdminSite()).check()
self.assertEqual(errors, [])
def test_explicit_through_override(self):
"""
Regression test for #12209 -- If the explicitly provided through model
        is specified as a string, the admin should still be able to use
Model.m2m_field.through
"""
class AuthorsInline(admin.TabularInline):
model = Book.authors.through
class BookAdmin(admin.ModelAdmin):
inlines = [AuthorsInline]
errors = BookAdmin(Book, AdminSite()).check()
self.assertEqual(errors, [])
def test_non_model_fields(self):
"""
        Regression test ensuring ModelAdmin.fields can contain non-model fields
        (this broke with r11737).
"""
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ['title', 'extra_data']
errors = FieldsOnFormOnlyAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_non_model_first_field(self):
"""
        Regression test ensuring ModelAdmin.fields can handle its first element being a
        non-model field (test fix for UnboundLocalError introduced with r16225).
"""
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class Meta:
model = Song
fields = '__all__'
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ['extra_data', 'title']
errors = FieldsOnFormOnlyAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_check_sublists_for_duplicates(self):
class MyModelAdmin(admin.ModelAdmin):
fields = ['state', ['state']]
errors = MyModelAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fields' contains duplicate field(s).",
hint=None,
obj=MyModelAdmin,
id='admin.E006'
)
]
self.assertEqual(errors, expected)
def test_check_fieldset_sublists_for_duplicates(self):
class MyModelAdmin(admin.ModelAdmin):
fieldsets = [
(None, {
'fields': ['title', 'album', ('title', 'album')]
}),
]
errors = MyModelAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"There are duplicate field(s) in 'fieldsets[0][1]'.",
hint=None,
obj=MyModelAdmin,
id='admin.E012'
)
]
self.assertEqual(errors, expected)
def test_list_filter_works_on_through_field_even_when_apps_not_ready(self):
"""
Ensure list_filter can access reverse fields even when the app registry
is not ready; refs #24146.
"""
class BookAdminWithListFilter(admin.ModelAdmin):
list_filter = ['authorsbooks__featured']
# Temporarily pretending apps are not ready yet. This issue can happen
# if the value of 'list_filter' refers to a 'through__field'.
Book._meta.apps.ready = False
try:
errors = BookAdminWithListFilter(Book, AdminSite()).check()
self.assertEqual(errors, [])
finally:
Book._meta.apps.ready = True
|
jiajiax/crosswalk-test-suite
|
refs/heads/master
|
webapi/tct-csp-w3c-tests/csp-py/csp_default-src_self_img_blocked-manual.py
|
30
|
def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
response.headers.set("Content-Security-Policy", "default-src 'self'")
response.headers.set("X-Content-Security-Policy", "default-src 'self'")
response.headers.set("X-WebKit-CSP", "default-src 'self'")
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <yunfeix.hao@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_default-src_self_img_blocked</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#default-src"/>
<meta name="flags" content=""/>
<meta name="assert" content="default-src 'self'"/>
<meta charset="utf-8"/>
</head>
<body>
<p>Test passes if there is <strong>no red</strong>.</p>
<img src='""" + url1 + """/tests/csp/support/red-100x100.png'/>
</body>
</html> """
|
lesina/Hack70
|
refs/heads/master
|
env/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/mbcharsetprober.py
|
2923
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mDistributionAnalyzer = None
self._mCodingSM = None
self._mLastChar = [0, 0]
def reset(self):
CharSetProber.reset(self)
if self._mCodingSM:
self._mCodingSM.reset()
if self._mDistributionAnalyzer:
self._mDistributionAnalyzer.reset()
self._mLastChar = [0, 0]
def get_charset_name(self):
pass
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mDistributionAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
return self._mDistributionAnalyzer.get_confidence()
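# Hypothetical usage sketch (not part of the upstream file): concrete subclasses
# such as SJISProber plug in a coding state machine and a distribution analyzer,
# then consume raw bytes through feed() until enough evidence is gathered.
def _example_probe(raw_bytes):  # pragma: no cover
    from .sjisprober import SJISProber  # assumed sibling module of this package
    prober = SJISProber()
    prober.feed(raw_bytes)
    return prober.get_charset_name(), prober.get_confidence()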
|
sadmansk/servo
|
refs/heads/master
|
components/net/tests/cookie_http_state_utils.py
|
111
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import subprocess
import tempfile
REPO = "https://github.com/abarth/http-state.git"
TEST_FILE = "cookie_http_state.rs"
DOMAIN = "http://home.example.org:8888"
RUST_FN = """
#[test]{should_panic}
fn test_{name}() {{
let r = run("{set_location}",
{set_cookies},
"{location}");
assert_eq!(&r, "{expect}");
}}
"""
SET_COOKIES_INDENT = 18
SHOULD_PANIC = "\n#[should_panic] // Look at cookie_http_state_utils.py if this test fails"
# Those tests should PASS. But until fixes land in servo, keep them failing
FAILING_TESTS = [
"0003", # Waiting for a way to clean expired cookies
"0006", # Waiting for a way to clean expired cookies
"mozilla0001", # Waiting for a way to clean expired cookies
"mozilla0002", # Waiting for a way to clean expired cookies
"mozilla0003", # Waiting for a way to clean expired cookies
"mozilla0005", # Waiting for a way to clean expired cookies
"mozilla0007", # Waiting for a way to clean expired cookies
"mozilla0009", # Waiting for a way to clean expired cookies
"mozilla0010", # Waiting for a way to clean expired cookies
"mozilla0013", # Waiting for a way to clean expired cookies
]
def list_tests(dir):
suffix = "-test"
def keep(name):
return name.endswith(suffix) and not name.startswith("disabled")
tests = [name[:-len(suffix)] for name in os.listdir(dir) if keep(name)]
tests.sort()
return tests
def escape(s):
""" Escape the string `s` so that it can be parsed by rust as a valid
UTF-8 string.
    We can't use only `encode("unicode_escape")` as it produces things that
    rust does not accept ("\\xbf", "\\u6265" for example). So we manually
    convert every character whose code point is 128 or greater to
    \\u{code_point}.
    All other characters are encoded with "unicode_escape" to get escape
    sequences ("\\r" for example), except for `"`, which we escape explicitly
    because our string will be quoted with double quotes.
    Lines are also limited in size, so the string is split every 70 characters
    (leaving room for indentation).
"""
res = ""
last_split = 0
for c in s:
if len(res) - last_split > 70:
res += "\\\n"
last_split = len(res)
o = ord(c)
if o == 34:
res += "\\\""
continue
if o >= 128:
res += "\\u{" + hex(o)[2:] + "}"
else:
res += c.encode("unicode_escape")
return res
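# Illustrative sketch (not part of the upstream script; relies on Python 2 string
# semantics, like the rest of this file): double quotes are backslash-escaped and
# characters with code points >= 128 become \u{...} escapes.
def _escape_example():  # pragma: no cover
    assert escape(u'a"b') == 'a\\"b'
    assert escape(u'caf\xe9') == 'caf\\u{e9}'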
def format_slice_cookies(cookies):
esc_cookies = ['"%s"' % escape(c) for c in cookies]
if sum(len(s) for s in esc_cookies) < 80:
sep = ", "
else:
sep = ",\n" + " " * SET_COOKIES_INDENT
return "&[" + sep.join(esc_cookies) + "]"
def generate_code_for_test(test_dir, name):
if name in FAILING_TESTS:
should_panic = SHOULD_PANIC
else:
should_panic = ""
test_file = os.path.join(test_dir, name + "-test")
expect_file = os.path.join(test_dir, name + "-expected")
set_cookies = []
set_location = DOMAIN + "/cookie-parser?" + name
expect = ""
location = DOMAIN + "/cookie-parser-result?" + name
with open(test_file) as fo:
for line in fo:
line = line.decode("utf-8").rstrip()
prefix = "Set-Cookie: "
if line.startswith(prefix):
set_cookies.append(line[len(prefix):])
prefix = "Location: "
if line.startswith(prefix):
location = line[len(prefix):]
if location.startswith("/"):
location = DOMAIN + location
with open(expect_file) as fo:
for line in fo:
line = line.decode("utf-8").rstrip()
prefix = "Cookie: "
if line.startswith(prefix):
expect = line[len(prefix):]
return RUST_FN.format(name=name.replace('-', '_'),
set_location=escape(set_location),
set_cookies=format_slice_cookies(set_cookies),
should_panic=should_panic,
location=escape(location),
expect=escape(expect))
def update_test_file(cachedir):
workdir = os.path.dirname(os.path.realpath(__file__))
test_file = os.path.join(workdir, TEST_FILE)
# Create the cache dir
if not os.path.isdir(cachedir):
os.makedirs(cachedir)
# Clone or update the repo
repo_dir = os.path.join(cachedir, "http-state")
if os.path.isdir(repo_dir):
args = ["git", "pull", "-f"]
process = subprocess.Popen(args, cwd=repo_dir)
if process.wait() != 0:
print("failed to update the http-state git repo")
return 1
else:
args = ["git", "clone", REPO, repo_dir]
process = subprocess.Popen(args)
if process.wait() != 0:
print("failed to clone the http-state git repo")
return 1
# Truncate the unit test file to remove all existing tests
with open(test_file, "r+") as fo:
while True:
line = fo.readline()
if line.strip() == "// Test listing":
fo.truncate()
fo.flush()
break
if line == "":
print("Failed to find listing delimiter on unit test file")
return 1
# Append all tests to unit test file
tests_dir = os.path.join(repo_dir, "tests", "data", "parser")
with open(test_file, "a") as fo:
for test in list_tests(tests_dir):
fo.write(generate_code_for_test(tests_dir, test).encode("utf-8"))
return 0
if __name__ == "__main__":
update_test_file(tempfile.gettempdir())
|
petercb/aggravator
|
refs/heads/master
|
aggravator/__init__.py
|
1
|
'''
Custom dynamic inventory script for Ansible, in Python.
This script will read in a configuration file either locally or fetched via HTTP and will
output a JSON data structure describing the inventory by merging the files as listed in
the config file.
Files can be in either YAML or JSON format
'''
# pylint: disable=wrong-import-order
# Support python 2 and 3
from __future__ import (absolute_import)
from future import standard_library
standard_library.install_aliases()
from builtins import object # pylint: disable=redefined-builtin
# stdlib
import json
import os
import subprocess
import sys
import tempfile
## This is the python3 name (formerly urlparse); install_aliases() above makes
## this work under python2
from urllib.parse import (urlparse, urljoin) # pylint: disable=import-error
# extras from packages
import ansible
import click
import deepmerge
import dpath.util
import requests
import yaml
# Ansible stuff for Secrets
from ansible.parsing.vault import is_encrypted as is_vault_encrypted
from ansible.parsing.vault import VaultLib
# Ansible utils
from ansible.module_utils._text import to_text
def get_config():
'''Determine location of root config file if none specified'''
self_path = os.path.dirname(os.path.realpath(sys.argv[0]))
check_paths = [
os.path.abspath(os.path.join(self_path, '..', 'etc', 'config.yaml')),
'/etc/aggravator/config.yaml',
'/usr/local/etc/aggravator/config.yaml'
]
for fp in check_paths:
if os.path.isfile(fp):
return fp
return None
def get_environment():
'''Determine the platform/environment name from name of called script'''
if os.path.islink(sys.argv[0]):
return os.path.basename(sys.argv[0])
return None
def create_links(environments, directory):
'''Create symlinks for platform inventories'''
errcount = 0
for ename in environments:
try:
os.symlink(
os.path.relpath(__file__, directory),
os.path.join(directory, ename)
)
except(OSError) as err:
click.echo("This symlink might already exist. Leaving it unchanged. Error: %s" % (err))
errcount += 1
return errcount
class Vault(object):
'''Read an Ansible vault'''
def __init__(self, password):
self._ansible_ver = float('.'.join(ansible.__version__.split('.')[:2]))
self.secret = password.encode('utf-8')
self.vault = VaultLib(self._make_secrets(self.secret))
def _make_secrets(self, secret):
'''make ansible version appropriate secret'''
if self._ansible_ver < 2.4:
return secret
from ansible.constants import DEFAULT_VAULT_ID_MATCH
from ansible.parsing.vault import VaultSecret
return [(DEFAULT_VAULT_ID_MATCH, VaultSecret(secret))]
def decrypt(self, stream):
'''read vault stream and return decrypted'''
return self.vault.decrypt(stream)
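# Hypothetical usage sketch (not part of the upstream module): decrypt a local,
# vault-encrypted file. `fetch_data_local` and `is_vault_encrypted` are the same
# helpers used by fetch_data() below.
def _vault_example(path, password):  # pragma: no cover
    blob = fetch_data_local(path)
    return Vault(password).decrypt(blob) if is_vault_encrypted(blob) else blob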
def fetch_data_remote(url, requestsobj=requests):
'''fetch data from url and return as plaintext'''
response = requestsobj.get(url)
if response.status_code == 404:
raise LookupError("Failed to find data at: {}".format(url))
response.raise_for_status()
return response.text
def fetch_data_local(localfile):
'''Fetch data from local file and return as plaintext'''
this_path = os.path.realpath(os.path.expanduser(localfile))
if not os.path.exists(this_path):
raise IOError("The file %s was not found" % this_path)
try:
f_obj = open(this_path, "rb")
data = f_obj.read().strip()
f_obj.close()
except (OSError, IOError) as err:
raise LookupError("Could not read file %s: %s" % (this_path, err))
return to_text(data, errors='surrogate_or_strict')
def fetch_data(uri, requestsobj=requests, data_type=None, vault_password=None):
'''Fetch data using either local or remote functions'''
uriobj = urlparse(uri)
loader = {
'json': getattr(json, 'loads'),
'yaml': getattr(yaml, 'safe_load')
}
if data_type is None:
# guess the data type from file extension
if uriobj.path.endswith(('.yaml', '.yml')):
data_type = 'yaml'
else:
data_type = os.path.splitext(uriobj.path)[1]
data_type = data_type.lower()
if data_type not in loader:
raise AttributeError("Unsupported data type: {}".format(data_type))
parser = loader[data_type]
if uriobj.scheme in ['file', '']:
data = fetch_data_local(uriobj.path)
elif uriobj.scheme in ['http', 'https']:
data = fetch_data_remote(uri, requestsobj)
else:
raise AttributeError("unsupported URI '{}'".format(uri))
if is_vault_encrypted(data):
if vault_password is None:
return {}
else:
vault = Vault(vault_password)
data = parser(vault.decrypt(data))
else:
temp_data = parser(data)
if 'sops' in temp_data:
with tempfile.NamedTemporaryFile(suffix='.'+data_type) as temp:
temp.write(data)
temp.flush()
data = subprocess.check_output(["sops", "-d", temp.name])
data = parser(data)
data.pop('sops', None)
else:
data = parser(data)
return data
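# Hypothetical usage sketch (not part of the upstream module): the data type is
# guessed from the file extension unless `data_type` is passed explicitly. The
# paths and URLs below are placeholders.
def _fetch_example():  # pragma: no cover
    local = fetch_data('file:///etc/aggravator/config.yaml')
    remote = fetch_data('https://inventory.example.org/groups.json')
    return local, remote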
def raise_for_type(item, types, section):
'''raise an AttributeError if `item` is not of `types`'''
type_names = None
if isinstance(types, tuple):
type_names = [t.__name__ for t in types]
elif isinstance(types, type):
type_names = types.__name__
else:
raise AttributeError(
"Invalid type '{}' for `types` parameter, must be type or tuple of types".format(
type(types).__name__
)
)
if not isinstance(item, types):
if isinstance(types, tuple):
", ".join(types)
raise AttributeError(
"invalid type '{}' in section '{}', must be: {}".format(
type(item).__name__,
section,
type_names
)
)
def convert_host_list_to_dict(inv):
'''
Iterate over the inventory data structure and convert any host groups that
are lists into dictionary form
'''
for group in inv:
if isinstance(inv[group], list):
# needs converting
inv[group] = {'hosts': inv[group]}
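# Minimal illustration (not part of the upstream module): list-style groups are
# rewritten in place into the {'hosts': [...]} form that Ansible expects.
def _convert_example():  # pragma: no cover
    inv = {'web': ['web01', 'web02'], 'db': {'hosts': ['db01']}}
    convert_host_list_to_dict(inv)
    assert inv == {'web': {'hosts': ['web01', 'web02']}, 'db': {'hosts': ['db01']}}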
class Inventory(object):
'''Retrieve Ansible inventory from available sources and return as JSON'''
def __init__(self, uri, env=None, vault_password=None):
self.session = requests.Session()
self.config = fetch_data(uri, self.session)
self.uri = uri
self.env = env
self.vault_password = vault_password
def fetch_environments(self):
'''Fetch a list of environments that are defined upstream'''
return list(self.config.get('environments', {}))
def fetch(self, uri, data_type=None):
'''fetch the requested uri'''
uriobj = urlparse(uri)
# Check if it's an absolute uri, or a relative
if uriobj.scheme == '':
# Unspecified URI type, assume relative to config URI
return fetch_data(
urljoin(str(self.uri), uriobj.path),
self.session,
data_type,
self.vault_password
)
elif uriobj.scheme in ['file', 'http', 'https']:
# supported URI types
if uriobj.path.startswith('/'):
# Absolute path, fetch it
return fetch_data(uri, self.session, data_type, self.vault_password)
else:
# Assume relative to config URI
return fetch_data(
urljoin(self.uri, uriobj.path),
self.session,
data_type,
self.vault_password
)
else:
# Unsupported type
raise AttributeError("Unsupported type '{}'".format(uriobj.scheme))
def generate_inventory(self):
'''Generate inventory by merging hosts and variables'''
# Set the basic structure
my_merger = deepmerge.Merger(
[
(list, ["override"]),
(dict, ["merge"])
],
["override"],
["override"]
)
invdata = {
'_meta': {
'hostvars': {}
},
'all': {
'vars': {
'platform_name': self.env
}
}
}
# start merging
for inc in self.config.get('environments', {}).get(self.env, {}).get('include', []):
convert_host_list_to_dict(invdata)
if isinstance(inc, str):
# Just a plain file listed with no extra properties
# Pull it in and hope there are proper sections
# TODO: add some schema validation
from_file = self.fetch(inc)
invdata = my_merger.merge(invdata, from_file)
elif isinstance(inc, dict):
# Dictionary listing, fetch file in `path` into keyspace `key`
from_file = self.fetch(inc['path'], inc.get('format'))
if 'key' in inc:
# A key space is specified, load the file into it
key = inc['key']
try:
data = dpath.util.get(invdata, key)
data = my_merger.merge(data, from_file)
except KeyError:
dpath.util.new(invdata, key, from_file)
else:
# No keyspace defined, load the file into the root
invdata = my_merger.merge(invdata, from_file)
else:
raise_for_type(inc, (str, dict), ':'.join(['', self.env, 'include']))
return invdata
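# Hypothetical shape of a root config file (not shipped with aggravator) that
# generate_inventory() above understands: each `include` entry is either a plain
# path or a dict with `path`, optional `format` and optional `key` (a dpath-style
# keyspace to merge the file into). File names here are illustrative only.
_EXAMPLE_CONFIG = """
environments:
  prod:
    include:
      - common/groups.yaml
      - path: prod/vars.yml
        key: all/vars
      - path: secrets/prod.vault.yaml
        format: yaml
"""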
@click.command()
@click.option(
'--env', default=get_environment(), envvar='INVENTORY_ENV', show_default=True,
help='specify the platform name to pull inventory for'
)
@click.option(
'--uri', envvar='INVENTORY_URI', show_default=True,
default=get_config(),
help='specify the URI to query for inventory config file, supports file:// and http(s)://'
)
@click.option(
'--vault-password-file', 'vpfile', show_default=True,
type=click.Path(exists=False, dir_okay=False, file_okay=True, readable=True, resolve_path=True),
envvar='VAULT_PASSWORD_FILE', default=os.path.expanduser('~/.vault_pass.txt'),
help='vault password file, if set to /dev/null secret decryption will be disabled'
)
@click.option(
'--output-format', 'outformat', envvar='INVENTORY_FORMAT', show_default=True, default='yaml',
type=click.Choice(['yaml', 'json']), help='specify the output format'
)
@click.option('--list', 'list_flag', is_flag=True, help='Print inventory information as a JSON object')
@click.option('--host', help='Retrieve host variables (not implemented)')
@click.option(
'--createlinks', 'linkdir',
type=click.Path(exists=True, dir_okay=True, file_okay=False, writable=True),
help='Create symlinks in DIRECTORY to the script for each platform name retrieved'
)
@click.option('--show', 'show_flag', is_flag=True, help='Output a list of upstream environments (or groups if environment is set)')
def cli(env, uri, vpfile, outformat, list_flag, host, linkdir, show_flag):
'''Ansible file based dynamic inventory script'''
# Called with `--createlinks`
if linkdir:
return create_links(Inventory(uri).fetch_environments(), linkdir)
else:
if env is None:
if show_flag:
click.echo("Upstream environments:")
click.echo("\n".join(sorted(Inventory(uri).fetch_environments())))
else:
click.echo("Error: Missing environment, use --env or `export INVENTORY_ENV`")
return 1
else:
if show_flag:
grouplist = list(Inventory(uri, env).generate_inventory())
grouplist.remove('_meta')
click.echo("\n".join(sorted(grouplist)))
else:
# If vault password file is /dev/null, disable secrets decryption
if vpfile == '/dev/null':
vpfile = None
# Read in the vault password if one was provided
if vpfile is not None:
vault_password = fetch_data_local(vpfile)
else:
vault_password = None
data = None
# Called with `--list`.
if list_flag:
data = Inventory(uri, env, vault_password).generate_inventory()
# Called with `--host [hostname]`.
elif host:
# Not implemented, since we should return _meta info in `--list`.
data = {}
# require either --list or --host
else:
click.echo("Error: Missing parameter (--list or --host)?")
return 1
dumper = {
'json': json.dumps,
'yaml': yaml.dump
}
if outformat not in dumper:
raise AttributeError("Unsupported output data type: {}".format(outformat))
click.echo(dumper[outformat](data))
|
fedora-infra/mote
|
refs/heads/master
|
mote/__init__.py
|
1
|
# -*- coding: utf-8 -*-
#
# Copyright © 2015-2016 Chaoyi Zha <cydrobolt@fedoraproject.org>
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import os
import flask, random, string, json, re
import dateutil.parser, requests, collections
import logging
from six.moves import html_parser as html_parser_six
from bs4 import BeautifulSoup
from flask import Flask, render_template, request, url_for, session, redirect
from flask import abort
from flask_fas_openid import fas_login_required, FAS
from . import util, soke
fn_search_regex = r"(.*?)\.([0-9]{4}\-[0-9]{2}\-[0-9]{2})\-.*?\..*?\.(.*)"
config = util.config()
__version__ = "0.0.0"
user_sessions = dict()
app = Flask("mote")
fas = FAS(app)
app.secret_key = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(20))
app.config['FAS_OPENID_ENDPOINT'] = 'http://id.fedoraproject.org/'
app.config['FAS_CHECK_CERT'] = True
html_parser = html_parser_six.HTMLParser()
cwd = os.getcwd()
app.url_map.converters['regex'] = util.RegexConverter
if config.use_mappings_github == True:
name_mappings = requests.get("https://raw.githubusercontent.com/fedora-infra/mote/master/name_mappings.json").text
category_mappings = requests.get("https://raw.githubusercontent.com/fedora-infra/mote/master/category_mappings.json").text
else:
with open(config.name_mappings_path, 'r') as f:
name_mappings = f.read()
with open(config.category_mappings_path, 'r') as f:
category_mappings = f.read()
name_mappings = util.map_name_aliases(json.loads(name_mappings))
category_mappings = json.loads(category_mappings)
logging_format = '%(asctime)-15s %(message)s'
logging.basicConfig(format=logging_format)
logger = logging.getLogger(__name__)
if config.use_memcached == True:
import memcache
mc = memcache.Client([config.memcached_ip], debug=0)
def return_error(msg):
return render_template('error.html', error=msg)
def get_cache_data(key_name):
if key_name == "mote:team_meetings":
meeting_type = "team"
elif key_name == "mote:channel_meetings":
meeting_type = "channel"
elif key_name == "mote:latest_meetings":
meeting_type = "latest_meetings"
else:
meeting_type = None
if config.use_memcached == True:
try:
res = mc.get(key_name)
if res is None:
raise ValueError('Could not find requested key.')
return res
except ValueError:
try:
res = util.get_json_cache(meeting_type)
except RuntimeError:
soke.run()
res = util.get_json_cache(meeting_type)
return res
else:
# Skip memcached, use JSON store directly
try:
res = util.get_json_cache(meeting_type)
except RuntimeError:
soke.run()
res = util.get_json_cache(meeting_type)
return res
def get_friendly_name(group_id, channel=False):
if channel == True:
group_id = "#{}".format(group_id)
try:
friendly_name = name_mappings[group_id]["friendly-name"]
except KeyError:
friendly_name = False
return friendly_name
def handle_meeting_date_request(group_type, meeting_group, date_stamp):
try:
meetings = get_cache_data("mote:{}_meetings".format(group_type))
workable_array = meetings[meeting_group][date_stamp]
minutes = workable_array["minutes"]
logs = workable_array["logs"]
return render_template(
"date-list.html",
minutes=minutes,
date=date_stamp,
logs=logs,
type=group_type,
group_name=meeting_group
)
except KeyError:
raise ValueError("Unable to locate the requested meetings.")
@app.route('/', methods=['GET'])
def index():
# Get latest meetings
latest_meetings = get_cache_data('mote:latest_meetings')
# Renders main page template.
return render_template('index.html',
latest_meetings=latest_meetings)
@app.route('/post_auth', methods=['GET'])
@app.route('/post_auth/', methods=['GET'])
@fas_login_required
def post_auth():
# Set local session variables after
# FedOAuth authenticates the user.
session['logged'] = True
return redirect(url_for('index'))
@app.route('/<meeting_channel>/<regex("([0-9]{4}\-[0-9]{2}\-[0-9]{2})"):date_stamp>')
@app.route('/<meeting_channel>/<regex("([0-9]{4}\-[0-9]{2}\-[0-9]{2})"):date_stamp>/')
def catch_channel_date_request(meeting_channel, date_stamp):
try:
return handle_meeting_date_request("channel", meeting_channel, date_stamp)
except ValueError:
return return_error("Requested meetings could not be located.")
@app.route('/teams/<meeting_team>/<regex("([0-9]{4}\-[0-9]{2}\-[0-9]{2})"):date_stamp>')
@app.route('/teams/<meeting_team>/<regex("([0-9]{4}\-[0-9]{2}\-[0-9]{2})"):date_stamp>/')
def catch_team_date_request(meeting_team, date_stamp):
try:
return handle_meeting_date_request("team", meeting_team, date_stamp)
except ValueError:
return return_error("Requested meetings could not be located.")
@app.route('/teams/<meeting_team>')
@app.route('/teams/<meeting_team>/')
def catch_team_baserequest(meeting_team):
url = flask.url_for('sresults', group_id=meeting_team, type='team')
return flask.redirect(url)
@app.route('/<meeting_channel>/<date>/<regex("(.*?)\.[0-9]{4}\-[0-9]{2}\-[0-9]{2}\-.*"):file_name>')
def catch_channel_logrequest(date, file_name, meeting_channel):
# This route catches standard log requests (.log.html, .html, or .txt)
# Links referencing a meeting channel will be caught by this route.
# These URLs include those provided by MeetBot at the end of a meeting,
# or links referencing a specific meeting channel,
# such as #fedora-meeting or #fedora-ambassadors
# example: https://meetbot.fedoraproject.org/fedora-meeting-1/2015-02-09/releng.2015-02-09-16.31.html
m = re.search(fn_search_regex, file_name)
if m is None:
return abort(404)
log_extension = m.group(3) # type of log requested: log.html, html, or txt
meeting_date = date # date of log requested: YYYY-MM-DD
log_type = util.get_meeting_type(log_extension)
if log_type == "plain-text":
# Redirect to the plaintext file if one is requested.
built_url = "{}/{}/{}/{}".format(config.meetbot_prefix, meeting_channel, date, file_name)
return redirect(built_url)
return render_template("single-log.html", gtype="channel", ltype=log_type, group=meeting_channel, date=meeting_date, filename=file_name)
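# Illustrative decomposition of the example filename above by fn_search_regex:
#   "releng.2015-02-09-16.31.html"
#     group(1) -> "releng"      (meeting name)
#     group(2) -> "2015-02-09"  (date stamp)
#     group(3) -> "html"        (log extension)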
@app.route('/teams/<meeting_team>/<regex("(.*?)\.[0-9]{4}\-[0-9]{2}\-[0-9]{2}\-.*"):file_name>')
def catch_team_logrequest(file_name, meeting_team):
# This route catches standard log requests (.log.html, .html, or .txt)
# Links referencing a meeting team will be caught by this route.
# e.g. referencing famna or infrastructure
# example: https://meetbot.fedoraproject.org/teams/fedora-mktg/fedora-mktg.2013-10-07-19.02.html
m = re.search(fn_search_regex, file_name)
if m is None:
return abort(404)
group_name = m.group(1) # name of team, e.g. famna
meeting_date = m.group(2) # date of log requested: YYYY-MM-DD
log_extension = m.group(3) # type of log requested: log.html, html, or txt
log_type = util.get_meeting_type(log_extension)
if log_type == "plain-text":
built_url = "{}/teams/{}/{}".format(config.meetbot_prefix, meeting_team, file_name)
return redirect(built_url)
return render_template("single-log.html", gtype="team", ltype=log_type, group=group_name, date=meeting_date, filename=file_name)
@app.route('/request_logs')
@app.route('/request_logs/')
def request_logs():
""" Return a list of filenames for minutes and/or logs
for a specified date.
"""
group_id = request.args["group_id"]
group_type = request.args["group_type"]
date_stamp = request.args["date_stamp"]
if group_type == "team":
meetings = get_cache_data("mote:team_meetings")
elif group_type == "channel":
meetings = get_cache_data("mote:channel_meetings")
try:
workable_array = meetings[group_id][date_stamp]
minutes = workable_array["minutes"]
logs = workable_array["logs"]
response = json.dumps({"minutes": minutes, "logs": logs})
return response
except Exception:
flask.abort(404)
@app.route('/get_meeting_log')
@app.route('/get_meeting_log/')
def get_meeting_log():
""" Return specific logs or minutes to client. """
group_type = request.args['group_type']
date_stamp = request.args['date_stamp']
group_id = request.args['group_id']
file_name = request.args['file_name']
file_type = request.args.get('file_type')
file_name = html_parser.unescape(file_name)
if group_type == "team":
link_prefix_ending = "/teams/" + group_id + "/"
else:
link_prefix_ending = "/" + group_id + "/" + date_stamp + "/"
url = config.meetbot_fetch_prefix + link_prefix_ending + file_name
try:
fetch_result = requests.get(url)
fetch_soup = BeautifulSoup(fetch_result.text, "html.parser")
if file_type == "log":
full_log_links = fetch_soup.findAll('a', text="full logs")
for a in full_log_links:
# prefix "full logs" links with correct paths
full_log_file_name = a['href']
a['href'] = link_prefix_ending + full_log_file_name
a['target'] = "_blank"
body_content = str(fetch_soup.body)
body_content = body_content.replace("</br>", "")
return body_content
except Exception:
flask.abort(404)
@app.route('/sresults', methods=['GET'])
@app.route('/sresults/', methods=['GET'])
def sresults():
# Display results for a meeting group.
group_id = request.args.get('group_id', '')
group_type = request.args.get('type', '')
friendly_name = get_friendly_name(group_id)
if (group_id == '') or (group_type == ''):
return return_error("Invalid group ID or type.")
if group_type == "team":
meetings = get_cache_data("mote:team_meetings")
elif group_type == "channel":
meetings = get_cache_data("mote:channel_meetings")
else:
return return_error("Invalid group type.")
try:
groupx_meetings = meetings[group_id]
except KeyError:
return return_error("Group not found.")
sorted_dates = list(groupx_meetings.keys())
try:
sorted_dates.sort(key=dateutil.parser.parse, reverse=True)
except Exception:
return return_error("An error occurred while fetching meetings.")
avail_dates = collections.OrderedDict()
try:
for date in sorted_dates:
parsed_date = dateutil.parser.parse(date)
month = parsed_date.strftime("%B")
year = parsed_date.year
if year not in avail_dates:
avail_dates[year] = collections.OrderedDict()
if month not in avail_dates[year]:
avail_dates[year][month] = []
avail_dates[year][month].append(date)
sorted_date_items = avail_dates.items()
avail_dates = collections.OrderedDict(sorted_date_items)
except Exception:
pass
return render_template('sresults.html',
friendly_name = friendly_name,
name = group_id,
type = group_type,
avail_dates = avail_dates,
meetbot_location = config.meetbot_prefix,
latest_meeting = list(list(avail_dates.items())[0][1].items())[0][1][0]
)
@app.route('/search_sugg', methods=['GET'])
@app.route('/search_sugg/', methods=['GET'])
def search_sugg():
# Find and return the top 20 search results.
search_term = request.args.get('q', '')
channel_meetings = get_cache_data("mote:channel_meetings")
team_meetings = get_cache_data("mote:team_meetings")
results = []
res_num = 0
display_num = 20
for cmk in channel_meetings:
if res_num >= display_num:
break
if search_term in cmk:
friendly_name = get_friendly_name(cmk) or "A friendly meeting group."
try:
dates, latest = util.get_arrow_dates(channel_meetings[cmk])
except KeyError:
continue
if not dates:
continue
results.append({
"id": cmk,
"name": cmk,
"type": "channel",
"description": friendly_name,
"latest": latest.timestamp,
"latest_human": latest.humanize(),
})
res_num += 1
for tmk in team_meetings:
if res_num >= display_num:
break
if search_term in tmk:
friendly_name = get_friendly_name(tmk) or "A friendly meeting group."
try:
dates, latest = util.get_arrow_dates(team_meetings[tmk])
except KeyError:
continue
results.append({
"id": tmk,
"name": tmk,
"type": "team",
"description": friendly_name,
"latest": latest.timestamp,
"latest_human": latest.humanize(),
})
res_num += 1
# Sort results by most recent activity, newest first.
results = list(reversed(sorted(results, key=lambda k: k['latest'])))
return flask.jsonify(dict(items=results))
@app.route('/auth', methods=['GET'])
@app.route('/auth/', methods=['GET'])
def auth_login():
groups = config.admin_groups
next_url = url_for('post_auth')
return fas.login(return_url=next_url, groups=groups)
@app.route('/logout', methods=['GET'])
@app.route('/logout/', methods=['GET'])
def logout():
if flask.g.fas_user:
fas.logout()
session['logged'] = None
return redirect(url_for('index'))
@app.route('/admin', methods=['GET'])
@app.route('/admin/', methods=['GET'])
@fas_login_required
def admin_panel():
is_admin = False
for admin_group in config.admin_groups:
if admin_group in flask.g.fas_user.groups:
is_admin = True
if is_admin == True:
return render_template("admin.html")
else:
return render_template('error.html', error="Your account does not have access to this resource.")
@app.route('/browse', methods=['GET'])
@app.route('/browse/', methods=['GET'])
def browse():
browse_nmappings = dict()
for category_index in category_mappings:
for category in category_mappings[category_index]:
try:
browse_nmappings[category] = name_mappings[category]["friendly-name"]
except KeyError:
browse_nmappings[category] = category
return render_template('browse.html', category_mappings=category_mappings, browse_nmappings=browse_nmappings)
@app.errorhandler(404)
def not_found_404(e):
return render_template('404.html'), 404
@app.errorhandler(500)
def server_error_500(e):
return render_template('500.html'), 500
|
sruffell/asterisk-working
|
refs/heads/for-trunk
|
contrib/ast-db-manage/config/versions/21e526ad3040_add_pjsip_debug_option.py
|
13
|
"""add pjsip debug option
Revision ID: 21e526ad3040
Revises: 2fc7930b41b3
Create Date: 2014-01-30 10:44:02.297455
"""
# revision identifiers, used by Alembic.
revision = '21e526ad3040'
down_revision = '2fc7930b41b3'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('ps_globals', sa.Column('debug', sa.String(40)))
def downgrade():
op.drop_column('ps_globals', 'debug')
|
StackPointCloud/ansible-modules-extras
|
refs/heads/devel
|
packaging/language/pear.py
|
157
|
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2012, Afterburn <http://github.com/afterburn>
# (c) 2013, Aaron Bull Schaefer <aaron@elasticdog.com>
# (c) 2015, Jonathan Lestrelin <jonathan.lestrelin@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: pear
short_description: Manage pear/pecl packages
description:
- Manage PHP packages with the pear package manager.
version_added: 2.0
author:
- "'jonathan.lestrelin' <jonathan.lestrelin@gmail.com>"
options:
name:
description:
- Name of the package to install, upgrade, or remove.
required: true
state:
description:
- Desired state of the package.
required: false
default: "present"
choices: ["present", "absent", "latest"]
'''
EXAMPLES = '''
# Install pear package
- pear: name=Net_URL2 state=present
# Install pecl package
- pear: name=pecl/json_post state=present
# Upgrade package
- pear: name=Net_URL2 state=latest
# Remove packages
- pear: name=Net_URL2,pecl/json_post state=absent
'''
import os
def get_local_version(pear_output):
"""Take pear remote-info output and get the installed version"""
lines = pear_output.split('\n')
for line in lines:
if 'Installed ' in line:
installed = line.rsplit(None, 1)[-1].strip()
if installed == '-': continue
return installed
return None
def get_repository_version(pear_output):
"""Take pear remote-info output and get the latest version"""
lines = pear_output.split('\n')
for line in lines:
if 'Latest ' in line:
return line.rsplit(None, 1)[-1].strip()
return None
def query_package(module, name, state="present"):
"""Query the package status in both the local system and the repository.
Returns a boolean to indicate if the package is installed,
and a second boolean to indicate if the package is up-to-date."""
if state == "present":
lcmd = "pear info %s" % (name)
lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
if lrc != 0:
# package is not installed locally
return False, False
rcmd = "pear remote-info %s" % (name)
rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
# get the version installed locally (if any)
lversion = get_local_version(rstdout)
# get the version in the repository
rversion = get_repository_version(rstdout)
if rrc == 0:
# Return True to indicate that the package is installed locally,
# and the result of the version number comparison
# to determine if the package is up-to-date.
return True, (lversion == rversion)
return False, False
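# Sketch of the (installed, up_to_date) pairs returned above, for a
# hypothetical package:
#   not installed locally                  -> (False, False)
#   installed, repository has a newer one  -> (True, False)
#   installed and current                  -> (True, True)
#   remote-info lookup fails               -> (False, False)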
def remove_packages(module, packages):
remove_c = 0
# Using a for loop so that, in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
installed, updated = query_package(module, package)
if not installed:
continue
cmd = "pear uninstall %s" % (package)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="failed to remove %s" % (package))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, state, packages):
install_c = 0
for i, package in enumerate(packages):
# if the package is installed and state == present
# or state == latest and is up-to-date then skip
installed, updated = query_package(module, package)
if installed and (state == 'present' or (state == 'latest' and updated)):
continue
if state == 'present':
command = 'install'
if state == 'latest':
command = 'upgrade'
cmd = "pear %s %s" % (command, package)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="failed to install %s" % (package))
install_c += 1
if install_c > 0:
module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
module.exit_json(changed=False, msg="package(s) already installed")
def check_packages(module, packages, state):
would_be_changed = []
for package in packages:
installed, updated = query_package(module, package)
if ((state in ["present", "latest"] and not installed) or
(state == "absent" and installed) or
(state == "latest" and not updated)):
would_be_changed.append(package)
if would_be_changed:
if state == "absent":
state = "removed"
module.exit_json(changed=True, msg="%s package(s) would be %s" % (
len(would_be_changed), state))
else:
module.exit_json(changed=False, msg="package(s) already %s" % state)
def exe_exists(program):
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):
return True
return False
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(aliases=['pkg']),
state = dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed'])),
required_one_of = [['name']],
supports_check_mode = True)
if not exe_exists("pear"):
module.fail_json(msg="cannot find pear executable in PATH")
p = module.params
# normalize the state parameter
if p['state'] in ['present', 'installed']:
p['state'] = 'present'
elif p['state'] in ['absent', 'removed']:
p['state'] = 'absent'
if p['name']:
pkgs = p['name'].split(',')
pkg_files = []
for i, pkg in enumerate(pkgs):
pkg_files.append(None)
if module.check_mode:
check_packages(module, pkgs, p['state'])
if p['state'] in ['present', 'latest']:
install_packages(module, p['state'], pkgs)
elif p['state'] == 'absent':
remove_packages(module, pkgs)
# import module snippets
from ansible.module_utils.basic import *
main()
|
leafclick/intellij-community
|
refs/heads/master
|
python/testData/completion/uninitialized/fun.py
|
13
|
f<caret>
def foo():
pass
|
usersource/tasks
|
refs/heads/master
|
tasks_phonegap/Tasks/cordova/plugins/io.usersource.anno/tools/copytool2/oauth2client/locked_file.py
|
144
|
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Locked file interface that should work on Unix and Windows pythons.
This module first tries to use fcntl locking to ensure serialized access
to a file, then falls back on a lock file if that is unavailable.
Usage:
f = LockedFile('filename', 'r+b', 'rb')
f.open_and_lock()
if f.is_locked():
print 'Acquired filename with r+b mode'
f.file_handle().write('locked data')
else:
print 'Acquired filename with rb mode'
f.unlock_and_close()
"""
__author__ = 'cache@google.com (David T McWherter)'
import errno
import logging
import os
import time
from oauth2client import util
logger = logging.getLogger(__name__)
class CredentialsFileSymbolicLinkError(Exception):
"""Credentials files must not be symbolic links."""
class AlreadyLockedException(Exception):
"""Trying to lock a file that has already been locked by the LockedFile."""
pass
def validate_file(filename):
if os.path.islink(filename):
raise CredentialsFileSymbolicLinkError(
'File: %s is a symbolic link.' % filename)
class _Opener(object):
"""Base class for different locking primitives."""
def __init__(self, filename, mode, fallback_mode):
"""Create an Opener.
Args:
filename: string, The pathname of the file.
mode: string, The preferred mode to access the file with.
fallback_mode: string, The mode to use if locking fails.
"""
self._locked = False
self._filename = filename
self._mode = mode
self._fallback_mode = fallback_mode
self._fh = None
def is_locked(self):
"""Was the file locked."""
return self._locked
def file_handle(self):
"""The file handle to the file. Valid only after opened."""
return self._fh
def filename(self):
"""The filename that is being locked."""
return self._filename
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries.
"""
pass
def unlock_and_close(self):
"""Unlock and close the file."""
pass
class _PosixOpener(_Opener):
"""Lock files using Posix advisory lock files."""
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Tries to create a .lock file next to the file we're trying to open.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries.
Raises:
AlreadyLockedException: if the lock is already acquired.
IOError: if the open fails.
CredentialsFileSymbolicLinkError if the file is a symbolic link.
"""
if self._locked:
raise AlreadyLockedException('File %s is already locked' %
self._filename)
self._locked = False
validate_file(self._filename)
try:
self._fh = open(self._filename, self._mode)
except IOError, e:
# If we can't access with _mode, try _fallback_mode and don't lock.
if e.errno == errno.EACCES:
self._fh = open(self._filename, self._fallback_mode)
return
lock_filename = self._posix_lockfile(self._filename)
start_time = time.time()
while True:
try:
self._lock_fd = os.open(lock_filename,
os.O_CREAT|os.O_EXCL|os.O_RDWR)
self._locked = True
break
except OSError, e:
if e.errno != errno.EEXIST:
raise
if (time.time() - start_time) >= timeout:
logger.warn('Could not acquire lock %s in %s seconds' % (
lock_filename, timeout))
# Close the file and open in fallback_mode.
if self._fh:
self._fh.close()
self._fh = open(self._filename, self._fallback_mode)
return
time.sleep(delay)
def unlock_and_close(self):
"""Unlock a file by removing the .lock file, and close the handle."""
if self._locked:
lock_filename = self._posix_lockfile(self._filename)
os.close(self._lock_fd)
os.unlink(lock_filename)
self._locked = False
self._lock_fd = None
if self._fh:
self._fh.close()
def _posix_lockfile(self, filename):
"""The name of the lock file to use for posix locking."""
return '%s.lock' % filename
try:
import fcntl
class _FcntlOpener(_Opener):
"""Open, lock, and unlock a file using fcntl.lockf."""
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries
Raises:
AlreadyLockedException: if the lock is already acquired.
IOError: if the open fails.
CredentialsFileSymbolicLinkError if the file is a symbolic link.
"""
if self._locked:
raise AlreadyLockedException('File %s is already locked' %
self._filename)
start_time = time.time()
validate_file(self._filename)
try:
self._fh = open(self._filename, self._mode)
except IOError, e:
# If we can't access with _mode, try _fallback_mode and don't lock.
if e.errno == errno.EACCES:
self._fh = open(self._filename, self._fallback_mode)
return
# We opened in _mode, try to lock the file.
while True:
try:
fcntl.lockf(self._fh.fileno(), fcntl.LOCK_EX)
self._locked = True
return
except IOError, e:
# If not retrying, then just pass on the error.
if timeout == 0:
raise e
if e.errno != errno.EACCES:
raise e
# We could not acquire the lock. Try again.
if (time.time() - start_time) >= timeout:
logger.warn('Could not lock %s in %s seconds' % (
self._filename, timeout))
if self._fh:
self._fh.close()
self._fh = open(self._filename, self._fallback_mode)
return
time.sleep(delay)
def unlock_and_close(self):
"""Close and unlock the file using the fcntl.lockf primitive."""
if self._locked:
fcntl.lockf(self._fh.fileno(), fcntl.LOCK_UN)
self._locked = False
if self._fh:
self._fh.close()
except ImportError:
_FcntlOpener = None
try:
import pywintypes
import win32con
import win32file
class _Win32Opener(_Opener):
"""Open, lock, and unlock a file using windows primitives."""
# Error #33:
# 'The process cannot access the file because another process'
FILE_IN_USE_ERROR = 33
# Error #158:
# 'The segment is already unlocked.'
FILE_ALREADY_UNLOCKED_ERROR = 158
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries
Raises:
AlreadyLockedException: if the lock is already acquired.
IOError: if the open fails.
CredentialsFileSymbolicLinkError if the file is a symbolic link.
"""
if self._locked:
raise AlreadyLockedException('File %s is already locked' %
self._filename)
start_time = time.time()
validate_file(self._filename)
try:
self._fh = open(self._filename, self._mode)
except IOError, e:
# If we can't access with _mode, try _fallback_mode and don't lock.
if e.errno == errno.EACCES:
self._fh = open(self._filename, self._fallback_mode)
return
# We opened in _mode, try to lock the file.
while True:
try:
hfile = win32file._get_osfhandle(self._fh.fileno())
win32file.LockFileEx(
hfile,
(win32con.LOCKFILE_FAIL_IMMEDIATELY|
win32con.LOCKFILE_EXCLUSIVE_LOCK), 0, -0x10000,
pywintypes.OVERLAPPED())
self._locked = True
return
except pywintypes.error, e:
if timeout == 0:
raise e
# If the error is not that the file is already in use, raise.
if e[0] != _Win32Opener.FILE_IN_USE_ERROR:
raise
# We could not acquire the lock. Try again.
if (time.time() - start_time) >= timeout:
logger.warn('Could not lock %s in %s seconds' % (
self._filename, timeout))
if self._fh:
self._fh.close()
self._fh = open(self._filename, self._fallback_mode)
return
time.sleep(delay)
def unlock_and_close(self):
"""Close and unlock the file using the win32 primitive."""
if self._locked:
try:
hfile = win32file._get_osfhandle(self._fh.fileno())
win32file.UnlockFileEx(hfile, 0, -0x10000, pywintypes.OVERLAPPED())
except pywintypes.error, e:
if e[0] != _Win32Opener.FILE_ALREADY_UNLOCKED_ERROR:
raise
self._locked = False
if self._fh:
self._fh.close()
except ImportError:
_Win32Opener = None
class LockedFile(object):
"""Represent a file that has exclusive access."""
@util.positional(4)
def __init__(self, filename, mode, fallback_mode, use_native_locking=True):
"""Construct a LockedFile.
Args:
filename: string, The path of the file to open.
mode: string, The mode to try to open the file with.
fallback_mode: string, The mode to use if locking fails.
use_native_locking: bool, Whether or not fcntl/win32 locking is used.
"""
opener = None
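# Prefer a platform-native locking primitive (win32 or fcntl) when requested
# and importable; otherwise fall back to the portable .lock-file opener.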
if not opener and use_native_locking:
if _Win32Opener:
opener = _Win32Opener(filename, mode, fallback_mode)
if _FcntlOpener:
opener = _FcntlOpener(filename, mode, fallback_mode)
if not opener:
opener = _PosixOpener(filename, mode, fallback_mode)
self._opener = opener
def filename(self):
"""Return the filename we were constructed with."""
return self._opener._filename
def file_handle(self):
"""Return the file_handle to the opened file."""
return self._opener.file_handle()
def is_locked(self):
"""Return whether we successfully locked the file."""
return self._opener.is_locked()
def open_and_lock(self, timeout=0, delay=0.05):
"""Open the file, trying to lock it.
Args:
timeout: float, The number of seconds to try to acquire the lock.
delay: float, The number of seconds to wait between retry attempts.
Raises:
AlreadyLockedException: if the lock is already acquired.
IOError: if the open fails.
"""
self._opener.open_and_lock(timeout, delay)
def unlock_and_close(self):
"""Unlock and close a file."""
self._opener.unlock_and_close()
|
divegeek/keyczar
|
refs/heads/master
|
cpp/src/tools/scons/scons-local-1.2.0.d20090223/SCons/Tool/masm.py
|
19
|
"""SCons.Tool.masm
Tool-specific initialization for the Microsoft Assembler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/masm.py 4043 2009/02/23 09:06:45 scons"
import SCons.Defaults
import SCons.Tool
import SCons.Util
ASSuffixes = ['.s', '.asm', '.ASM']
ASPPSuffixes = ['.spp', '.SPP', '.sx']
if SCons.Util.case_sensitive_suffixes('.s', '.S'):
ASPPSuffixes.extend(['.S'])
else:
ASSuffixes.extend(['.S'])
def generate(env):
"""Add Builders and construction variables for masm to an Environment."""
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
for suffix in ASSuffixes:
static_obj.add_action(suffix, SCons.Defaults.ASAction)
shared_obj.add_action(suffix, SCons.Defaults.ASAction)
static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)
for suffix in ASPPSuffixes:
static_obj.add_action(suffix, SCons.Defaults.ASPPAction)
shared_obj.add_action(suffix, SCons.Defaults.ASPPAction)
static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)
env['AS'] = 'ml'
env['ASFLAGS'] = SCons.Util.CLVar('/nologo')
env['ASPPFLAGS'] = '$ASFLAGS'
env['ASCOM'] = '$AS $ASFLAGS /c /Fo$TARGET $SOURCES'
env['ASPPCOM'] = '$CC $ASPPFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c /Fo$TARGET $SOURCES'
env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
def exists(env):
return env.Detect('ml')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
ryano144/intellij-community
|
refs/heads/master
|
python/testData/paramInfo/SimpleMethod.py
|
83
|
class A:
def foo(self, a):
pass
a = A()
a.foo(<arg1>1)
|
hydroshare/hydroshare
|
refs/heads/develop
|
hs_access_control/tests/test_public_groups.py
|
1
|
from django.test import TestCase
from django.contrib.auth.models import Group
from hs_access_control.models import PrivilegeCodes, GroupAccess
from hs_core import hydroshare
from hs_core.testing import MockIRODSTestCaseMixin
from hs_access_control.tests.utilities import global_reset, is_equal_to_as_set
class T01PublicGroups(MockIRODSTestCaseMixin, TestCase):
def setUp(self):
super(T01PublicGroups, self).setUp()
global_reset()
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
self.admin = hydroshare.create_account(
'admin@gmail.com',
username='admin',
first_name='administrator',
last_name='couch',
superuser=True,
groups=[]
)
self.cat = hydroshare.create_account(
'cat@gmail.com',
username='cat',
first_name='not a dog',
last_name='last_name_cat',
superuser=False,
groups=[]
)
self.cats = self.cat.uaccess.create_group(
title='cats', description="We are the cats")
self.posts = hydroshare.create_resource(
resource_type='GenericResource',
owner=self.cat,
title='all about scratching posts',
metadata=[],
)
self.cat.uaccess.share_resource_with_group(self.posts, self.cats, PrivilegeCodes.VIEW)
self.dog = hydroshare.create_account(
'dog@gmail.com',
username='dog',
first_name='not a cat',
last_name='last_name_dog',
superuser=False,
groups=[]
)
self.dogs = self.dog.uaccess.create_group(
title='dogs', description="We are the dogs")
self.bones = hydroshare.create_resource(
resource_type='GenericResource',
owner=self.dog,
title='all about bones',
metadata=[],
)
self.dog.uaccess.share_resource_with_group(self.bones, self.dogs, PrivilegeCodes.VIEW)
self.pets = self.dog.uaccess.create_community(
'all kinds of pets',
'collaboration on how to be a better pet.')
# Make cats and dogs part of community pets
self.dog.uaccess.share_community_with_group(self.pets, self.dogs, PrivilegeCodes.VIEW)
self.cat.uaccess.share_group_with_user(self.cats, self.dog, PrivilegeCodes.OWNER)
self.dog.uaccess.share_community_with_group(self.pets, self.cats, PrivilegeCodes.VIEW)
self.cat.uaccess.unshare_group_with_user(self.cats, self.dog)
def test_01_groups(self):
"basic function: groups appear and disappear according to access rules "
# flag state
self.assertFalse(self.posts.raccess.discoverable)
groups = GroupAccess.groups_with_public_resources()
self.assertTrue(is_equal_to_as_set([], groups))
# override policies for discoverable data
self.posts.raccess.discoverable = True
self.posts.raccess.save()
# group should appear in list
groups = GroupAccess.groups_with_public_resources()
self.assertTrue(is_equal_to_as_set([self.cats], groups))
# group should contain a public resource
resources = self.cats.gaccess.public_resources
self.assertTrue(is_equal_to_as_set([self.posts], resources))
self.bones.raccess.discoverable = True
self.bones.raccess.save()
# Now group dogs should appear in list
groups = GroupAccess.groups_with_public_resources()
print(groups)
self.assertTrue(is_equal_to_as_set([self.cats, self.dogs], groups))
# group should contain a public resource
resources = self.dogs.gaccess.public_resources
self.assertTrue(is_equal_to_as_set([self.bones], resources))
def test_02_communities(self):
"groups appear and disappear from communities according to access rules "
# flag state
self.assertFalse(self.posts.raccess.discoverable)
groups = self.pets.groups_with_public_resources()
self.assertTrue(is_equal_to_as_set([], groups))
# override policies for discoverable data
self.posts.raccess.discoverable = True
self.posts.raccess.save()
# group should appear in list
groups = self.pets.groups_with_public_resources()
self.assertTrue(is_equal_to_as_set([self.cats], groups))
# group should contain a public resource
resources = self.pets.public_resources
self.assertTrue(is_equal_to_as_set([self.posts], resources))
self.bones.raccess.discoverable = True
self.bones.raccess.save()
# Now group dogs should appear in list
groups = self.pets.groups_with_public_resources()
self.assertTrue(is_equal_to_as_set([self.cats, self.dogs], groups))
# group should contain a public resource
resources = self.pets.public_resources
self.assertTrue(is_equal_to_as_set([self.posts, self.bones], resources))
|
marcore/edx-platform
|
refs/heads/master
|
common/lib/capa/capa/xqueue_interface.py
|
179
|
#
# LMS Interface to external queueing system (xqueue)
#
import hashlib
import json
import logging
import requests
import dogstats_wrapper as dog_stats_api
log = logging.getLogger(__name__)
dateformat = '%Y%m%d%H%M%S'
XQUEUE_METRIC_NAME = 'edxapp.xqueue'
# Wait time for response from Xqueue.
XQUEUE_TIMEOUT = 35 # seconds
def make_hashkey(seed):
"""
Generate a string key by hashing
"""
h = hashlib.md5()
h.update(str(seed))
return h.hexdigest()
def make_xheader(lms_callback_url, lms_key, queue_name):
"""
Generate header for delivery and reply of queue request.
Xqueue header is a JSON-serialized dict:
{ 'lms_callback_url': url to which xqueue will return the request (string),
'lms_key': secret key used by LMS to protect its state (string),
'queue_name': designate a specific queue within xqueue server, e.g. 'MITx-6.00x' (string)
}
"""
return json.dumps({
'lms_callback_url': lms_callback_url,
'lms_key': lms_key,
'queue_name': queue_name
})
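# Example header (the callback URL and key are hypothetical; the queue name is
# the one used in the docstring above). The key order of the serialized JSON is
# not significant:
#   make_xheader('https://lms.example.com/callback', 'secret-key', 'MITx-6.00x')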
def parse_xreply(xreply):
"""
Parse the reply from xqueue. Messages are JSON-serialized dict:
{ 'return_code': 0 (success), 1 (fail)
'content': Message from xqueue (string)
}
"""
try:
xreply = json.loads(xreply)
except ValueError, err:
log.error(err)
return (1, 'unexpected reply from server')
return_code = xreply['return_code']
content = xreply['content']
return (return_code, content)
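# Illustrative replies (hypothetical payloads):
#   parse_xreply('{"return_code": 0, "content": "submitted"}') -> (0, 'submitted')
#   parse_xreply('not json')                                   -> (1, 'unexpected reply from server')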
class XQueueInterface(object):
"""
Interface to the external grading system
"""
def __init__(self, url, django_auth, requests_auth=None):
self.url = unicode(url)
self.auth = django_auth
self.session = requests.Session()
self.session.auth = requests_auth
def send_to_queue(self, header, body, files_to_upload=None):
"""
Submit a request to xqueue.
header: JSON-serialized dict in the format described in 'xqueue_interface.make_xheader'
body: Serialized data for the recipient behind the queueing service. The operation of
xqueue is agnostic to the contents of 'body'
files_to_upload: List of file objects to be uploaded to xqueue along with queue request
Returns (error_code, msg) where error_code != 0 indicates an error
"""
# log the send to xqueue
header_info = json.loads(header)
queue_name = header_info.get('queue_name', u'')
dog_stats_api.increment(XQUEUE_METRIC_NAME, tags=[
u'action:send_to_queue',
u'queue:{}'.format(queue_name)
])
# Attempt to send to queue
(error, msg) = self._send_to_queue(header, body, files_to_upload)
# Log in, then try again
if error and (msg == 'login_required'):
(error, content) = self._login()
if error != 0:
# when the login fails
log.debug("Failed to login to queue: %s", content)
return (error, content)
if files_to_upload is not None:
# Need to rewind file pointers
for f in files_to_upload:
f.seek(0)
(error, msg) = self._send_to_queue(header, body, files_to_upload)
return (error, msg)
def _login(self):
payload = {
'username': self.auth['username'],
'password': self.auth['password']
}
return self._http_post(self.url + '/xqueue/login/', payload)
def _send_to_queue(self, header, body, files_to_upload):
payload = {
'xqueue_header': header,
'xqueue_body': body
}
files = {}
if files_to_upload is not None:
for f in files_to_upload:
files.update({f.name: f})
return self._http_post(self.url + '/xqueue/submit/', payload, files=files)
def _http_post(self, url, data, files=None):
try:
r = self.session.post(url, data=data, files=files)
except requests.exceptions.ConnectionError, err:
log.error(err)
return (1, 'cannot connect to server')
if r.status_code not in [200]:
return (1, 'unexpected HTTP status code [%d]' % r.status_code)
return parse_xreply(r.text)
|
xiaonanln/myleetcode-python
|
refs/heads/master
|
src/Symmetric Tree.py
|
1
|
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param root, a tree node
# @return a boolean
def isSymmetric(self, root):
if root is None: return True
return self.solve(root.left, root.right)
def solve(self, left, right):
if left is None and right is None:
return True
if left is None or right is None:
return False
if left.val != right.val:
return False
return self.solve(left.left, right.right) and self.solve(left.right, right.left)
import utils as u
print Solution().isSymmetric( u.maketree( [1, 2, 2, 3, 4, 4, 3] ) )
print Solution().isSymmetric( u.maketree( [1, 2, 2, None, 3, None, 3] ) )
|
raphaelmerx/django
|
refs/heads/master
|
django/test/__init__.py
|
341
|
"""
Django Unit Test and Doctest framework.
"""
from django.test.client import Client, RequestFactory
from django.test.testcases import (
LiveServerTestCase, SimpleTestCase, TestCase, TransactionTestCase,
skipIfDBFeature, skipUnlessAnyDBFeature, skipUnlessDBFeature,
)
from django.test.utils import (
ignore_warnings, modify_settings, override_settings,
override_system_checks,
)
__all__ = [
'Client', 'RequestFactory', 'TestCase', 'TransactionTestCase',
'SimpleTestCase', 'LiveServerTestCase', 'skipIfDBFeature',
'skipUnlessAnyDBFeature', 'skipUnlessDBFeature', 'ignore_warnings',
'modify_settings', 'override_settings', 'override_system_checks'
]
# To simplify Django's test suite; not meant as a public API
try:
from unittest import mock # NOQA
except ImportError:
try:
import mock # NOQA
except ImportError:
pass
|
pombredanne/regex2dfa
|
refs/heads/master
|
tests.py
|
3
|
import unittest
import glob
import sys
sys.path.append(".")
import regex2dfa
class AllTests(unittest.TestCase):
def test_all_regexes(self):
regexes = glob.glob("tests/*.regex")
for filepath in regexes:
self._test_regex_file(filepath)
def _test_regex_file(self, filepath):
with open(filepath) as fh:
regex = fh.read()
actual_dfa = regex2dfa.regex2dfa(regex)
filepath_dfa = filepath[:-5] + "dfa"
with open(filepath_dfa) as fh:
expected_dfa = fh.read()
actual_dfa = actual_dfa.strip()
expected_dfa = expected_dfa.strip()
self.assertEqual(expected_dfa, actual_dfa)
def suite():
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTest(loader.loadTestsFromTestCase(AllTests))
return suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
|
zenoss/ZenPacks.community.AIX
|
refs/heads/master
|
ZenPacks/community/AIX/AIXFileSystem.py
|
3
|
__doc__="""AIXFileSystem
AIXFileSystem is a file system on an aix server
$Id: FileSystem.py,v 1.12 2004/04/06 22:33:23 edahl Exp $"""
import logging
log = logging.getLogger("zen.AIXFileSystem")
from Globals import DTMLFile
from Globals import InitializeClass
from Products.ZenUtils.Utils import prepId
from Products.ZenRelations.RelSchema import *
from Products.ZenModel.OSComponent import OSComponent
from Products.ZenModel.FileSystem import FileSystem
from Products.ZenModel.ZenossSecurity import *
import copy
def manage_addFileSystem(context, id, userCreated, REQUEST=None):
"""make a filesystem"""
fsid = prepId(id)
fs = AIXFileSystem(fsid)
context._setObject(fsid, fs)
fs = context._getOb(fsid)
if userCreated: fs.setUserCreateFlag()
if REQUEST is not None:
REQUEST['RESPONSE'].redirect(context.absolute_url()+'/manage_main')
return fs
addFileSystem = DTMLFile('dtml/addFileSystem',globals())
class AIXFileSystem(FileSystem):
"""
AIXFileSystem object
"""
# This is set in the FileSystem Object
# portal_type = meta_type = 'FileSystem'
#manage_editFileSystemForm = DTMLFile('dtml/manageEditFileSystem',globals())
#mount = ""
#storageDevice = ""
#type = ""
#totalBlocks = 0L
#totalFiles = 0L
#capacity = 0
#inodeCapacity = 0
#maxNameLen = 0
# Define our new properties
aixFsFree = ""
aixFsNumInodes = ""
aixFsUsedInodes = ""
aixFsStatus = ""
aixFsExecution = ""
aixFsResultMsg = ""
blockSize = 1024**2
_properties = FileSystem._properties + (
{'id':'blockSize', 'type':'int', 'mode':''},
{'id':'aixFsFree', 'type':'string', 'mode':''},
{'id':'aixFsNumInodes', 'type':'string', 'mode':''},
{'id':'aixFsUsedInodes', 'type':'string', 'mode':''},
{'id':'aixFsStatus', 'type':'string', 'mode':''},
{'id':'aixFsExecution', 'type':'string', 'mode':''},
{'id':'aixFsResultMsg', 'type':'string', 'mode':''},
)
# Extend the Relationship Model
# Base off of OSComponent as we want to override Filesystems relations
_relations = OSComponent._relations + (
("logicalvolume", ToOne(ToManyCont, "ZenPacks.community.AIX.AIXLogicalVolume", "filesystem")),
)
# Override the web templates
factory_type_information = (
{
'id' : 'FileSystem',
'meta_type' : 'FileSystem',
'description' : """Arbitrary device grouping class""",
'icon' : 'FileSystem_icon.gif',
'product' : 'ZenModel',
'factory' : 'manage_addFileSystem',
'immediate_view' : 'viewAIXFileSystem',
'actions' :
(
{ 'id' : 'status'
, 'name' : 'Status'
, 'action' : 'viewAIXFileSystem'
, 'permissions' : (ZEN_VIEW,)
},
{ 'id' : 'events'
, 'name' : 'Events'
, 'action' : 'viewEvents'
, 'permissions' : (ZEN_VIEW, )
},
{ 'id' : 'perfConf'
, 'name' : 'Template'
, 'action' : 'objTemplates'
, 'permissions' : ("Change Device", )
},
{ 'id' : 'viewHistory'
, 'name' : 'Modifications'
, 'action' : 'viewHistory'
, 'permissions' : (ZEN_VIEW_MODIFICATIONS,)
},
)
},
)
def usedInodes(self, default = None):
"""
Return the number of used inodes stored in the filesystem's rrd file
"""
inodes = self.getRRDValue('usedInodes_usedInodes')
if inodes is not None:
return long(inodes)
else:
return None
def usedInodesString(self):
"""
Return the number of used inodes, or "unknown" if the value cannot be read
"""
__pychecker__='no-constCond'
ui = self.usedInodes()
return ui is None and "unknown" or ui
def numInodes(self, default = None):
"""
Return the number of inodes stored in the filesystem's rrd file
"""
inodes = self.getRRDValue('numInodes_numInodes')
if inodes is not None:
return long(inodes)
else:
return None
def numInodesString(self):
"""
Return the total number of inodes, or "unknown" if the value cannot be read
"""
__pychecker__='no-constCond'
ni = self.numInodes()
return ni is None and "unknown" or ni
InitializeClass(AIXFileSystem)
|
shaftoe/home-assistant
|
refs/heads/dev
|
script/inspect_schemas.py
|
31
|
#!/usr/bin/env python3
"""Inspect all component SCHEMAS."""
import os
import importlib
import pkgutil
from homeassistant.config import _identify_config_schema
from homeassistant.scripts.check_config import color
def explore_module(package):
"""Explore the modules."""
module = importlib.import_module(package)
if not hasattr(module, '__path__'):
return []
for _, name, _ in pkgutil.iter_modules(module.__path__, package + '.'):
yield name
def main():
"""Main section of the script."""
if not os.path.isfile('requirements_all.txt'):
print('Run this from HA root dir')
return
msg = {}
def add_msg(key, item):
"""Add a message."""
if key not in msg:
msg[key] = []
msg[key].append(item)
for package in explore_module('homeassistant.components'):
module = importlib.import_module(package)
module_name = getattr(module, 'DOMAIN', module.__name__)
if hasattr(module, 'PLATFORM_SCHEMA'):
if hasattr(module, 'CONFIG_SCHEMA'):
add_msg('WARNING', "Module {} contains PLATFORM and CONFIG "
"schemas".format(module_name))
add_msg('PLATFORM SCHEMA', module_name)
continue
if not hasattr(module, 'CONFIG_SCHEMA'):
add_msg('NO SCHEMA', module_name)
continue
schema_type, schema = _identify_config_schema(module)
add_msg("CONFIG_SCHEMA " + schema_type, module_name + ' ' +
color('cyan', str(schema)[:60]))
for key in sorted(msg):
print("\n{}\n - {}".format(key, '\n - '.join(msg[key])))
if __name__ == '__main__':
main()
|
fafaman/django
|
refs/heads/master
|
django/views/decorators/cache.py
|
586
|
from functools import wraps
from django.middleware.cache import CacheMiddleware
from django.utils.cache import add_never_cache_headers, patch_cache_control
from django.utils.decorators import (
available_attrs, decorator_from_middleware_with_args,
)
def cache_page(*args, **kwargs):
"""
Decorator for views that tries getting the page from the cache and
populates the cache if the page isn't in the cache yet.
The cache is keyed by the URL and some data from the headers.
Additionally there is the key prefix that is used to distinguish different
cache areas in a multi-site setup. You could use the
get_current_site().domain, for example, as that is unique across a Django
project.
Additionally, all headers from the response's Vary header will be taken
into account on caching -- just like the middleware does.
"""
# We also add some asserts to give better error messages in case people are
# using other ways to call cache_page that no longer work.
if len(args) != 1 or callable(args[0]):
raise TypeError("cache_page has a single mandatory positional argument: timeout")
cache_timeout = args[0]
cache_alias = kwargs.pop('cache', None)
key_prefix = kwargs.pop('key_prefix', None)
if kwargs:
raise TypeError("cache_page has two optional keyword arguments: cache and key_prefix")
return decorator_from_middleware_with_args(CacheMiddleware)(
cache_timeout=cache_timeout, cache_alias=cache_alias, key_prefix=key_prefix
)
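# Typical usage (the view name is illustrative):
#
#   @cache_page(60 * 15, cache="special_cache", key_prefix="site1")
#   def my_view(request):
#       ...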
def cache_control(**kwargs):
def _cache_controller(viewfunc):
@wraps(viewfunc, assigned=available_attrs(viewfunc))
def _cache_controlled(request, *args, **kw):
response = viewfunc(request, *args, **kw)
patch_cache_control(response, **kwargs)
return response
return _cache_controlled
return _cache_controller
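# Typical usage (the view name is illustrative):
#
#   @cache_control(max_age=3600, public=True)
#   def my_view(request):
#       ...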
def never_cache(view_func):
"""
Decorator that adds headers to a response so that it will
never be cached.
"""
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view_func(request, *args, **kwargs):
response = view_func(request, *args, **kwargs)
add_never_cache_headers(response)
return response
return _wrapped_view_func
|
jhayworth/config
|
refs/heads/master
|
.emacs.d/elpy/rpc-venv/local/lib/python2.7/site-packages/pip/_vendor/requests/api.py
|
37
|
# -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
"""Constructs and sends a :class:`Request <Request>`.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary, list of tuples or bytes to send
in the query string for the :class:`Request`.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.
``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers
to add for the file.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How many seconds to wait for the server to send data
before giving up, as a float, or a :ref:`(connect timeout, read
timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use. Defaults to ``True``.
:param stream: (optional) if ``False``, the response content will be immediately downloaded.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
:return: :class:`Response <Response>` object
:rtype: requests.Response
Usage::
>>> import requests
>>> req = requests.request('GET', 'https://httpbin.org/get')
<Response [200]>
"""
# By using the 'with' statement we are sure the session is closed, thus we
# avoid leaving sockets open which can trigger a ResourceWarning in some
# cases, and look like a memory leak in others.
with sessions.Session() as session:
return session.request(method=method, url=url, **kwargs)
def get(url, params=None, **kwargs):
r"""Sends a GET request.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary, list of tuples or bytes to send
in the query string for the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return request('get', url, params=params, **kwargs)
def options(url, **kwargs):
r"""Sends an OPTIONS request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return request('options', url, **kwargs)
def head(url, **kwargs):
r"""Sends a HEAD request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', False)
return request('head', url, **kwargs)
def post(url, data=None, json=None, **kwargs):
r"""Sends a POST request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
r"""Sends a PUT request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
r"""Sends a PATCH request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
r"""Sends a DELETE request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('delete', url, **kwargs)
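# Illustrative usage sketch (example endpoint, needs network access): the
# helpers above are thin wrappers around Session.request, so a GET with query
# parameters and a POST with a JSON body look like this.
if __name__ == '__main__':
    # Send a GET with a query string and inspect the status code.
    resp = get('https://httpbin.org/get', params={'q': 'example'})
    print(resp.status_code)
    # Send a POST whose body is JSON-encoded by requests itself.
    resp = post('https://httpbin.org/post', json={'key': 'value'})
    print(resp.json())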
|
mozilla/stoneridge
|
refs/heads/master
|
python/src/Lib/test/test_pkgutil.py
|
113
|
from test.test_support import run_unittest
import unittest
import sys
import imp
import pkgutil
import os
import os.path
import tempfile
import shutil
import zipfile
class PkgutilTests(unittest.TestCase):
def setUp(self):
self.dirname = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.dirname)
sys.path.insert(0, self.dirname)
def tearDown(self):
del sys.path[0]
def test_getdata_filesys(self):
pkg = 'test_getdata_filesys'
# Include a LF and a CRLF, to test that binary data is read back
RESOURCE_DATA = 'Hello, world!\nSecond line\r\nThird line'
# Make a package with some resources
package_dir = os.path.join(self.dirname, pkg)
os.mkdir(package_dir)
# Empty init.py
f = open(os.path.join(package_dir, '__init__.py'), "wb")
f.close()
# Resource files, res.txt, sub/res.txt
f = open(os.path.join(package_dir, 'res.txt'), "wb")
f.write(RESOURCE_DATA)
f.close()
os.mkdir(os.path.join(package_dir, 'sub'))
f = open(os.path.join(package_dir, 'sub', 'res.txt'), "wb")
f.write(RESOURCE_DATA)
f.close()
# Check we can read the resources
res1 = pkgutil.get_data(pkg, 'res.txt')
self.assertEqual(res1, RESOURCE_DATA)
res2 = pkgutil.get_data(pkg, 'sub/res.txt')
self.assertEqual(res2, RESOURCE_DATA)
del sys.modules[pkg]
def test_getdata_zipfile(self):
zip = 'test_getdata_zipfile.zip'
pkg = 'test_getdata_zipfile'
# Include a LF and a CRLF, to test that binary data is read back
RESOURCE_DATA = 'Hello, world!\nSecond line\r\nThird line'
# Make a package with some resources
zip_file = os.path.join(self.dirname, zip)
z = zipfile.ZipFile(zip_file, 'w')
# Empty init.py
z.writestr(pkg + '/__init__.py', "")
# Resource files, res.txt, sub/res.txt
z.writestr(pkg + '/res.txt', RESOURCE_DATA)
z.writestr(pkg + '/sub/res.txt', RESOURCE_DATA)
z.close()
# Check we can read the resources
sys.path.insert(0, zip_file)
res1 = pkgutil.get_data(pkg, 'res.txt')
self.assertEqual(res1, RESOURCE_DATA)
res2 = pkgutil.get_data(pkg, 'sub/res.txt')
self.assertEqual(res2, RESOURCE_DATA)
del sys.path[0]
del sys.modules[pkg]
def test_unreadable_dir_on_syspath(self):
# issue7367 - walk_packages failed if unreadable dir on sys.path
package_name = "unreadable_package"
d = os.path.join(self.dirname, package_name)
# this does not appear to create an unreadable dir on Windows
# but the test should not fail anyway
os.mkdir(d, 0)
self.addCleanup(os.rmdir, d)
for t in pkgutil.walk_packages(path=[self.dirname]):
self.fail("unexpected package found")
class PkgutilPEP302Tests(unittest.TestCase):
class MyTestLoader(object):
def load_module(self, fullname):
# Create an empty module
mod = sys.modules.setdefault(fullname, imp.new_module(fullname))
mod.__file__ = "<%s>" % self.__class__.__name__
mod.__loader__ = self
# Make it a package
mod.__path__ = []
# Count how many times the module is reloaded
mod.__dict__['loads'] = mod.__dict__.get('loads',0) + 1
return mod
def get_data(self, path):
return "Hello, world!"
class MyTestImporter(object):
def find_module(self, fullname, path=None):
return PkgutilPEP302Tests.MyTestLoader()
def setUp(self):
sys.meta_path.insert(0, self.MyTestImporter())
def tearDown(self):
del sys.meta_path[0]
def test_getdata_pep302(self):
# Use a dummy importer/loader
self.assertEqual(pkgutil.get_data('foo', 'dummy'), "Hello, world!")
del sys.modules['foo']
def test_alreadyloaded(self):
# Ensure that get_data works without reloading - the "loads" module
# variable in the example loader should count how many times a reload
# occurs.
import foo
self.assertEqual(foo.loads, 1)
self.assertEqual(pkgutil.get_data('foo', 'dummy'), "Hello, world!")
self.assertEqual(foo.loads, 1)
del sys.modules['foo']
def test_main():
run_unittest(PkgutilTests, PkgutilPEP302Tests)
# this is necessary if test is run repeated (like when finding leaks)
import zipimport
zipimport._zip_directory_cache.clear()
if __name__ == '__main__':
test_main()
|
teltek/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/auth_exchange/tests/mixins.py
|
1
|
"""
Mixins to facilitate testing OAuth connections to Django-OAuth-Toolkit or
Django-OAuth2-Provider.
"""
from unittest import skip, expectedFailure
from django.test.client import RequestFactory
from openedx.core.djangoapps.oauth_dispatch import adapters
from openedx.core.djangoapps.oauth_dispatch.tests.constants import DUMMY_REDIRECT_URL
from ..views import DOTAccessTokenExchangeView
class DOPAdapterMixin(object):
"""
Mixin to rewire existing tests to use django-oauth2-provider (DOP) backend
Overwrites self.client_id, self.access_token, self.oauth2_adapter
"""
client_id = 'dop_test_client_id'
access_token = 'dop_test_access_token'
oauth2_adapter = adapters.DOPAdapter()
def create_public_client(self, user, client_id=None):
"""
Create an oauth client application that is public.
"""
return self.oauth2_adapter.create_public_client(
name='Test Public Client',
user=user,
client_id=client_id,
redirect_uri=DUMMY_REDIRECT_URL,
)
def create_confidential_client(self, user, client_id=None):
"""
Create an oauth client application that is confidential.
"""
return self.oauth2_adapter.create_confidential_client(
name='Test Confidential Client',
user=user,
client_id=client_id,
redirect_uri=DUMMY_REDIRECT_URL,
)
def get_token_response_keys(self):
"""
Return the set of keys provided when requesting an access token
"""
return {'access_token', 'token_type', 'expires_in', 'scope'}
class DOTAdapterMixin(object):
"""
Mixin to rewire existing tests to use django-oauth-toolkit (DOT) backend
Overwrites self.client_id, self.access_token, self.oauth2_adapter
"""
client_id = 'dot_test_client_id'
access_token = 'dot_test_access_token'
oauth2_adapter = adapters.DOTAdapter()
def create_public_client(self, user, client_id=None):
"""
Create an oauth client application that is public.
"""
return self.oauth2_adapter.create_public_client(
name='Test Public Application',
user=user,
client_id=client_id,
redirect_uri=DUMMY_REDIRECT_URL,
)
def create_confidential_client(self, user, client_id=None):
"""
Create an oauth client application that is confidential.
"""
return self.oauth2_adapter.create_confidential_client(
name='Test Confidential Application',
user=user,
client_id=client_id,
redirect_uri=DUMMY_REDIRECT_URL,
)
def get_token_response_keys(self):
"""
Return the set of keys provided when requesting an access token
"""
return {'access_token', 'refresh_token', 'token_type', 'expires_in', 'scope'}
def test_get_method(self):
# Dispatch routes all get methods to DOP, so we test this on the view
request_factory = RequestFactory()
request = request_factory.get('/oauth2/exchange_access_token/')
request.session = {}
view = DOTAccessTokenExchangeView.as_view()
response = view(request, backend='facebook')
self.assertEqual(response.status_code, 400)
@expectedFailure
def test_single_access_token(self):
# TODO: Single access tokens not supported yet for DOT (See MA-2122)
super(DOTAdapterMixin, self).test_single_access_token()
|
deiga/robotframework-selenium2library
|
refs/heads/master
|
test/lib/mockito/__init__.py
|
69
|
#!/usr/bin/env python
# coding: utf-8
'''Mockito is a Test Spy framework.'''
__copyright__ = "Copyright 2008-2010, Mockito Contributors"
__license__ = "MIT"
__maintainer__ = "Mockito Maintainers"
__email__ = "mockito-python@googlegroups.com"
from mockito import mock, verify, verifyNoMoreInteractions, verifyZeroInteractions, when, unstub, ArgumentError
import inorder
from spying import spy
from verification import VerificationError
# Imports for compatibility
from mocking import Mock
from matchers import any, contains, times # use package import (``from mockito.matchers import any, contains``) instead of ``from mockito import any, contains``
from verification import never
__all__ = ['mock', 'spy', 'verify', 'verifyNoMoreInteractions', 'verifyZeroInteractions', 'inorder', 'when', 'unstub', 'VerificationError', 'ArgumentError',
'Mock', # deprecated
'any', # compatibility
'contains', # compatibility
'never', # compatibility
'times' # deprecated
]
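# Illustrative usage sketch (hypothetical method name 'greet'): stub a call
# with when(), exercise it, then assert the interaction with verify(); this
# assumes the fluent API re-exported above.
if __name__ == '__main__':
    m = mock()
    when(m).greet('world').thenReturn('hello world')
    print(m.greet('world'))   # -> 'hello world'
    verify(m).greet('world')  # passes because greet('world') was called once
    unstub()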
|
Filechaser/nzbToMedia
|
refs/heads/master
|
libs/beetsplug/metasync/amarok.py
|
16
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Heinz Wiesinger.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Synchronize information from amarok's library via dbus
"""
from __future__ import division, absolute_import, print_function
from os.path import basename
from datetime import datetime
from time import mktime
from xml.sax.saxutils import escape
from beets.util import displayable_path
from beets.dbcore import types
from beets.library import DateType
from beetsplug.metasync import MetaSource
def import_dbus():
try:
return __import__('dbus')
except ImportError:
return None
dbus = import_dbus()
class Amarok(MetaSource):
item_types = {
'amarok_rating': types.INTEGER,
'amarok_score': types.FLOAT,
'amarok_uid': types.STRING,
'amarok_playcount': types.INTEGER,
'amarok_firstplayed': DateType(),
'amarok_lastplayed': DateType(),
}
queryXML = u'<query version="1.0"> \
<filters> \
<and><include field="filename" value="%s" /></and> \
</filters> \
</query>'
def __init__(self, config, log):
super(Amarok, self).__init__(config, log)
if not dbus:
raise ImportError('failed to import dbus')
self.collection = \
dbus.SessionBus().get_object('org.kde.amarok', '/Collection')
def sync_from_source(self, item):
path = displayable_path(item.path)
# amarok unfortunately doesn't allow searching for the full path, only
        # for the path relative to the mount point. But the full path is part
# of the result set. So query for the filename and then try to match
# the correct item from the results we get back
results = self.collection.Query(self.queryXML % escape(basename(path)))
for result in results:
if result['xesam:url'] != path:
continue
item.amarok_rating = result['xesam:userRating']
item.amarok_score = result['xesam:autoRating']
item.amarok_playcount = result['xesam:useCount']
item.amarok_uid = \
result['xesam:id'].replace('amarok-sqltrackuid://', '')
if result['xesam:firstUsed'][0][0] != 0:
# These dates are stored as timestamps in amarok's db, but
# exposed over dbus as fixed integers in the current timezone.
first_played = datetime(
result['xesam:firstUsed'][0][0],
result['xesam:firstUsed'][0][1],
result['xesam:firstUsed'][0][2],
result['xesam:firstUsed'][1][0],
result['xesam:firstUsed'][1][1],
result['xesam:firstUsed'][1][2]
)
if result['xesam:lastUsed'][0][0] != 0:
last_played = datetime(
result['xesam:lastUsed'][0][0],
result['xesam:lastUsed'][0][1],
result['xesam:lastUsed'][0][2],
result['xesam:lastUsed'][1][0],
result['xesam:lastUsed'][1][1],
result['xesam:lastUsed'][1][2]
)
else:
last_played = first_played
item.amarok_firstplayed = mktime(first_played.timetuple())
item.amarok_lastplayed = mktime(last_played.timetuple())
|
Intel-Corporation/tensorflow
|
refs/heads/master
|
tensorflow/python/data/util/sparse_test.py
|
13
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class SparseTest(test.TestCase):
def testAnySparse(self):
test_cases = (
{
"classes": (),
"expected": False
},
{
"classes": (ops.Tensor),
"expected": False
},
{
"classes": (((ops.Tensor))),
"expected": False
},
{
"classes": (ops.Tensor, ops.Tensor),
"expected": False
},
{
"classes": (ops.Tensor, sparse_tensor.SparseTensor),
"expected": True
},
{
"classes": (sparse_tensor.SparseTensor, sparse_tensor.SparseTensor),
"expected":
True
},
{
"classes": (sparse_tensor.SparseTensor, ops.Tensor),
"expected": True
},
{
"classes": (((sparse_tensor.SparseTensor))),
"expected": True
},
)
for test_case in test_cases:
self.assertEqual(
sparse.any_sparse(test_case["classes"]), test_case["expected"])
def assertShapesEqual(self, a, b):
for a, b in zip(nest.flatten(a), nest.flatten(b)):
self.assertEqual(a.ndims, b.ndims)
if a.ndims is None:
continue
for c, d in zip(a.as_list(), b.as_list()):
self.assertEqual(c, d)
def testAsDenseShapes(self):
test_cases = (
{
"types": (),
"classes": (),
"expected": ()
},
{
"types": tensor_shape.scalar(),
"classes": ops.Tensor,
"expected": tensor_shape.scalar()
},
{
"types": tensor_shape.scalar(),
"classes": sparse_tensor.SparseTensor,
"expected": tensor_shape.unknown_shape()
},
{
"types": (tensor_shape.scalar()),
"classes": (ops.Tensor),
"expected": (tensor_shape.scalar())
},
{
"types": (tensor_shape.scalar()),
"classes": (sparse_tensor.SparseTensor),
"expected": (tensor_shape.unknown_shape())
},
{
"types": (tensor_shape.scalar(), ()),
"classes": (ops.Tensor, ()),
"expected": (tensor_shape.scalar(), ())
},
{
"types": ((), tensor_shape.scalar()),
"classes": ((), ops.Tensor),
"expected": ((), tensor_shape.scalar())
},
{
"types": (tensor_shape.scalar(), ()),
"classes": (sparse_tensor.SparseTensor, ()),
"expected": (tensor_shape.unknown_shape(), ())
},
{
"types": ((), tensor_shape.scalar()),
"classes": ((), sparse_tensor.SparseTensor),
"expected": ((), tensor_shape.unknown_shape())
},
{
"types": (tensor_shape.scalar(), (), tensor_shape.scalar()),
"classes": (ops.Tensor, (), ops.Tensor),
"expected": (tensor_shape.scalar(), (), tensor_shape.scalar())
},
{
"types": (tensor_shape.scalar(), (), tensor_shape.scalar()),
"classes": (sparse_tensor.SparseTensor, (),
sparse_tensor.SparseTensor),
"expected": (tensor_shape.unknown_shape(), (),
tensor_shape.unknown_shape())
},
{
"types": ((), tensor_shape.scalar(), ()),
"classes": ((), ops.Tensor, ()),
"expected": ((), tensor_shape.scalar(), ())
},
{
"types": ((), tensor_shape.scalar(), ()),
"classes": ((), sparse_tensor.SparseTensor, ()),
"expected": ((), tensor_shape.unknown_shape(), ())
},
)
for test_case in test_cases:
self.assertShapesEqual(
sparse.as_dense_shapes(test_case["types"], test_case["classes"]),
test_case["expected"])
def testAsDenseTypes(self):
test_cases = (
{
"types": (),
"classes": (),
"expected": ()
},
{
"types": dtypes.int32,
"classes": ops.Tensor,
"expected": dtypes.int32
},
{
"types": dtypes.int32,
"classes": sparse_tensor.SparseTensor,
"expected": dtypes.variant
},
{
"types": (dtypes.int32),
"classes": (ops.Tensor),
"expected": (dtypes.int32)
},
{
"types": (dtypes.int32),
"classes": (sparse_tensor.SparseTensor),
"expected": (dtypes.variant)
},
{
"types": (dtypes.int32, ()),
"classes": (ops.Tensor, ()),
"expected": (dtypes.int32, ())
},
{
"types": ((), dtypes.int32),
"classes": ((), ops.Tensor),
"expected": ((), dtypes.int32)
},
{
"types": (dtypes.int32, ()),
"classes": (sparse_tensor.SparseTensor, ()),
"expected": (dtypes.variant, ())
},
{
"types": ((), dtypes.int32),
"classes": ((), sparse_tensor.SparseTensor),
"expected": ((), dtypes.variant)
},
{
"types": (dtypes.int32, (), dtypes.int32),
"classes": (ops.Tensor, (), ops.Tensor),
"expected": (dtypes.int32, (), dtypes.int32)
},
{
"types": (dtypes.int32, (), dtypes.int32),
"classes": (sparse_tensor.SparseTensor, (),
sparse_tensor.SparseTensor),
"expected": (dtypes.variant, (), dtypes.variant)
},
{
"types": ((), dtypes.int32, ()),
"classes": ((), ops.Tensor, ()),
"expected": ((), dtypes.int32, ())
},
{
"types": ((), dtypes.int32, ()),
"classes": ((), sparse_tensor.SparseTensor, ()),
"expected": ((), dtypes.variant, ())
},
)
for test_case in test_cases:
self.assertEqual(
sparse.as_dense_types(test_case["types"], test_case["classes"]),
test_case["expected"])
def testGetClasses(self):
s = sparse_tensor.SparseTensor(indices=[[0]], values=[1], dense_shape=[1])
d = ops.Tensor
t = sparse_tensor.SparseTensor
test_cases = (
{
"classes": (),
"expected": ()
},
{
"classes": s,
"expected": t
},
{
"classes": constant_op.constant([1]),
"expected": d
},
{
"classes": (s),
"expected": (t)
},
{
"classes": (constant_op.constant([1])),
"expected": (d)
},
{
"classes": (s, ()),
"expected": (t, ())
},
{
"classes": ((), s),
"expected": ((), t)
},
{
"classes": (constant_op.constant([1]), ()),
"expected": (d, ())
},
{
"classes": ((), constant_op.constant([1])),
"expected": ((), d)
},
{
"classes": (s, (), constant_op.constant([1])),
"expected": (t, (), d)
},
{
"classes": ((), s, ()),
"expected": ((), t, ())
},
{
"classes": ((), constant_op.constant([1]), ()),
"expected": ((), d, ())
},
)
for test_case in test_cases:
self.assertEqual(
sparse.get_classes(test_case["classes"]), test_case["expected"])
def assertSparseValuesEqual(self, a, b):
if not isinstance(a, sparse_tensor.SparseTensor):
self.assertFalse(isinstance(b, sparse_tensor.SparseTensor))
self.assertEqual(a, b)
return
self.assertTrue(isinstance(b, sparse_tensor.SparseTensor))
with self.cached_session():
self.assertAllEqual(a.eval().indices, self.evaluate(b).indices)
self.assertAllEqual(a.eval().values, self.evaluate(b).values)
self.assertAllEqual(a.eval().dense_shape, self.evaluate(b).dense_shape)
@test_util.run_deprecated_v1
def testSerializeDeserialize(self):
test_cases = (
(),
sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
sparse_tensor.SparseTensor(
indices=[[0, 0], [3, 4]], values=[1, -1], dense_shape=[4, 5]),
(sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1])),
(sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]), ()),
((),
sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1])),
)
for expected in test_cases:
classes = sparse.get_classes(expected)
shapes = nest.map_structure(lambda _: tensor_shape.TensorShape(None),
classes)
types = nest.map_structure(lambda _: dtypes.int32, classes)
actual = sparse.deserialize_sparse_tensors(
sparse.serialize_sparse_tensors(expected), types, shapes,
sparse.get_classes(expected))
nest.assert_same_structure(expected, actual)
for a, e in zip(nest.flatten(actual), nest.flatten(expected)):
self.assertSparseValuesEqual(a, e)
@test_util.run_deprecated_v1
def testSerializeManyDeserialize(self):
test_cases = (
(),
sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
sparse_tensor.SparseTensor(
indices=[[0, 0], [3, 4]], values=[1, -1], dense_shape=[4, 5]),
(sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1])),
(sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]), ()),
((),
sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1])),
)
for expected in test_cases:
classes = sparse.get_classes(expected)
shapes = nest.map_structure(lambda _: tensor_shape.TensorShape(None),
classes)
types = nest.map_structure(lambda _: dtypes.int32, classes)
actual = sparse.deserialize_sparse_tensors(
sparse.serialize_many_sparse_tensors(expected), types, shapes,
sparse.get_classes(expected))
nest.assert_same_structure(expected, actual)
for a, e in zip(nest.flatten(actual), nest.flatten(expected)):
self.assertSparseValuesEqual(a, e)
if __name__ == "__main__":
test.main()
|
vipul-sharma20/oh-mainline
|
refs/heads/master
|
vendor/packages/scrapy/scrapy/utils/trackref.py
|
19
|
"""This module provides some functions and classes to record and report
references to live object instances.
If you want live objects for a particular class to be tracked, you only have to
subclass from object_ref (instead of object). Also, remember to turn on
tracking by enabling the TRACK_REFS setting.
About performance: This library has a minimal performance impact when enabled,
and no performance penalty at all when disabled (as object_ref becomes just an
alias to object in that case).
"""
import weakref, os
from collections import defaultdict
from time import time
from operator import itemgetter
from types import NoneType
from scrapy.conf import settings
live_refs = defaultdict(weakref.WeakKeyDictionary)
class object_ref(object):
"""Inherit from this class (instead of object) to a keep a record of live
instances"""
__slots__ = ()
def __new__(cls, *args, **kwargs):
obj = object.__new__(cls)
live_refs[cls][obj] = time()
return obj
if not settings.getbool('TRACK_REFS'):
object_ref = object
def format_live_refs(ignore=NoneType):
if object_ref is object:
return "The trackref module is disabled. Use TRACK_REFS setting to enable it."
s = "Live References" + os.linesep + os.linesep
now = time()
for cls, wdict in live_refs.iteritems():
if not wdict:
continue
if issubclass(cls, ignore):
continue
oldest = min(wdict.itervalues())
s += "%-30s %6d oldest: %ds ago" % (cls.__name__, len(wdict), \
now-oldest) + os.linesep
return s
def print_live_refs(*a, **kw):
print format_live_refs(*a, **kw)
def get_oldest(class_name):
for cls, wdict in live_refs.iteritems():
if cls.__name__ == class_name:
if wdict:
return min(wdict.iteritems(), key=itemgetter(1))[0]
def iter_all(class_name):
for cls, wdict in live_refs.iteritems():
if cls.__name__ == class_name:
return wdict.iterkeys()
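# Illustrative sketch (hypothetical class name): inherit from object_ref
# instead of object so instances are recorded in live_refs; when the
# TRACK_REFS setting is disabled object_ref is a plain alias of object and
# nothing is tracked.
if __name__ == '__main__':
    class TrackedExample(object_ref):
        pass
    example = TrackedExample()
    print_live_refs()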
|
jriegel/FreeCAD
|
refs/heads/dev-assembly-next
|
src/Mod/Path/PathScripts/PathKurve.py
|
16
|
# -*- coding: utf-8 -*-
#***************************************************************************
#* *
#* Copyright (c) 2015 Dan Falck <ddfalck@gmail.com> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
'''PathKurve - Path Profile operation using libarea (created by Dan Heeks) for making simple CNC paths.
libarea, originally from the HeeksCNC project, must be present for this to work.'''
import FreeCAD,FreeCADGui,Path,PathGui
from PathScripts import PathProject,PathUtils,PathKurveUtils,PathSelection
from PySide import QtCore,QtGui
# Qt translation handling
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def translate(context, text, disambig=None):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def translate(context, text, disambig=None):
return QtGui.QApplication.translate(context, text, disambig)
class PathProfile:
def __init__(self,obj):
obj.addProperty("App::PropertyLinkSub","Base","Path",translate("Parent Object","The base geometry of this toolpath"))
obj.addProperty("App::PropertyLinkSub","StartPoint", "Path", translate("Start Point","Linked Start Point of Profile"))
obj.addProperty("App::PropertyLinkSub","EndPoint", "Path", translate("End Point","Linked End Point of Profile"))
obj.addProperty("App::PropertyBool","Active","Path",translate("Active","Make False, to prevent operation from generating code"))
obj.addProperty("App::PropertyString","Comment","Path",translate("Comment","An optional comment for this profile"))
obj.addProperty("App::PropertyIntegerConstraint","ToolNumber","Tool",translate("PathProfile","The tool number in use"))
obj.ToolNumber = (0,0,1000,1)
obj.setEditorMode('ToolNumber',1) #make this read only
#Depth Properties
obj.addProperty("App::PropertyDistance", "ClearanceHeight", "Depth", translate("Clearance Height","The height needed to clear clamps and obstructions"))
obj.addProperty("App::PropertyLength", "StepDown", "Depth", translate("StepDown","Incremental Step Down of Tool"))
# obj.addProperty("App::PropertyBool","UseStartDepth","Depth",translate("Use Start Depth","make True, if manually specifying a Start Start Depth"))
obj.addProperty("App::PropertyDistance", "StartDepth", "Depth", translate("Start Depth","Starting Depth of Tool- first cut depth in Z"))
obj.addProperty("App::PropertyDistance", "FinalDepth", "Depth", translate("Final Depth","Final Depth of Tool- lowest value in Z"))
obj.addProperty("App::PropertyDistance", "RetractHeight", "Depth", translate("Retract Height","The height desired to retract tool when path is finished"))
#Feed Properties
obj.addProperty("App::PropertyLength", "VertFeed", "Feed",translate("Vert Feed","Feed rate (in units per minute) for vertical moves in Z"))
obj.addProperty("App::PropertyLength", "HorizFeed", "Feed",translate("Horiz Feed","Feed rate (in units per minute) for horizontal moves"))
#Profile Properties
obj.addProperty("App::PropertyEnumeration", "Side", "Profile", translate("Side","Side of edge that tool should cut"))
obj.Side = ['left','right','on'] #side of profile that cutter is on in relation to direction of profile
obj.addProperty("App::PropertyEnumeration", "Direction", "Profile",translate("Direction", "The direction that the toolpath should go around the part ClockWise CW or CounterClockWise CCW"))
obj.Direction = ['CW','CCW'] #this is the direction that the profile runs
obj.addProperty("App::PropertyBool","UseComp","Profile",translate("Use Cutter Comp","make True, if using Cutter Radius Compensation"))
obj.addProperty("App::PropertyIntegerList","Edgelist","Profile",translate("Edge List", "List of edges selected"))
obj.addProperty("App::PropertyDistance", "OffsetExtra", "Profile",translate("OffsetExtra","Extra value to stay away from final profile- good for roughing toolpath"))
# obj.addProperty("App::PropertyLength", "SegLen", "Profile",translate("Seg Len","Tesselation value for tool paths made from beziers, bsplines, and ellipses"))
# #Start Point Properties
obj.addProperty("App::PropertyString","StartPtName","Profile",translate("Start Point","The name of the start point of this path"))
obj.addProperty("App::PropertyBool","UseStartPt","Profile",translate("Use Start Point","Make True, if specifying a Start Point"))
# obj.addProperty("App::PropertyLength", "ExtendAtStart", "Profile", translate("extend at start", "extra length of tool path before start of part edge"))
# obj.addProperty("App::PropertyLength", "LeadInLineLen", "Profile", translate("lead in length","length of straight segment of toolpath that comes in at angle to first part edge"))
# #End Point Properties
obj.addProperty("App::PropertyString","EndPtName","Profile",translate("End Point","The name of the end point of this path"))
obj.addProperty("App::PropertyBool","UseEndPt","Profile",translate("Use End Point","Make True, if specifying an End Point"))
# obj.addProperty("App::PropertyLength", "ExtendAtEnd", "Profile", translate("extend at end","extra length of tool path after end of part edge"))
# obj.addProperty("App::PropertyLength", "LeadOutLineLen", "Profile", translate("lead_out_line_len","length of straight segment of toolpath that comes in at angle to last edge selected"))
# obj.addProperty("App::PropertyDistance", "RollRadius", "Profile", translate("Roll Radius","Radius at start and end"))
obj.Proxy = self
def __getstate__(self):
return None
def __setstate__(self,state):
return None
def execute(self,obj):
if obj.Base:
# tie the toolnumber to the PathLoadTool object ToolNumber
if len(obj.InList)>0: #check to see if obj is in the Project group yet
project = obj.InList[0]
tl = int(PathUtils.changeTool(obj,project))
obj.ToolNumber= tl
tool = PathUtils.getTool(obj,obj.ToolNumber)
if tool:
self.radius = tool.Diameter/2
else:
# temporary value,in case we don't have any tools defined already
self.radius = 0.25
# self.radius = 0.25
self.clearance = obj.ClearanceHeight.Value
self.step_down=obj.StepDown.Value
self.start_depth=obj.StartDepth.Value
self.final_depth=obj.FinalDepth.Value
self.rapid_safety_space=obj.RetractHeight.Value
self.side=obj.Side
self.offset_extra=obj.OffsetExtra.Value
self.use_CRC=obj.UseComp
self.vf=obj.VertFeed.Value
self.hf=obj.HorizFeed.Value
edgelist = []
if obj.StartPtName and obj.UseStartPt:
self.startpt = FreeCAD.ActiveDocument.getObject(obj.StartPtName).Shape
else:
self.startpt = None
if obj.EndPtName and obj.UseEndPt:
self.endpt = FreeCAD.ActiveDocument.getObject(obj.EndPtName).Shape
else:
self.endpt = None
for e in obj.Edgelist:
edgelist.append(FreeCAD.ActiveDocument.getObject(obj.Base[0].Name).Shape.Edges[e-1])
output=PathKurveUtils.makePath(edgelist,self.side,self.radius,self.vf,self.hf,self.offset_extra, \
self.rapid_safety_space,self.clearance,self.start_depth,self.step_down, \
self.final_depth,self.use_CRC,obj.Direction,self.startpt,self.endpt)
if obj.Active:
path = Path.Path(output)
obj.Path = path
obj.ViewObject.Visibility = True
else:
path = Path.Path("(inactive operation)")
obj.Path = path
obj.ViewObject.Visibility = False
class _ViewProviderKurve:
def __init__(self,vobj): #mandatory
vobj.Proxy = self
def __getstate__(self): #mandatory
return None
def __setstate__(self,state): #mandatory
return None
def getIcon(self): #optional
return ":/icons/Path-Kurve.svg"
class CommandPathKurve:
def GetResources(self):
return {'Pixmap' : 'Path-Kurve',
'MenuText': QtCore.QT_TRANSLATE_NOOP("PathProfile","Profile"),
'Accel': "P, P",
'ToolTip': QtCore.QT_TRANSLATE_NOOP("PathProfile","Creates a Path Profile object from selected edges, using libarea for offset algorithm")}
def IsActive(self):
        return FreeCAD.ActiveDocument is not None
def Activated(self):
FreeCAD.ActiveDocument.openTransaction(translate("PathKurve","Create a Profile operation using libarea"))
FreeCADGui.addModule("PathScripts.PathKurve")
snippet = '''
import Path
from PathScripts import PathSelection,PathProject,PathUtils
import area
def profileop():
selection = PathSelection.multiSelect()
if not selection:
FreeCAD.Console.PrintError('please select some edges\\n')
obj = FreeCAD.ActiveDocument.addObject("Path::FeaturePython","Profile")
PathScripts.PathKurve.PathProfile(obj)
obj.Active = True
PathScripts.PathKurve._ViewProviderKurve(obj.ViewObject)
obj.Base = (FreeCAD.ActiveDocument.getObject(selection['objname']))
elist = []
for e in selection['edgenames']:
elist.append(eval(e.lstrip('Edge')))
obj.Edgelist = elist
if selection['pointnames']:
FreeCAD.Console.PrintMessage('There are points selected.\\n')
if len(selection['pointnames'])>1:
obj.StartPtName = selection['pointnames'][0]
obj.StartPoint= FreeCAD.ActiveDocument.getObject(obj.StartPtName)
obj.EndPtName = selection['pointnames'][-1]
obj.EndPoint=FreeCAD.ActiveDocument.getObject(obj.EndPtName)
else:
obj.StartPtName = selection['pointnames'][0]
obj.StartPoint= FreeCAD.ActiveDocument.getObject(obj.StartPtName)
obj.ClearanceHeight = 2.0
obj.StepDown = 1.0
obj.StartDepth=0.0
obj.FinalDepth=-1.0
obj.RetractHeight = 5.0
obj.Side = 'left'
obj.OffsetExtra = 0.0
if selection['clockwise']:
obj.Direction = 'CW'
else:
obj.Direction = 'CCW'
obj.UseComp = False
project = PathUtils.addToProject(obj)
tl = PathUtils.changeTool(obj,project)
if tl:
obj.ToolNumber = tl
from PathScripts import PathProject,PathUtils,PathKurve, PathKurveUtils,PathSelection
try:
import area
except:
FreeCAD.Console.PrintError('libarea needs to be installed for this command to work\\n')
profileop()
'''
FreeCADGui.doCommand(snippet)
FreeCAD.ActiveDocument.commitTransaction()
FreeCAD.ActiveDocument.recompute()
if FreeCAD.GuiUp:
# register the FreeCAD command
FreeCADGui.addCommand('Path_Kurve',CommandPathKurve())
FreeCAD.Console.PrintLog("Loading PathKurve... done\n")
|
fyffyt/scikit-learn
|
refs/heads/master
|
examples/linear_model/plot_lasso_and_elasticnet.py
|
249
|
"""
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples / 2], y[:n_samples / 2]
X_test, y_test = X[n_samples / 2:], y[n_samples / 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, label='Elastic net coefficients')
plt.plot(lasso.coef_, label='Lasso coefficients')
plt.plot(coef, '--', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
|
miccrun/smartcontrol
|
refs/heads/master
|
apps/control/models.py
|
1
|
import json
import re
import urllib
import urllib2
from django.conf import settings
from django.db import models
import apps.control as control_constants
class Room(models.Model):
name = models.CharField(
max_length=30,
default='',
blank=False,
)
created = models.DateTimeField(auto_now_add=True)
changed = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.name
class Meta:
db_table = 'Rooms'
class Device(models.Model):
id = models.CharField(
primary_key=True,
max_length=30,
default='',
blank=False,
)
name = models.CharField(
max_length=30,
default='',
blank=False,
)
room = models.ForeignKey(Room, related_name="devices")
type = models.PositiveSmallIntegerField(
choices=control_constants.DEVICE_CHOICES,
)
format = models.CharField(
max_length=200,
default='',
)
icon = models.CharField(
max_length=100,
default='',
)
created = models.DateTimeField(auto_now_add=True)
changed = models.DateTimeField(auto_now=True)
def set_mode(self, new_mode):
mode = DeviceStatus.objects.get(
device=self,
codename="mode",
)
mode.value = new_mode
mode.save()
def get_mode(self):
mode = DeviceStatus.objects.get(
device=self,
codename="mode",
)
return mode.value
def operate(self, operation_codename, param):
try:
operation = DeviceOperation.objects.get(
device=self,
codename=operation_codename,
)
except DeviceOperation.DoesNotExist:
return (False, "Device operation does not exist")
operation_log = OperationLog(
device=self,
operation=operation,
source=control_constants.LOCAL,
)
operation_log.save()
if settings.LOCAL_MODE:
with open("/var/www/smarthome/control_queue", "a") as file:
file.write("%s;%d;%s\n" % (self.id, operation_log.id, operation.command % param))
else:
data = {
"device": self.id,
"log": operation_log.id,
"param": operation.command % param,
}
req = urllib2.Request(settings.API_PATH, urllib.urlencode(data))
try:
urllib2.urlopen(req)
except urllib2.HTTPError, e:
return (False, 'HTTPError = ' + str(e.code))
except urllib2.URLError, e:
return (False, 'URLError = ' + str(e.reason))
return (True, "OK")
def save_operation_log(self, operation_log_id, data):
try:
json_data = json.loads(data)
except ValueError:
return (False, "JSON Decoding Error, bad response format")
if operation_log_id == 0:
try:
operation = DeviceOperation.objects.get(
device=self,
codename=json_data.get("operation", "")
)
except DeviceOperation.DoesNotExist:
return (False, "Device operation does not exist")
operation_log = OperationLog(
device=self,
operation=operation,
source=control_constants.REMOTE,
success=json_data.get("result", 1),
trial=json_data.get("trial", 0),
rtt=json_data.get("rtt", 0),
)
operation_log.save()
else:
try:
operation_log = OperationLog.objects.get(pk=operation_log_id)
except OperationLog.DoesNotExist:
return (False, "Operation log does not exist")
operation_log.success = json_data.get("result", 1)
operation_log.trial = json_data.get("trial", 0)
operation_log.rtt = json_data.get("rtt", 0)
operation_log.save()
return self.save_status_log(json_data.get("message", ""), operation_log)
def save_status_log(self, data, operation_log=None):
group = re.match(self.format, data)
if group:
for status, value in group.groupdict().iteritems():
try:
device_status = DeviceStatus.objects.get(
device=self,
codename=status,
)
except DeviceStatus.DoesNotExist:
continue
if device_status:
device_status.value = value
device_status.save()
status_log = StatusLog(
device=self,
status=device_status,
operation_log=operation_log,
value=value,
)
status_log.save()
else:
return (False, "Bad response format")
return (True, "OK")
def __unicode__(self):
return self.name
class Meta:
db_table = 'Devices'
class DeviceStatus(models.Model):
device = models.ForeignKey(Device, related_name="status")
name = models.CharField(
max_length=30,
default='',
blank=False,
)
codename = models.CharField(
max_length=30,
default='',
blank=False,
)
value = models.CharField(
max_length=30,
default='',
blank=False,
)
created = models.DateTimeField(auto_now_add=True)
changed = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.name
class Meta:
db_table = 'DeviceStatus'
class DeviceOperation(models.Model):
device = models.ForeignKey(Device, related_name="operations")
name = models.CharField(
max_length=30,
default='',
blank=False,
)
codename = models.CharField(
max_length=30,
default='',
blank=False,
)
command = models.CharField(
max_length=100,
blank=False,
)
created = models.DateTimeField(auto_now_add=True)
changed = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.name
class Meta:
db_table = 'DeviceOperations'
class OperationLog(models.Model):
device = models.ForeignKey(Device)
operation = models.ForeignKey(DeviceOperation)
source = models.PositiveSmallIntegerField(
choices=control_constants.OPERATION_SROUCE,
)
success = models.PositiveSmallIntegerField(
choices=control_constants.OPERATION_RESULT,
default=control_constants.OPERATION_FAIL,
)
trial = models.PositiveSmallIntegerField(default=0)
rtt = models.PositiveSmallIntegerField(default=0)
timestamp = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.operation.name
class Meta:
db_table = 'OperationLogs'
class StatusLog(models.Model):
device = models.ForeignKey(Device)
status = models.ForeignKey(DeviceStatus)
operation_log = models.ForeignKey(
OperationLog,
null=True,
)
value = models.CharField(
max_length=30,
default='',
blank=False,
)
timestamp = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.status.name
class Meta:
db_table = 'StatusLogs'
class Config(models.Model):
id = models.CharField(
primary_key=True,
max_length=30,
default='',
blank=False,
)
value = models.CharField(
max_length=100,
default='',
blank=False,
)
created = models.DateTimeField(auto_now_add=True)
changed = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.id
class Meta:
db_table = 'Config'
|
vlegoff/tsunami
|
refs/heads/master
|
src/primaires/scripting/actions/blesser.py
|
1
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action blesser."""
from primaires.scripting.action import Action
from primaires.perso.exceptions.stat import DepassementStat
class ClasseAction(Action):
"""Blesse un personnage.
Cette action ôte des points de vitalité au personnage spécifié. Bien
entendu, si sa vitalité passe à 0, le personnage meurt."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.blesser_personnage, "Personnage", "Fraction")
cls.ajouter_types(cls.avec_adversaire, "Personnage",
"Personnage", "Fraction")
@staticmethod
def blesser_personnage(personnage, valeur):
"""Enlève au personnage la valeur précisée en points de vie."""
try:
personnage.stats.vitalite = personnage.stats.vitalite - int(valeur)
except DepassementStat:
personnage.mourir()
@staticmethod
def avec_adversaire(auteur, victime, valeur):
"""Blesse la victime en déclarant auteur comme l'adversaire.
Cette action est particulièrement utile si vous voulez induire
des dégâts qui doivent provenir d'un autre personnage, présent ou non.
Paramètres à préciser :
* auteur : le personnage à l'auteur des dégâts
* victime : le personnage victime des dégâts
* valeur : la quantité de dégâts.
"""
try:
victime.stats.vitalite = victime.stats.vitalite - int(valeur)
except DepassementStat:
victime.mourir(adversaire=auteur, recompenser=False)
|
Scalr/libcloud
|
refs/heads/trunk
|
libcloud/storage/drivers/ktucloud.py
|
60
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.common.types import LibcloudError
from libcloud.storage.providers import Provider
from libcloud.storage.drivers.cloudfiles import CloudFilesConnection
from libcloud.storage.drivers.cloudfiles import CloudFilesStorageDriver
KTUCLOUDSTORAGE_AUTH_URL = "https://ssproxy.ucloudbiz.olleh.com/auth/v1.0"
KTUCLOUDSTORAGE_API_VERSION = "1.0"
class KTUCloudStorageConnection(CloudFilesConnection):
"""
Connection class for the KT UCloud Storage endpoint.
"""
auth_url = KTUCLOUDSTORAGE_AUTH_URL
_auth_version = KTUCLOUDSTORAGE_API_VERSION
def get_endpoint(self):
eps = self.service_catalog.get_endpoints(name='cloudFiles')
if len(eps) == 0:
raise LibcloudError('Could not find specified endpoint')
ep = eps[0]
public_url = ep.url
if not public_url:
raise LibcloudError('Could not find specified endpoint')
return public_url
class KTUCloudStorageDriver(CloudFilesStorageDriver):
"""
Cloudfiles storage driver for the UK endpoint.
"""
type = Provider.KTUCLOUD
name = 'KTUCloud Storage'
connectionCls = KTUCloudStorageConnection
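# Illustrative usage sketch (placeholder credentials): the driver is normally
# obtained through libcloud's storage provider registry rather than
# instantiated directly.
if __name__ == '__main__':
    from libcloud.storage.providers import get_driver
    driver_cls = get_driver(Provider.KTUCLOUD)
    driver = driver_cls('my-access-key', 'my-secret-key')
    for container in driver.list_containers():
        print(container.name)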
|
mesemus/fedoralink
|
refs/heads/master
|
fedoralink/models.py
|
1
|
import inspect
import logging
import django.dispatch
import rdflib
from io import BytesIO
from django.apps import apps
from rdflib import Literal
from rdflib.namespace import DC, RDF, XSD
from rdflib.term import URIRef
from fedoralink.fedorans import ACL
from .fedorans import FEDORA, EBUCORE
from .manager import FedoraManager
from .rdfmetadata import RDFMetadata
from .type_manager import FedoraTypeManager
from .utils import OrderableModelList
log = logging.getLogger('fedoralink.models')
def get_from_classes(clazz, class_var):
"""
Get a list of class variables with the given name in clazz and all its superclasses. The values are returned
in mro order.
:param clazz: the class which is being queried
:param class_var: class variable name
:return: list of values
"""
ret = []
for clz in reversed(inspect.getmro(clazz)):
if hasattr(clz, class_var):
val = getattr(clz, class_var)
if isinstance(val, list) or isinstance(val, tuple):
ret.extend(val)
else:
ret.append(val)
return ret
class Types:
"""
Helper class which holds RDF types of the object
"""
def __init__(self, metadata):
self.__metadata = metadata
def add(self, a_type):
self.__metadata.add(RDF.type, a_type)
def remove(self, a_type):
self.__metadata.remove(RDF.type, a_type)
def __iter__(self):
return iter(self.__metadata[RDF.type])
def __str__(self):
return str(list(iter(self)))
class FedoraGenericMeta:
pass
class DjangoMetadataBridge:
"""
A _meta implementation for IndexableFedoraObject
"""
def __init__(self, model_class, fields):
self._fields = fields
self.virtual_fields = []
self.concrete_fields = []
self.many_to_many = []
self.verbose_name = getattr(model_class, "verbose_name", model_class.__name__)
self.model_class = model_class
self.private_fields = []
process_fields=set()
for fld in fields:
if fld.name in process_fields:
continue
process_fields.add(fld.name)
fld.null = not fld.required
fld.blank = not fld.required
fld.attname = fld.name
fld.model = model_class
self.concrete_fields.append(fld)
self.fields = self.concrete_fields
self.fields_by_name = {x.name:x for x in self.fields}
self.app_label = model_class.__module__
if self.app_label.endswith('.models'):
self.app_label = self.app_label[:-7]
self.object_name = model_class.__name__
self.apps = apps
class FedoraObjectMetaclass(type):
def __init__(cls, name, bases, attrs):
super(FedoraObjectMetaclass, cls).__init__(name, bases, attrs)
cls.objects = FedoraManager.get_manager(cls)
cls._meta = DjangoMetadataBridge(cls, [])
class FedoraObject(metaclass=FedoraObjectMetaclass):
"""
The base class of all Fedora objects, modelled along Django's model
Most important methods and properties:
.id
.children
.get_bitstream()
.save()
.delete()
.local_bitstream
.create_child()
.create_subcollection()
To get/modify metadata, use [RDF:Name], for example obj[DC.title]. These methods always return a list of metadata.
"""
def __init__(self, **kwargs):
self.metadata = None
self.__connection = None
self.__children = None
self.__is_incomplete = False
if '__metadata' in kwargs:
self.metadata = kwargs['__metadata']
else:
self.metadata = RDFMetadata('')
if '__connection' in kwargs:
self.__connection = kwargs['__connection']
else:
self.__connection = None
if '__slug' in kwargs:
self.__slug = kwargs['__slug']
else:
self.__slug = None
self.types = Types(self.metadata)
self.__local_bitstream = None
# objects are filled in by metaclass, this field is here just to make editors happy
objects = None
"""
Fields that will be used in indexing (LDPath will be created and installed
when ./manage.py config_index <modelname> is called)
"""
@classmethod
def handles_metadata(cls, _metadata):
"""
Returns priority with which this class is able to handle the given metadata, -1 or None if not at all
:type _metadata: RDFMetadata
:param _metadata: the metadata
:return: priority
"""
# although FedoraObject can handle any fedora object, this is hardcoded into FedoraTypeManager,
# this method returns None so that subclasses that do not override it will not be mistakenly used in
# type mapping
return None
@property
def slug(self):
return self.__slug
def save(self):
"""
saves this instance
"""
getattr(type(self), 'objects').save((self,), None)
@classmethod
def save_multiple(cls, objects, connection=None):
"""
saves multiple instances, might optimize the number of calls to Fedora server
"""
getattr(cls, 'objects').save(objects, connection)
@property
def objects_fedora_connection(self):
"""
returns the connection which created this object
"""
return self.__connection
def get_bitstream(self):
"""
returns a TypedStream associated with this node
"""
return self.objects.get_bitstream(self)
def get_local_bitstream(self):
"""
returns a local bitstream ready for upload
:returns TypedStream instance
"""
return self.__local_bitstream
def set_local_bitstream(self, local_bitstream):
"""
        sets a local bitstream. Call .save() afterwards to send it to the server
:param local_bitstream instance of TypedStream
"""
self.__local_bitstream = local_bitstream
def created(self):
pass
@property
def children(self):
return self.list_children()
def list_children(self, refetch=True):
return OrderableModelList(get_from_classes(type(self), 'objects')[0].load_children(self, refetch), self)
def list_self_and_descendants(self):
stack = [self]
while len(stack):
el = stack.pop()
yield el
for c in reversed(el.children):
stack.append(c)
def create_child(self, child_name, additional_types=None, flavour=None, slug=None):
child = self._create_child(flavour or FedoraObject, slug)
if additional_types:
for t in additional_types:
child.types.add(t)
for r in FedoraObject.__convert_name_to_literal(child_name):
child.metadata.add(DC.title, r)
child.created()
return child
@staticmethod
def __convert_name_to_literal(child_name):
rr = []
if isinstance(child_name, str):
rr.append(Literal(child_name, datatype=XSD.string))
elif isinstance(child_name, Literal):
if child_name.datatype is None and not child_name.language:
child_name = Literal(child_name.value, datatype=XSD.string)
rr.append(child_name)
else:
for c in child_name:
rr.extend(FedoraObject.__convert_name_to_literal(c))
return rr
def create_subcollection(self, collection_name, additional_types=None, flavour=None, slug=None):
types = [EBUCORE.Collection]
if additional_types:
types.extend(additional_types)
return self.create_child(collection_name, types, flavour=flavour, slug=slug)
def _create_child(self, child_types, slug):
if not isinstance(child_types, list):
child_types = [child_types]
clz = FedoraTypeManager.generate_class(child_types)
ret = clz(id=None, __connection=self.__connection, __slug=slug)
ret[FEDORA.hasParent] = rdflib.URIRef(self.id)
return ret
def delete(self):
getattr(type(self), 'objects').delete(self)
def update(self, fetch_child_metadata=True):
"""
Fetches new data from server and overrides this object's metadata with them
"""
self.metadata = getattr(type(self), 'objects').update(self, fetch_child_metadata).metadata
self.__is_incomplete = False
def set_acl(self, acl_collection):
if isinstance(acl_collection, str):
acl_collection = FedoraObject.objects.get(pk=acl_collection)
self[ACL.accessControl] = URIRef(acl_collection.id)
@property
def id(self):
return self.metadata.id
@property
def local_id(self):
connection = self.objects_fedora_connection
return connection.get_local_id(self.metadata.id)
@property
def is_incomplete(self):
return self.__is_incomplete
@is_incomplete.setter
def is_incomplete(self, val):
self.__is_incomplete = val
@property
def fedora_parent_uri(self):
if FEDORA.hasParent in self.metadata:
return str(self.metadata[FEDORA.hasParent][0])
else:
return None
def __getitem__(self, item):
return self.metadata[item]
def __setitem__(self, key, value):
self.metadata[key] = value
def __delitem__(self, key):
del self.metadata[key]
class UploadedFileStream:
def __init__(self, file):
self.file = file
self.content = BytesIO()
for c in self.file.chunks():
self.content.write(c)
self.content.seek(0)
def read(self):
return self.content.read()
def close(self):
pass
fedora_object_fetched = django.dispatch.Signal(providing_args=["instance", "manager"])
def get_or_create_object(path, save=False):
whole_path = '/'.join(map(lambda x: x['slug'], path))
try:
obj = FedoraObject.objects.get(pk=whole_path)
return obj
except Exception:
pass
if len(path) > 1:
parent = get_or_create_object(path[:-1], True)
else:
parent = FedoraObject.objects.get(pk='')
print("Creating object", path[-1])
obj = parent.create_child(child_name=path[-1]['name'],
slug=path[-1]['slug'],
flavour=path[-1]['flavour'])
if save:
obj.save()
return obj
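# Usage sketch (hypothetical; the slugs, names and flavours are illustrative
# only): build or fetch a nested path of FedoraObjects, creating any missing
# parents along the way.
#
#   path_spec = [
#       {'slug': 'datasets', 'name': 'Datasets', 'flavour': None},
#       {'slug': '2016', 'name': 'Datasets 2016', 'flavour': None},
#   ]
#   obj = get_or_create_object(path_spec, save=True)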
|
rec/echomesh
|
refs/heads/master
|
code/python/external/pi3d/shape/MergeShape.py
|
1
|
import ctypes
import math
import random
from pi3d.constants import *
from pi3d import Texture
from pi3d.Buffer import Buffer
from pi3d.Shape import Shape
from pi3d.util.RotateVec import rotate_vec_x, rotate_vec_y, rotate_vec_z
class MergeShape(Shape):
""" 3d model inherits from Shape. As there is quite a time penalty for
doing the matrix recalculations and changing the variables being sent to
the shader, each time an object is drawn, it is MUCH faster to use a MergeShape
where several objects will always remain in the same positions relative to
each other. i.e. trees in a forest.
Where the objects have multiple Buffers, each needing a different texture
(i.e. more complex Model objects) each must be combined into a different
MergeShape
"""
def __init__(self, camera=None, light=None, name="",
x=0.0, y=0.0, z=0.0,
rx=0.0, ry=0.0, rz=0.0,
sx=1.0, sy=1.0, sz=1.0,
cx=0.0, cy=0.0, cz=0.0):
"""uses standard constructor for Shape"""
super(MergeShape, self).__init__(camera, light, name, x, y, z, rx, ry, rz, sx, sy, sz,
cx, cy, cz)
if VERBOSE:
print "Creating Merge Shape ..."
self.vertices=[]
self.normals=[]
self.tex_coords=[]
self.indices=[] #stores all indices for single render
self.buf = []
self.buf.append(Buffer(self, self.vertices, self.tex_coords, self.indices, self.normals))
def merge(self, bufr, x, y, z,
rx=0.0, ry=0.0, rz=0.0,
sx=1.0, sy=1.0, sz=1.0,
cx=0.0, cy=0.0, cz=0.0):
"""merge the vertices, normals etc from this Buffer with those already there
the position, rotation, scale, offset are set according to the origin of
the MergeShape. If bufr is not a Buffer then it will be treated as if it
is a Shape and its first Buffer object will be merged. Argument additional
to standard Shape:
*bufr*
Buffer object or Shape with a member buf[0] that is a Buffer object.
"""
if not(type(bufr) is Buffer):
bufr = bufr.buf[0]
#assert shape.ttype == GL_TRIANGLES # this is always true of Buffer objects
assert len(bufr.vertices) == len(bufr.normals)
if VERBOSE:
print "Merging", bufr.name
vertices = []
normals = []
original_vertex_count = len(self.vertices)
for v in range(0, len(bufr.vertices)):
def rotate_slice(array):
vec = array[v]
if rz:
vec = rotate_vec_z(rz, vec)
if rx:
vec = rotate_vec_x(rx, vec)
if ry:
vec = rotate_vec_y(ry, vec)
return vec
# Scale, offset and store vertices
vx, vy, vz = rotate_slice(bufr.vertices)
self.vertices.append((vx * sx + x, vy * sy + y, vz * sz + z))
# Rotate normals
self.normals.append(rotate_slice(bufr.normals))
self.tex_coords.extend(bufr.tex_coords)
ctypes.restype = ctypes.c_short # TODO: remove this side-effect.
indices = [(i[0] + original_vertex_count, i[1] + original_vertex_count, i[2] + original_vertex_count) for i in bufr.indices]
self.indices.extend(indices)
self.buf = []
self.buf.append(Buffer(self, self.vertices, self.tex_coords, self.indices, self.normals))
def add(self, bufr, x=0.0, y=0.0, z=0.0, rx=0.0, ry=0.0, rz=0.0,
sx=1.0, sy=1.0, sz=1.0, cx=0.0, cy=0.0, cz=0.0):
"""wrapper to alias merge method"""
self.merge(bufr, x, y, z, rx, ry, rz, sx, sy, sz, cx, cy, cz)
def cluster(self, bufr, elevmap, xpos, zpos, w, d, count, options, minscl, maxscl):
"""generates a random cluster on an ElevationMap.
Arguments:
*bufr*
Buffer object to merge.
*elevmap*
ElevationMap object to merge onto.
*xpos, zpos*
x and z location of centre of cluster.
*w, d*
x and z direction size of the cluster.
*count*
Number of objects to generate.
*options*
Deprecated.
*minscl*
The minimum scale value for random selection.
*maxscl*
The maximum scale value for random selection.
"""
#create a cluster of shapes on an elevation map
for v in range(count):
x = xpos + random.random() * w - w * 0.5
z = zpos + random.random() * d - d * 0.5
rh = random.random() * (maxscl - minscl) + minscl
rt = random.random() * 360.0
y = elevmap.calcHeight(x, z) + rh * 2
self.merge(bufr, x,y,z, 0.0, rt, 0.0, rh, rh, rh)
def radialCopy(self, bufr, x=0, y=0, z=0, startRadius=2.0, endRadius=2.0,
startAngle=0.0, endAngle=360.0, step=12):
"""generates a radially copied cluster, axix is in the y direction.
Arguments:
*bufr*
Buffer object to merge.
Keyword arguments:
*x,y,z*
Location of centre of cluster relative to origin of MergeShape.
*startRadius*
Start radius.
*endRadius*
End radius.
*startAngle*
Start angle for merging, degrees; 0 is in the +ve x direction.
*endAngle*
End angle for merging, degrees. Rotation is clockwise
looking up the y axis.
*step*
Angle between each copy, degrees NB *NOT* number of steps.
"""
st = (endAngle - startAngle) / step
rst = (endRadius - startRadius) / int(st)
rd = startRadius
sta = startAngle
for r in range(int(st)):
print "merging ", r
ca = math.cos(math.radians(sta))
sa = math.sin(math.radians(sta))
self.merge(bufr, x + ca * rd, y ,z + sa * rd, 0, sta, 0)
sta += step
rd += rst
print "merged all"
|
vgrachev8/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/tf1.py
|
3
|
# coding: utf-8
import json
import re
from .common import InfoExtractor
class TF1IE(InfoExtractor):
"""TF1 uses the wat.tv player."""
_VALID_URL = r'http://videos\.tf1\.fr/.*-(.*?)\.html'
_TEST = {
u'url': u'http://videos.tf1.fr/auto-moto/citroen-grand-c4-picasso-2013-presentation-officielle-8062060.html',
u'file': u'10635995.mp4',
u'md5': u'2e378cc28b9957607d5e88f274e637d8',
u'info_dict': {
u'title': u'Citroën Grand C4 Picasso 2013 : présentation officielle',
u'description': u'Vidéo officielle du nouveau Citroën Grand C4 Picasso, lancé à l\'automne 2013.',
},
u'skip': u'Sometimes wat serves the whole file with the --test option',
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
id = mobj.group(1)
webpage = self._download_webpage(url, id)
embed_url = self._html_search_regex(r'"(https://www.wat.tv/embedframe/.*?)"',
webpage, 'embed url')
embed_page = self._download_webpage(embed_url, id, u'Downloading embed player page')
wat_id = self._search_regex(r'UVID=(.*?)&', embed_page, 'wat id')
wat_info = self._download_webpage('http://www.wat.tv/interface/contentv3/%s' % wat_id, id, u'Downloading Wat info')
wat_info = json.loads(wat_info)['media']
wat_url = wat_info['url']
return self.url_result(wat_url, 'Wat')
|
smmribeiro/intellij-community
|
refs/heads/master
|
python/testData/completion/mockPatchObject1/lib/unittest/mock.py
|
181
|
def patch():
pass
patch.object = None
|
Peddle/hue
|
refs/heads/master
|
desktop/core/ext-py/django-openid-auth-0.5/django_openid_auth/management/__init__.py
|
100
|
# django-openid-auth - OpenID integration for django.contrib.auth
#
# Copyright (C) 2009-2013 Canonical Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
gnieboer/tensorflow
|
refs/heads/master
|
tensorflow/contrib/learn/python/learn/ops/seq2seq_ops.py
|
61
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Ops for Sequence to Sequence models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import rnn
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope as vs
def sequence_classifier(decoding, labels, sampling_decoding=None, name=None):
"""Returns predictions and loss for sequence of predictions.
Args:
decoding: List of Tensors with predictions.
labels: List of Tensors with labels.
sampling_decoding: Optional, List of Tensor with predictions to be used
in sampling, e.g. they shouldn't have a dependency on outputs.
If not provided, decoding is used.
name: Operation name.
Returns:
Predictions and losses tensors.
"""
with ops.name_scope(name, "sequence_classifier", [decoding, labels]):
predictions, xent_list = [], []
for i, pred in enumerate(decoding):
xent_list.append(nn.softmax_cross_entropy_with_logits(
labels=labels[i], logits=pred,
name="sequence_loss/xent_raw{0}".format(i)))
if sampling_decoding:
predictions.append(nn.softmax(sampling_decoding[i]))
else:
predictions.append(nn.softmax(pred))
xent = math_ops.add_n(xent_list, name="sequence_loss/xent")
loss = math_ops.reduce_sum(xent, name="sequence_loss")
return array_ops.stack(predictions, axis=1), loss
def seq2seq_inputs(x, y, input_length, output_length, sentinel=None, name=None):
"""Processes inputs for Sequence to Sequence models.
Args:
x: Input Tensor [batch_size, input_length, embed_dim].
y: Output Tensor [batch_size, output_length, embed_dim].
input_length: length of input x.
output_length: length of output y.
sentinel: optional first input to decoder and final output expected.
If sentinel is not provided, zeros are used. Because y is not
available at sampling time, the shape of sentinel will be inferred from x.
name: Operation name.
Returns:
Encoder input from x, and decoder inputs and outputs from y.
"""
with ops.name_scope(name, "seq2seq_inputs", [x, y]):
in_x = array_ops.unstack(x, axis=1)
y = array_ops.unstack(y, axis=1)
if not sentinel:
# Set to zeros of shape of y[0], using x for batch size.
sentinel_shape = array_ops.stack(
[array_ops.shape(x)[0], y[0].get_shape()[1]])
sentinel = array_ops.zeros(sentinel_shape)
sentinel.set_shape(y[0].get_shape())
in_y = [sentinel] + y
out_y = y + [sentinel]
return in_x, in_y, out_y
def rnn_decoder(decoder_inputs, initial_state, cell, scope=None):
"""RNN Decoder that creates training and sampling sub-graphs.
Args:
decoder_inputs: Inputs for decoder, list of tensors.
This is used only in training sub-graph.
initial_state: Initial state for the decoder.
cell: RNN cell to use for decoder.
scope: Scope to use; if None, a new one will be created.
Returns:
List of tensors for outputs and states for training and sampling sub-graphs.
"""
with vs.variable_scope(scope or "dnn_decoder"):
states, sampling_states = [initial_state], [initial_state]
outputs, sampling_outputs = [], []
with ops.name_scope("training", values=[decoder_inputs, initial_state]):
for i, inp in enumerate(decoder_inputs):
if i > 0:
vs.get_variable_scope().reuse_variables()
output, new_state = cell(inp, states[-1])
outputs.append(output)
states.append(new_state)
with ops.name_scope("sampling", values=[initial_state]):
for i, _ in enumerate(decoder_inputs):
if i == 0:
sampling_outputs.append(outputs[i])
sampling_states.append(states[i])
else:
sampling_output, sampling_state = cell(sampling_outputs[-1],
sampling_states[-1])
sampling_outputs.append(sampling_output)
sampling_states.append(sampling_state)
return outputs, states, sampling_outputs, sampling_states
def rnn_seq2seq(encoder_inputs,
decoder_inputs,
encoder_cell,
decoder_cell=None,
dtype=dtypes.float32,
scope=None):
"""RNN Sequence to Sequence model.
Args:
encoder_inputs: List of tensors, inputs for encoder.
decoder_inputs: List of tensors, inputs for decoder.
encoder_cell: RNN cell to use for encoder.
decoder_cell: RNN cell to use for decoder, if None encoder_cell is used.
dtype: Type to initialize encoder state with.
scope: Scope to use; if None, a new one will be created.
Returns:
List of tensors for outputs and states for training and sampling sub-graphs.
"""
with vs.variable_scope(scope or "rnn_seq2seq"):
_, last_enc_state = rnn.static_rnn(
encoder_cell, encoder_inputs, dtype=dtype)
return rnn_decoder(decoder_inputs, last_enc_state, decoder_cell or
encoder_cell)
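# Usage sketch (hypothetical; graph-mode TF 1.x, with `x` and `y` assumed to be
# tensors of shape [batch, time, embed_dim] and illustrative lengths). It wires
# the helpers above into one pipeline:
#
#   cell = rnn.GRUCell(num_units=128)
#   in_x, in_y, out_y = seq2seq_inputs(x, y, input_length=10, output_length=5)
#   decoding, _, sampling_decoding, _ = rnn_seq2seq(in_x, in_y, cell)
#   predictions, loss = sequence_classifier(decoding, out_y, sampling_decoding)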
|
FrancoisRheaultUS/dipy
|
refs/heads/master
|
dipy/io/tests/test_streamline.py
|
8
|
import json
import os
from dipy.data import fetch_gold_standard_io
from dipy.io.streamline import (load_tractogram, save_tractogram,
load_trk, save_trk)
from dipy.io.stateful_tractogram import Space, StatefulTractogram
from dipy.io.utils import create_nifti_header
from dipy.io.vtk import save_vtk_streamlines, load_vtk_streamlines
from dipy.tracking.streamline import Streamlines
import numpy as np
import numpy.testing as npt
import pytest
from nibabel.tmpdirs import InTemporaryDirectory
from dipy.utils.optpkg import optional_package
fury, have_fury, setup_module = optional_package('fury')
filepath_dix = {}
files, folder = fetch_gold_standard_io()
for filename in files:
filepath_dix[filename] = os.path.join(folder, filename)
with open(filepath_dix['points_data.json']) as json_file:
points_data = dict(json.load(json_file))
with open(filepath_dix['streamlines_data.json']) as json_file:
streamlines_data = dict(json.load(json_file))
streamline = np.array([[82.20181274, 91.36505891, 43.15737152],
[82.38442231, 91.79336548, 43.87036514],
[82.48710632, 92.27861023, 44.56298065],
[82.53310394, 92.78545381, 45.24635315],
[82.53793335, 93.26902008, 45.94785309],
[82.48797607, 93.75003815, 46.64939880],
[82.35533142, 94.25181581, 47.32533264],
[82.15484619, 94.76634216, 47.97451019],
[81.90982819, 95.28792572, 48.60244371],
[81.63336945, 95.78153229, 49.23971176],
[81.35479736, 96.24868011, 49.89558792],
[81.08713531, 96.69807434, 50.56812668],
[80.81504822, 97.14285278, 51.24193192],
[80.52591705, 97.56719971, 51.92168427],
[80.26599884, 97.98269653, 52.61848068],
[80.04635621, 98.38131714, 53.33855821],
[79.84691621, 98.77052307, 54.06955338],
[79.57667542, 99.13599396, 54.78985596],
[79.23351288, 99.43207551, 55.51065063],
[78.84815979, 99.64141846, 56.24016571],
[78.47383881, 99.77347565, 56.99299241],
[78.12837219, 99.81330872, 57.76969528],
[77.80438995, 99.85082245, 58.55574799],
[77.49439240, 99.88065338, 59.34777069],
[77.21414185, 99.85343933, 60.15090561],
[76.96416473, 99.82772827, 60.96406937],
[76.74712372, 99.80519104, 61.78676605],
[76.52263641, 99.79122162, 62.60765076],
[76.03757477, 100.08692169, 63.24152374],
[75.44867706, 100.35265351, 63.79513168],
[74.78033447, 100.57255554, 64.27278901],
[74.11605835, 100.77330781, 64.76428986],
[73.51222992, 100.98779297, 65.32373047],
[72.97387695, 101.23387146, 65.93502045],
[72.47355652, 101.49151611, 66.57343292],
[71.99834442, 101.72480774, 67.23979950],
[71.56909181, 101.98665619, 67.92664337],
[71.18083191, 102.29483795, 68.61888123],
[70.81879425, 102.63343048, 69.31127167],
[70.47422791, 102.98672485, 70.00532532],
[70.10092926, 103.28502655, 70.70999908],
[69.69512177, 103.51667023, 71.42147064],
[69.27423096, 103.71351624, 72.13452911],
[68.91260529, 103.81676483, 72.89796448],
[68.60788727, 103.81982422, 73.69258118],
[68.34162903, 103.76619720, 74.49915314],
[68.08542633, 103.70635223, 75.30856323],
[67.83590698, 103.60187531, 76.11553955],
[67.56822968, 103.44821930, 76.90870667],
[67.28399658, 103.25878906, 77.68825531],
[67.00117493, 103.03740692, 78.45989227],
[66.72718048, 102.80329895, 79.23099518],
[66.46197511, 102.54130554, 79.99622345],
[66.20803833, 102.22305298, 80.74387360],
[65.96872711, 101.88980865, 81.48987579],
[65.72864532, 101.59316254, 82.25085449],
[65.47808075, 101.33383942, 83.02194214],
[65.21841431, 101.11295319, 83.80186462],
[64.95678711, 100.94080353, 84.59326935],
[64.71759033, 100.82022095, 85.40114594],
[64.48053741, 100.73490143, 86.21411896],
[64.24304199, 100.65074158, 87.02709198],
[64.01773834, 100.55318451, 87.84204865],
[63.83801651, 100.41996765, 88.66333008],
[63.70982361, 100.25119019, 89.48779297],
[63.60707855, 100.06730652, 90.31262207],
[63.46164322, 99.91001892, 91.13648224],
[63.26287842, 99.78648376, 91.95485687],
[63.03713226, 99.68377686, 92.76905823],
[62.81192398, 99.56619263, 93.58140564],
[62.57145309, 99.42708588, 94.38592529],
[62.32259369, 99.25592804, 95.18167114],
[62.07497787, 99.05770111, 95.97154236],
[61.82253647, 98.83877563, 96.75438690],
[61.59536743, 98.59293365, 97.53706360],
[61.46530151, 98.30503845, 98.32772827],
[61.39904785, 97.97928619, 99.11172485],
[61.33279419, 97.65353394, 99.89572906],
[61.26067352, 97.30914307, 100.67123413],
[61.19459534, 96.96743011, 101.44847107],
[61.19580461, 96.63417053, 102.23215485],
[61.26572037, 96.29887391, 103.01185608],
[61.39840698, 95.96297455, 103.78307343],
[61.57207871, 95.64262391, 104.55268097],
[61.78163528, 95.35540771, 105.32629395],
[62.06700134, 95.09746552, 106.08564758],
[62.39427185, 94.85724641, 106.83369446],
[62.74076462, 94.62278748, 107.57482147],
[63.11461639, 94.40107727, 108.30641937],
[63.53397751, 94.20418549, 109.02002716],
[64.00019836, 94.03809357, 109.71183777],
[64.43580627, 93.87523651, 110.42416382],
[64.84857941, 93.69993591, 111.14715576],
[65.26740265, 93.51858521, 111.86515808],
[65.69511414, 93.36718751, 112.58474731],
[66.10470581, 93.22719574, 113.31711578],
[66.45891571, 93.06028748, 114.07256317],
[66.78582001, 92.90560913, 114.84281921],
[67.11138916, 92.79004669, 115.62040711],
[67.44729614, 92.75711823, 116.40135193],
[67.75688171, 92.98265076, 117.16111755],
[68.02041626, 93.28012848, 117.91371155],
[68.25725555, 93.53466797, 118.69052124],
[68.46047974, 93.63263702, 119.51107788],
[68.62039948, 93.62007141, 120.34690094],
[68.76782227, 93.56475067, 121.18331909],
[68.90222168, 93.46326447, 122.01765442],
[68.99872589, 93.30039978, 122.84759521],
[69.04119873, 93.05428314, 123.66156769],
[69.05086517, 92.74394989, 124.45450592],
[69.02742004, 92.40427399, 125.23509979],
[68.95466614, 92.09059143, 126.02339935],
[68.84975433, 91.79674531, 126.81564331],
[68.72673798, 91.53726196, 127.61715698],
[68.60685731, 91.30300141, 128.42681885],
[68.50636292, 91.12481689, 129.25317383],
[68.39311218, 91.01572418, 130.08976746],
[68.25946808, 90.94654083, 130.92756653]],
dtype=np.float32)
streamlines = Streamlines([streamline[[0, 10]], streamline,
streamline[::2], streamline[::3],
streamline[::5], streamline[::6]])
def io_tractogram(extension):
with InTemporaryDirectory():
fname = 'test.{}'.format(extension)
in_affine = np.eye(4)
in_dimensions = np.array([50, 50, 50])
in_voxel_sizes = np.array([2, 1.5, 1.5])
nii_header = create_nifti_header(in_affine, in_dimensions,
in_voxel_sizes)
sft = StatefulTractogram(streamlines, nii_header, space=Space.RASMM)
save_tractogram(sft, fname, bbox_valid_check=False)
if extension == 'trk':
reference = 'same'
else:
reference = nii_header
sft = load_tractogram(fname, reference, bbox_valid_check=False)
affine, dimensions, voxel_sizes, _ = sft.space_attributes
npt.assert_array_equal(in_affine, affine)
npt.assert_array_equal(in_voxel_sizes, voxel_sizes)
npt.assert_array_equal(in_dimensions, dimensions)
npt.assert_equal(len(sft), len(streamlines))
npt.assert_array_almost_equal(sft.streamlines[1], streamline,
decimal=4)
def test_io_trk():
io_tractogram('trk')
def test_io_tck():
io_tractogram('tck')
@pytest.mark.skipif(not have_fury, reason="Requires FURY")
def test_io_vtk():
io_tractogram('vtk')
def test_io_dpy():
io_tractogram('dpy')
@pytest.mark.skipif(not have_fury, reason="Requires FURY")
def test_low_io_vtk():
with InTemporaryDirectory():
fname = 'test.fib'
# Test save
save_vtk_streamlines(streamlines, fname, binary=True)
tracks = load_vtk_streamlines(fname)
npt.assert_equal(len(tracks), len(streamlines))
npt.assert_array_almost_equal(tracks[1], streamline, decimal=4)
def trk_loader(filename):
try:
with InTemporaryDirectory():
load_trk(filename, filepath_dix['gs.nii'])
return True
except ValueError:
return False
def trk_saver(filename):
sft = load_tractogram(filepath_dix['gs.trk'], filepath_dix['gs.nii'])
try:
with InTemporaryDirectory():
save_trk(sft, filename)
return True
except ValueError:
return False
def test_io_trk_load():
npt.assert_(trk_loader(filepath_dix['gs.trk']),
msg='trk_loader should be able to load a trk')
npt.assert_(not trk_loader('fake_file.TRK'),
msg='trk_loader should not be able to load a TRK')
npt.assert_(not trk_loader(filepath_dix['gs.tck']),
msg='trk_loader should not be able to load a tck')
npt.assert_(not trk_loader(filepath_dix['gs.fib']),
msg='trk_loader should not be able to load a fib')
npt.assert_(not trk_loader(filepath_dix['gs.dpy']),
msg='trk_loader should not be able to load a dpy')
def test_io_trk_save():
npt.assert_(trk_saver(filepath_dix['gs.trk']),
msg='trk_saver should be able to save a trk')
npt.assert_(not trk_saver('fake_file.TRK'),
msg='trk_saver should not be able to save a TRK')
npt.assert_(not trk_saver(filepath_dix['gs.tck']),
msg='trk_saver should not be able to save a tck')
npt.assert_(not trk_saver(filepath_dix['gs.fib']),
msg='trk_saver should not be able to save a fib')
npt.assert_(not trk_saver(filepath_dix['gs.dpy']),
msg='trk_saver should not be able to save a dpy')
if __name__ == '__main__':
npt.run_module_suite()
|
pranavtendolkr/horizon
|
refs/heads/master
|
openstack_dashboard/test/integration_tests/pages/admin/system/flavorspage.py
|
36
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from selenium.webdriver.common import by
from openstack_dashboard.test.integration_tests.pages import basepage
from openstack_dashboard.test.integration_tests.regions import forms
from openstack_dashboard.test.integration_tests.regions import tables
class FlavorsPage(basepage.BaseNavigationPage):
DEFAULT_ID = "auto"
FLAVORS_TABLE_NAME_COLUMN_INDEX = 0
_flavors_table_locator = (by.By.ID, 'flavors')
FLAVORS_TABLE_ACTIONS = ("create_flavor", "delete_flavors")
FLAVORS_TABLE_ROW_ACTIONS = {
tables.ComplexActionRowRegion.PRIMARY_ACTION: "edit_flavor",
tables.ComplexActionRowRegion.SECONDARY_ACTIONS: (
"modify_access", "update_metadata", "delete_flavor")
}
CREATE_FLAVOR_FORM_FIELDS = (("name", "id_", "vcpus", "ram",
"root_disk", "ephemeral_disk",
"swap_disk"),
("all_projects", "selected_projects"))
def __init__(self, driver, conf):
super(FlavorsPage, self).__init__(driver, conf)
self._page_title = "Flavors"
def _get_row_with_flavor_name(self, name):
return self.flavors_table.get_row(
self.FLAVORS_TABLE_NAME_COLUMN_INDEX, name)
@property
def flavors_table(self):
src_elem = self._get_element(*self._flavors_table_locator)
return tables.ComplexActionTableRegion(self.driver,
self.conf, src_elem,
self.FLAVORS_TABLE_ACTIONS,
self.FLAVORS_TABLE_ROW_ACTIONS)
@property
def create_flavor_form(self):
return forms.TabbedFormRegion(self.driver, self.conf, None,
self.CREATE_FLAVOR_FORM_FIELDS)
@property
def confirm_delete_flavors_form(self):
return forms.BaseFormRegion(self.driver, self.conf, None)
def create_flavor(self, name, id_=DEFAULT_ID, vcpus=None, ram=None,
root_disk=None, ephemeral_disk=None, swap_disk=None):
self.flavors_table.create_flavor.click()
self.create_flavor_form.name.text = name
if id_ is not None:
self.create_flavor_form.id_.text = id_
self.create_flavor_form.vcpus.value = vcpus
self.create_flavor_form.ram.value = ram
self.create_flavor_form.root_disk.value = root_disk
self.create_flavor_form.ephemeral_disk.value = ephemeral_disk
self.create_flavor_form.swap_disk.value = swap_disk
self.create_flavor_form.submit.click()
self._wait_till_spinner_disappears()
def delete_flavor(self, name):
row = self._get_row_with_flavor_name(name)
row.mark()
self.flavors_table.delete_flavors.click()
self.confirm_delete_flavors_form.submit.click()
self._wait_till_spinner_disappears()
def is_flavor_present(self, name):
return bool(self._get_row_with_flavor_name(name))
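# Usage sketch (hypothetical; `driver` and `conf` come from the integration
# test harness that instantiates page objects):
#
#   page = FlavorsPage(driver, conf)
#   page.create_flavor('m1.tiny', vcpus=1, ram=512, root_disk=1)
#   assert page.is_flavor_present('m1.tiny')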
|
saullocastro/pyNastran
|
refs/heads/master
|
pyNastran/gui/vtk_examples/mesh_label_image.py
|
1
|
import vtk
meta_image_filename = 'labels.mhd'
# Prepare to read the file
reader_volume = vtk.vtkMetaImageReader()
reader_volume.SetFileName(meta_image_filename)
reader_volume.Update()
# Extract the region of interest
voi = vtk.vtkExtractVOI()
if vtk.VTK_MAJOR_VERSION <= 5:
voi.SetInput(reader_volume.GetOutput())
else:
voi.SetInputConnection(reader_volume.GetOutputPort())
#voi.SetVOI(0,517, 0,228, 0,392)
voi.SetSampleRate(1, 1, 1)
#voi.SetSampleRate(3, 3, 3)
voi.Update() # necessary for GetScalarRange()
srange = voi.GetOutput().GetScalarRange() # needs Update() before!
print("Range", srange)
##Prepare surface generation
#contour = vtk.vtkContourFilter()
#contour = vtk.vtkMarchingCubes()
contour = vtk.vtkDiscreteMarchingCubes() #for label images
if vtk.VTK_MAJOR_VERSION <= 5:
contour.SetInput(voi.GetOutput())
else:
contour.SetInputConnection(voi.GetOutputPort())
contour.ComputeNormalsOn()
##run through all labels
for index in range(1, int(srange[1]) + 1):
print("Doing label", index)
contour.SetValue(0, index)
contour.Update() #needed for GetNumberOfPolys() !!!
smoother = vtk.vtkWindowedSincPolyDataFilter()
if vtk.VTK_MAJOR_VERSION <= 5:
smoother.SetInput(contour.GetOutput())
else:
smoother.SetInputConnection(contour.GetOutputPort())
smoother.SetNumberOfIterations(5)
#smoother.BoundarySmoothingOff()
#smoother.FeatureEdgeSmoothingOff()
#smoother.SetFeatureAngle(120.0)
#smoother.SetPassBand(.001)
smoother.NonManifoldSmoothingOn()
smoother.NormalizeCoordinatesOn()
smoother.Update()
##calc cell normal
triangle_cell_normals = vtk.vtkPolyDataNormals()
if vtk.VTK_MAJOR_VERSION <= 5:
triangle_cell_normals.SetInput(smoother.GetOutput())
else:
triangle_cell_normals.SetInputConnection(smoother.GetOutputPort())
triangle_cell_normals.ComputeCellNormalsOn()
triangle_cell_normals.ComputePointNormalsOff()
triangle_cell_normals.ConsistencyOn()
triangle_cell_normals.AutoOrientNormalsOn()
triangle_cell_normals.Update() #creates vtkPolyData
##calc cell area
triangle_cell_mesh_quality = vtk.vtkMeshQuality()
if vtk.VTK_MAJOR_VERSION <= 5:
triangle_cell_mesh_quality.SetInput(triangle_cell_normals.GetOutput())
else:
triangle_cell_mesh_quality.SetInputConnection(triangle_cell_normals.GetOutputPort())
triangle_cell_mesh_quality.SetTriangleQualityMeasureToArea()
triangle_cell_mesh_quality.SaveCellQualityOn() #default
triangle_cell_mesh_quality.Update() #creates vtkDataSet
point_normal_array = triangle_cell_normals.GetOutput().GetCellData().GetNormals()
quality_array = triangle_cell_mesh_quality.GetOutput().GetCellData().GetArray("Quality")
if point_normal_array.GetNumberOfTuples() != quality_array.GetNumberOfTuples():
    print("Error! Sizes of normal array and area array don't match!")
exit(1)
f = open('label_stat' + "_%.4d" % index + ".dat", 'w')
f.write("#cell_index\tarea\tn_x\tn_y\tn_z")
for i in range(0, point_normal_array.GetNumberOfTuples()):
point_normal = point_normal_array.GetTuple3(i) #this is for 3D data in python
area = quality_array.GetValue(i)
f.write('%s %s %s %s %s\n' % (i, area, point_normal[0], point_normal[1], point_normal[2]))
f.close()
|
ezigman/pjsip-upstream
|
refs/heads/master
|
tests/pjsua/scripts-call/301_ice_public_b.py
|
59
|
# $Id: 301_ice_public_b.py 2392 2008-12-22 18:54:58Z bennylp $
#
from inc_cfg import *
# This test:
# - makes a call with ICE but without STUN.
# Note:
# - need --dis-codec to keep the INVITE packet smaller than a typical MTU
uas_args = "--null-audio --id=\"<sip:test1@pjsip.org>\" --registrar=sip:sip.pjsip.org --username=test1 --password=test1 --realm=pjsip.org --proxy=\"sip:sip.pjsip.org;lr\" --rtp-port 0 --use-ice --use-compact-form --max-calls 1 --dis-codec=i --dis-codec=s --dis-codec=g --log-file callee.log"
uac_args = "--null-audio --id=\"<sip:test2@pjsip.org>\" --registrar=sip:sip.pjsip.org --username=test2 --password=test2 --realm=pjsip.org --proxy=\"sip:sip.pjsip.org;lr\" --rtp-port 0 --use-ice --use-compact-form --max-calls 1 --dis-codec=i --dis-codec=s --dis-codec=g --log-file caller.log"
test_param = TestParam(
"ICE via public internet with no STUN",
[
InstanceParam( "callee", uas_args,
uri="<sip:test1@pjsip.org>",
have_reg=True, have_publish=False),
InstanceParam( "caller", uac_args,
uri="<sip:test2@pjsip.org>",
have_reg=True, have_publish=False),
]
)
|
x111ong/django
|
refs/heads/master
|
django/conf/locale/cy/formats.py
|
504
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y' # '25 Hydref 2006'
TIME_FORMAT = 'P' # '2:30 y.b.'
DATETIME_FORMAT = 'j F Y, P' # '25 Hydref 2006, 2:30 y.b.'
YEAR_MONTH_FORMAT = 'F Y' # 'Hydref 2006'
MONTH_DAY_FORMAT = 'j F' # '25 Hydref'
SHORT_DATE_FORMAT = 'd/m/Y' # '25/10/2006'
SHORT_DATETIME_FORMAT = 'd/m/Y P' # '25/10/2006 2:30 y.b.'
FIRST_DAY_OF_WEEK = 1 # 'Dydd Llun'
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
|
fernandog/Medusa
|
refs/heads/optimized
|
ext/dogpile/cache/backends/__init__.py
|
70
|
from dogpile.cache.region import register_backend
register_backend(
"dogpile.cache.null", "dogpile.cache.backends.null", "NullBackend")
register_backend(
"dogpile.cache.dbm", "dogpile.cache.backends.file", "DBMBackend")
register_backend(
"dogpile.cache.pylibmc", "dogpile.cache.backends.memcached",
"PylibmcBackend")
register_backend(
"dogpile.cache.bmemcached", "dogpile.cache.backends.memcached",
"BMemcachedBackend")
register_backend(
"dogpile.cache.memcached", "dogpile.cache.backends.memcached",
"MemcachedBackend")
register_backend(
"dogpile.cache.memory", "dogpile.cache.backends.memory", "MemoryBackend")
register_backend(
"dogpile.cache.memory_pickle", "dogpile.cache.backends.memory",
"MemoryPickleBackend")
register_backend(
"dogpile.cache.redis", "dogpile.cache.backends.redis", "RedisBackend")
|
rbrito/pkg-youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/mediasite.py
|
11
|
# coding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urlparse,
)
from ..utils import (
ExtractorError,
float_or_none,
mimetype2ext,
str_or_none,
try_get,
unescapeHTML,
unsmuggle_url,
url_or_none,
urljoin,
)
_ID_RE = r'(?:[0-9a-f]{32,34}|[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12,14})'
class MediasiteIE(InfoExtractor):
_VALID_URL = r'(?xi)https?://[^/]+/Mediasite/(?:Play|Showcase/(?:default|livebroadcast)/Presentation)/(?P<id>%s)(?P<query>\?[^#]+|)' % _ID_RE
_TESTS = [
{
'url': 'https://hitsmediaweb.h-its.org/mediasite/Play/2db6c271681e4f199af3c60d1f82869b1d',
'info_dict': {
'id': '2db6c271681e4f199af3c60d1f82869b1d',
'ext': 'mp4',
'title': 'Lecture: Tuesday, September 20, 2016 - Sir Andrew Wiles',
'description': 'Sir Andrew Wiles: “Equations in arithmetic”\\n\\nI will describe some of the interactions between modern number theory and the problem of solving equations in rational numbers or integers\\u0027.',
'timestamp': 1474268400.0,
'upload_date': '20160919',
},
},
{
'url': 'http://mediasite.uib.no/Mediasite/Play/90bb363295d945d6b548c867d01181361d?catalog=a452b7df-9ae1-46b7-a3ba-aceeb285f3eb',
'info_dict': {
'id': '90bb363295d945d6b548c867d01181361d',
'ext': 'mp4',
'upload_date': '20150429',
'title': '5) IT-forum 2015-Dag 1 - Dungbeetle - How and why Rain created a tiny bug tracker for Unity',
'timestamp': 1430311380.0,
},
},
{
'url': 'https://collegerama.tudelft.nl/Mediasite/Play/585a43626e544bdd97aeb71a0ec907a01d',
'md5': '481fda1c11f67588c0d9d8fbdced4e39',
'info_dict': {
'id': '585a43626e544bdd97aeb71a0ec907a01d',
'ext': 'mp4',
'title': 'Een nieuwe wereld: waarden, bewustzijn en techniek van de mensheid 2.0.',
'description': '',
'thumbnail': r're:^https?://.*\.jpg(?:\?.*)?$',
'duration': 7713.088,
'timestamp': 1413309600,
'upload_date': '20141014',
},
},
{
'url': 'https://collegerama.tudelft.nl/Mediasite/Play/86a9ea9f53e149079fbdb4202b521ed21d?catalog=fd32fd35-6c99-466c-89d4-cd3c431bc8a4',
'md5': 'ef1fdded95bdf19b12c5999949419c92',
'info_dict': {
'id': '86a9ea9f53e149079fbdb4202b521ed21d',
'ext': 'wmv',
'title': '64ste Vakantiecursus: Afvalwater',
'description': 'md5:7fd774865cc69d972f542b157c328305',
'thumbnail': r're:^https?://.*\.jpg(?:\?.*?)?$',
'duration': 10853,
'timestamp': 1326446400,
'upload_date': '20120113',
},
},
{
'url': 'http://digitalops.sandia.gov/Mediasite/Play/24aace4429fc450fb5b38cdbf424a66e1d',
'md5': '9422edc9b9a60151727e4b6d8bef393d',
'info_dict': {
'id': '24aace4429fc450fb5b38cdbf424a66e1d',
'ext': 'mp4',
'title': 'Xyce Software Training - Section 1',
'description': r're:(?s)SAND Number: SAND 2013-7800.{200,}',
'upload_date': '20120409',
'timestamp': 1333983600,
'duration': 7794,
}
},
{
'url': 'https://collegerama.tudelft.nl/Mediasite/Showcase/livebroadcast/Presentation/ada7020854f743c49fbb45c9ec7dbb351d',
'only_matching': True,
},
{
'url': 'https://mediasite.ntnu.no/Mediasite/Showcase/default/Presentation/7d8b913259334b688986e970fae6fcb31d',
'only_matching': True,
},
{
# dashed id
'url': 'https://hitsmediaweb.h-its.org/mediasite/Play/2db6c271-681e-4f19-9af3-c60d1f82869b1d',
'only_matching': True,
}
]
# look in Mediasite.Core.js (Mediasite.ContentStreamType[*])
_STREAM_TYPES = {
0: 'video1', # the main video
2: 'slide',
3: 'presentation',
4: 'video2', # screencast?
5: 'video3',
}
@staticmethod
def _extract_urls(webpage):
return [
unescapeHTML(mobj.group('url'))
for mobj in re.finditer(
r'(?xi)<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:(?:https?:)?//[^/]+)?/Mediasite/Play/%s(?:\?.*?)?)\1' % _ID_RE,
webpage)]
def _real_extract(self, url):
url, data = unsmuggle_url(url, {})
mobj = re.match(self._VALID_URL, url)
resource_id = mobj.group('id')
query = mobj.group('query')
webpage, urlh = self._download_webpage_handle(url, resource_id) # XXX: add UrlReferrer?
redirect_url = urlh.geturl()
# XXX: might have also extracted UrlReferrer and QueryString from the html
service_path = compat_urlparse.urljoin(redirect_url, self._html_search_regex(
r'<div[^>]+\bid=["\']ServicePath[^>]+>(.+?)</div>', webpage, resource_id,
default='/Mediasite/PlayerService/PlayerService.svc/json'))
player_options = self._download_json(
'%s/GetPlayerOptions' % service_path, resource_id,
headers={
'Content-type': 'application/json; charset=utf-8',
'X-Requested-With': 'XMLHttpRequest',
},
data=json.dumps({
'getPlayerOptionsRequest': {
'ResourceId': resource_id,
'QueryString': query,
'UrlReferrer': data.get('UrlReferrer', ''),
'UseScreenReader': False,
}
}).encode('utf-8'))['d']
presentation = player_options['Presentation']
if presentation is None:
    raise ExtractorError(
        'Mediasite says: %s' % player_options['PlayerPresentationStatusMessage'],
        expected=True)
title = presentation['Title']
thumbnails = []
formats = []
for snum, Stream in enumerate(presentation['Streams']):
stream_type = Stream.get('StreamType')
if stream_type is None:
continue
video_urls = Stream.get('VideoUrls')
if not isinstance(video_urls, list):
video_urls = []
stream_id = self._STREAM_TYPES.get(
stream_type, 'type%u' % stream_type)
stream_formats = []
for unum, VideoUrl in enumerate(video_urls):
video_url = url_or_none(VideoUrl.get('Location'))
if not video_url:
continue
# XXX: if Stream.get('CanChangeScheme', False), switch scheme to HTTP/HTTPS
media_type = VideoUrl.get('MediaType')
if media_type == 'SS':
stream_formats.extend(self._extract_ism_formats(
video_url, resource_id,
ism_id='%s-%u.%u' % (stream_id, snum, unum),
fatal=False))
elif media_type == 'Dash':
stream_formats.extend(self._extract_mpd_formats(
video_url, resource_id,
mpd_id='%s-%u.%u' % (stream_id, snum, unum),
fatal=False))
else:
stream_formats.append({
'format_id': '%s-%u.%u' % (stream_id, snum, unum),
'url': video_url,
'ext': mimetype2ext(VideoUrl.get('MimeType')),
})
# TODO: if Stream['HasSlideContent']:
# synthesise an MJPEG video stream '%s-%u.slides' % (stream_type, snum)
# from Stream['Slides']
# this will require writing a custom downloader...
# disprefer 'secondary' streams
if stream_type != 0:
for fmt in stream_formats:
fmt['preference'] = -1
thumbnail_url = Stream.get('ThumbnailUrl')
if thumbnail_url:
thumbnails.append({
'id': '%s-%u' % (stream_id, snum),
'url': urljoin(redirect_url, thumbnail_url),
'preference': -1 if stream_type != 0 else 0,
})
formats.extend(stream_formats)
self._sort_formats(formats)
# XXX: Presentation['Presenters']
# XXX: Presentation['Transcript']
return {
'id': resource_id,
'title': title,
'description': presentation.get('Description'),
'duration': float_or_none(presentation.get('Duration'), 1000),
'timestamp': float_or_none(presentation.get('UnixTime'), 1000),
'formats': formats,
'thumbnails': thumbnails,
}
class MediasiteCatalogIE(InfoExtractor):
_VALID_URL = r'''(?xi)
(?P<url>https?://[^/]+/Mediasite)
/Catalog/Full/
(?P<catalog_id>{0})
(?:
/(?P<current_folder_id>{0})
/(?P<root_dynamic_folder_id>{0})
)?
'''.format(_ID_RE)
_TESTS = [{
'url': 'http://events7.mediasite.com/Mediasite/Catalog/Full/631f9e48530d454381549f955d08c75e21',
'info_dict': {
'id': '631f9e48530d454381549f955d08c75e21',
'title': 'WCET Summit: Adaptive Learning in Higher Ed: Improving Outcomes Dynamically',
},
'playlist_count': 6,
'expected_warnings': ['is not a supported codec'],
}, {
# with CurrentFolderId and RootDynamicFolderId
'url': 'https://medaudio.medicine.iu.edu/Mediasite/Catalog/Full/9518c4a6c5cf4993b21cbd53e828a92521/97a9db45f7ab47428c77cd2ed74bb98f14/9518c4a6c5cf4993b21cbd53e828a92521',
'info_dict': {
'id': '9518c4a6c5cf4993b21cbd53e828a92521',
'title': 'IUSM Family and Friends Sessions',
},
'playlist_count': 2,
}, {
'url': 'http://uipsyc.mediasite.com/mediasite/Catalog/Full/d5d79287c75243c58c50fef50174ec1b21',
'only_matching': True,
}, {
# no AntiForgeryToken
'url': 'https://live.libraries.psu.edu/Mediasite/Catalog/Full/8376d4b24dd1457ea3bfe4cf9163feda21',
'only_matching': True,
}, {
'url': 'https://medaudio.medicine.iu.edu/Mediasite/Catalog/Full/9518c4a6c5cf4993b21cbd53e828a92521/97a9db45f7ab47428c77cd2ed74bb98f14/9518c4a6c5cf4993b21cbd53e828a92521',
'only_matching': True,
}, {
# dashed id
'url': 'http://events7.mediasite.com/Mediasite/Catalog/Full/631f9e48-530d-4543-8154-9f955d08c75e',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
mediasite_url = mobj.group('url')
catalog_id = mobj.group('catalog_id')
current_folder_id = mobj.group('current_folder_id') or catalog_id
root_dynamic_folder_id = mobj.group('root_dynamic_folder_id')
webpage = self._download_webpage(url, catalog_id)
# AntiForgeryToken is optional (e.g. [1])
# 1. https://live.libraries.psu.edu/Mediasite/Catalog/Full/8376d4b24dd1457ea3bfe4cf9163feda21
anti_forgery_token = self._search_regex(
r'AntiForgeryToken\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1',
webpage, 'anti forgery token', default=None, group='value')
if anti_forgery_token:
anti_forgery_header = self._search_regex(
r'AntiForgeryHeaderName\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1',
webpage, 'anti forgery header name',
default='X-SOFO-AntiForgeryHeader', group='value')
data = {
'IsViewPage': True,
'IsNewFolder': True,
'AuthTicket': None,
'CatalogId': catalog_id,
'CurrentFolderId': current_folder_id,
'RootDynamicFolderId': root_dynamic_folder_id,
'ItemsPerPage': 1000,
'PageIndex': 0,
'PermissionMask': 'Execute',
'CatalogSearchType': 'SearchInFolder',
'SortBy': 'Date',
'SortDirection': 'Descending',
'StartDate': None,
'EndDate': None,
'StatusFilterList': None,
'PreviewKey': None,
'Tags': [],
}
headers = {
'Content-Type': 'application/json; charset=UTF-8',
'Referer': url,
'X-Requested-With': 'XMLHttpRequest',
}
if anti_forgery_token:
headers[anti_forgery_header] = anti_forgery_token
catalog = self._download_json(
'%s/Catalog/Data/GetPresentationsForFolder' % mediasite_url,
catalog_id, data=json.dumps(data).encode(), headers=headers)
entries = []
for video in catalog['PresentationDetailsList']:
if not isinstance(video, dict):
continue
video_id = str_or_none(video.get('Id'))
if not video_id:
continue
entries.append(self.url_result(
'%s/Play/%s' % (mediasite_url, video_id),
ie=MediasiteIE.ie_key(), video_id=video_id))
title = try_get(
catalog, lambda x: x['CurrentFolder']['Name'], compat_str)
return self.playlist_result(entries, catalog_id, title,)
class MediasiteNamedCatalogIE(InfoExtractor):
_VALID_URL = r'(?xi)(?P<url>https?://[^/]+/Mediasite)/Catalog/catalogs/(?P<catalog_name>[^/?#&]+)'
_TESTS = [{
'url': 'https://msite.misis.ru/Mediasite/Catalog/catalogs/2016-industrial-management-skriabin-o-o',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
mediasite_url = mobj.group('url')
catalog_name = mobj.group('catalog_name')
webpage = self._download_webpage(url, catalog_name)
catalog_id = self._search_regex(
r'CatalogId\s*:\s*["\'](%s)' % _ID_RE, webpage, 'catalog id')
return self.url_result(
'%s/Catalog/Full/%s' % (mediasite_url, catalog_id),
ie=MediasiteCatalogIE.ie_key(), video_id=catalog_id)
|
arkarkark/snippy
|
refs/heads/master
|
app/lookup.py
|
1
|
# Copyright 2009 Alex K (wtwf.com) All rights reserved.
__author__ = "wtwf.com (Alex K)"
import urllib
import logging
import os
import socket
from google.appengine.api import urlfetch
from google.appengine.api import users
import jinja2
from wtwf import wtwfhandler
import model
import snippy_config
__pychecker__ = "no-override"
class SnippyHandler(wtwfhandler.WtwfHandler):
def is_iPhone(self):
ua = self.request.headers["User-Agent"]
for part in "iPhone,AppleWebKit,Mobile/,Safari/,".split(","):
if part not in ua:
return False
return True
def get(self, lookup, _):
# Strip off the leading /
path_info = lookup
if not path_info:
path_info = self.request.get("url")
# look up the keyword
snippy = model.GetByKeyword(path_info)
url = None
if snippy:
url = self.GetUrl(snippy)
else:
if "%20" in path_info:
path_info = urllib.unquote(path_info)
elif "+" in path_info and self.is_iPhone():
# friggin mobile safari and default search engines
path_info = path_info.replace("+", " ")
# see if we have a space and then something
parts = path_info.split(" ", 1)
if len(parts) == 2:
snippy = model.GetByKeyword(parts[0])
if snippy:
if self.request.query_string:
parts[1] += "?" + self.request.query_string
# TODO(ark): do we want to support {searchTerms} as well as %s?
url = self.GetUrl(snippy).replace("%s", urllib.quote(parts[1]))
if snippy and "%s" in url and snippy.alt_url:
url = snippy.alt_url
if snippy:
if snippy.private:
if not users.is_current_user_admin():
user = users.get_current_user()
if user:
url = None
else:
self.redirect(users.create_login_url(self.request.uri))
return
if snippy.ip_restrict:
try:
if (
socket.gethostbyname(snippy.ip_restrict)
!= self.request.remote_addr
):
logging.info(
"ip_restrict fail %r or %r == %r",
snippy.ip_restrict,
socket.gethostbyname(snippy.ip_restrict),
self.request.remote_addr,
)
url = None
except socket.error as err:
logging.error(
"error %r when looking up ip_restrict %r != %r trying hackertarget api",
err,
snippy.ip_restrict,
self.request.remote_addr,
)
# try and api
ip_url = (
"https://api.hackertarget.com/dnslookup/?q=%s"
% snippy.ip_restrict
)
result = urlfetch.fetch(ip_url, allow_truncated=True, deadline=1)
ip = result.content.split("\n")[0].split("\t")[-1]
if ip != self.request.remote_addr:
url = None
if url:
try:
snippy.used_count = (snippy.used_count or 0) + 1
snippy.put()
except Exception:
logging.exception(
"unable to +1 counter for: %s (old counter %r)",
snippy.keyword,
snippy.used_count,
)
if self.request.referrer:
self.redirect("/r/" + urllib.quote(url.encode("utf-8")))
else:
# logging.info("No referrer so just using regular redirect")
self.redirect(url.encode("utf-8"))
return
config = snippy_config.SnippyConfig()
default_url = config.get("defaultUrl")
if default_url:
self.redirect(str(jinja2.Template(default_url).render({"url": lookup})))
else:
file_name = os.path.join(os.path.dirname(__file__), "brand/default.html")
file_contents = open(file_name).read()
SEARCH = "<!--search-->"
if users.is_current_user_admin():
file_contents = file_contents.replace(
SEARCH,
'<link title="snippy" rel="search" type="application/opensearchdescription+xml" href="/admin/suggestxml">',
)
else:
file_contents = file_contents.replace(SEARCH, "<!-- not admin -->")
self.response.out.write(file_contents)
|
SauloAislan/ironic
|
refs/heads/SauloAislan-WIP
|
ironic/api/controllers/link.py
|
13
|
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from wsme import types as wtypes
from ironic.api.controllers import base
def build_url(resource, resource_args, bookmark=False, base_url=None):
if base_url is None:
base_url = pecan.request.public_url
template = '%(url)s/%(res)s' if bookmark else '%(url)s/v1/%(res)s'
# FIXME(lucasagomes): I'm getting a 404 when doing a GET on
# a nested resource that the URL ends with a '/'.
# https://groups.google.com/forum/#!topic/pecan-dev/QfSeviLg5qs
template += '%(args)s' if resource_args.startswith('?') else '/%(args)s'
return template % {'url': base_url, 'res': resource, 'args': resource_args}
class Link(base.APIBase):
"""A link representation."""
href = wtypes.text
"""The url of a link."""
rel = wtypes.text
"""The name of a link."""
type = wtypes.text
"""Indicates the type of document/link."""
@staticmethod
def make_link(rel_name, url, resource, resource_args,
bookmark=False, type=wtypes.Unset):
href = build_url(resource, resource_args,
bookmark=bookmark, base_url=url)
return Link(href=href, rel=rel_name, type=type)
@classmethod
def sample(cls):
sample = cls(href="http://localhost:6385/chassis/"
"eaaca217-e7d8-47b4-bb41-3f99f20eed89",
rel="bookmark")
return sample
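# Usage sketch (hypothetical): build self/bookmark links for a node resource,
# mirroring how API controllers typically expose them.
#
#   links = [
#       Link.make_link('self', pecan.request.public_url, 'nodes', node_uuid),
#       Link.make_link('bookmark', pecan.request.public_url, 'nodes',
#                      node_uuid, bookmark=True),
#   ]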
|
liamgh/liamgreenhughes-sl4a-tf101
|
refs/heads/master
|
python/src/Lib/plat-linux2/CDROM.py
|
330
|
# Generated by h2py from /usr/include/linux/cdrom.h
CDROMPAUSE = 0x5301
CDROMRESUME = 0x5302
CDROMPLAYMSF = 0x5303
CDROMPLAYTRKIND = 0x5304
CDROMREADTOCHDR = 0x5305
CDROMREADTOCENTRY = 0x5306
CDROMSTOP = 0x5307
CDROMSTART = 0x5308
CDROMEJECT = 0x5309
CDROMVOLCTRL = 0x530a
CDROMSUBCHNL = 0x530b
CDROMREADMODE2 = 0x530c
CDROMREADMODE1 = 0x530d
CDROMREADAUDIO = 0x530e
CDROMEJECT_SW = 0x530f
CDROMMULTISESSION = 0x5310
CDROM_GET_MCN = 0x5311
CDROM_GET_UPC = CDROM_GET_MCN
CDROMRESET = 0x5312
CDROMVOLREAD = 0x5313
CDROMREADRAW = 0x5314
CDROMREADCOOKED = 0x5315
CDROMSEEK = 0x5316
CDROMPLAYBLK = 0x5317
CDROMREADALL = 0x5318
CDROMGETSPINDOWN = 0x531d
CDROMSETSPINDOWN = 0x531e
CDROMCLOSETRAY = 0x5319
CDROM_SET_OPTIONS = 0x5320
CDROM_CLEAR_OPTIONS = 0x5321
CDROM_SELECT_SPEED = 0x5322
CDROM_SELECT_DISC = 0x5323
CDROM_MEDIA_CHANGED = 0x5325
CDROM_DRIVE_STATUS = 0x5326
CDROM_DISC_STATUS = 0x5327
CDROM_CHANGER_NSLOTS = 0x5328
CDROM_LOCKDOOR = 0x5329
CDROM_DEBUG = 0x5330
CDROM_GET_CAPABILITY = 0x5331
CDROMAUDIOBUFSIZ = 0x5382
DVD_READ_STRUCT = 0x5390
DVD_WRITE_STRUCT = 0x5391
DVD_AUTH = 0x5392
CDROM_SEND_PACKET = 0x5393
CDROM_NEXT_WRITABLE = 0x5394
CDROM_LAST_WRITTEN = 0x5395
CDROM_PACKET_SIZE = 12
CGC_DATA_UNKNOWN = 0
CGC_DATA_WRITE = 1
CGC_DATA_READ = 2
CGC_DATA_NONE = 3
CD_MINS = 74
CD_SECS = 60
CD_FRAMES = 75
CD_SYNC_SIZE = 12
CD_MSF_OFFSET = 150
CD_CHUNK_SIZE = 24
CD_NUM_OF_CHUNKS = 98
CD_FRAMESIZE_SUB = 96
CD_HEAD_SIZE = 4
CD_SUBHEAD_SIZE = 8
CD_EDC_SIZE = 4
CD_ZERO_SIZE = 8
CD_ECC_SIZE = 276
CD_FRAMESIZE = 2048
CD_FRAMESIZE_RAW = 2352
CD_FRAMESIZE_RAWER = 2646
CD_FRAMESIZE_RAW1 = (CD_FRAMESIZE_RAW-CD_SYNC_SIZE)
CD_FRAMESIZE_RAW0 = (CD_FRAMESIZE_RAW-CD_SYNC_SIZE-CD_HEAD_SIZE)
CD_XA_HEAD = (CD_HEAD_SIZE+CD_SUBHEAD_SIZE)
CD_XA_TAIL = (CD_EDC_SIZE+CD_ECC_SIZE)
CD_XA_SYNC_HEAD = (CD_SYNC_SIZE+CD_XA_HEAD)
CDROM_LBA = 0x01
CDROM_MSF = 0x02
CDROM_DATA_TRACK = 0x04
CDROM_LEADOUT = 0xAA
CDROM_AUDIO_INVALID = 0x00
CDROM_AUDIO_PLAY = 0x11
CDROM_AUDIO_PAUSED = 0x12
CDROM_AUDIO_COMPLETED = 0x13
CDROM_AUDIO_ERROR = 0x14
CDROM_AUDIO_NO_STATUS = 0x15
CDC_CLOSE_TRAY = 0x1
CDC_OPEN_TRAY = 0x2
CDC_LOCK = 0x4
CDC_SELECT_SPEED = 0x8
CDC_SELECT_DISC = 0x10
CDC_MULTI_SESSION = 0x20
CDC_MCN = 0x40
CDC_MEDIA_CHANGED = 0x80
CDC_PLAY_AUDIO = 0x100
CDC_RESET = 0x200
CDC_IOCTLS = 0x400
CDC_DRIVE_STATUS = 0x800
CDC_GENERIC_PACKET = 0x1000
CDC_CD_R = 0x2000
CDC_CD_RW = 0x4000
CDC_DVD = 0x8000
CDC_DVD_R = 0x10000
CDC_DVD_RAM = 0x20000
CDS_NO_INFO = 0
CDS_NO_DISC = 1
CDS_TRAY_OPEN = 2
CDS_DRIVE_NOT_READY = 3
CDS_DISC_OK = 4
CDS_AUDIO = 100
CDS_DATA_1 = 101
CDS_DATA_2 = 102
CDS_XA_2_1 = 103
CDS_XA_2_2 = 104
CDS_MIXED = 105
CDO_AUTO_CLOSE = 0x1
CDO_AUTO_EJECT = 0x2
CDO_USE_FFLAGS = 0x4
CDO_LOCK = 0x8
CDO_CHECK_TYPE = 0x10
CD_PART_MAX = 64
CD_PART_MASK = (CD_PART_MAX - 1)
GPCMD_BLANK = 0xa1
GPCMD_CLOSE_TRACK = 0x5b
GPCMD_FLUSH_CACHE = 0x35
GPCMD_FORMAT_UNIT = 0x04
GPCMD_GET_CONFIGURATION = 0x46
GPCMD_GET_EVENT_STATUS_NOTIFICATION = 0x4a
GPCMD_GET_PERFORMANCE = 0xac
GPCMD_INQUIRY = 0x12
GPCMD_LOAD_UNLOAD = 0xa6
GPCMD_MECHANISM_STATUS = 0xbd
GPCMD_MODE_SELECT_10 = 0x55
GPCMD_MODE_SENSE_10 = 0x5a
GPCMD_PAUSE_RESUME = 0x4b
GPCMD_PLAY_AUDIO_10 = 0x45
GPCMD_PLAY_AUDIO_MSF = 0x47
GPCMD_PLAY_AUDIO_TI = 0x48
GPCMD_PLAY_CD = 0xbc
GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL = 0x1e
GPCMD_READ_10 = 0x28
GPCMD_READ_12 = 0xa8
GPCMD_READ_CDVD_CAPACITY = 0x25
GPCMD_READ_CD = 0xbe
GPCMD_READ_CD_MSF = 0xb9
GPCMD_READ_DISC_INFO = 0x51
GPCMD_READ_DVD_STRUCTURE = 0xad
GPCMD_READ_FORMAT_CAPACITIES = 0x23
GPCMD_READ_HEADER = 0x44
GPCMD_READ_TRACK_RZONE_INFO = 0x52
GPCMD_READ_SUBCHANNEL = 0x42
GPCMD_READ_TOC_PMA_ATIP = 0x43
GPCMD_REPAIR_RZONE_TRACK = 0x58
GPCMD_REPORT_KEY = 0xa4
GPCMD_REQUEST_SENSE = 0x03
GPCMD_RESERVE_RZONE_TRACK = 0x53
GPCMD_SCAN = 0xba
GPCMD_SEEK = 0x2b
GPCMD_SEND_DVD_STRUCTURE = 0xad
GPCMD_SEND_EVENT = 0xa2
GPCMD_SEND_KEY = 0xa3
GPCMD_SEND_OPC = 0x54
GPCMD_SET_READ_AHEAD = 0xa7
GPCMD_SET_STREAMING = 0xb6
GPCMD_START_STOP_UNIT = 0x1b
GPCMD_STOP_PLAY_SCAN = 0x4e
GPCMD_TEST_UNIT_READY = 0x00
GPCMD_VERIFY_10 = 0x2f
GPCMD_WRITE_10 = 0x2a
GPCMD_WRITE_AND_VERIFY_10 = 0x2e
GPCMD_SET_SPEED = 0xbb
GPCMD_PLAYAUDIO_TI = 0x48
GPCMD_GET_MEDIA_STATUS = 0xda
GPMODE_R_W_ERROR_PAGE = 0x01
GPMODE_WRITE_PARMS_PAGE = 0x05
GPMODE_AUDIO_CTL_PAGE = 0x0e
GPMODE_POWER_PAGE = 0x1a
GPMODE_FAULT_FAIL_PAGE = 0x1c
GPMODE_TO_PROTECT_PAGE = 0x1d
GPMODE_CAPABILITIES_PAGE = 0x2a
GPMODE_ALL_PAGES = 0x3f
GPMODE_CDROM_PAGE = 0x0d
DVD_STRUCT_PHYSICAL = 0x00
DVD_STRUCT_COPYRIGHT = 0x01
DVD_STRUCT_DISCKEY = 0x02
DVD_STRUCT_BCA = 0x03
DVD_STRUCT_MANUFACT = 0x04
DVD_LAYERS = 4
DVD_LU_SEND_AGID = 0
DVD_HOST_SEND_CHALLENGE = 1
DVD_LU_SEND_KEY1 = 2
DVD_LU_SEND_CHALLENGE = 3
DVD_HOST_SEND_KEY2 = 4
DVD_AUTH_ESTABLISHED = 5
DVD_AUTH_FAILURE = 6
DVD_LU_SEND_TITLE_KEY = 7
DVD_LU_SEND_ASF = 8
DVD_INVALIDATE_AGID = 9
DVD_LU_SEND_RPC_STATE = 10
DVD_HOST_SEND_RPC_STATE = 11
DVD_CPM_NO_COPYRIGHT = 0
DVD_CPM_COPYRIGHTED = 1
DVD_CP_SEC_NONE = 0
DVD_CP_SEC_EXIST = 1
DVD_CGMS_UNRESTRICTED = 0
DVD_CGMS_SINGLE = 2
DVD_CGMS_RESTRICTED = 3
CDROM_MAX_SLOTS = 256
|
mrroach/CentralServer
|
refs/heads/master
|
csrv/model/cards/runner/card01045_test.py
|
1
|
import unittest
from csrv.model import deck
from csrv.model import game
from csrv.model import premade_decks
from csrv.model import test_base
from csrv.model import timing_phases
from csrv.model.cards import card_info
from csrv.model.cards.runner import card01045
class TestCard01045(test_base.TestBase):
RUNNER_DECK = 'Maker Core'
def setUp(self):
test_base.TestBase.setUp(self)
self.card = card01045.Card01045(self.game, self.game.runner)
self.game.runner.grip.add(self.card)
def test_prevent_damage(self):
self.game.runner.credits.set(4)
self.game.runner.rig.add(self.card)
self.game.insert_next_phase(
timing_phases.TakeNetDamage(self.game, self.runner, 2))
self.assertIn(self.card._prevent_net_damage,
self.game.current_phase().choices())
self.game.resolve_current_phase(self.card._prevent_net_damage, None)
# pylint: disable=E1101
self.assertEqual(1, self.game.current_phase().damage)
self.assertEqual(0, len(self.game.current_phase().choices()))
if __name__ == '__main__':
unittest.main()
|
MER-GROUP/intellij-community
|
refs/heads/master
|
python/testData/paramInfo/SimpleClassFunction.py
|
83
|
class A:
def foo(self, a):
pass
a = A()
A.foo(<arg1>a, <arg2>1)
|
aspidites/beets
|
refs/heads/master
|
beets/util/__init__.py
|
1
|
# This file is part of beets.
# Copyright 2012, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Miscellaneous utility functions."""
from __future__ import division
import os
import sys
import re
import shutil
import fnmatch
from collections import defaultdict
import traceback
MAX_FILENAME_LENGTH = 200
class HumanReadableException(Exception):
"""An Exception that can include a human-readable error message to
be logged without a traceback. Can preserve a traceback for
debugging purposes as well.
Has at least two fields: `reason`, the underlying exception or a
string describing the problem; and `verb`, the action being
performed during the error.
If `tb` is provided, it is a string containing a traceback for the
associated exception. (Note that this is not necessary in Python 3.x
and should be removed when we make the transition.)
"""
error_kind = 'Error' # Human-readable description of error type.
def __init__(self, reason, verb, tb=None):
self.reason = reason
self.verb = verb
self.tb = tb
super(HumanReadableException, self).__init__(self.get_message())
def _gerund(self):
"""Generate a (likely) gerund form of the English verb.
"""
if ' ' in self.verb:
return self.verb
gerund = self.verb[:-1] if self.verb.endswith('e') else self.verb
gerund += 'ing'
return gerund
def _reasonstr(self):
"""Get the reason as a string."""
if isinstance(self.reason, basestring):
return self.reason
elif hasattr(self.reason, 'strerror'): # i.e., EnvironmentError
return self.reason.strerror
else:
return u'"{0}"'.format(self.reason)
def get_message(self):
"""Create the human-readable description of the error, sans
introduction.
"""
raise NotImplementedError
def log(self, logger):
"""Log to the provided `logger` a human-readable message as an
error and a verbose traceback as a debug message.
"""
if self.tb:
logger.debug(self.tb)
logger.error(u'{0}: {1}'.format(self.error_kind, self.args[0]))
class FilesystemError(HumanReadableException):
"""An error that occurred while performing a filesystem manipulation
via a function in this module. The `paths` field is a sequence of
pathnames involved in the operation.
"""
def __init__(self, reason, verb, paths, tb=None):
self.paths = paths
super(FilesystemError, self).__init__(reason, verb, tb)
def get_message(self):
# Use a nicer English phrasing for some specific verbs.
if self.verb in ('move', 'copy', 'rename'):
clause = 'while {0} {1} to {2}'.format(
self._gerund(), repr(self.paths[0]), repr(self.paths[1])
)
elif self.verb in ('delete',):
clause = 'while {0} {1}'.format(
self._gerund(), repr(self.paths[0])
)
else:
clause = 'during {0} of paths {1}'.format(
self.verb, u', '.join(repr(p) for p in self.paths)
)
return u'{0} {1}'.format(self._reasonstr(), clause)
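# Illustrative sketch (not part of the original module): FilesystemError is raised
# by the helpers below (remove, copy, move); callers usually catch it and call
# .log() instead of letting it propagate, e.g.
#
#   import logging
#   try:
#       copy('/no/such/file', '/tmp/dst')
#   except FilesystemError as exc:
#       exc.log(logging.getLogger('beets'))  # "Error: ... while copying ..."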
def normpath(path):
"""Provide the canonical form of the path suitable for storing in
the database.
"""
return os.path.normpath(os.path.abspath(os.path.expanduser(path)))
def ancestry(path, pathmod=None):
"""Return a list consisting of path's parent directory, its
grandparent, and so on. For instance:
>>> ancestry('/a/b/c')
['/', '/a', '/a/b']
"""
pathmod = pathmod or os.path
out = []
last_path = None
while path:
path = pathmod.dirname(path)
if path == last_path:
break
last_path = path
if path: # don't yield ''
out.insert(0, path)
return out
def sorted_walk(path, ignore=()):
"""Like ``os.walk``, but yields things in sorted, breadth-first
order. Directory and file names matching any glob pattern in
``ignore`` are skipped.
"""
# Make sure the path isn't a Unicode string.
path = bytestring_path(path)
# Get all the directories and files at this level.
dirs = []
files = []
for base in os.listdir(path):
# Skip ignored filenames.
skip = False
for pat in ignore:
if fnmatch.fnmatch(base, pat):
skip = True
break
if skip:
continue
# Add to output as either a file or a directory.
cur = os.path.join(path, base)
if os.path.isdir(syspath(cur)):
dirs.append(base)
else:
files.append(base)
# Sort lists and yield the current level.
dirs.sort()
files.sort()
yield (path, dirs, files)
# Recurse into directories.
for base in dirs:
cur = os.path.join(path, base)
# yield from _sorted_walk(cur)
for res in sorted_walk(cur, ignore):
yield res
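# Illustrative sketch (not part of the original module): sorted_walk() yields the
# same (dirpath, dirnames, filenames) tuples as os.walk(), but with names sorted
# and ignored patterns skipped, e.g.
#
#   for dirpath, dirnames, filenames in sorted_walk('/music', ignore=('.*',)):
#       print dirpath, filenames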
def mkdirall(path):
"""Make all the enclosing directories of path (like mkdir -p on the
parent).
"""
for ancestor in ancestry(path):
if not os.path.isdir(syspath(ancestor)):
os.mkdir(syspath(ancestor))
def prune_dirs(path, root=None, clutter=('.DS_Store', 'Thumbs.db')):
"""If path is an empty directory, then remove it. Recursively remove
path's ancestry up to root (which is never removed) where there are
empty directories. If path is not contained in root, then nothing is
removed. Filenames in clutter are ignored when determining
emptiness. If root is not provided, then only path may be removed
(i.e., no recursive removal).
"""
path = normpath(path)
if root is not None:
root = normpath(root)
ancestors = ancestry(path)
if root is None:
# Only remove the top directory.
ancestors = []
elif root in ancestors:
# Only remove directories below the root.
ancestors = ancestors[ancestors.index(root)+1:]
else:
# Remove nothing.
return
# Traverse upward from path.
ancestors.append(path)
ancestors.reverse()
for directory in ancestors:
directory = syspath(directory)
if not os.path.exists(directory):
# Directory gone already.
continue
if all(fn in clutter for fn in os.listdir(directory)):
# Directory contains only clutter (or nothing).
try:
shutil.rmtree(directory)
except OSError:
break
else:
break
def components(path, pathmod=None):
"""Return a list of the path components in path. For instance:
>>> components('/a/b/c')
['a', 'b', 'c']
"""
pathmod = pathmod or os.path
comps = []
ances = ancestry(path, pathmod)
for anc in ances:
comp = pathmod.basename(anc)
if comp:
comps.append(comp)
else: # root
comps.append(anc)
last = pathmod.basename(path)
if last:
comps.append(last)
return comps
def bytestring_path(path):
"""Given a path, which is either a str or a unicode, returns a str
path (ensuring that we never deal with Unicode pathnames).
"""
# Pass through bytestrings.
if isinstance(path, str):
return path
# Try to encode with default encodings, but fall back to UTF8.
encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
try:
return path.encode(encoding)
except (UnicodeError, LookupError):
return path.encode('utf8')
def displayable_path(path):
"""Attempts to decode a bytestring path to a unicode object for the
purpose of displaying it to the user.
"""
if isinstance(path, unicode):
return path
elif not isinstance(path, str):
# A non-string object: just get its unicode representation.
return unicode(path)
encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
try:
return path.decode(encoding, 'ignore')
except (UnicodeError, LookupError):
return path.decode('utf8', 'ignore')
def syspath(path, pathmod=None):
"""Convert a path for use by the operating system. In particular,
paths on Windows must receive a magic prefix and must be converted
to unicode before they are sent to the OS.
"""
pathmod = pathmod or os.path
windows = pathmod.__name__ == 'ntpath'
# Don't do anything if we're not on windows
if not windows:
return path
if not isinstance(path, unicode):
# Try to decode with default encodings, but fall back to UTF8.
encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
try:
path = path.decode(encoding, 'replace')
except UnicodeError:
path = path.decode('utf8', 'replace')
# Add the magic prefix if it isn't already there
if not path.startswith(u'\\\\?\\'):
path = u'\\\\?\\' + path
return path
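# Illustrative sketch (not part of the original module), assuming `import ntpath`:
# syspath(u'C:\\Music\\a.mp3', ntpath) returns u'\\\\?\\C:\\Music\\a.mp3' (the
# magic long-path prefix is added and the path is coerced to unicode), while with
# a POSIX pathmod the path is returned unchanged.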
def samefile(p1, p2):
"""Safer equality for paths."""
return shutil._samefile(syspath(p1), syspath(p2))
def remove(path, soft=True):
"""Remove the file. If `soft`, then no error will be raised if the
file does not exist.
"""
path = syspath(path)
if soft and not os.path.exists(path):
return
try:
os.remove(path)
except (OSError, IOError) as exc:
raise FilesystemError(exc, 'delete', (path,), traceback.format_exc())
def copy(path, dest, replace=False, pathmod=os.path):
"""Copy a plain file. Permissions are not copied. If `dest` already
exists, raises a FilesystemError unless `replace` is True. Has no
effect if `path` is the same as `dest`. Paths are translated to
system paths before the syscall.
"""
if samefile(path, dest):
return
path = syspath(path)
dest = syspath(dest)
if not replace and pathmod.exists(dest):
raise FilesystemError('file exists', 'copy', (path, dest))
try:
shutil.copyfile(path, dest)
except (OSError, IOError) as exc:
raise FilesystemError(exc, 'copy', (path, dest),
traceback.format_exc())
def move(path, dest, replace=False, pathmod=os.path):
"""Rename a file. `dest` may not be a directory. If `dest` already
exists, raises an OSError unless `replace` is True. Has no effect if
`path` is the same as `dest`. If the paths are on different
filesystems (or the rename otherwise fails), a copy is attempted
instead, in which case metadata will *not* be preserved. Paths are
translated to system paths.
"""
if samefile(path, dest):
return
path = syspath(path)
dest = syspath(dest)
if pathmod.exists(dest):
raise FilesystemError('file exists', 'rename', (path, dest),
traceback.format_exc())
# First, try renaming the file.
try:
os.rename(path, dest)
except OSError:
# Otherwise, copy and delete the original.
try:
shutil.copyfile(path, dest)
os.remove(path)
except (OSError, IOError) as exc:
raise FilesystemError(exc, 'move', (path, dest),
traceback.format_exc())
def unique_path(path):
"""Returns a version of ``path`` that does not exist on the
filesystem. Specifically, if ``path`` itself already exists, then
something unique is appended to the path.
"""
if not os.path.exists(syspath(path)):
return path
base, ext = os.path.splitext(path)
match = re.search(r'\.(\d+)$', base)
if match:
num = int(match.group(1))
base = base[:match.start()]
else:
num = 0
while True:
num += 1
new_path = '%s.%i%s' % (base, num, ext)
if not os.path.exists(new_path):
return new_path
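# Illustrative sketch (not part of the original module): if '/tmp/song.mp3' already
# exists, unique_path('/tmp/song.mp3') returns the first of '/tmp/song.1.mp3',
# '/tmp/song.2.mp3', ... that does not exist; a nonexistent path is returned as-is.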
# Note: The Windows "reserved characters" are, of course, allowed on
# Unix. They are forbidden here because they cause problems on Samba
# shares, which are sufficiently common as to cause frequent problems.
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
CHAR_REPLACE = [
(re.compile(ur'[\\/]'), u'_'), # / and \ -- forbidden everywhere.
(re.compile(ur'^\.'), u'_'), # Leading dot (hidden files on Unix).
(re.compile(ur'[\x00-\x1f]'), u''), # Control characters.
(re.compile(ur'[<>:"\?\*\|]'), u'_'), # Windows "reserved characters".
(re.compile(ur'\.$'), u'_'), # Trailing dots.
(re.compile(ur'\s+$'), u''), # Trailing whitespace.
]
def sanitize_path(path, pathmod=None, replacements=None):
"""Takes a path (as a Unicode string) and makes sure that it is
legal. Returns a new path. Only works with fragments; won't work
reliably on Windows when a path begins with a drive letter. Path
separators (including altsep!) should already be cleaned from the
path components. If replacements is specified, it is used *instead*
of the default set of replacements for the platform; it must be a
list of (compiled regex, replacement string) pairs.
"""
pathmod = pathmod or os.path
# Choose the appropriate replacements.
if not replacements:
replacements = list(CHAR_REPLACE)
comps = components(path, pathmod)
if not comps:
return ''
for i, comp in enumerate(comps):
# Replace special characters.
for regex, repl in replacements:
comp = regex.sub(repl, comp)
# Truncate each component.
comp = comp[:MAX_FILENAME_LENGTH]
comps[i] = comp
return pathmod.join(*comps)
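# Illustrative sketch (not part of the original module): with the default
# replacements and a POSIX pathmod, sanitize_path(u'Who? What?/.hidden name ')
# yields u'Who_ What_/_hidden name' -- reserved characters and leading dots become
# underscores and trailing whitespace is stripped, per path component.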
def sanitize_for_path(value, pathmod, key=None):
"""Sanitize the value for inclusion in a path: replace separators
with _, etc. Doesn't guarantee that the whole path will be valid;
you should still call sanitize_path on the complete path.
"""
if isinstance(value, basestring):
for sep in (pathmod.sep, pathmod.altsep):
if sep:
value = value.replace(sep, u'_')
elif key in ('track', 'tracktotal', 'disc', 'disctotal'):
# Pad indices with zeros.
value = u'%02i' % (value or 0)
elif key == 'year':
value = u'%04i' % (value or 0)
elif key in ('month', 'day'):
value = u'%02i' % (value or 0)
elif key == 'bitrate':
# Bitrate gets formatted as kbps.
value = u'%ikbps' % ((value or 0) // 1000)
elif key == 'samplerate':
# Sample rate formatted as kHz.
value = u'%ikHz' % ((value or 0) // 1000)
else:
value = unicode(value)
return value
def str2bool(value):
"""Returns a boolean reflecting a human-entered string."""
if value.lower() in ('yes', '1', 'true', 't', 'y'):
return True
else:
return False
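# Illustrative sketch (not part of the original module):
# >>> str2bool('Yes'), str2bool('0'), str2bool('maybe')
# (True, False, False)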
def levenshtein(s1, s2):
"""A nice DP edit distance implementation from Wikibooks:
http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/
Levenshtein_distance#Python
"""
if len(s1) < len(s2):
return levenshtein(s2, s1)
if not s1:
return len(s2)
previous_row = xrange(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1
deletions = current_row[j] + 1
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
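# Illustrative sketch (not part of the original module):
# >>> levenshtein('kitten', 'sitting')
# 3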
def plurality(objs):
"""Given a sequence of comparable objects, returns the object that
is most common in the set and the frequency of that object. The
sequence must contain at least one object.
"""
# Calculate frequencies.
freqs = defaultdict(int)
for obj in objs:
freqs[obj] += 1
if not freqs:
raise ValueError('sequence must be non-empty')
# Find object with maximum frequency.
max_freq = 0
res = None
for obj, freq in freqs.items():
if freq > max_freq:
max_freq = freq
res = obj
return res, max_freq
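# Illustrative sketch (not part of the original module): plurality() pairs the most
# common element with its frequency, e.g.
# >>> plurality(['flac', 'mp3', 'flac', 'flac'])
# ('flac', 3)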
|
agaffney/ansible
|
refs/heads/devel
|
test/units/utils/test_vars.py
|
50
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015, Toshio Kuraotmi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from collections import defaultdict
from units.compat import mock, unittest
from ansible.errors import AnsibleError
from ansible.utils.vars import combine_vars, merge_hash
class TestVariableUtils(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
combine_vars_merge_data = (
dict(
a=dict(a=1),
b=dict(b=2),
result=dict(a=1, b=2),
),
dict(
a=dict(a=1, c=dict(foo='bar')),
b=dict(b=2, c=dict(baz='bam')),
result=dict(a=1, b=2, c=dict(foo='bar', baz='bam'))
),
dict(
a=defaultdict(a=1, c=defaultdict(foo='bar')),
b=dict(b=2, c=dict(baz='bam')),
result=defaultdict(a=1, b=2, c=defaultdict(foo='bar', baz='bam'))
),
)
combine_vars_replace_data = (
dict(
a=dict(a=1),
b=dict(b=2),
result=dict(a=1, b=2)
),
dict(
a=dict(a=1, c=dict(foo='bar')),
b=dict(b=2, c=dict(baz='bam')),
result=dict(a=1, b=2, c=dict(baz='bam'))
),
dict(
a=defaultdict(a=1, c=dict(foo='bar')),
b=dict(b=2, c=defaultdict(baz='bam')),
result=defaultdict(a=1, b=2, c=defaultdict(baz='bam'))
),
)
def test_combine_vars_improper_args(self):
with mock.patch('ansible.constants.DEFAULT_HASH_BEHAVIOUR', 'replace'):
with self.assertRaises(AnsibleError):
combine_vars([1, 2, 3], dict(a=1))
with self.assertRaises(AnsibleError):
combine_vars(dict(a=1), [1, 2, 3])
with mock.patch('ansible.constants.DEFAULT_HASH_BEHAVIOUR', 'merge'):
with self.assertRaises(AnsibleError):
combine_vars([1, 2, 3], dict(a=1))
with self.assertRaises(AnsibleError):
combine_vars(dict(a=1), [1, 2, 3])
def test_combine_vars_replace(self):
with mock.patch('ansible.constants.DEFAULT_HASH_BEHAVIOUR', 'replace'):
for test in self.combine_vars_replace_data:
self.assertEqual(combine_vars(test['a'], test['b']), test['result'])
def test_combine_vars_merge(self):
with mock.patch('ansible.constants.DEFAULT_HASH_BEHAVIOUR', 'merge'):
for test in self.combine_vars_merge_data:
self.assertEqual(combine_vars(test['a'], test['b']), test['result'])
merge_hash_data = {
"low_prio": {
"a": {
"a'": {
"x": "low_value",
"y": "low_value",
"list": ["low_value"]
}
},
"b": [1, 1, 2, 3]
},
"high_prio": {
"a": {
"a'": {
"y": "high_value",
"z": "high_value",
"list": ["high_value"]
}
},
"b": [3, 4, 4, {"5": "value"}]
}
}
def test_merge_hash_simple(self):
for test in self.combine_vars_merge_data:
self.assertEqual(merge_hash(test['a'], test['b']), test['result'])
low = self.merge_hash_data['low_prio']
high = self.merge_hash_data['high_prio']
expected = {
"a": {
"a'": {
"x": "low_value",
"y": "high_value",
"z": "high_value",
"list": ["high_value"]
}
},
"b": high['b']
}
self.assertEqual(merge_hash(low, high), expected)
def test_merge_hash_non_recursive_and_list_replace(self):
low = self.merge_hash_data['low_prio']
high = self.merge_hash_data['high_prio']
expected = high
self.assertEqual(merge_hash(low, high, False, 'replace'), expected)
def test_merge_hash_non_recursive_and_list_keep(self):
low = self.merge_hash_data['low_prio']
high = self.merge_hash_data['high_prio']
expected = {
"a": high['a'],
"b": low['b']
}
self.assertEqual(merge_hash(low, high, False, 'keep'), expected)
def test_merge_hash_non_recursive_and_list_append(self):
low = self.merge_hash_data['low_prio']
high = self.merge_hash_data['high_prio']
expected = {
"a": high['a'],
"b": low['b'] + high['b']
}
self.assertEqual(merge_hash(low, high, False, 'append'), expected)
def test_merge_hash_non_recursive_and_list_prepend(self):
low = self.merge_hash_data['low_prio']
high = self.merge_hash_data['high_prio']
expected = {
"a": high['a'],
"b": high['b'] + low['b']
}
self.assertEqual(merge_hash(low, high, False, 'prepend'), expected)
def test_merge_hash_non_recursive_and_list_append_rp(self):
low = self.merge_hash_data['low_prio']
high = self.merge_hash_data['high_prio']
expected = {
"a": high['a'],
"b": [1, 1, 2] + high['b']
}
self.assertEqual(merge_hash(low, high, False, 'append_rp'), expected)
def test_merge_hash_non_recursive_and_list_prepend_rp(self):
low = self.merge_hash_data['low_prio']
high = self.merge_hash_data['high_prio']
expected = {
"a": high['a'],
"b": high['b'] + [1, 1, 2]
}
self.assertEqual(merge_hash(low, high, False, 'prepend_rp'), expected)
def test_merge_hash_recursive_and_list_replace(self):
low = self.merge_hash_data['low_prio']
high = self.merge_hash_data['high_prio']
expected = {
"a": {
"a'": {
"x": "low_value",
"y": "high_value",
"z": "high_value",
"list": ["high_value"]
}
},
"b": high['b']
}
self.assertEqual(merge_hash(low, high, True, 'replace'), expected)
def test_merge_hash_recursive_and_list_keep(self):
low = self.merge_hash_data['low_prio']
high = self.merge_hash_data['high_prio']
expected = {
"a": {
"a'": {
"x": "low_value",
"y": "high_value",
"z": "high_value",
"list": ["low_value"]
}
},
"b": low['b']
}
self.assertEqual(merge_hash(low, high, True, 'keep'), expected)
def test_merge_hash_recursive_and_list_append(self):
low = self.merge_hash_data['low_prio']
high = self.merge_hash_data['high_prio']
expected = {
"a": {
"a'": {
"x": "low_value",
"y": "high_value",
"z": "high_value",
"list": ["low_value", "high_value"]
}
},
"b": low['b'] + high['b']
}
self.assertEqual(merge_hash(low, high, True, 'append'), expected)
def test_merge_hash_recursive_and_list_prepend(self):
low = self.merge_hash_data['low_prio']
high = self.merge_hash_data['high_prio']
expected = {
"a": {
"a'": {
"x": "low_value",
"y": "high_value",
"z": "high_value",
"list": ["high_value", "low_value"]
}
},
"b": high['b'] + low['b']
}
self.assertEqual(merge_hash(low, high, True, 'prepend'), expected)
def test_merge_hash_recursive_and_list_append_rp(self):
low = self.merge_hash_data['low_prio']
high = self.merge_hash_data['high_prio']
expected = {
"a": {
"a'": {
"x": "low_value",
"y": "high_value",
"z": "high_value",
"list": ["low_value", "high_value"]
}
},
"b": [1, 1, 2] + high['b']
}
self.assertEqual(merge_hash(low, high, True, 'append_rp'), expected)
def test_merge_hash_recursive_and_list_prepend_rp(self):
low = self.merge_hash_data['low_prio']
high = self.merge_hash_data['high_prio']
expected = {
"a": {
"a'": {
"x": "low_value",
"y": "high_value",
"z": "high_value",
"list": ["high_value", "low_value"]
}
},
"b": high['b'] + [1, 1, 2]
}
self.assertEqual(merge_hash(low, high, True, 'prepend_rp'), expected)
|
sureleo/leetcode
|
refs/heads/master
|
archive/python/dynamic_programming/PalindromePartitioningII.py
|
2
|
class Solution:
# @param s, a string
# @return an integer
def minCut(self, s):
matrix = [[False for i in xrange(len(s))] for i in xrange(len(s))]
for i in xrange(len(s)):
matrix[i][i] = True
for i in xrange(len(s)-1):
if s[i+1] == s[i]:
matrix[i][i+1] = True
else:
matrix[i][i+1] = False
for j in xrange(len(s)-2):
for i in xrange(len(s)-j-2):
matrix[i][j+i+2] = s[j+i+2] == s[i] and matrix[i+1][i+j+1]
opt = [0 for i in xrange(len(s)+1)]
for i in range(len(s)+1):
opt[i] = len(s) - i
for i in range(len(s)-1, -1, -1):
for j in range(i, len(s)):
if matrix[i][j]:
print(i, j)
opt[i] = min(1+opt[j+1], opt[i])
return opt[0]-1
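# Annotation (not part of the original solution): matrix[i][j] records whether
# s[i..j] is a palindrome, and opt[i] is the minimum number of palindromic pieces
# covering s[i:], so opt[i] = min(1 + opt[j+1]) over all j >= i with matrix[i][j]
# True; the minimum number of cuts is therefore opt[0] - 1.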
if __name__ == "__main__":
solution = Solution()
print solution.minCut("abaca")
|
quantenschaum/piripherals
|
refs/heads/master
|
tests/test_util.py
|
1
|
import pytest
from unittest.mock import Mock, patch
from piripherals.util import *
from time import sleep
def test_fork():
func = Mock()
fork(func)
sleep(0.3)
func.assert_called_once_with()
def test_not_raising(capsys):
func = Mock()
func.side_effect = Exception('Boom!')
func2 = not_raising(func)
func2()
assert func.called
out, err = capsys.readouterr()
assert 'Boom!' in err
def test_on_change(tmpdir):
foo = tmpdir.join('foo')
print(foo)
with foo.open('w') as f:
f.write('foo')
callback = Mock()
on_change(str(foo), callback, delay=0.1)
sleep(0.3)
assert not callback.called
with foo.open('w') as f:
f.write('bar')
sleep(0.3)
assert callback.called
@patch('piripherals.util.GPIO', create=True)
def test_irq_handler(gpio, capsys):
gpio.input.return_value = gpio.HIGH
callback = Mock()
# must continue to work, even when exception was raised in callback
callback.side_effect = Exception('oops')
h = IRQHandler(4, callback)
assert len(callback.mock_calls) == 0
trigger = gpio.add_event_detect.call_args[0][2]
trigger()
sleep(0.3)
assert len(callback.mock_calls) == 1
trigger()
sleep(0.3)
assert len(callback.mock_calls) == 2
out, err = capsys.readouterr()
assert 'oops' in err
def test_poller(capsys):
callback = Mock()
# must continue to work, even when exception was raised in callback
callback.side_effect = Exception('oops')
Poller(callback, delay=0.2)
sleep(0.3)
assert len(callback.mock_calls) == 2
out, err = capsys.readouterr()
assert 'oops' in err
|
jriegel/FreeCAD
|
refs/heads/dev-assembly-next
|
src/Mod/Path/PathScripts/linuxcnc_post.py
|
20
|
#***************************************************************************
#* (c) sliptonic (shopinthewoods@gmail.com) 2014 *
#* *
#* This file is part of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Lesser General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************/
'''
This is a postprocessor file for the Path workbench. It is used to
take a pseudo-gcode fragment output by a Path object, and produce
real GCode suitable for a LinuxCNC 3-axis mill. This postprocessor, once placed
in the appropriate PathScripts folder, can be used directly from inside FreeCAD,
via the GUI importer or via python scripts with:
import linuxcnc_post
linuxcnc_post.export(object,"/path/to/file.ncc")
'''
import datetime
now = datetime.datetime.now()
from PathScripts import PostUtils
#These globals set common customization preferences
OUTPUT_COMMENTS = True
OUTPUT_HEADER = True
OUTPUT_LINE_NUMBERS = False
SHOW_EDITOR = True
MODAL = False #if true, commands are suppressed when they are the same as on the previous line.
COMMAND_SPACE = " "
LINENR = 100 #line number starting value
#These globals will be reflected in the Machine configuration of the project
UNITS = "G21" #G21 for metric, G20 for US standard
MACHINE_NAME = "Millstone"
CORNER_MIN = {'x':0, 'y':0, 'z':0 }
CORNER_MAX = {'x':500, 'y':300, 'z':300 }
#Preamble text will appear at the beginning of the GCODE output file.
PREAMBLE = '''G17 G90
'''
#Postamble text will appear following the last operation.
POSTAMBLE = '''M05
G00 X-1.0 Y1.0
G17 G90
M2
'''
#Pre operation text will be inserted before every operation
PRE_OPERATION = ''''''
#Post operation text will be inserted after every operation
POST_OPERATION = ''''''
#Tool Change commands will be inserted before a tool change
TOOL_CHANGE = ''''''
# to distinguish python built-in open function from the one declared below
if open.__module__ == '__builtin__':
pythonopen = open
def export(objectslist,filename):
global UNITS
for obj in objectslist:
if not hasattr(obj,"Path"):
print "the object " + obj.Name + " is not a path. Please select only Paths and Compounds."
return
print "postprocessing..."
gcode = ""
#Find the machine.
#The user may have overridden post processor defaults in the GUI. Make sure we're using the current values in the Machine Def.
myMachine = None
for pathobj in objectslist:
if hasattr(pathobj,"Group"): #We have a compound or project.
for p in pathobj.Group:
if p.Name == "Machine":
myMachine = p
if myMachine is None:
print "No machine found in this project"
else:
if myMachine.MachineUnits == "Metric":
UNITS = "G21"
else:
UNITS = "G20"
# write header
if OUTPUT_HEADER:
gcode += linenumber() + "(Exported by FreeCAD)\n"
gcode += linenumber() + "(Post Processor: " + __name__ +")\n"
gcode += linenumber() + "(Output Time:"+str(now)+")\n"
#Write the preamble
if OUTPUT_COMMENTS: gcode += linenumber() + "(begin preamble)\n"
for line in PREAMBLE.splitlines(True):
gcode += linenumber() + line
gcode += linenumber() + UNITS + "\n"
for obj in objectslist:
#do the pre_op
if OUTPUT_COMMENTS: gcode += linenumber() + "(begin operation: " + obj.Label + ")\n"
for line in PRE_OPERATION.splitlines(True):
gcode += linenumber() + line
gcode += parse(obj)
#do the post_op
if OUTPUT_COMMENTS: gcode += linenumber() + "(finish operation: " + obj.Label + ")\n"
for line in POST_OPERATION.splitlines(True):
gcode += linenumber() + line
#do the post_amble
if OUTPUT_COMMENTS: gcode += "(begin postamble)\n"
for line in POSTAMBLE.splitlines(True):
gcode += linenumber() + line
if SHOW_EDITOR:
dia = PostUtils.GCodeEditorDialog()
dia.editor.setText(gcode)
result = dia.exec_()
if result:
final = dia.editor.toPlainText()
else:
final = gcode
else:
final = gcode
print "done postprocessing."
gfile = pythonopen(filename,"wb")
gfile.write(final)
gfile.close()
def linenumber():
global LINENR
if OUTPUT_LINE_NUMBERS == True:
LINENR += 10
return "N" + str(LINENR) + " "
return ""
def parse(pathobj):
out = ""
lastcommand = None
#params = ['X','Y','Z','A','B','I','J','K','F','S'] #This list controls the order of parameters
params = ['X','Y','Z','A','B','I','J','F','S','T','Q','R','L'] #linuxcnc doesn't want K parameters on XY-plane arcs; arc output still needs work.
if hasattr(pathobj,"Group"): #We have a compound or project.
if OUTPUT_COMMENTS: out += linenumber() + "(compound: " + pathobj.Label + ")\n"
for p in pathobj.Group:
out += parse(p)
return out
else: #parsing simple path
if not hasattr(pathobj,"Path"): #groups might contain non-path things like stock.
return out
if OUTPUT_COMMENTS: out += linenumber() + "(Path: " + pathobj.Label + ")\n"
for c in pathobj.Path.Commands:
outstring = []
command = c.Name
outstring.append(command)
# if modal: only print the command if it is not the same as the last one
if MODAL == True:
if command == lastcommand:
outstring.pop(0)
# Now add the remaining parameters in order
for param in params:
if param in c.Parameters:
if param == 'F':
outstring.append(param + format(c.Parameters['F'], '.2f'))
elif param == 'T':
outstring.append(param + str(c.Parameters['T']))
else:
outstring.append(param + format(c.Parameters[param], '.4f'))
# store the latest command
lastcommand = command
# Check for Tool Change:
if command == 'M6':
if OUTPUT_COMMENTS: out += linenumber() + "(begin toolchange)\n"
for line in TOOL_CHANGE.splitlines(True):
out += linenumber() + line
if command == "message":
if OUTPUT_COMMENTS == False:
outstring = [] #emit nothing for messages when comments are disabled
else:
outstring.pop(0) #remove the command
#prepend a line number and append a newline
if len(outstring) >= 1:
if OUTPUT_LINE_NUMBERS:
outstring.insert(0,(linenumber()))
#append the line to the final output
for w in outstring:
out += w + COMMAND_SPACE
out = out.strip() + "\n"
return out
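# Annotation (not part of the original postprocessor): for a command whose Name is
# 'G1' with Parameters {'X': 10.0, 'F': 300.0}, the loop above emits a line like
# "G1 X10.0000 F300.00" -- parameters appear in the order given by `params`, with
# F formatted to two decimals and the other parameters to four.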
print __name__ + " gcode postprocessor loaded."
|
iancze/Pysplotter
|
refs/heads/master
|
conf.py
|
1
|
# -*- coding: utf-8 -*-
#
# Pysplotter documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 10 15:19:07 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pysplotter'
copyright = u'2011, Ian Czekala'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pysplotterdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Pysplotter.tex', u'Pysplotter Documentation',
u'Ian Czekala', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pysplotter', u'Pysplotter Documentation',
[u'Ian Czekala'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Pysplotter'
epub_author = u'Ian Czekala'
epub_publisher = u'Ian Czekala'
epub_copyright = u'2011, Ian Czekala'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
delhivery/django
|
refs/heads/master
|
tests/utils_tests/test_module_loading.py
|
281
|
import imp
import os
import sys
import unittest
from importlib import import_module
from zipimport import zipimporter
from django.test import SimpleTestCase, modify_settings
from django.test.utils import extend_sys_path
from django.utils import six
from django.utils._os import upath
from django.utils.module_loading import (
autodiscover_modules, import_string, module_has_submodule,
)
class DefaultLoader(unittest.TestCase):
def setUp(self):
sys.meta_path.insert(0, ProxyFinder())
def tearDown(self):
sys.meta_path.pop(0)
def test_loader(self):
"Normal module existence can be tested"
test_module = import_module('utils_tests.test_module')
test_no_submodule = import_module(
'utils_tests.test_no_submodule')
# An importable child
self.assertTrue(module_has_submodule(test_module, 'good_module'))
mod = import_module('utils_tests.test_module.good_module')
self.assertEqual(mod.content, 'Good Module')
# A child that exists, but will generate an import error if loaded
self.assertTrue(module_has_submodule(test_module, 'bad_module'))
self.assertRaises(ImportError, import_module, 'utils_tests.test_module.bad_module')
# A child that doesn't exist
self.assertFalse(module_has_submodule(test_module, 'no_such_module'))
self.assertRaises(ImportError, import_module, 'utils_tests.test_module.no_such_module')
# A child that doesn't exist, but is the name of a package on the path
self.assertFalse(module_has_submodule(test_module, 'django'))
self.assertRaises(ImportError, import_module, 'utils_tests.test_module.django')
# Don't be confused by caching of import misses
import types # NOQA: causes attempted import of utils_tests.types
self.assertFalse(module_has_submodule(sys.modules['utils_tests'], 'types'))
# A module which doesn't have a __path__ (so no submodules)
self.assertFalse(module_has_submodule(test_no_submodule, 'anything'))
self.assertRaises(ImportError, import_module,
'utils_tests.test_no_submodule.anything')
class EggLoader(unittest.TestCase):
def setUp(self):
self.egg_dir = '%s/eggs' % os.path.dirname(upath(__file__))
def tearDown(self):
sys.path_importer_cache.clear()
sys.modules.pop('egg_module.sub1.sub2.bad_module', None)
sys.modules.pop('egg_module.sub1.sub2.good_module', None)
sys.modules.pop('egg_module.sub1.sub2', None)
sys.modules.pop('egg_module.sub1', None)
sys.modules.pop('egg_module.bad_module', None)
sys.modules.pop('egg_module.good_module', None)
sys.modules.pop('egg_module', None)
def test_shallow_loader(self):
"Module existence can be tested inside eggs"
egg_name = '%s/test_egg.egg' % self.egg_dir
with extend_sys_path(egg_name):
egg_module = import_module('egg_module')
# An importable child
self.assertTrue(module_has_submodule(egg_module, 'good_module'))
mod = import_module('egg_module.good_module')
self.assertEqual(mod.content, 'Good Module')
# A child that exists, but will generate an import error if loaded
self.assertTrue(module_has_submodule(egg_module, 'bad_module'))
self.assertRaises(ImportError, import_module, 'egg_module.bad_module')
# A child that doesn't exist
self.assertFalse(module_has_submodule(egg_module, 'no_such_module'))
self.assertRaises(ImportError, import_module, 'egg_module.no_such_module')
def test_deep_loader(self):
"Modules deep inside an egg can still be tested for existence"
egg_name = '%s/test_egg.egg' % self.egg_dir
with extend_sys_path(egg_name):
egg_module = import_module('egg_module.sub1.sub2')
# An importable child
self.assertTrue(module_has_submodule(egg_module, 'good_module'))
mod = import_module('egg_module.sub1.sub2.good_module')
self.assertEqual(mod.content, 'Deep Good Module')
# A child that exists, but will generate an import error if loaded
self.assertTrue(module_has_submodule(egg_module, 'bad_module'))
self.assertRaises(ImportError, import_module, 'egg_module.sub1.sub2.bad_module')
# A child that doesn't exist
self.assertFalse(module_has_submodule(egg_module, 'no_such_module'))
self.assertRaises(ImportError, import_module, 'egg_module.sub1.sub2.no_such_module')
class ModuleImportTestCase(unittest.TestCase):
def test_import_string(self):
cls = import_string('django.utils.module_loading.import_string')
self.assertEqual(cls, import_string)
# Test exceptions raised
self.assertRaises(ImportError, import_string, 'no_dots_in_path')
msg = 'Module "utils_tests" does not define a "unexistent" attribute'
with six.assertRaisesRegex(self, ImportError, msg):
import_string('utils_tests.unexistent')
@modify_settings(INSTALLED_APPS={'append': 'utils_tests.test_module'})
class AutodiscoverModulesTestCase(SimpleTestCase):
def tearDown(self):
sys.path_importer_cache.clear()
sys.modules.pop('utils_tests.test_module.another_bad_module', None)
sys.modules.pop('utils_tests.test_module.another_good_module', None)
sys.modules.pop('utils_tests.test_module.bad_module', None)
sys.modules.pop('utils_tests.test_module.good_module', None)
sys.modules.pop('utils_tests.test_module', None)
def test_autodiscover_modules_found(self):
autodiscover_modules('good_module')
def test_autodiscover_modules_not_found(self):
autodiscover_modules('missing_module')
def test_autodiscover_modules_found_but_bad_module(self):
with six.assertRaisesRegex(self, ImportError, "No module named '?a_package_name_that_does_not_exist'?"):
autodiscover_modules('bad_module')
def test_autodiscover_modules_several_one_bad_module(self):
with six.assertRaisesRegex(self, ImportError, "No module named '?a_package_name_that_does_not_exist'?"):
autodiscover_modules('good_module', 'bad_module')
def test_autodiscover_modules_several_found(self):
autodiscover_modules('good_module', 'another_good_module')
def test_autodiscover_modules_several_found_with_registry(self):
from .test_module import site
autodiscover_modules('good_module', 'another_good_module', register_to=site)
self.assertEqual(site._registry, {'lorem': 'ipsum'})
def test_validate_registry_keeps_intact(self):
from .test_module import site
with six.assertRaisesRegex(self, Exception, "Some random exception."):
autodiscover_modules('another_bad_module', register_to=site)
self.assertEqual(site._registry, {})
def test_validate_registry_resets_after_erroneous_module(self):
from .test_module import site
with six.assertRaisesRegex(self, Exception, "Some random exception."):
autodiscover_modules('another_good_module', 'another_bad_module', register_to=site)
self.assertEqual(site._registry, {'lorem': 'ipsum'})
def test_validate_registry_resets_after_missing_module(self):
from .test_module import site
autodiscover_modules('does_not_exist', 'another_good_module', 'does_not_exist2', register_to=site)
self.assertEqual(site._registry, {'lorem': 'ipsum'})
class ProxyFinder(object):
def __init__(self):
self._cache = {}
def find_module(self, fullname, path=None):
tail = fullname.rsplit('.', 1)[-1]
try:
fd, fn, info = imp.find_module(tail, path)
if fullname in self._cache:
old_fd = self._cache[fullname][0]
if old_fd:
old_fd.close()
self._cache[fullname] = (fd, fn, info)
except ImportError:
return None
else:
return self # this is a loader as well
def load_module(self, fullname):
if fullname in sys.modules:
return sys.modules[fullname]
fd, fn, info = self._cache[fullname]
try:
return imp.load_module(fullname, fd, fn, info)
finally:
if fd:
fd.close()
class TestFinder(object):
def __init__(self, *args, **kwargs):
self.importer = zipimporter(*args, **kwargs)
def find_module(self, path):
importer = self.importer.find_module(path)
if importer is None:
return
return TestLoader(importer)
class TestLoader(object):
def __init__(self, importer):
self.importer = importer
def load_module(self, name):
mod = self.importer.load_module(name)
mod.__loader__ = self
return mod
class CustomLoader(EggLoader):
"""The Custom Loader test is exactly the same as the EggLoader, but
it uses a custom defined Loader and Finder that is intentionally
split into two classes. Although the EggLoader combines both functions
into one class, this isn't required.
"""
def setUp(self):
super(CustomLoader, self).setUp()
sys.path_hooks.insert(0, TestFinder)
sys.path_importer_cache.clear()
def tearDown(self):
super(CustomLoader, self).tearDown()
sys.path_hooks.pop(0)
|
factorlibre/OCB
|
refs/heads/8.0
|
addons/portal_sale/__openerp__.py
|
380
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Portal Sale',
'version': '0.1',
'category': 'Tools',
'complexity': 'easy',
'description': """
This module adds a Sales menu to your portal as soon as sale and portal are installed.
======================================================================================
After installing this module, portal users will be able to access their own documents
via the following menus:
- Quotations
- Sale Orders
- Delivery Orders
- Products (public ones)
- Invoices
- Payments/Refunds
If online payment acquirers are configured, portal users will also be given the opportunity to
pay online on their Sale Orders and Invoices that are not paid yet. Paypal is included
by default, you simply need to configure a Paypal account in the Accounting/Invoicing settings.
""",
'author': 'OpenERP SA',
'depends': ['sale', 'portal', 'payment'],
'data': [
'security/portal_security.xml',
'portal_sale_view.xml',
'portal_sale_data.xml',
'res_config_view.xml',
'security/ir.model.access.csv',
],
'auto_install': True,
'category': 'Hidden',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
alexlo03/ansible
|
refs/heads/devel
|
lib/ansible/modules/windows/win_audit_policy_system.py
|
28
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Noah Sparks <nsparks@outlook.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: win_audit_policy_system
short_description: Used to make changes to the system wide Audit Policy
description:
- Used to make changes to the system wide Audit Policy.
- It is recommended to take a backup of the policies before adjusting them for the first time.
- See this page for in depth information U(https://technet.microsoft.com/en-us/library/cc766468.aspx).
version_added: "2.5"
author:
- Noah Sparks (@nwsparks)
options:
category:
description:
- Single string value for the category you would like to adjust the policy on.
- Cannot be used with I(subcategory). You must define one or the other.
- Changing this setting causes all subcategories to be adjusted to the defined I(audit_type).
subcategory:
description:
- Single string value for the subcategory you would like to adjust the policy on.
- Cannot be used with I(category). You must define one or the other.
audit_type:
description:
- The type of event you would like to audit for.
- Accepts a list. See examples.
required: yes
type: list
choices: [ failure, none, success ]
'''
EXAMPLES = r'''
- name: enable failure auditing for the subcategory "File System"
win_audit_policy_system:
subcategory: File System
audit_type: failure
- name: enable all auditing types for the category "Account logon events"
win_audit_policy_system:
category: Account logon events
audit_type: success, failure
- name: disable auditing for the subcategory "File System"
win_audit_policy_system:
subcategory: File System
audit_type: none
'''
RETURN = '''
current_audit_policy:
description: details on the policy being targeted
returned: always
type: dictionary
sample: |-
{
"File Share":"failure"
}
'''
|
abusalimov/pytest
|
refs/heads/master
|
testing/cx_freeze/tox_run.py
|
200
|
"""
Called by tox.ini: uses the generated executable to run the tests in ./tests/
directory.
.. note:: somehow calling "build/runtests_script" directly from tox doesn't
seem to work (at least on Windows).
"""
if __name__ == '__main__':
import os
import sys
executable = os.path.join(os.getcwd(), 'build', 'runtests_script')
if sys.platform.startswith('win'):
executable += '.exe'
sys.exit(os.system('%s tests' % executable))
|
otadmor/Open-Knesset
|
refs/heads/master
|
mks/migrations/0028_knesset_18.py
|
10
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
orm.Knesset.objects.get_or_create(
number=18,
defaults={'start_date': datetime.date(2009, 2, 24),
'end_date': datetime.date(2012, 10, 16)})
def backwards(self, orm):
"Write your backwards methods here."
orm.Knesset.objects.filter(number=18).delete()
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'mks.coalitionmembership': {
'Meta': {'ordering': "('party', 'start_date')", 'object_name': 'CoalitionMembership'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'coalition_memberships'", 'to': "orm['mks.Party']"}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.correlation': {
'Meta': {'object_name': 'Correlation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'm1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'m1'", 'to': "orm['mks.Member']"}),
'm2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'m2'", 'to': "orm['mks.Member']"}),
'normalized_score': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'not_same_party': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'mks.knesset': {
'Meta': {'object_name': 'Knesset'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.member': {
'Meta': {'ordering': "['name']", 'object_name': 'Member'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'average_monthly_committee_presence': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'average_weekly_presence_hours': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'backlinks_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'bills_stats_approved': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_first': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_pre': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_proposed': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': "orm['mks.Party']"}),
'current_position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '999', 'blank': 'True'}),
'current_role_descriptions': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': "orm['mks.Membership']", 'to': "orm['mks.Party']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'mks.memberaltname': {
'Meta': {'object_name': 'MemberAltname'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'mks.membership': {
'Meta': {'object_name': 'Membership'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Party']"}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '999', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.party': {
'Meta': {'ordering': "('-number_of_seats',)", 'object_name': 'Party'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.weeklypresence': {
'Meta': {'object_name': 'WeeklyPresence'},
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'hours': ('django.db.models.fields.FloatField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"})
},
'planet.blog': {
'Meta': {'ordering': "('title', 'url')", 'object_name': 'Blog'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '1024', 'db_index': 'True'})
}
}
complete_apps = ['mks']
symmetrical = True
|
nemesisdesign/django
|
refs/heads/master
|
django/contrib/sessions/backends/db.py
|
118
|
import logging
from django.contrib.sessions.backends.base import (
CreateError, SessionBase, UpdateError,
)
from django.core.exceptions import SuspiciousOperation
from django.db import DatabaseError, IntegrityError, router, transaction
from django.utils import timezone
from django.utils.encoding import force_text
from django.utils.functional import cached_property
class SessionStore(SessionBase):
"""
Implements database session store.
"""
def __init__(self, session_key=None):
super(SessionStore, self).__init__(session_key)
@classmethod
def get_model_class(cls):
# Avoids a circular import and allows importing SessionStore when
# django.contrib.sessions is not in INSTALLED_APPS.
from django.contrib.sessions.models import Session
return Session
@cached_property
def model(self):
return self.get_model_class()
def load(self):
try:
s = self.model.objects.get(
session_key=self.session_key,
expire_date__gt=timezone.now()
)
return self.decode(s.session_data)
except (self.model.DoesNotExist, SuspiciousOperation) as e:
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' % e.__class__.__name__)
logger.warning(force_text(e))
self._session_key = None
return {}
def exists(self, session_key):
return self.model.objects.filter(session_key=session_key).exists()
def create(self):
while True:
self._session_key = self._get_new_session_key()
try:
# Save immediately to ensure we have a unique entry in the
# database.
self.save(must_create=True)
except CreateError:
# Key wasn't unique. Try again.
continue
self.modified = True
return
def create_model_instance(self, data):
"""
Return a new instance of the session model object, which represents the
current session state. Intended to be used for saving the session data
to the database.
"""
return self.model(
session_key=self._get_or_create_session_key(),
session_data=self.encode(data),
expire_date=self.get_expiry_date(),
)
def save(self, must_create=False):
"""
Saves the current session data to the database. If 'must_create' is
True, a database error will be raised if the saving operation doesn't
create a *new* entry (as opposed to possibly updating an existing
entry).
"""
if self.session_key is None:
return self.create()
data = self._get_session(no_load=must_create)
obj = self.create_model_instance(data)
using = router.db_for_write(self.model, instance=obj)
try:
with transaction.atomic(using=using):
obj.save(force_insert=must_create, force_update=not must_create, using=using)
except IntegrityError:
if must_create:
raise CreateError
raise
except DatabaseError:
if not must_create:
raise UpdateError
raise
def delete(self, session_key=None):
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
try:
self.model.objects.get(session_key=session_key).delete()
except self.model.DoesNotExist:
pass
@classmethod
def clear_expired(cls):
cls.get_model_class().objects.filter(expire_date__lt=timezone.now()).delete()
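# ---------------------------------------------------------------------------
# Editor's note: an illustrative usage sketch, not part of Django itself. It
# assumes a configured project with ``django.contrib.sessions`` installed and
# its table migrated, so it is shown as a commented sketch rather than code
# that runs in isolation:
#
#     store = SessionStore()                 # no key yet
#     store['cart_id'] = 42
#     store.save()                           # session_key is None, so create() runs
#     restored = SessionStore(session_key=store.session_key)
#     assert restored.load().get('cart_id') == 42
#     SessionStore.clear_expired()           # prune rows past their expire_date
# ---------------------------------------------------------------------------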
|
Weil0ng/gem5
|
refs/heads/master
|
configs/common/SysPaths.py
|
32
|
# Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
import os, sys
from os.path import join as joinpath
from os import environ as env
config_path = os.path.dirname(os.path.abspath(__file__))
config_root = os.path.dirname(config_path)
def searchpath(path, filename):
for p in path:
f = joinpath(p, filename)
if os.path.exists(f):
return f
raise IOError, "Can't find file '%s' on path." % filename
def disk(filename):
system()
return searchpath(disk.path, filename)
def binary(filename):
system()
return searchpath(binary.path, filename)
def script(filename):
system()
return searchpath(script.path, filename)
def system():
if not system.path:
try:
path = env['M5_PATH'].split(':')
except KeyError:
path = [ '/dist/m5/system', '/n/poolfs/z/dist/m5/system' ]
# expand '~' and '~user' in paths
path = map(os.path.expanduser, path)
# filter out non-existent directories
system.path = filter(os.path.isdir, path)
if not system.path:
raise IOError, "Can't find a path to system files."
if not binary.path:
binary.path = [joinpath(p, 'binaries') for p in system.path]
if not disk.path:
disk.path = [joinpath(p, 'disks') for p in system.path]
if not script.path:
script.path = [joinpath(config_root, 'boot')]
system.path = None
binary.path = None
disk.path = None
script.path = None
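# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch, not part of gem5. It shows how a
# config script would typically resolve full-system files with the helpers
# above; the M5_PATH value and file names are hypothetical. system() lazily
# builds the search paths on the first binary()/disk()/script() call:
#
#     import os
#     os.environ.setdefault('M5_PATH', '/dist/m5/system')
#     kernel      = binary('vmlinux')             # searched under <M5_PATH>/binaries
#     root_disk   = disk('linux-root.img')        # searched under <M5_PATH>/disks
#     boot_script = script('netperf-server.rcS')  # searched under configs/boot
# ---------------------------------------------------------------------------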
|
Samuel789/MediPi
|
refs/heads/master
|
MedManagementWeb/env/lib/python3.5/site-packages/pytz/tzinfo.py
|
380
|
'''Base classes and helpers for building zone specific tzinfo classes'''
from datetime import datetime, timedelta, tzinfo
from bisect import bisect_right
try:
set
except NameError:
from sets import Set as set
import pytz
from pytz.exceptions import AmbiguousTimeError, NonExistentTimeError
__all__ = []
_timedelta_cache = {}
def memorized_timedelta(seconds):
'''Create only one instance of each distinct timedelta'''
try:
return _timedelta_cache[seconds]
except KeyError:
delta = timedelta(seconds=seconds)
_timedelta_cache[seconds] = delta
return delta
_epoch = datetime.utcfromtimestamp(0)
_datetime_cache = {0: _epoch}
def memorized_datetime(seconds):
'''Create only one instance of each distinct datetime'''
try:
return _datetime_cache[seconds]
except KeyError:
# NB. We can't just do datetime.utcfromtimestamp(seconds) as this
# fails with negative values under Windows (Bug #90096)
dt = _epoch + timedelta(seconds=seconds)
_datetime_cache[seconds] = dt
return dt
_ttinfo_cache = {}
def memorized_ttinfo(*args):
'''Create only one instance of each distinct tuple'''
try:
return _ttinfo_cache[args]
except KeyError:
ttinfo = (
memorized_timedelta(args[0]),
memorized_timedelta(args[1]),
args[2]
)
_ttinfo_cache[args] = ttinfo
return ttinfo
_notime = memorized_timedelta(0)
def _to_seconds(td):
'''Convert a timedelta to seconds'''
return td.seconds + td.days * 24 * 60 * 60
class BaseTzInfo(tzinfo):
# Overridden in subclass
_utcoffset = None
_tzname = None
zone = None
def __str__(self):
return self.zone
class StaticTzInfo(BaseTzInfo):
'''A timezone that has a constant offset from UTC
These timezones are rare, as most locations have changed their
offset at some point in their history
'''
def fromutc(self, dt):
'''See datetime.tzinfo.fromutc'''
if dt.tzinfo is not None and dt.tzinfo is not self:
raise ValueError('fromutc: dt.tzinfo is not self')
return (dt + self._utcoffset).replace(tzinfo=self)
def utcoffset(self, dt, is_dst=None):
'''See datetime.tzinfo.utcoffset
is_dst is ignored for StaticTzInfo, and exists only to
retain compatibility with DstTzInfo.
'''
return self._utcoffset
def dst(self, dt, is_dst=None):
'''See datetime.tzinfo.dst
is_dst is ignored for StaticTzInfo, and exists only to
retain compatibility with DstTzInfo.
'''
return _notime
def tzname(self, dt, is_dst=None):
'''See datetime.tzinfo.tzname
is_dst is ignored for StaticTzInfo, and exists only to
retain compatibility with DstTzInfo.
'''
return self._tzname
def localize(self, dt, is_dst=False):
'''Convert naive time to local time'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
return dt.replace(tzinfo=self)
def normalize(self, dt, is_dst=False):
'''Correct the timezone information on the given datetime.
This is normally a no-op, as StaticTzInfo timezones never have
ambiguous cases to correct:
>>> from pytz import timezone
>>> gmt = timezone('GMT')
>>> isinstance(gmt, StaticTzInfo)
True
>>> dt = datetime(2011, 5, 8, 1, 2, 3, tzinfo=gmt)
>>> gmt.normalize(dt) is dt
True
The supported method of converting between timezones is to use
datetime.astimezone(). Currently normalize() also works:
>>> la = timezone('America/Los_Angeles')
>>> dt = la.localize(datetime(2011, 5, 7, 1, 2, 3))
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> gmt.normalize(dt).strftime(fmt)
'2011-05-07 08:02:03 GMT (+0000)'
'''
if dt.tzinfo is self:
return dt
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
return dt.astimezone(self)
def __repr__(self):
return '<StaticTzInfo %r>' % (self.zone,)
def __reduce__(self):
        # Special pickle so the zone remains a singleton and to cope with
# database changes.
return pytz._p, (self.zone,)
class DstTzInfo(BaseTzInfo):
'''A timezone that has a variable offset from UTC
The offset might change if daylight saving time comes into effect,
or at a point in history when the region decides to change their
timezone definition.
'''
# Overridden in subclass
_utc_transition_times = None # Sorted list of DST transition times in UTC
_transition_info = None # [(utcoffset, dstoffset, tzname)] corresponding
# to _utc_transition_times entries
zone = None
# Set in __init__
_tzinfos = None
_dst = None # DST offset
def __init__(self, _inf=None, _tzinfos=None):
if _inf:
self._tzinfos = _tzinfos
self._utcoffset, self._dst, self._tzname = _inf
else:
_tzinfos = {}
self._tzinfos = _tzinfos
self._utcoffset, self._dst, self._tzname = self._transition_info[0]
_tzinfos[self._transition_info[0]] = self
for inf in self._transition_info[1:]:
if inf not in _tzinfos:
_tzinfos[inf] = self.__class__(inf, _tzinfos)
def fromutc(self, dt):
'''See datetime.tzinfo.fromutc'''
if (dt.tzinfo is not None
and getattr(dt.tzinfo, '_tzinfos', None) is not self._tzinfos):
raise ValueError('fromutc: dt.tzinfo is not self')
dt = dt.replace(tzinfo=None)
idx = max(0, bisect_right(self._utc_transition_times, dt) - 1)
inf = self._transition_info[idx]
return (dt + inf[0]).replace(tzinfo=self._tzinfos[inf])
def normalize(self, dt):
'''Correct the timezone information on the given datetime
If date arithmetic crosses DST boundaries, the tzinfo
is not magically adjusted. This method normalizes the
tzinfo to the correct one.
To test, first we need to do some setup
>>> from pytz import timezone
>>> utc = timezone('UTC')
>>> eastern = timezone('US/Eastern')
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
We next create a datetime right on an end-of-DST transition point,
the instant when the wallclocks are wound back one hour.
>>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
>>> loc_dt = utc_dt.astimezone(eastern)
>>> loc_dt.strftime(fmt)
'2002-10-27 01:00:00 EST (-0500)'
Now, if we subtract a few minutes from it, note that the timezone
information has not changed.
>>> before = loc_dt - timedelta(minutes=10)
>>> before.strftime(fmt)
'2002-10-27 00:50:00 EST (-0500)'
But we can fix that by calling the normalize method
>>> before = eastern.normalize(before)
>>> before.strftime(fmt)
'2002-10-27 01:50:00 EDT (-0400)'
The supported method of converting between timezones is to use
datetime.astimezone(). Currently, normalize() also works:
>>> th = timezone('Asia/Bangkok')
>>> am = timezone('Europe/Amsterdam')
>>> dt = th.localize(datetime(2011, 5, 7, 1, 2, 3))
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> am.normalize(dt).strftime(fmt)
'2011-05-06 20:02:03 CEST (+0200)'
'''
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
# Convert dt in localtime to UTC
offset = dt.tzinfo._utcoffset
dt = dt.replace(tzinfo=None)
dt = dt - offset
# convert it back, and return it
return self.fromutc(dt)
def localize(self, dt, is_dst=False):
'''Convert naive time to local time.
This method should be used to construct localtimes, rather
than passing a tzinfo argument to a datetime constructor.
        is_dst is used to determine the correct timezone in the ambiguous
period at the end of daylight saving time.
>>> from pytz import timezone
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> amdam = timezone('Europe/Amsterdam')
>>> dt = datetime(2004, 10, 31, 2, 0, 0)
>>> loc_dt1 = amdam.localize(dt, is_dst=True)
>>> loc_dt2 = amdam.localize(dt, is_dst=False)
>>> loc_dt1.strftime(fmt)
'2004-10-31 02:00:00 CEST (+0200)'
>>> loc_dt2.strftime(fmt)
'2004-10-31 02:00:00 CET (+0100)'
>>> str(loc_dt2 - loc_dt1)
'1:00:00'
Use is_dst=None to raise an AmbiguousTimeError for ambiguous
times at the end of daylight saving time
>>> try:
... loc_dt1 = amdam.localize(dt, is_dst=None)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
is_dst defaults to False
>>> amdam.localize(dt) == amdam.localize(dt, False)
True
is_dst is also used to determine the correct timezone in the
wallclock times jumped over at the start of daylight saving time.
>>> pacific = timezone('US/Pacific')
>>> dt = datetime(2008, 3, 9, 2, 0, 0)
>>> ploc_dt1 = pacific.localize(dt, is_dst=True)
>>> ploc_dt2 = pacific.localize(dt, is_dst=False)
>>> ploc_dt1.strftime(fmt)
'2008-03-09 02:00:00 PDT (-0700)'
>>> ploc_dt2.strftime(fmt)
'2008-03-09 02:00:00 PST (-0800)'
>>> str(ploc_dt2 - ploc_dt1)
'1:00:00'
Use is_dst=None to raise a NonExistentTimeError for these skipped
times.
>>> try:
... loc_dt1 = pacific.localize(dt, is_dst=None)
... except NonExistentTimeError:
... print('Non-existent')
Non-existent
'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
# Find the two best possibilities.
possible_loc_dt = set()
for delta in [timedelta(days=-1), timedelta(days=1)]:
loc_dt = dt + delta
idx = max(0, bisect_right(
self._utc_transition_times, loc_dt) - 1)
inf = self._transition_info[idx]
tzinfo = self._tzinfos[inf]
loc_dt = tzinfo.normalize(dt.replace(tzinfo=tzinfo))
if loc_dt.replace(tzinfo=None) == dt:
possible_loc_dt.add(loc_dt)
if len(possible_loc_dt) == 1:
return possible_loc_dt.pop()
# If there are no possibly correct timezones, we are attempting
# to convert a time that never happened - the time period jumped
# during the start-of-DST transition period.
if len(possible_loc_dt) == 0:
# If we refuse to guess, raise an exception.
if is_dst is None:
raise NonExistentTimeError(dt)
# If we are forcing the pre-DST side of the DST transition, we
# obtain the correct timezone by winding the clock forward a few
# hours.
elif is_dst:
return self.localize(
dt + timedelta(hours=6), is_dst=True) - timedelta(hours=6)
# If we are forcing the post-DST side of the DST transition, we
# obtain the correct timezone by winding the clock back.
else:
return self.localize(
dt - timedelta(hours=6), is_dst=False) + timedelta(hours=6)
# If we get this far, we have multiple possible timezones - this
        # is an ambiguous case occurring during the end-of-DST transition.
# If told to be strict, raise an exception since we have an
# ambiguous case
if is_dst is None:
raise AmbiguousTimeError(dt)
        # Filter out the possibilities that don't match the requested
# is_dst
filtered_possible_loc_dt = [
p for p in possible_loc_dt
if bool(p.tzinfo._dst) == is_dst
]
# Hopefully we only have one possibility left. Return it.
if len(filtered_possible_loc_dt) == 1:
return filtered_possible_loc_dt[0]
if len(filtered_possible_loc_dt) == 0:
filtered_possible_loc_dt = list(possible_loc_dt)
        # If we get this far, we are in a weird timezone transition
# where the clocks have been wound back but is_dst is the same
# in both (eg. Europe/Warsaw 1915 when they switched to CET).
# At this point, we just have to guess unless we allow more
# hints to be passed in (such as the UTC offset or abbreviation),
# but that is just getting silly.
#
# Choose the earliest (by UTC) applicable timezone if is_dst=True
# Choose the latest (by UTC) applicable timezone if is_dst=False
# i.e., behave like end-of-DST transition
dates = {} # utc -> local
for local_dt in filtered_possible_loc_dt:
utc_time = local_dt.replace(tzinfo=None) - local_dt.tzinfo._utcoffset
assert utc_time not in dates
dates[utc_time] = local_dt
return dates[[min, max][not is_dst](dates)]
def utcoffset(self, dt, is_dst=None):
'''See datetime.tzinfo.utcoffset
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.utcoffset(ambiguous, is_dst=False)
datetime.timedelta(-1, 73800)
>>> tz.utcoffset(ambiguous, is_dst=True)
datetime.timedelta(-1, 77400)
>>> try:
... tz.utcoffset(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return None
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._utcoffset
else:
return self._utcoffset
def dst(self, dt, is_dst=None):
'''See datetime.tzinfo.dst
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> tz.dst(normal)
datetime.timedelta(0, 3600)
>>> tz.dst(normal, is_dst=False)
datetime.timedelta(0, 3600)
>>> tz.dst(normal, is_dst=True)
datetime.timedelta(0, 3600)
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.dst(ambiguous, is_dst=False)
datetime.timedelta(0)
>>> tz.dst(ambiguous, is_dst=True)
datetime.timedelta(0, 3600)
>>> try:
... tz.dst(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return None
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._dst
else:
return self._dst
def tzname(self, dt, is_dst=None):
'''See datetime.tzinfo.tzname
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> tz.tzname(normal)
'NDT'
>>> tz.tzname(normal, is_dst=False)
'NDT'
>>> tz.tzname(normal, is_dst=True)
'NDT'
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.tzname(ambiguous, is_dst=False)
'NST'
>>> tz.tzname(ambiguous, is_dst=True)
'NDT'
>>> try:
... tz.tzname(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return self.zone
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._tzname
else:
return self._tzname
def __repr__(self):
if self._dst:
dst = 'DST'
else:
dst = 'STD'
if self._utcoffset > _notime:
return '<DstTzInfo %r %s+%s %s>' % (
self.zone, self._tzname, self._utcoffset, dst
)
else:
return '<DstTzInfo %r %s%s %s>' % (
self.zone, self._tzname, self._utcoffset, dst
)
def __reduce__(self):
        # Special pickle so the zone remains a singleton and to cope with
# database changes.
return pytz._p, (
self.zone,
_to_seconds(self._utcoffset),
_to_seconds(self._dst),
self._tzname
)
def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None):
"""Factory function for unpickling pytz tzinfo instances.
This is shared for both StaticTzInfo and DstTzInfo instances, because
    database changes could cause a zone's implementation to switch between
these two base classes and we can't break pickles on a pytz version
upgrade.
"""
# Raises a KeyError if zone no longer exists, which should never happen
# and would be a bug.
tz = pytz.timezone(zone)
# A StaticTzInfo - just return it
if utcoffset is None:
return tz
# This pickle was created from a DstTzInfo. We need to
# determine which of the list of tzinfo instances for this zone
# to use in order to restore the state of any datetime instances using
# it correctly.
utcoffset = memorized_timedelta(utcoffset)
dstoffset = memorized_timedelta(dstoffset)
try:
return tz._tzinfos[(utcoffset, dstoffset, tzname)]
except KeyError:
# The particular state requested in this timezone no longer exists.
# This indicates a corrupt pickle, or the timezone database has been
# corrected violently enough to make this particular
# (utcoffset,dstoffset) no longer exist in the zone, or the
# abbreviation has been changed.
pass
# See if we can find an entry differing only by tzname. Abbreviations
# get changed from the initial guess by the database maintainers to
# match reality when this information is discovered.
for localized_tz in tz._tzinfos.values():
if (localized_tz._utcoffset == utcoffset
and localized_tz._dst == dstoffset):
return localized_tz
# This (utcoffset, dstoffset) information has been removed from the
# zone. Add it back. This might occur when the database maintainers have
# corrected incorrect information. datetime instances using this
# incorrect information will continue to do so, exactly as they were
# before being pickled. This is purely an overly paranoid safety net - I
    # doubt this will ever be needed in real life.
inf = (utcoffset, dstoffset, tzname)
tz._tzinfos[inf] = tz.__class__(inf, tz._tzinfos)
return tz._tzinfos[inf]
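# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch, not part of pytz. It demonstrates the
# intended localize()/normalize() workflow for DstTzInfo zones; it only runs
# where the pytz package data is installed, and the guard keeps it from
# executing on import.
if __name__ == '__main__':
    from datetime import datetime, timedelta

    eastern = pytz.timezone('US/Eastern')
    # Attach zone information to a naive datetime with localize(), never by
    # passing tzinfo= to the datetime constructor.
    loc_dt = eastern.localize(datetime(2002, 10, 27, 1, 30), is_dst=False)
    # Arithmetic can cross a DST boundary; normalize() repairs the tzinfo.
    print(eastern.normalize(loc_dt - timedelta(hours=2)))
# ---------------------------------------------------------------------------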
|
appsembler/edx-platform
|
refs/heads/appsembler/tahoe/master
|
scripts/xsslint/tests/test_utils.py
|
15
|
from unittest import TestCase
from ddt import data, ddt
from xsslint.utils import StringLines
@ddt
class TestStringLines(TestCase):
"""
Test StringLines class.
"""
@data(
{'string': 'test', 'index': 0, 'line_start_index': 0, 'line_end_index': 4},
{'string': 'test', 'index': 2, 'line_start_index': 0, 'line_end_index': 4},
{'string': 'test', 'index': 3, 'line_start_index': 0, 'line_end_index': 4},
{'string': '\ntest', 'index': 0, 'line_start_index': 0, 'line_end_index': 1},
{'string': '\ntest', 'index': 2, 'line_start_index': 1, 'line_end_index': 5},
{'string': '\ntest\n', 'index': 0, 'line_start_index': 0, 'line_end_index': 1},
{'string': '\ntest\n', 'index': 2, 'line_start_index': 1, 'line_end_index': 6},
{'string': '\ntest\n', 'index': 6, 'line_start_index': 6, 'line_end_index': 6},
)
def test_string_lines_start_end_index(self, data):
"""
Test StringLines index_to_line_start_index and index_to_line_end_index.
"""
lines = StringLines(data['string'])
self.assertEqual(lines.index_to_line_start_index(data['index']), data['line_start_index'])
self.assertEqual(lines.index_to_line_end_index(data['index']), data['line_end_index'])
@data(
{'string': 'test', 'line_number': 1, 'line': 'test'},
{'string': '\ntest', 'line_number': 1, 'line': ''},
{'string': '\ntest', 'line_number': 2, 'line': 'test'},
{'string': '\ntest\n', 'line_number': 1, 'line': ''},
{'string': '\ntest\n', 'line_number': 2, 'line': 'test'},
{'string': '\ntest\n', 'line_number': 3, 'line': ''},
)
    def test_string_lines_line_number_to_line(self, data):
"""
Test line_number_to_line.
"""
lines = StringLines(data['string'])
self.assertEqual(lines.line_number_to_line(data['line_number']), data['line'])
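# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch, not part of edx-platform, of the
# StringLines helpers the data tables above exercise:
#
#     lines = StringLines('first\nsecond')
#     lines.index_to_line_start_index(7)   # 6   (start of 'second')
#     lines.index_to_line_end_index(7)     # 12  (end of string, no newline)
#     lines.line_number_to_line(2)         # 'second'
# ---------------------------------------------------------------------------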
|
gojira/tensorflow
|
refs/heads/master
|
tensorflow/contrib/timeseries/python/timeseries/ar_model.py
|
14
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Auto-Regressive models for time series data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import distributions
from tensorflow.contrib.rnn.python.ops import lstm_ops
from tensorflow.contrib.timeseries.python.timeseries import model
from tensorflow.contrib.timeseries.python.timeseries import model_utils
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import PredictionFeatures
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import TrainEvalFeatures
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
class FlatPredictionModel(training.Model):
"""Flattens input and output windows and puts them through dense layers.
This model does not operate on its own, but rather is a plugin to
`ARModel`. See `ARModel`'s constructor documentation
(`prediction_model_factory`) for a usage example.
"""
def __init__(self,
num_features,
input_window_size,
output_window_size,
hidden_layer_sizes=None):
"""Construct the flat prediction model.
Args:
num_features: number of input features per time step.
input_window_size: Number of past time steps of data to look at when doing
the regression.
output_window_size: Number of future time steps to predict. Note that
setting it to > 1 empirically seems to give a better fit.
hidden_layer_sizes: list of sizes of hidden layers.
"""
super(FlatPredictionModel, self).__init__()
self._input_flatten = core.Flatten()
self._output_flatten = core.Flatten()
if hidden_layer_sizes:
self._hidden_layers = sequential.Sequential([
core.Dense(layer_size, activation=nn_ops.relu)
for layer_size in hidden_layer_sizes])
else:
self._hidden_layers = None
self._mean_transform = core.Dense(num_features * output_window_size,
name="predicted_mean")
self._covariance_transform = core.Dense(num_features * output_window_size,
name="log_sigma_square")
self._prediction_shape = [-1, output_window_size, num_features]
def call(self, input_window_features, output_window_features):
"""Compute predictions from input and output windows.
Args:
input_window_features: A floating point Tensor with shape [batch size,
input window size, input features]. The batch dimension may not have
static shape information, but the window size and number of input
features are known at graph construction time and recorded in the static
shape information for the `input_window_features` `Tensor`. Note that
`input_window_size` may be zero.
output_window_features: A floating point Tensor with shape [batch size,
output window size, output features]. As with `input_window_features`,
the last two dimensions have static shape information. If there are no
output features, the size of the last dimension will be zero.
Returns:
A dictionary of predictions with keys "mean" and "covariance" (only
diagonal covariances are currently supported). Each has shape
[batch size, output window size, num_features], where num_features is the
same as the constructor argument.
"""
if input_window_features.shape[1].value == 0:
# TODO(allenl): Make reshape()'s static shape information work on
# zero-size Tensors? Currently this special case is required because
# otherwise the Dense layers get unknown last dimensions.
activation = self._output_flatten(output_window_features)
elif output_window_features.shape[2].value == 0:
activation = self._input_flatten(input_window_features)
else:
activation = array_ops.concat(
[self._input_flatten(input_window_features),
self._output_flatten(output_window_features)],
axis=1)
if self._hidden_layers:
activation = self._hidden_layers(activation)
predicted_mean = array_ops.reshape(
self._mean_transform(activation),
self._prediction_shape)
predicted_covariance = array_ops.reshape(
gen_math_ops.exp(self._covariance_transform(activation)),
self._prediction_shape)
return {"mean": predicted_mean,
"covariance": predicted_covariance}
class LSTMPredictionModel(training.Model):
"""A simple encoder/decoder model using an LSTM.
This model does not operate on its own, but rather is a plugin to
`ARModel`. See `ARModel`'s constructor documentation
(`prediction_model_factory`) for a usage example.
"""
def __init__(self,
num_features,
input_window_size,
output_window_size,
num_units=128):
"""Construct the LSTM prediction model.
Args:
num_features: number of input features per time step.
input_window_size: Number of past time steps of data to look at when doing
the regression.
output_window_size: Number of future time steps to predict. Note that
setting it to > 1 empirically seems to give a better fit.
num_units: The number of units in the encoder and decoder LSTM cells.
"""
super(LSTMPredictionModel, self).__init__()
self._encoder = lstm_ops.LSTMBlockFusedCell(
num_units=num_units, name="encoder")
self._decoder = lstm_ops.LSTMBlockFusedCell(
num_units=num_units, name="decoder")
self._mean_transform = core.Dense(num_features,
name="mean_transform")
self._covariance_transform = core.Dense(num_features,
name="covariance_transform")
def call(self, input_window_features, output_window_features):
"""Compute predictions from input and output windows."""
# Convert to time major
input_window_features = array_ops.transpose(input_window_features,
[1, 0, 2])
output_window_features = array_ops.transpose(output_window_features,
[1, 0, 2])
_, encoder_state = self._encoder(
input_window_features, dtype=self.dtype)
decoder_output, _ = self._decoder(
output_window_features, dtype=self.dtype,
initial_state=encoder_state)
# Switch back to batch major
decoder_output = array_ops.transpose(decoder_output, [1, 0, 2])
predicted_mean = self._mean_transform(decoder_output)
predicted_covariance = gen_math_ops.exp(
self._covariance_transform(decoder_output))
return {"mean": predicted_mean,
"covariance": predicted_covariance}
class ARModel(model.TimeSeriesModel):
"""Auto-regressive model, both linear and non-linear.
Features to the model include time and values of input_window_size timesteps,
and times for output_window_size timesteps. These are passed through a
configurable prediction model, and then fed to a loss function (e.g. squared
loss).
Note that this class can also be used to regress against time only by setting
the input_window_size to zero.
"""
SQUARED_LOSS = "squared_loss"
NORMAL_LIKELIHOOD_LOSS = "normal_likelihood_loss"
def __init__(self,
periodicities,
input_window_size,
output_window_size,
num_features,
prediction_model_factory=FlatPredictionModel,
num_time_buckets=10,
loss=NORMAL_LIKELIHOOD_LOSS,
exogenous_feature_columns=None):
"""Constructs an auto-regressive model.
Args:
periodicities: periodicities of the input data, in the same units as the
time feature. Note this can be a single value or a list of values for
multiple periodicities.
input_window_size: Number of past time steps of data to look at when doing
the regression.
output_window_size: Number of future time steps to predict. Note that
setting it to > 1 empirically seems to give a better fit.
num_features: number of input features per time step.
prediction_model_factory: A callable taking arguments `num_features`,
`input_window_size`, and `output_window_size` and returning a
`tf.keras.Model`. The `Model`'s `call()` takes two arguments: an input
window and an output window, and returns a dictionary of
predictions. See `FlatPredictionModel` for an example. Example usage:
```python
model = ar_model.ARModel(
periodicities=2, num_features=3,
prediction_model_factory=functools.partial(
FlatPredictionModel,
hidden_layer_sizes=[10, 10]))
```
The default model computes predictions as a linear function of flattened
input and output windows.
num_time_buckets: Number of buckets into which to divide (time %
periodicity) for generating time based features.
loss: Loss function to use for training. Currently supported values are
SQUARED_LOSS and NORMAL_LIKELIHOOD_LOSS. Note that for
NORMAL_LIKELIHOOD_LOSS, we train the covariance term as well. For
SQUARED_LOSS, the evaluation loss is reported based on un-scaled
observations and predictions, while the training loss is computed on
normalized data (if input statistics are available).
exogenous_feature_columns: A list of `tf.feature_column`s (for example
`tf.feature_column.embedding_column`) corresponding to exogenous
features which provide extra information to the model but are not part
of the series to be predicted. Passed to
`tf.feature_column.input_layer`.
"""
self._model_factory = prediction_model_factory
self.input_window_size = input_window_size
self.output_window_size = output_window_size
self.window_size = self.input_window_size + self.output_window_size
self.loss = loss
super(ARModel, self).__init__(
num_features=num_features,
exogenous_feature_columns=exogenous_feature_columns)
if exogenous_feature_columns is not None:
self.exogenous_size = self._get_exogenous_embedding_shape()[-1]
else:
self.exogenous_size = 0
assert num_time_buckets > 0
self._buckets = int(num_time_buckets)
if periodicities is None or not periodicities:
periodicities = []
elif (not isinstance(periodicities, list) and
not isinstance(periodicities, tuple)):
periodicities = [periodicities]
self._periods = [int(p) for p in periodicities]
for p in self._periods:
assert p > 0
assert len(self._periods) or self.input_window_size
assert output_window_size > 0
def initialize_graph(self, input_statistics=None):
super(ARModel, self).initialize_graph(input_statistics=input_statistics)
self._model_scope = variable_scope.variable_scope(
# The trailing slash means we strip all enclosing variable_scopes, which
# unfortunately is necessary because the model gets called inside and
# outside a "while" scope (for prediction and training respectively),
# and the variables names need to match.
"model/", use_resource=True)
self._model_instance = self._model_factory(
num_features=self.num_features,
input_window_size=self.input_window_size,
output_window_size=self.output_window_size)
def get_start_state(self):
# State which matches the format we'll return later. Typically this will not
# be used by the model directly, but the shapes and dtypes should match so
# that the serving input_receiver_fn gets placeholder shapes correct.
return (array_ops.zeros([self.input_window_size], dtype=dtypes.int64),
array_ops.zeros(
[self.input_window_size, self.num_features], dtype=self.dtype),
array_ops.zeros(
[self.input_window_size, self.exogenous_size],
dtype=self.dtype))
# TODO(allenl,agarwal): Support sampling for AR.
def random_model_parameters(self, seed=None):
pass
def generate(self, number_of_series, series_length,
model_parameters=None, seed=None):
pass
def _predicted_covariance_op(self, activations, num_values):
activation, activation_size = activations[-1]
if self.loss == ARModel.NORMAL_LIKELIHOOD_LOSS:
log_sigma_square = model_utils.fully_connected(
activation,
activation_size,
self.output_window_size * num_values,
name="log_sigma_square",
activation=None)
predicted_covariance = gen_math_ops.exp(log_sigma_square)
predicted_covariance = array_ops.reshape(
predicted_covariance, [-1, self.output_window_size, num_values])
else:
shape = array_ops.stack([
array_ops.shape(activation)[0],
constant_op.constant(self.output_window_size),
constant_op.constant(num_values)
])
predicted_covariance = array_ops.ones(shape=shape, dtype=activation.dtype)
return predicted_covariance
def _predicted_mean_op(self, activations):
activation, activation_size = activations[-1]
predicted_mean = model_utils.fully_connected(
activation,
activation_size,
self.output_window_size * self.num_features,
name="predicted_mean",
activation=None)
return array_ops.reshape(predicted_mean,
[-1, self.output_window_size, self.num_features])
def prediction_ops(self, times, values, exogenous_regressors):
"""Compute model predictions given input data.
Args:
times: A [batch size, self.window_size] integer Tensor, the first
self.input_window_size times in each part of the batch indicating
input features, and the last self.output_window_size times indicating
prediction times.
values: A [batch size, self.input_window_size, self.num_features] Tensor
with input features.
exogenous_regressors: A [batch size, self.window_size,
self.exogenous_size] Tensor with exogenous features.
Returns:
Tuple (predicted_mean, predicted_covariance), where each element is a
Tensor with shape [batch size, self.output_window_size,
self.num_features].
"""
times.get_shape().assert_is_compatible_with([None, self.window_size])
batch_size = array_ops.shape(times)[0]
if self.input_window_size:
values.get_shape().assert_is_compatible_with(
[None, self.input_window_size, self.num_features])
if exogenous_regressors is not None:
exogenous_regressors.get_shape().assert_is_compatible_with(
[None, self.window_size, self.exogenous_size])
# Create input features.
input_window_features = []
input_feature_size = 0
output_window_features = []
output_feature_size = 0
if self._periods:
_, time_features = self._compute_time_features(times)
num_time_features = self._buckets * len(self._periods)
time_features = array_ops.reshape(
time_features,
[batch_size,
self.window_size,
num_time_features])
input_time_features, output_time_features = array_ops.split(
time_features, (self.input_window_size, self.output_window_size),
axis=1)
input_feature_size += num_time_features
output_feature_size += num_time_features
input_window_features.append(input_time_features)
output_window_features.append(output_time_features)
if self.input_window_size:
inp = array_ops.slice(values, [0, 0, 0], [-1, self.input_window_size, -1])
input_window_features.append(
array_ops.reshape(
inp,
[batch_size, self.input_window_size, self.num_features]))
input_feature_size += self.num_features
if self.exogenous_size:
input_exogenous_features, output_exogenous_features = array_ops.split(
exogenous_regressors,
(self.input_window_size, self.output_window_size),
axis=1)
input_feature_size += self.exogenous_size
output_feature_size += self.exogenous_size
input_window_features.append(input_exogenous_features)
output_window_features.append(output_exogenous_features)
assert input_window_features
input_window_features = array_ops.concat(input_window_features, axis=2)
if output_window_features:
output_window_features = array_ops.concat(output_window_features, axis=2)
else:
output_window_features = array_ops.zeros(
[batch_size, self.output_window_size, 0],
dtype=self.dtype)
static_batch_size = times.get_shape()[0].value
input_window_features.set_shape(
[static_batch_size, self.input_window_size, input_feature_size])
output_window_features.set_shape(
[static_batch_size, self.output_window_size, output_feature_size])
return self._output_window_predictions(input_window_features,
output_window_features)
def _output_window_predictions(
self, input_window_features, output_window_features):
with self._model_scope:
predictions = self._model_instance(
input_window_features, output_window_features)
result_shape = [None, self.output_window_size, self.num_features]
for v in predictions.values():
v.set_shape(result_shape)
return predictions
def loss_op(self, targets, prediction_ops):
"""Create loss_op."""
prediction = prediction_ops["mean"]
if self.loss == ARModel.NORMAL_LIKELIHOOD_LOSS:
covariance = prediction_ops["covariance"]
sigma = math_ops.sqrt(gen_math_ops.maximum(covariance, 1e-5))
normal = distributions.Normal(loc=targets, scale=sigma)
loss_op = -math_ops.reduce_sum(normal.log_prob(prediction))
else:
assert self.loss == ARModel.SQUARED_LOSS, self.loss
loss_op = math_ops.reduce_sum(math_ops.square(prediction - targets))
loss_op /= math_ops.cast(
math_ops.reduce_prod(array_ops.shape(targets)), loss_op.dtype)
return loss_op
def _process_exogenous_features(self, times, features):
embedded = super(ARModel, self)._process_exogenous_features(
times=times, features=features)
if embedded is None:
assert self.exogenous_size == 0
# No embeddings. Return a zero-size [batch, times, 0] array so we don't
# have to special case it downstream.
return array_ops.zeros(
array_ops.concat([array_ops.shape(times), constant_op.constant([0])],
axis=0))
else:
return embedded
# TODO(allenl, agarwal): Consider better ways of warm-starting predictions.
def predict(self, features):
"""Computes predictions multiple steps into the future.
Args:
features: A dictionary with the following key/value pairs:
PredictionFeatures.TIMES: A [batch size, predict window size]
integer Tensor of times, after the window of data indicated by
`STATE_TUPLE`, to make predictions for.
PredictionFeatures.STATE_TUPLE: A tuple of (times, values), times with
shape [batch size, self.input_window_size], values with shape [batch
size, self.input_window_size, self.num_features] representing a
segment of the time series before `TIMES`. This data is used
        to start the autoregressive computation. This should have data for
at least self.input_window_size timesteps.
And any exogenous features, with shapes prefixed by shape of `TIMES`.
Returns:
A dictionary with keys, "mean", "covariance". The
values are Tensors of shape [batch_size, predict window size,
num_features] and correspond to the values passed in `TIMES`.
"""
if not self._graph_initialized:
self.initialize_graph()
predict_times = math_ops.cast(
ops.convert_to_tensor(features[PredictionFeatures.TIMES]), dtypes.int32)
exogenous_regressors = self._process_exogenous_features(
times=predict_times,
features={key: value for key, value in features.items()
if key not in [TrainEvalFeatures.TIMES,
TrainEvalFeatures.VALUES,
PredictionFeatures.STATE_TUPLE]})
with ops.control_dependencies(
[check_ops.assert_equal(array_ops.shape(predict_times)[1],
array_ops.shape(exogenous_regressors)[1])]):
exogenous_regressors = array_ops.identity(exogenous_regressors)
batch_size = array_ops.shape(predict_times)[0]
num_predict_values = array_ops.shape(predict_times)[1]
prediction_iterations = ((num_predict_values + self.output_window_size - 1)
// self.output_window_size)
# Pad predict_times and exogenous regressors so as to have exact multiple of
# self.output_window_size values per example.
padding_size = (prediction_iterations * self.output_window_size -
num_predict_values)
predict_times = array_ops.pad(
predict_times, [[0, 0], [0, padding_size]])
exogenous_regressors = array_ops.pad(
exogenous_regressors, [[0, 0], [0, padding_size], [0, 0]])
state = features[PredictionFeatures.STATE_TUPLE]
(state_times, state_values, state_exogenous_regressors) = state
state_times = math_ops.cast(
ops.convert_to_tensor(state_times), dtypes.int32)
state_values = ops.convert_to_tensor(state_values, dtype=self.dtype)
state_exogenous_regressors = ops.convert_to_tensor(
state_exogenous_regressors, dtype=self.dtype)
initial_input_times = predict_times[:, :self.output_window_size]
initial_input_exogenous_regressors = (
exogenous_regressors[:, :self.output_window_size, :])
if self.input_window_size > 0:
initial_input_times = array_ops.concat(
[state_times[:, -self.input_window_size:], initial_input_times], 1)
values_size = array_ops.shape(state_values)[1]
times_size = array_ops.shape(state_times)[1]
with ops.control_dependencies([
check_ops.assert_greater_equal(values_size, self.input_window_size),
check_ops.assert_equal(values_size, times_size)
]):
initial_input_values = state_values[:, -self.input_window_size:, :]
initial_input_exogenous_regressors = array_ops.concat(
[state_exogenous_regressors[:, -self.input_window_size:, :],
initial_input_exogenous_regressors[
:, :self.output_window_size, :]],
axis=1)
else:
initial_input_values = 0
# Iterate over the predict_times, predicting self.output_window_size values
# in each iteration.
def _while_condition(iteration_number, *unused_args):
return math_ops.less(iteration_number, prediction_iterations)
def _while_body(iteration_number, input_times, input_values,
input_exogenous_regressors, mean_ta, covariance_ta):
"""Predict self.output_window_size values."""
prediction_ops = self.prediction_ops(
input_times, input_values, input_exogenous_regressors)
predicted_mean = prediction_ops["mean"]
predicted_covariance = prediction_ops["covariance"]
offset = self.output_window_size * gen_math_ops.minimum(
iteration_number + 1, prediction_iterations - 1)
if self.input_window_size > 0:
if self.output_window_size < self.input_window_size:
new_input_values = array_ops.concat(
[input_values[:, self.output_window_size:, :], predicted_mean], 1)
new_input_exogenous_regressors = array_ops.concat(
[input_exogenous_regressors[:, -self.input_window_size:, :],
exogenous_regressors[
:, offset:offset + self.output_window_size, :]],
axis=1)
new_input_times = array_ops.concat([
input_times[:, -self.input_window_size:],
predict_times[:, offset:offset + self.output_window_size]
], 1)
else:
new_input_values = predicted_mean[:, -self.input_window_size:, :]
new_input_exogenous_regressors = exogenous_regressors[
:,
offset - self.input_window_size:offset + self.output_window_size,
:]
new_input_times = predict_times[
:,
offset - self.input_window_size:offset + self.output_window_size]
else:
new_input_values = input_values
new_input_exogenous_regressors = exogenous_regressors[
:, offset:offset + self.output_window_size, :]
new_input_times = predict_times[:,
offset:offset + self.output_window_size]
new_input_times.set_shape(initial_input_times.get_shape())
new_input_exogenous_regressors.set_shape(
initial_input_exogenous_regressors.get_shape())
new_mean_ta = mean_ta.write(iteration_number, predicted_mean)
if isinstance(covariance_ta, tensor_array_ops.TensorArray):
new_covariance_ta = covariance_ta.write(iteration_number,
predicted_covariance)
else:
new_covariance_ta = covariance_ta
return (iteration_number + 1,
new_input_times,
new_input_values,
new_input_exogenous_regressors,
new_mean_ta,
new_covariance_ta)
# Note that control_flow_ops.while_loop doesn't seem happy with None. Hence
# using 0 for cases where we don't want to predict covariance.
covariance_ta_init = (tensor_array_ops.TensorArray(
dtype=self.dtype, size=prediction_iterations)
if self.loss != ARModel.SQUARED_LOSS else 0.)
mean_ta_init = tensor_array_ops.TensorArray(
dtype=self.dtype, size=prediction_iterations)
_, _, _, _, mean_ta, covariance_ta = control_flow_ops.while_loop(
_while_condition, _while_body, [
0,
initial_input_times,
initial_input_values,
initial_input_exogenous_regressors,
mean_ta_init,
covariance_ta_init
])
def _parse_ta(values_ta):
"""Helper function to parse the returned TensorArrays."""
if not isinstance(values_ta, tensor_array_ops.TensorArray):
return None
predictions_length = prediction_iterations * self.output_window_size
# Shape [prediction_iterations, batch_size, self.output_window_size,
# self.num_features]
values_packed = values_ta.stack()
# Transpose to move batch dimension outside.
output_values = array_ops.reshape(
array_ops.transpose(values_packed, [1, 0, 2, 3]),
array_ops.stack([batch_size, predictions_length, -1]))
# Clip to desired size
return output_values[:, :num_predict_values, :]
predicted_mean = _parse_ta(mean_ta)
predicted_covariance = _parse_ta(covariance_ta)
if predicted_covariance is None:
predicted_covariance = array_ops.ones_like(predicted_mean)
# Transform and scale the mean and covariance appropriately.
predicted_mean = self._scale_back_data(predicted_mean)
predicted_covariance = self._scale_back_variance(predicted_covariance)
return {"mean": predicted_mean,
"covariance": predicted_covariance}
def _process_window(self, features, mode, exogenous_regressors):
"""Compute model outputs on a single window of data."""
times = math_ops.cast(features[TrainEvalFeatures.TIMES], dtypes.int64)
values = math_ops.cast(features[TrainEvalFeatures.VALUES], dtype=self.dtype)
exogenous_regressors = math_ops.cast(exogenous_regressors, dtype=self.dtype)
original_values = values
# Extra shape checking for the window size (above that in
# `head.create_estimator_spec`).
expected_times_shape = [None, self.window_size]
if not times.get_shape().is_compatible_with(expected_times_shape):
raise ValueError(
("ARModel with input_window_size={input_window_size} "
"and output_window_size={output_window_size} expects "
"feature '{times_feature}' to have shape (batch_size, "
"{window_size}) (for any batch_size), but got shape {times_shape}. "
"If you are using RandomWindowInputFn, set "
"window_size={window_size} or adjust the input_window_size and "
"output_window_size arguments to ARModel.").format(
input_window_size=self.input_window_size,
output_window_size=self.output_window_size,
times_feature=TrainEvalFeatures.TIMES,
window_size=self.window_size,
times_shape=times.get_shape()))
values = self._scale_data(values)
if self.input_window_size > 0:
input_values = values[:, :self.input_window_size, :]
else:
input_values = None
prediction_ops = self.prediction_ops(
times, input_values, exogenous_regressors)
prediction = prediction_ops["mean"]
covariance = prediction_ops["covariance"]
targets = array_ops.slice(values, [0, self.input_window_size, 0],
[-1, -1, -1])
targets.get_shape().assert_is_compatible_with(prediction.get_shape())
if (mode == estimator_lib.ModeKeys.EVAL
and self.loss == ARModel.SQUARED_LOSS):
# Report an evaluation loss which matches the expected
# (observed - predicted) ** 2.
# Note that this affects only evaluation; the training loss is unaffected.
loss = self.loss_op(
self._scale_back_data(targets),
{"mean": self._scale_back_data(prediction_ops["mean"])})
else:
loss = self.loss_op(targets, prediction_ops)
# Scale back the prediction.
prediction = self._scale_back_data(prediction)
covariance = self._scale_back_variance(covariance)
return model.ModelOutputs(
loss=loss,
end_state=(times[:, -self.input_window_size:],
values[:, -self.input_window_size:, :],
exogenous_regressors[:, -self.input_window_size:, :]),
predictions={"mean": prediction, "covariance": covariance,
"observed": original_values[:, -self.output_window_size:]},
prediction_times=times[:, -self.output_window_size:])
def get_batch_loss(self, features, mode, state):
"""Computes predictions and a loss.
Args:
features: A dictionary (such as is produced by a chunker) with the
following key/value pairs (shapes are given as required for training):
TrainEvalFeatures.TIMES: A [batch size, self.window_size] integer
Tensor with times for each observation. To train on longer
sequences, the data should first be chunked.
TrainEvalFeatures.VALUES: A [batch size, self.window_size,
self.num_features] Tensor with values for each observation.
When evaluating, `TIMES` and `VALUES` must have a window size of at
least self.window_size, but it may be longer, in which case the last
window_size - self.input_window_size times (or fewer if this is not
divisible by self.output_window_size) will be evaluated on with
non-overlapping output windows (and will have associated
predictions). This is primarily to support qualitative
evaluation/plotting, and is not a recommended way to compute evaluation
losses (since there is no overlap in the output windows, which for
window-based models is an undesirable bias).
mode: The tf.estimator.ModeKeys mode to use (TRAIN or EVAL).
state: Unused
Returns:
A model.ModelOutputs object.
Raises:
ValueError: If `mode` is not TRAIN or EVAL, or if static shape information
is incorrect.
"""
features = {feature_name: ops.convert_to_tensor(feature_value)
for feature_name, feature_value in features.items()}
times = features[TrainEvalFeatures.TIMES]
exogenous_regressors = self._process_exogenous_features(
times=times,
features={key: value for key, value in features.items()
if key not in [TrainEvalFeatures.TIMES,
TrainEvalFeatures.VALUES,
PredictionFeatures.STATE_TUPLE]})
if mode == estimator_lib.ModeKeys.TRAIN:
# For training, we require the window size to be self.window_size as
# iterating sequentially on larger windows could introduce a bias.
return self._process_window(
features, mode=mode, exogenous_regressors=exogenous_regressors)
elif mode == estimator_lib.ModeKeys.EVAL:
# For evaluation, we allow the user to pass in a larger window, in which
# case we try to cover as much of the window as possible without
# overlap. Quantitative evaluation is more efficient/correct with fixed
# windows matching self.window_size (as with training), but this looping
# allows easy plotting of "in-sample" predictions.
times.get_shape().assert_has_rank(2)
static_window_size = times.get_shape()[1].value
if (static_window_size is not None
and static_window_size < self.window_size):
raise ValueError(
("ARModel requires a window of at least input_window_size + "
"output_window_size to evaluate on (input_window_size={}, "
"output_window_size={}, and got shape {} for feature '{}' (batch "
"size, window size)).").format(
self.input_window_size, self.output_window_size,
times.get_shape(), TrainEvalFeatures.TIMES))
num_iterations = ((array_ops.shape(times)[1] - self.input_window_size)
// self.output_window_size)
output_size = num_iterations * self.output_window_size
# Rather than dealing with overlapping windows of output, discard a bit at
# the beginning if output windows don't cover evenly.
crop_length = output_size + self.input_window_size
features = {feature_name: feature_value[:, -crop_length:]
for feature_name, feature_value in features.items()}
# Note that, unlike the ARModel's predict() while_loop and the
# SequentialTimeSeriesModel while_loop, each iteration here can run in
# parallel, since we are not feeding predictions or state from previous
# iterations.
def _while_condition(iteration_number, loss_ta, mean_ta, covariance_ta):
del loss_ta, mean_ta, covariance_ta # unused
return iteration_number < num_iterations
def _while_body(iteration_number, loss_ta, mean_ta, covariance_ta):
"""Perform a processing step on a single window of data."""
base_offset = iteration_number * self.output_window_size
model_outputs = self._process_window(
features={
feature_name:
feature_value[:, base_offset:base_offset + self.window_size]
for feature_name, feature_value in features.items()},
mode=mode,
exogenous_regressors=exogenous_regressors[
:, base_offset:base_offset + self.window_size])
# This code needs to be updated if new predictions are added in
# self._process_window
assert len(model_outputs.predictions) == 3
assert "mean" in model_outputs.predictions
assert "covariance" in model_outputs.predictions
assert "observed" in model_outputs.predictions
return (iteration_number + 1,
loss_ta.write(
iteration_number, model_outputs.loss),
mean_ta.write(
iteration_number, model_outputs.predictions["mean"]),
covariance_ta.write(
iteration_number, model_outputs.predictions["covariance"]))
_, loss_ta, mean_ta, covariance_ta = control_flow_ops.while_loop(
_while_condition, _while_body,
[0,
tensor_array_ops.TensorArray(dtype=self.dtype, size=num_iterations),
tensor_array_ops.TensorArray(dtype=self.dtype, size=num_iterations),
tensor_array_ops.TensorArray(dtype=self.dtype, size=num_iterations)])
values = math_ops.cast(features[TrainEvalFeatures.VALUES],
dtype=self.dtype)
batch_size = array_ops.shape(times)[0]
prediction_shape = [batch_size, self.output_window_size * num_iterations,
self.num_features]
(previous_state_times,
previous_state_values,
previous_state_exogenous_regressors) = state
# Make sure returned state always has windows of self.input_window_size,
# even if we were passed fewer than self.input_window_size points this
# time.
if self.input_window_size > 0:
new_state_times = array_ops.concat(
[previous_state_times,
math_ops.cast(times, dtype=dtypes.int64)],
axis=1)[:, -self.input_window_size:]
new_state_times.set_shape((None, self.input_window_size))
new_state_values = array_ops.concat(
[previous_state_values,
self._scale_data(values)], axis=1)[:, -self.input_window_size:, :]
new_state_values.set_shape((None, self.input_window_size,
self.num_features))
new_exogenous_regressors = array_ops.concat(
[previous_state_exogenous_regressors,
exogenous_regressors], axis=1)[:, -self.input_window_size:, :]
new_exogenous_regressors.set_shape(
(None,
self.input_window_size,
self.exogenous_size))
else:
# There is no state to keep, and the strided slices above do not handle
# input_window_size=0.
new_state_times = previous_state_times
new_state_values = previous_state_values
new_exogenous_regressors = previous_state_exogenous_regressors
return model.ModelOutputs(
loss=math_ops.reduce_mean(loss_ta.stack(), axis=0),
end_state=(new_state_times,
new_state_values,
new_exogenous_regressors),
predictions={
"mean": array_ops.reshape(
array_ops.transpose(mean_ta.stack(), [1, 0, 2, 3]),
prediction_shape),
"covariance": array_ops.reshape(
array_ops.transpose(covariance_ta.stack(), [1, 0, 2, 3]),
prediction_shape),
"observed": values[:, -output_size:]},
prediction_times=times[:, -output_size:])
else:
raise ValueError(
"Unknown mode '{}' passed to get_batch_loss.".format(mode))
def _compute_time_features(self, time):
"""Compute some features on the time value."""
batch_size = array_ops.shape(time)[0]
num_periods = len(self._periods)
# Reshape to 3D.
periods = constant_op.constant(
self._periods, shape=[1, 1, num_periods, 1], dtype=time.dtype)
time = array_ops.reshape(time, [batch_size, -1, 1, 1])
window_offset = time / self._periods
# Cast to appropriate type and scale to [0, 1) range
mod = (math_ops.cast(time % periods, self.dtype) * self._buckets /
math_ops.cast(periods, self.dtype))
# Bucketize based on some fixed width intervals. For a value t and interval
# [a, b), we return (t - a) if a <= t < b, else 0.
intervals = array_ops.reshape(
math_ops.range(self._buckets, dtype=self.dtype),
[1, 1, 1, self._buckets])
mod = nn_ops.relu(mod - intervals)
mod = array_ops.where(mod < 1.0, mod, array_ops.zeros_like(mod))
return window_offset, mod
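# Hedged worked example of the bucketized time features above (added comment,
# not in the original source): with a single period of 7 and
# self._buckets = 10, a time value of 3 yields window_offset = 3 / 7 and a
# scaled remainder of 3 * 10 / 7 ~= 4.29. Subtracting the interval starts
# [0, 1, ..., 9], applying relu and zeroing values >= 1 leaves
# [0, 0, 0, 0, 0.29, 0, ..., 0], i.e. the fractional position within the
# currently active bucket.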
class AnomalyMixtureARModel(ARModel):
"""Model data as a mixture of normal and anomaly distributions.
Note that this model works by changing the loss function to reduce the penalty
when predicting an anomalous target. However, the predictions are still based
on anomalous input features, and this may affect the quality of fit. One
possible solution is to downweight/filter anomalous inputs, but that requires
more sequential processing instead of completely random windows.
"""
GAUSSIAN_ANOMALY = "gaussian"
CAUCHY_ANOMALY = "cauchy"
def __init__(self,
periodicities,
anomaly_prior_probability,
input_window_size,
output_window_size,
num_features,
prediction_model_factory=FlatPredictionModel,
anomaly_distribution=GAUSSIAN_ANOMALY,
num_time_buckets=10,
exogenous_feature_columns=None):
assert (anomaly_prior_probability < 1.0 and
anomaly_prior_probability > 0.0)
self._anomaly_prior_probability = anomaly_prior_probability
assert anomaly_distribution in [
AnomalyMixtureARModel.GAUSSIAN_ANOMALY,
AnomalyMixtureARModel.CAUCHY_ANOMALY]
self._anomaly_distribution = anomaly_distribution
super(AnomalyMixtureARModel, self).__init__(
periodicities=periodicities,
num_features=num_features,
num_time_buckets=num_time_buckets,
input_window_size=input_window_size,
output_window_size=output_window_size,
loss=ARModel.NORMAL_LIKELIHOOD_LOSS,
prediction_model_factory=prediction_model_factory,
exogenous_feature_columns=exogenous_feature_columns)
def _create_anomaly_ops(self, times, values, prediction_ops_dict):
anomaly_log_param = variable_scope.get_variable(
"anomaly_log_param",
shape=[],
dtype=self.dtype,
initializer=init_ops.zeros_initializer())
# Anomaly param is the variance for Gaussian and scale for Cauchy
# distribution.
prediction_ops_dict["anomaly_params"] = gen_math_ops.exp(anomaly_log_param)
def prediction_ops(self, times, values, exogenous_regressors):
prediction_ops_dict = super(AnomalyMixtureARModel, self).prediction_ops(
times, values, exogenous_regressors)
self._create_anomaly_ops(times, values, prediction_ops_dict)
return prediction_ops_dict
def _anomaly_log_prob(self, targets, prediction_ops):
prediction = prediction_ops["mean"]
if self._anomaly_distribution == AnomalyMixtureARModel.GAUSSIAN_ANOMALY:
anomaly_variance = prediction_ops["anomaly_params"]
anomaly_sigma = math_ops.sqrt(
gen_math_ops.maximum(anomaly_variance, 1e-5))
normal = distributions.Normal(loc=targets, scale=anomaly_sigma)
log_prob = normal.log_prob(prediction)
else:
assert self._anomaly_distribution == AnomalyMixtureARModel.CAUCHY_ANOMALY
anomaly_scale = prediction_ops["anomaly_params"]
cauchy = distributions.StudentT(
df=array_ops.ones([], dtype=anomaly_scale.dtype),
loc=targets,
scale=anomaly_scale)
log_prob = cauchy.log_prob(prediction)
return log_prob
def loss_op(self, targets, prediction_ops):
"""Create loss_op."""
prediction = prediction_ops["mean"]
covariance = prediction_ops["covariance"]
# Normal data log probability.
sigma = math_ops.sqrt(gen_math_ops.maximum(covariance, 1e-5))
normal1 = distributions.Normal(loc=targets, scale=sigma)
log_prob1 = normal1.log_prob(prediction)
log_prob1 += math_ops.log(1 - self._anomaly_prior_probability)
# Anomaly log probability.
log_prob2 = self._anomaly_log_prob(targets, prediction_ops)
log_prob2 += math_ops.log(self._anomaly_prior_probability)
# We need to compute log(exp(log_prob1) + exp(log_prob2)). For numerical
# stability, we rewrite the expression as below.
p1 = gen_math_ops.minimum(log_prob1, log_prob2)
p2 = gen_math_ops.maximum(log_prob1, log_prob2)
mixed_log_prob = p2 + math_ops.log(1 + gen_math_ops.exp(p1 - p2))
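# Added comment (hedged illustration, not in the original): the identity
# log(exp(a) + exp(b)) = max(a, b) + log(1 + exp(min(a, b) - max(a, b)))
# avoids underflow. For example, with log_prob1 = -1000 and log_prob2 = -1001,
# exp() of either underflows to 0.0 in float64, whereas
# -1000 + log(1 + exp(-1)) ~= -999.687 is computed accurately.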
loss_op = -math_ops.reduce_sum(mixed_log_prob)
loss_op /= math_ops.cast(
math_ops.reduce_prod(array_ops.shape(targets)), self.dtype)
return loss_op
|
Azure/azure-sdk-for-python
|
refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline
|
sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2019_08_01/aio/operations/_domain_registration_provider_operations.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DomainRegistrationProviderOperations:
"""DomainRegistrationProviderOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.web.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_operations(
self,
**kwargs: Any
) -> AsyncIterable["_models.CsmOperationCollection"]:
"""Implements Csm operations Api to exposes the list of available Csm Apis under the resource provider.
Description for Implements Csm operations Api to exposes the list of available Csm Apis under
the resource provider.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CsmOperationCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2019_08_01.models.CsmOperationCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CsmOperationCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_operations.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CsmOperationCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_operations.metadata = {'url': '/providers/Microsoft.DomainRegistration/operations'} # type: ignore
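# Hedged usage sketch (added comment, not part of the generated client):
# assuming an authenticated async WebSiteManagementClient exposes this
# operation group as ``client.domain_registration_provider``, the pager can
# be consumed with:
#
#     async for operation in client.domain_registration_provider.list_operations():
#         print(operation.name)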
|
nicolargo/intellij-community
|
refs/heads/master
|
plugins/hg4idea/testData/bin/hgext/largefiles/proto.py
|
92
|
# Copyright 2011 Fog Creek Software
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import os
import urllib2
from mercurial import error, httppeer, util, wireproto
from mercurial.wireproto import batchable, future
from mercurial.i18n import _
import lfutil
LARGEFILES_REQUIRED_MSG = ('\nThis repository uses the largefiles extension.'
'\n\nPlease enable it in your Mercurial config '
'file.\n')
# these will all be replaced by largefiles.uisetup
capabilitiesorig = None
ssholdcallstream = None
httpoldcallstream = None
def putlfile(repo, proto, sha):
'''Put a largefile into a repository's local store and into the
user cache.'''
proto.redirect()
path = lfutil.storepath(repo, sha)
util.makedirs(os.path.dirname(path))
tmpfp = util.atomictempfile(path, createmode=repo.store.createmode)
try:
try:
proto.getfile(tmpfp)
tmpfp._fp.seek(0)
if sha != lfutil.hexsha1(tmpfp._fp):
raise IOError(0, _('largefile contents do not match hash'))
tmpfp.close()
lfutil.linktousercache(repo, sha)
except IOError, e:
repo.ui.warn(_('largefiles: failed to put %s into store: %s') %
(sha, e.strerror))
return wireproto.pushres(1)
finally:
tmpfp.discard()
return wireproto.pushres(0)
def getlfile(repo, proto, sha):
'''Retrieve a largefile from the repository-local cache or system
cache.'''
filename = lfutil.findfile(repo, sha)
if not filename:
raise util.Abort(_('requested largefile %s not present in cache') % sha)
f = open(filename, 'rb')
length = os.fstat(f.fileno())[6]
# Since we can't set an HTTP content-length header here, and
# Mercurial core provides no way to give the length of a streamres
# (and reading the entire file into RAM would be ill-advised), we
# just send the length on the first line of the response, like the
# ssh proto does for string responses.
def generator():
yield '%d\n' % length
for chunk in util.filechunkiter(f):
yield chunk
return wireproto.streamres(generator())
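# Hedged illustration of the stream produced above (added comment): for a
# 4096-byte largefile the response body would look like
#
#   4096\n<4096 bytes of raw file data, sent in chunks>
#
# i.e. a decimal length terminated by a newline followed by the contents,
# mirroring how the ssh protocol frames string responses.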
def statlfile(repo, proto, sha):
'''Return '2\n' if the largefile is missing, '0\n' if it seems to be in
good condition.
The value 1 is reserved for mismatched checksum, but that is too expensive
to be verified on every stat and must be caught by running 'hg verify'
server side.'''
filename = lfutil.findfile(repo, sha)
if not filename:
return '2\n'
return '0\n'
def wirereposetup(ui, repo):
class lfileswirerepository(repo.__class__):
def putlfile(self, sha, fd):
# unfortunately, httprepository._callpush tries to convert its
# input file-like into a bundle before sending it, so we can't use
# it ...
if issubclass(self.__class__, httppeer.httppeer):
res = None
try:
res = self._call('putlfile', data=fd, sha=sha,
headers={'content-type':'application/mercurial-0.1'})
d, output = res.split('\n', 1)
for l in output.splitlines(True):
self.ui.warn(_('remote: '), l, '\n')
return int(d)
except (ValueError, urllib2.HTTPError):
self.ui.warn(_('unexpected putlfile response: %s') % res)
return 1
# ... but we can't use sshrepository._call because the data=
# argument won't get sent, and _callpush does exactly what we want
# in this case: send the data straight through
else:
try:
ret, output = self._callpush("putlfile", fd, sha=sha)
if ret == "":
raise error.ResponseError(_('putlfile failed:'),
output)
return int(ret)
except IOError:
return 1
except ValueError:
raise error.ResponseError(
_('putlfile failed (unexpected response):'), ret)
def getlfile(self, sha):
"""returns an iterable with the chunks of the file with sha sha"""
stream = self._callstream("getlfile", sha=sha)
length = stream.readline()
try:
length = int(length)
except ValueError:
self._abort(error.ResponseError(_("unexpected response:"),
length))
# SSH streams will block if reading more than length
for chunk in util.filechunkiter(stream, 128 * 1024, length):
yield chunk
# HTTP streams must hit the end to process the last empty
# chunk of Chunked-Encoding so the connection can be reused.
if issubclass(self.__class__, httppeer.httppeer):
chunk = stream.read(1)
if chunk:
self._abort(error.ResponseError(_("unexpected response:"),
chunk))
@batchable
def statlfile(self, sha):
f = future()
result = {'sha': sha}
yield result, f
try:
yield int(f.value)
except (ValueError, urllib2.HTTPError):
# If the server returns anything but an integer followed by a
# newline, it's not speaking our language; if we get
# an HTTP error, we can't be sure the largefile is present;
# either way, consider it missing.
yield 2
repo.__class__ = lfileswirerepository
# advertise the largefiles=serve capability
def capabilities(repo, proto):
return capabilitiesorig(repo, proto) + ' largefiles=serve'
def heads(repo, proto):
if lfutil.islfilesrepo(repo):
return wireproto.ooberror(LARGEFILES_REQUIRED_MSG)
return wireproto.heads(repo, proto)
def sshrepocallstream(self, cmd, **args):
if cmd == 'heads' and self.capable('largefiles'):
cmd = 'lheads'
if cmd == 'batch' and self.capable('largefiles'):
args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
return ssholdcallstream(self, cmd, **args)
def httprepocallstream(self, cmd, **args):
if cmd == 'heads' and self.capable('largefiles'):
cmd = 'lheads'
if cmd == 'batch' and self.capable('largefiles'):
args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
return httpoldcallstream(self, cmd, **args)
|
QuantCrimAtLeeds/PredictCode
|
refs/heads/master
|
tests/gui/predictors/stscan_test.py
|
1
|
from .helper import *
import open_cp.gui.predictors.stscan as stscan
import datetime
def test_STScan(model, project_task, analysis_model, grid_task):
provider = stscan.STScan(model)
assert provider.settings_string == "geo(50%/3000m) time(50%/60days)"
standard_calls(provider, project_task, analysis_model, grid_task)
def test_ProHotspot_serialise(model, project_task, analysis_model, grid_task):
serialise( stscan.STScan(model) )
@mock.patch("open_cp.stscan.STSTrainer")
def test_STScan_subtask(model, project_task, analysis_model, grid_task):
provider = stscan.STScan(model)
subtask = standard_calls(provider, project_task, analysis_model, grid_task)
prediction = subtask(datetime.datetime(2017,6,7))
open_cp.stscan.STSTrainer.assert_called_with()
pred_mock = open_cp.stscan.STSTrainer.return_value
assert pred_mock.geographic_population_limit == 0.5
assert pred_mock.geographic_radius_limit == 3000
assert pred_mock.time_population_limit == 0.5
assert pred_mock.time_max_interval == np.timedelta64(60, "D")
np.testing.assert_array_equal(pred_mock.data.timestamps, [np.datetime64("2017-05-21T12:30"),
np.datetime64("2017-05-21T13:00"), np.datetime64("2017-05-21T13:30")])
np.testing.assert_array_equal(pred_mock.data.xcoords, [0,10,20])
np.testing.assert_array_equal(pred_mock.data.ycoords, [10,20,0])
assert pred_mock.region.xmin == 25
assert pred_mock.region.ymin == 35
assert pred_mock.region.xmax == 25 + 50 * 15
assert pred_mock.region.ymax == 35 + 50 * 10
predict_date = np.datetime64(datetime.datetime(2017,6,7))
pred_mock.predict.assert_called_with(time=predict_date)
result = pred_mock.predict.return_value
result.grid_prediction.assert_called_with(50, use_maximal_clusters=False)
assert prediction is result.grid_prediction.return_value
@mock.patch("open_cp.stscan.STSTrainer")
def test_STScan_cluster_options(model, project_task, analysis_model, grid_task):
provider = stscan.STScan(model)
provider.geographic_population_limit = 45
provider.time_population_limit = 55
provider.geographic_radius_limit = 1234
provider.time_max_interval = datetime.timedelta(days=23)
assert provider.settings_string == "geo(45%/1234m) time(55%/23days)"
data = provider.to_dict()
json_str = json.dumps(data)
provider.from_dict(json.loads(json_str))
assert provider.settings_string == "geo(45%/1234m) time(55%/23days)"
subtask = standard_calls(provider, project_task, analysis_model, grid_task)
prediction = subtask(datetime.datetime(2017,6,7))
pred_mock = open_cp.stscan.STSTrainer.return_value
assert pred_mock.geographic_population_limit == 0.45
assert pred_mock.geographic_radius_limit == 1234
assert pred_mock.time_population_limit == 0.55
assert pred_mock.time_max_interval == np.timedelta64(23, "D")
@mock.patch("open_cp.stscan.STSTrainer")
def test_STScan_from_training_start(model, project_task, analysis_model, grid_task):
analysis_model.time_range = (datetime.datetime(2017,5,21,12,59),None,None,None)
provider = stscan.STScan(model)
subtask = standard_calls(provider, project_task, analysis_model, grid_task)
prediction = subtask(datetime.datetime(2017,6,7))
pred_mock = open_cp.stscan.STSTrainer.return_value
np.testing.assert_array_equal(pred_mock.data.timestamps, [
np.datetime64("2017-05-21T13:00"), np.datetime64("2017-05-21T13:30")])
np.testing.assert_array_equal(pred_mock.data.xcoords, [10,20])
np.testing.assert_array_equal(pred_mock.data.ycoords, [20,0])
@mock.patch("open_cp.stscan.STSTrainer")
def test_STScan_time_window(model, project_task, analysis_model, grid_task):
provider = stscan.STScan(model)
provider.time_window_choice = 2
provider.time_window_length = datetime.timedelta(days=13)
assert provider.settings_string == "<=13days geo(50%/3000m) time(50%/60days)"
data = provider.to_dict()
json_str = json.dumps(data)
provider.from_dict(json.loads(json_str))
assert provider.settings_string == "<=13days geo(50%/3000m) time(50%/60days)"
subtask = standard_calls(provider, project_task, analysis_model, grid_task)
prediction = subtask(datetime.datetime(2017,5,21,13,29) + datetime.timedelta(days=13))
pred_mock = open_cp.stscan.STSTrainer.return_value
np.testing.assert_array_equal(pred_mock.data.timestamps, [
np.datetime64("2017-05-21T13:30")])
np.testing.assert_array_equal(pred_mock.data.xcoords, [20])
np.testing.assert_array_equal(pred_mock.data.ycoords, [0])
@mock.patch("open_cp.stscan.STSTrainer")
def test_STScan_quant_grid(model, project_task, analysis_model, grid_task):
provider = stscan.STScan(model)
provider.quantisation_choice = 2
assert provider.settings_string == "grid geo(50%/3000m) time(50%/60days)"
data = provider.to_dict()
json_str = json.dumps(data)
provider.from_dict(json.loads(json_str))
assert provider.settings_string == "grid geo(50%/3000m) time(50%/60days)"
subtask = standard_calls(provider, project_task, analysis_model, grid_task)
prediction = subtask(datetime.datetime(2017,5,21,13,31))
pred_mock = open_cp.stscan.STSTrainer.return_value
np.testing.assert_array_equal(pred_mock.data.timestamps, [np.datetime64("2017-05-21T12:30"),
np.datetime64("2017-05-21T13:00"), np.datetime64("2017-05-21T13:30")])
# Grid is (25,35) size 50
np.testing.assert_array_equal(pred_mock.data.xcoords, [0,0,0])
np.testing.assert_array_equal(pred_mock.data.ycoords, [10,10,10])
@mock.patch("open_cp.stscan.STSTrainer")
def test_STScan_quant_time(model, project_task, analysis_model, grid_task):
provider = stscan.STScan(model)
provider.quantisation_choice = 3
provider.time_bin_length = datetime.timedelta(days=2)
assert provider.settings_string == "bins(48hours) geo(50%/3000m) time(50%/60days)"
data = provider.to_dict()
json_str = json.dumps(data)
provider.from_dict(json.loads(json_str))
assert provider.settings_string == "bins(48hours) geo(50%/3000m) time(50%/60days)"
subtask = standard_calls(provider, project_task, analysis_model, grid_task)
prediction = subtask(datetime.datetime(2017,5,21,13,0))
pred_mock = open_cp.stscan.STSTrainer.return_value
np.testing.assert_array_equal(pred_mock.data.timestamps, [np.datetime64("2017-05-19T13:00"),
np.datetime64("2017-05-21T13:00"), np.datetime64("2017-05-21T13:00")])
np.testing.assert_array_equal(pred_mock.data.xcoords, [0,10,20])
np.testing.assert_array_equal(pred_mock.data.ycoords, [10,20,0])
@mock.patch("open_cp.stscan.STSTrainer")
def test_STScan_quant_both(model, project_task, analysis_model, grid_task):
provider = stscan.STScan(model)
provider.quantisation_choice = 4
provider.time_bin_length = datetime.timedelta(days=1)
assert provider.settings_string == "grid bins(24hours) geo(50%/3000m) time(50%/60days)"
data = provider.to_dict()
json_str = json.dumps(data)
provider.from_dict(json.loads(json_str))
assert provider.settings_string == "grid bins(24hours) geo(50%/3000m) time(50%/60days)"
subtask = standard_calls(provider, project_task, analysis_model, grid_task)
prediction = subtask(datetime.datetime(2017,5,21,13,0))
pred_mock = open_cp.stscan.STSTrainer.return_value
np.testing.assert_array_equal(pred_mock.data.timestamps, [np.datetime64("2017-05-20T13:00"),
np.datetime64("2017-05-21T13:00"), np.datetime64("2017-05-21T13:00")])
np.testing.assert_array_equal(pred_mock.data.xcoords, [0,0,0])
np.testing.assert_array_equal(pred_mock.data.ycoords, [10,10,10])
@mock.patch("open_cp.stscan.STSTrainer")
def test_STScan_max_clusters(model, project_task, analysis_model, grid_task):
provider = stscan.STScan(model)
provider.cluster_option = 2
assert provider.settings_string == "geo(50%/3000m) time(50%/60days) max"
data = provider.to_dict()
json_str = json.dumps(data)
provider.from_dict(json.loads(json_str))
assert provider.settings_string == "geo(50%/3000m) time(50%/60days) max"
subtask = standard_calls(provider, project_task, analysis_model, grid_task)
subtask(datetime.datetime(2017,5,21,13,0))
pred_mock = open_cp.stscan.STSTrainer.return_value
result = pred_mock.predict.return_value
result.grid_prediction.assert_called_with(50, use_maximal_clusters=True)
|
valexandersaulys/prudential_insurance_kaggle
|
refs/heads/master
|
venv/lib/python2.7/site-packages/numpy/core/machar.py
|
141
|
"""
Machine arithmetics - determine the parameters of the
floating-point arithmetic system
Author: Pearu Peterson, September 2003
"""
from __future__ import division, absolute_import, print_function
__all__ = ['MachAr']
from numpy.core.fromnumeric import any
from numpy.core.numeric import errstate
# Need to speed this up...especially for longfloat
class MachAr(object):
"""
Diagnosing machine parameters.
Attributes
----------
ibeta : int
Radix in which numbers are represented.
it : int
Number of base-`ibeta` digits in the floating point mantissa M.
machep : int
Exponent of the smallest (most negative) power of `ibeta` that,
added to 1.0, gives something different from 1.0
eps : float
Floating-point number ``beta**machep`` (floating point precision)
negep : int
Exponent of the smallest power of `ibeta` that, subtracted
from 1.0, gives something different from 1.0.
epsneg : float
Floating-point number ``beta**negep``.
iexp : int
Number of bits in the exponent (including its sign and bias).
minexp : int
Smallest (most negative) power of `ibeta` consistent with there
being no leading zeros in the mantissa.
xmin : float
Floating point number ``beta**minexp`` (the smallest [in
magnitude] usable floating value).
maxexp : int
Smallest (positive) power of `ibeta` that causes overflow.
xmax : float
``(1-epsneg) * beta**maxexp`` (the largest [in magnitude]
usable floating value).
irnd : int
In ``range(6)``, information on what kind of rounding is done
in addition, and on how underflow is handled.
ngrd : int
Number of 'guard digits' used when truncating the product
of two mantissas to fit the representation.
epsilon : float
Same as `eps`.
tiny : float
Same as `xmin`.
huge : float
Same as `xmax`.
precision : float
``- int(-log10(eps))``
resolution : float
``- 10**(-precision)``
Parameters
----------
float_conv : function, optional
Function that converts an integer or integer array to a float
or float array. Default is `float`.
int_conv : function, optional
Function that converts a float or float array to an integer or
integer array. Default is `int`.
float_to_float : function, optional
Function that converts a float array to float. Default is `float`.
Note that this does not seem to do anything useful in the current
implementation.
float_to_str : function, optional
Function that converts a single float to a string. Default is
``lambda v:'%24.16e' %v``.
title : str, optional
Title that is printed in the string representation of `MachAr`.
See Also
--------
finfo : Machine limits for floating point types.
iinfo : Machine limits for integer types.
References
----------
.. [1] Press, Teukolsky, Vetterling and Flannery,
"Numerical Recipes in C++," 2nd ed,
Cambridge University Press, 2002, p. 31.
"""
def __init__(self, float_conv=float,int_conv=int,
float_to_float=float,
float_to_str=lambda v:'%24.16e' % v,
title='Python floating point number'):
"""
float_conv - convert integer to float (array)
int_conv - convert float (array) to integer
float_to_float - convert float array to float
float_to_str - convert array float to str
title - description of used floating point numbers
"""
# We ignore all errors here because we are purposely triggering
# underflow to detect the properties of the running arch.
with errstate(under='ignore'):
self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)
def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title):
max_iterN = 10000
msg = "Did not converge after %d tries with %s"
one = float_conv(1)
two = one + one
zero = one - one
# Do we really need to do this? Aren't they 2 and 2.0?
# Determine ibeta and beta
a = one
for _ in range(max_iterN):
a = a + a
temp = a + one
temp1 = temp - a
if any(temp1 - one != zero):
break
else:
raise RuntimeError(msg % (_, one.dtype))
b = one
for _ in range(max_iterN):
b = b + b
temp = a + b
itemp = int_conv(temp-a)
if any(itemp != 0):
break
else:
raise RuntimeError(msg % (_, one.dtype))
ibeta = itemp
beta = float_conv(ibeta)
# Determine it and irnd
it = -1
b = one
for _ in range(max_iterN):
it = it + 1
b = b * beta
temp = b + one
temp1 = temp - b
if any(temp1 - one != zero):
break
else:
raise RuntimeError(msg % (_, one.dtype))
betah = beta / two
a = one
for _ in range(max_iterN):
a = a + a
temp = a + one
temp1 = temp - a
if any(temp1 - one != zero):
break
else:
raise RuntimeError(msg % (_, one.dtype))
temp = a + betah
irnd = 0
if any(temp-a != zero):
irnd = 1
tempa = a + beta
temp = tempa + betah
if irnd == 0 and any(temp-tempa != zero):
irnd = 2
# Determine negep and epsneg
negep = it + 3
betain = one / beta
a = one
for i in range(negep):
a = a * betain
b = a
for _ in range(max_iterN):
temp = one - a
if any(temp-one != zero):
break
a = a * beta
negep = negep - 1
# Prevent infinite loop on PPC with gcc 4.0:
if negep < 0:
raise RuntimeError("could not determine machine tolerance "
"for 'negep', locals() -> %s" % (locals()))
else:
raise RuntimeError(msg % (_, one.dtype))
negep = -negep
epsneg = a
# Determine machep and eps
machep = - it - 3
a = b
for _ in range(max_iterN):
temp = one + a
if any(temp-one != zero):
break
a = a * beta
machep = machep + 1
else:
raise RuntimeError(msg % (_, one.dtype))
eps = a
# Determine ngrd
ngrd = 0
temp = one + eps
if irnd == 0 and any(temp*one - one != zero):
ngrd = 1
# Determine iexp
i = 0
k = 1
z = betain
t = one + eps
nxres = 0
for _ in range(max_iterN):
y = z
z = y*y
a = z*one # Check here for underflow
temp = z*t
if any(a+a == zero) or any(abs(z) >= y):
break
temp1 = temp * betain
if any(temp1*beta == z):
break
i = i + 1
k = k + k
else:
raise RuntimeError(msg % (_, one.dtype))
if ibeta != 10:
iexp = i + 1
mx = k + k
else:
iexp = 2
iz = ibeta
while k >= iz:
iz = iz * ibeta
iexp = iexp + 1
mx = iz + iz - 1
# Determine minexp and xmin
for _ in range(max_iterN):
xmin = y
y = y * betain
a = y * one
temp = y * t
if any((a + a) != zero) and any(abs(y) < xmin):
k = k + 1
temp1 = temp * betain
if any(temp1*beta == y) and any(temp != y):
nxres = 3
xmin = y
break
else:
break
else:
raise RuntimeError(msg % (_, one.dtype))
minexp = -k
# Determine maxexp, xmax
if mx <= k + k - 3 and ibeta != 10:
mx = mx + mx
iexp = iexp + 1
maxexp = mx + minexp
irnd = irnd + nxres
if irnd >= 2:
maxexp = maxexp - 2
i = maxexp + minexp
if ibeta == 2 and not i:
maxexp = maxexp - 1
if i > 20:
maxexp = maxexp - 1
if any(a != y):
maxexp = maxexp - 2
xmax = one - epsneg
if any(xmax*one != xmax):
xmax = one - beta*epsneg
xmax = xmax / (xmin*beta*beta*beta)
i = maxexp + minexp + 3
for j in range(i):
if ibeta == 2:
xmax = xmax + xmax
else:
xmax = xmax * beta
self.ibeta = ibeta
self.it = it
self.negep = negep
self.epsneg = float_to_float(epsneg)
self._str_epsneg = float_to_str(epsneg)
self.machep = machep
self.eps = float_to_float(eps)
self._str_eps = float_to_str(eps)
self.ngrd = ngrd
self.iexp = iexp
self.minexp = minexp
self.xmin = float_to_float(xmin)
self._str_xmin = float_to_str(xmin)
self.maxexp = maxexp
self.xmax = float_to_float(xmax)
self._str_xmax = float_to_str(xmax)
self.irnd = irnd
self.title = title
# Commonly used parameters
self.epsilon = self.eps
self.tiny = self.xmin
self.huge = self.xmax
import math
self.precision = int(-math.log10(float_to_float(self.eps)))
ten = two + two + two + two + two
resolution = ten ** (-self.precision)
self.resolution = float_to_float(resolution)
self._str_resolution = float_to_str(resolution)
def __str__(self):
fmt = (
'Machine parameters for %(title)s\n'
'---------------------------------------------------------------------\n'
'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n'
'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n'
'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n'
'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n'
'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n'
'---------------------------------------------------------------------\n'
)
return fmt % self.__dict__
if __name__ == '__main__':
print(MachAr())
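# Hedged note (added comment): on an IEEE-754 double precision platform the
# detected parameters typically come out as ibeta=2, eps ~= 2.220446e-16,
# tiny (xmin) ~= 2.225074e-308 and huge (xmax) ~= 1.797693e+308, matching
# what numpy.finfo(numpy.float64) reports.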
|
ypu/tp-qemu
|
refs/heads/master
|
qemu/tests/timerdevice_change_guest_clksource.py
|
3
|
import logging
import re
from autotest.client.shared import error
from virttest import data_dir, storage, utils_disk, env_process
@error.context_aware
def run(test, params, env):
"""
Timer device check guest after update kernel line without kvmclock:
1) Boot a guest with kvm-clock
2) Check the current clocksource in guest
3) Check the available clocksource in guest
4) Update "clocksource=" parameter in guest kernel cli
5) Boot guest system
6) Check the current clocksource in guest
:param test: QEMU test object.
:param params: Dictionary with test parameters.
:param env: Dictionary with the test environment.
"""
def verify_guest_clock_source(session, expected):
error.context("Check the current clocksource in guest", logging.info)
cmd = "cat /sys/devices/system/clocksource/"
cmd += "clocksource0/current_clocksource"
if not expected in session.cmd(cmd):
raise error.TestFail(
"Guest didn't use '%s' clocksource" % expected)
error.context("Boot a guest with kvm-clock", logging.info)
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=timeout)
error.context("Check the current clocksource in guest", logging.info)
cmd = "cat /sys/devices/system/clocksource/"
cmd += "clocksource0/current_clocksource"
if not "kvm-clock" in session.cmd(cmd):
grub_file = params.get("grub_file", "/boot/grub2/grub.cfg")
if "clocksource=" not in session.cmd("cat %s" % grub_file):
raise error.TestFail("Guest didn't use 'kvm-clock' clocksource")
error.context("Shutdown guest")
vm.destroy()
env.unregister_vm(vm.name)
error.context("Update guest kernel cli to kvm-clock",
logging.info)
image_filename = storage.get_image_filename(params,
data_dir.get_data_dir())
kernel_cfg_pattern = params.get("kernel_cfg_pos_reg",
r".*vmlinuz-\d+.*")
disk_obj = utils_disk.GuestFSModiDisk(image_filename)
kernel_cfg_original = disk_obj.read_file(grub_file)
try:
logging.warn("Update the first kernel entry to"
" kvm-clock only")
kernel_cfg = re.findall(kernel_cfg_pattern,
kernel_cfg_original)[0]
except IndexError, detail:
raise error.TestError("Couldn't find the kernel config, regex"
" pattern is '%s', detail: '%s'" %
(kernel_cfg_pattern, detail))
if "clocksource=" in kernel_cfg:
kernel_cfg_new = re.sub("clocksource=[a-z\- ]+", " ", kernel_cfg)
disk_obj.replace_image_file_content(grub_file, kernel_cfg,
kernel_cfg_new)
error.context("Boot the guest", logging.info)
vm_name = params["main_vm"]
cpu_model_flags = params.get("cpu_model_flags")
params["cpu_model_flags"] = cpu_model_flags + ",-kvmclock"
env_process.preprocess_vm(test, params, env, vm_name)
vm = env.get_vm(vm_name)
vm.verify_alive()
session = vm.wait_for_login(timeout=timeout)
error.context("Check the available clocksource in guest", logging.info)
cmd = "cat /sys/devices/system/clocksource/"
cmd += "clocksource0/available_clocksource"
try:
available_clksrc_list = session.cmd(cmd).splitlines()[-1].split()
available_clksrc_list = [_.strip() for _ in available_clksrc_list]
except Exception, detail:
raise error.TestFail("Couldn't get guest available clock source."
" Detail: '%s'" % detail)
try:
for clksrc in available_clksrc_list:
error.context("Shutdown guest")
vm.destroy()
env.unregister_vm(vm.name)
error.context("Update guest kernel cli to '%s'" % clksrc,
logging.info)
image_filename = storage.get_image_filename(params,
data_dir.get_data_dir())
grub_file = params.get("grub_file", "/boot/grub2/grub.cfg")
kernel_cfg_pattern = params.get("kernel_cfg_pos_reg",
r".*vmlinuz-\d+.*")
disk_obj = utils_disk.GuestFSModiDisk(image_filename)
kernel_cfg_original = disk_obj.read_file(grub_file)
try:
logging.warn("Update the first kernel entry to"
" '%s' only" % clksrc)
kernel_cfg = re.findall(kernel_cfg_pattern,
kernel_cfg_original)[0]
except IndexError, detail:
raise error.TestError("Couldn't find the kernel config, regex"
" pattern is '%s', detail: '%s'" %
(kernel_cfg_pattern, detail))
if "clocksource=" in kernel_cfg:
kernel_cfg_new = re.sub("clocksource=[a-z \-_]+",
"clocksource=%s " % clksrc, kernel_cfg)
else:
kernel_cfg_new = "%s %s" % (kernel_cfg,
"clocksource=%s" % clksrc)
disk_obj.replace_image_file_content(grub_file, kernel_cfg,
kernel_cfg_new)
error.context("Boot the guest", logging.info)
if clksrc != "kvm-clock":
cpu_model_flags = params.get("cpu_model_flags")
if "-kvmclock" not in cpu_model_flags:
params["cpu_model_flags"] = cpu_model_flags + ",-kvmclock"
vm_name = params["main_vm"]
env_process.preprocess_vm(test, params, env, vm_name)
vm = env.get_vm(vm_name)
vm.verify_alive()
session = vm.wait_for_login(timeout=timeout)
error.context("Check the current clocksource in guest",
logging.info)
verify_guest_clock_source(session, clksrc)
finally:
try:
error.context("Shutdown guest")
vm.destroy()
error.context("Restore guest kernel cli", logging.info)
image_filename = storage.get_image_filename(params,
data_dir.get_data_dir())
grub_file = params.get("grub_file", "/boot/grub2/grub.cfg")
kernel_cfg_pattern = params.get("kernel_cfg_pos_reg",
r".*vmlinuz-\d+.*")
disk_obj = utils_disk.GuestFSModiDisk(image_filename)
kernel_cfg_original = disk_obj.read_file(grub_file)
try:
kernel_cfg = re.findall(kernel_cfg_pattern,
kernel_cfg_original)[0]
except IndexError, detail:
raise error.TestError("Couldn't find the kernel config, regex"
" pattern is '%s', detail: '%s'" %
(kernel_cfg_pattern, detail))
if "clocksource=" in kernel_cfg:
kernel_cfg_new = re.sub(
"clocksource=[a-z \-_]+", " ", kernel_cfg)
disk_obj.replace_image_file_content(grub_file, kernel_cfg,
kernel_cfg_new)
except Exception, detail:
logging.error("Failed to restore guest kernel cli."
" Detail: '%s'" % detail)
|
iamutkarshtiwari/sympy
|
refs/heads/master
|
sympy/integrals/__init__.py
|
59
|
"""Integration functions that integrates a sympy expression.
Examples
========
>>> from sympy import integrate, sin
>>> from sympy.abc import x
>>> integrate(1/x,x)
log(x)
>>> integrate(sin(x),x)
-cos(x)
"""
from .integrals import integrate, Integral, line_integrate
from .transforms import (mellin_transform, inverse_mellin_transform,
MellinTransform, InverseMellinTransform,
laplace_transform, inverse_laplace_transform,
LaplaceTransform, InverseLaplaceTransform,
fourier_transform, inverse_fourier_transform,
FourierTransform, InverseFourierTransform,
sine_transform, inverse_sine_transform,
SineTransform, InverseSineTransform,
cosine_transform, inverse_cosine_transform,
CosineTransform, InverseCosineTransform,
hankel_transform, inverse_hankel_transform,
HankelTransform, InverseHankelTransform)
|
eeshangarg/oh-mainline
|
refs/heads/master
|
vendor/packages/celery/celery/tests/test_events/test_events_state.py
|
18
|
from __future__ import absolute_import
from time import time
from itertools import count
from celery import states
from celery.events import Event
from celery.events.state import State, Worker, Task, HEARTBEAT_EXPIRE
from celery.utils import uuid
from celery.tests.utils import unittest
class replay(object):
def __init__(self, state):
self.state = state
self.rewind()
self.setup()
def setup(self):
pass
def __iter__(self):
return self
def __next__(self):
try:
self.state.event(self.events[self.position()])
except IndexError:
raise StopIteration()
next = __next__
def rewind(self):
self.position = count(0).next
return self
def play(self):
for _ in self:
pass
class ev_worker_online_offline(replay):
def setup(self):
self.events = [
Event("worker-online", hostname="utest1"),
Event("worker-offline", hostname="utest1"),
]
class ev_worker_heartbeats(replay):
def setup(self):
self.events = [
Event("worker-heartbeat", hostname="utest1",
timestamp=time() - HEARTBEAT_EXPIRE * 2),
Event("worker-heartbeat", hostname="utest1"),
]
class ev_task_states(replay):
def setup(self):
tid = self.tid = uuid()
self.events = [
Event("task-received", uuid=tid, name="task1",
args="(2, 2)", kwargs="{'foo': 'bar'}",
retries=0, eta=None, hostname="utest1"),
Event("task-started", uuid=tid, hostname="utest1"),
Event("task-revoked", uuid=tid, hostname="utest1"),
Event("task-retried", uuid=tid, exception="KeyError('bar')",
traceback="line 2 at main", hostname="utest1"),
Event("task-failed", uuid=tid, exception="KeyError('foo')",
traceback="line 1 at main", hostname="utest1"),
Event("task-succeeded", uuid=tid, result="4",
runtime=0.1234, hostname="utest1"),
]
class ev_snapshot(replay):
def setup(self):
self.events = [
Event("worker-online", hostname="utest1"),
Event("worker-online", hostname="utest2"),
Event("worker-online", hostname="utest3"),
]
for i in range(20):
worker = not i % 2 and "utest2" or "utest1"
type = not i % 2 and "task2" or "task1"
self.events.append(Event("task-received", name=type,
uuid=uuid(), hostname=worker))
class test_Worker(unittest.TestCase):
def test_survives_missing_timestamp(self):
worker = Worker(hostname="foo")
worker.on_heartbeat(timestamp=None)
self.assertEqual(worker.heartbeats, [])
def test_repr(self):
self.assertTrue(repr(Worker(hostname="foo")))
class test_Task(unittest.TestCase):
def test_info(self):
task = Task(uuid="abcdefg",
name="tasks.add",
args="(2, 2)",
kwargs="{}",
retries=2,
result=42,
eta=1,
runtime=0.0001,
expires=1,
exception=1,
received=time() - 10,
started=time() - 8,
succeeded=time())
self.assertEqual(sorted(list(task._info_fields)),
sorted(task.info().keys()))
self.assertEqual(sorted(list(task._info_fields + ("received", ))),
sorted(task.info(extra=("received", ))))
self.assertEqual(sorted(["args", "kwargs"]),
sorted(task.info(["args", "kwargs"]).keys()))
def test_ready(self):
task = Task(uuid="abcdefg",
name="tasks.add")
task.on_received(timestamp=time())
self.assertFalse(task.ready)
task.on_succeeded(timestamp=time())
self.assertTrue(task.ready)
def test_sent(self):
task = Task(uuid="abcdefg",
name="tasks.add")
task.on_sent(timestamp=time())
self.assertEqual(task.state, states.PENDING)
def test_merge(self):
task = Task()
task.on_failed(timestamp=time())
task.on_started(timestamp=time())
task.on_received(timestamp=time(), name="tasks.add", args=(2, 2))
self.assertEqual(task.state, states.FAILURE)
self.assertEqual(task.name, "tasks.add")
self.assertTupleEqual(task.args, (2, 2))
task.on_retried(timestamp=time())
self.assertEqual(task.state, states.RETRY)
def test_repr(self):
self.assertTrue(repr(Task(uuid="xxx", name="tasks.add")))
class test_State(unittest.TestCase):
def test_repr(self):
self.assertTrue(repr(State()))
def test_worker_online_offline(self):
r = ev_worker_online_offline(State())
r.next()
self.assertTrue(r.state.alive_workers())
self.assertTrue(r.state.workers["utest1"].alive)
r.play()
self.assertFalse(r.state.alive_workers())
self.assertFalse(r.state.workers["utest1"].alive)
def test_worker_heartbeat_expire(self):
r = ev_worker_heartbeats(State())
r.next()
self.assertFalse(r.state.alive_workers())
self.assertFalse(r.state.workers["utest1"].alive)
r.play()
self.assertTrue(r.state.alive_workers())
self.assertTrue(r.state.workers["utest1"].alive)
def test_task_states(self):
r = ev_task_states(State())
# RECEIVED
r.next()
self.assertTrue(r.tid in r.state.tasks)
task = r.state.tasks[r.tid]
self.assertEqual(task.state, states.RECEIVED)
self.assertTrue(task.received)
self.assertEqual(task.timestamp, task.received)
self.assertEqual(task.worker.hostname, "utest1")
# STARTED
r.next()
self.assertTrue(r.state.workers["utest1"].alive,
"any task event adds worker heartbeat")
self.assertEqual(task.state, states.STARTED)
self.assertTrue(task.started)
self.assertEqual(task.timestamp, task.started)
self.assertEqual(task.worker.hostname, "utest1")
# REVOKED
r.next()
self.assertEqual(task.state, states.REVOKED)
self.assertTrue(task.revoked)
self.assertEqual(task.timestamp, task.revoked)
self.assertEqual(task.worker.hostname, "utest1")
# RETRY
r.next()
self.assertEqual(task.state, states.RETRY)
self.assertTrue(task.retried)
self.assertEqual(task.timestamp, task.retried)
self.assertEqual(task.worker.hostname, "utest1")
self.assertEqual(task.exception, "KeyError('bar')")
self.assertEqual(task.traceback, "line 2 at main")
# FAILURE
r.next()
self.assertEqual(task.state, states.FAILURE)
self.assertTrue(task.failed)
self.assertEqual(task.timestamp, task.failed)
self.assertEqual(task.worker.hostname, "utest1")
self.assertEqual(task.exception, "KeyError('foo')")
self.assertEqual(task.traceback, "line 1 at main")
# SUCCESS
r.next()
self.assertEqual(task.state, states.SUCCESS)
self.assertTrue(task.succeeded)
self.assertEqual(task.timestamp, task.succeeded)
self.assertEqual(task.worker.hostname, "utest1")
self.assertEqual(task.result, "4")
self.assertEqual(task.runtime, 0.1234)
def assertStateEmpty(self, state):
self.assertFalse(state.tasks)
self.assertFalse(state.workers)
self.assertFalse(state.event_count)
self.assertFalse(state.task_count)
def assertState(self, state):
self.assertTrue(state.tasks)
self.assertTrue(state.workers)
self.assertTrue(state.event_count)
self.assertTrue(state.task_count)
def test_freeze_while(self):
s = State()
r = ev_snapshot(s)
r.play()
def work():
pass
s.freeze_while(work, clear_after=True)
self.assertFalse(s.event_count)
s2 = State()
r = ev_snapshot(s2)
r.play()
s2.freeze_while(work, clear_after=False)
self.assertTrue(s2.event_count)
def test_clear_tasks(self):
s = State()
r = ev_snapshot(s)
r.play()
self.assertTrue(s.tasks)
s.clear_tasks(ready=False)
self.assertFalse(s.tasks)
def test_clear(self):
r = ev_snapshot(State())
r.play()
self.assertTrue(r.state.event_count)
self.assertTrue(r.state.workers)
self.assertTrue(r.state.tasks)
self.assertTrue(r.state.task_count)
r.state.clear()
self.assertFalse(r.state.event_count)
self.assertFalse(r.state.workers)
self.assertTrue(r.state.tasks)
self.assertFalse(r.state.task_count)
r.state.clear(False)
self.assertFalse(r.state.tasks)
def test_task_types(self):
r = ev_snapshot(State())
r.play()
self.assertEqual(sorted(r.state.task_types()), ["task1", "task2"])
def test_tasks_by_timestamp(self):
r = ev_snapshot(State())
r.play()
self.assertEqual(len(r.state.tasks_by_timestamp()), 20)
def test_tasks_by_type(self):
r = ev_snapshot(State())
r.play()
self.assertEqual(len(r.state.tasks_by_type("task1")), 10)
self.assertEqual(len(r.state.tasks_by_type("task2")), 10)
def test_alive_workers(self):
r = ev_snapshot(State())
r.play()
self.assertEqual(len(r.state.alive_workers()), 3)
def test_tasks_by_worker(self):
r = ev_snapshot(State())
r.play()
self.assertEqual(len(r.state.tasks_by_worker("utest1")), 10)
self.assertEqual(len(r.state.tasks_by_worker("utest2")), 10)
def test_survives_unknown_worker_event(self):
s = State()
s.worker_event("worker-unknown-event-xxx", {"foo": "bar"})
s.worker_event("worker-unknown-event-xxx", {"hostname": "xxx",
"foo": "bar"})
def test_survives_unknown_task_event(self):
s = State()
s.task_event("task-unknown-event-xxx", {"foo": "bar",
"uuid": "x",
"hostname": "y"})
def test_callback(self):
scratch = {}
def callback(state, event):
scratch["recv"] = True
s = State(callback=callback)
s.event({"type": "worker-online"})
self.assertTrue(scratch.get("recv"))
|
faulkner/prospector
|
refs/heads/master
|
prospector/suppression.py
|
2
|
r"""
Each tool has its own method of ignoring errors and warnings.
For example, pylint requires a comment of the form
# pylint: disable=<error codes>
PEP8 will not warn on lines with
# noqa
Additionally, flake8 follows that convention for pyflakes errors,
but pyflakes itself does not.
Finally, an entire file is ignored by flake8 if this line is found
in the file:
# flake8\: noqa (the \ is needed to stop prospector ignoring this file :))
This module's job is to attempt to collect all of these methods into
a single coherent list of error suppression locations.
"""
from collections import defaultdict
import os
import re
_FLAKE8_IGNORE_FILE = re.compile(r'flake8[:=]\s*noqa', re.IGNORECASE)
_PEP8_IGNORE_LINE = re.compile(r'#\s+noqa', re.IGNORECASE)
_PYLINT_SUPPRESSED_MESSAGE = re.compile(r'^Suppressed \'([a-z0-9-]+)\' \(from line \d+\)$')
def get_noqa_suppressions(file_contents):
"""
Finds all pep8/flake8 suppression messages
:param file_contents:
A list of file lines
:return:
A pair - the first is whether to ignore the whole file, the
second is a set of (1-indexed) line numbers to ignore.
"""
ignore_whole_file = False
ignore_lines = set()
for line_number, line in enumerate(file_contents):
if _FLAKE8_IGNORE_FILE.search(line):
ignore_whole_file = True
if _PEP8_IGNORE_LINE.search(line):
ignore_lines.add(line_number + 1)
return ignore_whole_file, ignore_lines
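# Hedged example of the helper above (added comment): for file_contents equal
# to ["import os # noqa\n", "x = 1\n", "# flake8: noqa\n"] the function
# returns (True, {1}) -- the whole file is ignored because of the flake8
# marker, and line 1 is individually suppressed by its trailing "# noqa".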
_PYLINT_EQUIVALENTS = {
# TODO: blending has this info already?
'unused-import': (
('pyflakes', 'FL0001'),
('frosted', 'E101'),
)
}
def _parse_pylint_informational(messages):
ignore_files = set()
ignore_messages = defaultdict(lambda: defaultdict(list))
for message in messages:
if message.source == 'pylint':
if message.code == 'suppressed-message':
# this is a message indicating that a message was raised
# by pylint but suppressed by configuration in the file
match = _PYLINT_SUPPRESSED_MESSAGE.match(message.message)
suppressed_code = match.group(1)
line_dict = ignore_messages[message.location.path]
line_dict[message.location.line].append(suppressed_code)
elif message.code == 'file-ignored':
ignore_files.add(message.location.path)
return ignore_files, ignore_messages
def get_suppressions(relative_filepaths, root, messages):
"""
Given every message which was emitted by the tools, and the
list of files to inspect, create a list of files to ignore,
and a map of filepath -> line-number -> codes to ignore
"""
paths_to_ignore = set()
lines_to_ignore = defaultdict(set)
messages_to_ignore = defaultdict(lambda: defaultdict(set))
# first deal with 'noqa' style messages
for filepath in relative_filepaths:
abspath = os.path.join(root, filepath)
with open(abspath) as modulefile:
file_contents = modulefile.readlines()
ignore_file, ignore_lines = get_noqa_suppressions(file_contents)
if ignore_file:
paths_to_ignore.add(filepath)
lines_to_ignore[filepath] |= ignore_lines
# now figure out which messages were suppressed by pylint
pylint_ignore_files, pylint_ignore_messages = _parse_pylint_informational(messages)
paths_to_ignore |= pylint_ignore_files
for filepath, line in pylint_ignore_messages.items():
for line_number, codes in line.items():
for code in codes:
messages_to_ignore[filepath][line_number].add(('pylint', code))
if code in _PYLINT_EQUIVALENTS:
for equivalent in _PYLINT_EQUIVALENTS[code]:
messages_to_ignore[filepath][line_number].add(equivalent)
return paths_to_ignore, lines_to_ignore, messages_to_ignore
|
endlessm/chromium-browser
|
refs/heads/master
|
content/test/gpu/gold_inexact_matching/parameter_set.py
|
1
|
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class ParameterSet(object):
"""Struct-like object for holding parameters for an iteration."""
def __init__(self, max_diff, delta_threshold, edge_threshold):
"""
Args:
max_diff: The maximum number of pixels that are allowed to differ.
delta_threshold: The maximum per-channel delta sum that is allowed.
edge_threshold: The threshold for what is considered an edge for a
Sobel filter.
"""
self.max_diff = max_diff
self.delta_threshold = delta_threshold
self.edge_threshold = edge_threshold
def AsList(self):
"""Returns the object's data in list format.
The returned object is suitable for appending to a "goldctl match" command
in order to compare using the parameters stored within the object.
Returns:
A list of strings.
"""
return [
'--parameter',
'fuzzy_max_different_pixels:%d' % self.max_diff,
'--parameter',
'fuzzy_pixel_delta_threshold:%d' % self.delta_threshold,
'--parameter',
'sobel_edge_threshold:%d' % self.edge_threshold,
]
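# Hedged usage note (added comment): ParameterSet(10, 30, 40).AsList() would
# return ['--parameter', 'fuzzy_max_different_pixels:10', '--parameter',
# 'fuzzy_pixel_delta_threshold:30', '--parameter', 'sobel_edge_threshold:40'],
# ready to append to a "goldctl match" invocation.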
def __str__(self):
return ('Max different pixels: %d, Max per-channel delta sum: %d, Sobel '
'edge threshold: %d') % (self.max_diff, self.delta_threshold,
self.edge_threshold)
def __eq__(self, other):
return (self.max_diff == other.max_diff
and self.delta_threshold == other.delta_threshold
and self.edge_threshold == other.edge_threshold)
def __hash__(self):
return hash((self.max_diff, self.delta_threshold, self.edge_threshold))
|
ccn-2m/django
|
refs/heads/master
|
django/conf/locale/de/formats.py
|
107
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
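# Hedged illustration (added comment): with the separators above and
# USE_THOUSAND_SEPARATOR enabled, 1234567.89 renders as "1.234.567,89",
# while a date such as 25 October 2006 formats as "25.10.2006" under
# SHORT_DATE_FORMAT and "25. Oktober 2006" under DATE_FORMAT.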
|
shaialon/vim-awesome
|
refs/heads/master
|
db/categories.py
|
5
|
import collections
import os
import rethinkdb as r
import yaml
import db.util
r_conn = db.util.r_conn
def get_all():
filename = os.path.join(os.path.dirname(__file__), 'categories.yaml')
with open(filename) as f:
categories = yaml.safe_load(f)
_aggregate_category_tags(categories)
return categories
def _aggregate_category_tags(categories):
"""Mutates categories with the tags that belong to each category.
For each category, we derive all the tags that belong to that category by
merging the tags of all the plugins of that category.
"""
for category in categories:
category_plugins = r.table('plugins').filter(
{'category': category['id']}).pluck('tags').run(r_conn())
tags_counter = collections.Counter()
for plugin in category_plugins:
tags_counter.update(plugin['tags'])
category['tags'] = [
{'id': k, 'count': v} for k, v in tags_counter.most_common()]
|
wshallum/ansible
|
refs/heads/devel
|
lib/ansible/utils/module_docs_fragments/auth_basic.py
|
39
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
api_url:
required: false
default: null
description:
- The resolvable endpoint for the API
api_username:
required: false
default: null
description:
- The username to use for authentication against the API
api_password:
required: false
default: null
description:
- The password to use for authentication against the API
validate_certs:
required: false
default: yes
description:
- Whether or not to validate SSL certs when supplying a https endpoint.
"""
|
kartikluke/chammakbot
|
refs/heads/master
|
vendored/urllib3/util/ssl_.py
|
87
|
from __future__ import absolute_import
import errno
import warnings
import hmac
from binascii import hexlify, unhexlify
from hashlib import md5, sha1, sha256
from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning
SSLContext = None
HAS_SNI = False
IS_PYOPENSSL = False
IS_SECURETRANSPORT = False
# Maps the length of a digest to a possible hash function producing this digest
HASHFUNC_MAP = {
32: md5,
40: sha1,
64: sha256,
}
def _const_compare_digest_backport(a, b):
"""
Compare two digests of equal length in constant time.
The digests must be of type str/bytes.
Returns True if the digests match, and False otherwise.
"""
result = abs(len(a) - len(b))
for l, r in zip(bytearray(a), bytearray(b)):
result |= l ^ r
return result == 0
_const_compare_digest = getattr(hmac, 'compare_digest',
_const_compare_digest_backport)
try: # Test for SSL features
import ssl
from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
from ssl import HAS_SNI # Has SNI?
except ImportError:
pass
try:
from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
except ImportError:
OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
OP_NO_COMPRESSION = 0x20000
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - Prefer TLS 1.3 cipher suites
# - prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
# security,
# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
# - disable NULL authentication, MD5 MACs and DSS for security reasons.
DEFAULT_CIPHERS = ':'.join([
'TLS13-AES-256-GCM-SHA384',
'TLS13-CHACHA20-POLY1305-SHA256',
'TLS13-AES-128-GCM-SHA256',
'ECDH+AESGCM',
'ECDH+CHACHA20',
'DH+AESGCM',
'DH+CHACHA20',
'ECDH+AES256',
'DH+AES256',
'ECDH+AES128',
'DH+AES',
'RSA+AESGCM',
'RSA+AES',
'!aNULL',
'!eNULL',
'!MD5',
])
try:
from ssl import SSLContext # Modern SSL?
except ImportError:
import sys
class SSLContext(object): # Platform-specific: Python 2 & 3.1
supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or
(3, 2) <= sys.version_info)
def __init__(self, protocol_version):
self.protocol = protocol_version
# Use default values from a real SSLContext
self.check_hostname = False
self.verify_mode = ssl.CERT_NONE
self.ca_certs = None
self.options = 0
self.certfile = None
self.keyfile = None
self.ciphers = None
def load_cert_chain(self, certfile, keyfile):
self.certfile = certfile
self.keyfile = keyfile
def load_verify_locations(self, cafile=None, capath=None):
self.ca_certs = cafile
if capath is not None:
raise SSLError("CA directories not supported in older Pythons")
def set_ciphers(self, cipher_suite):
if not self.supports_set_ciphers:
raise TypeError(
'Your version of Python does not support setting '
'a custom cipher suite. Please upgrade to Python '
'2.7, 3.2, or later if you need this functionality.'
)
self.ciphers = cipher_suite
def wrap_socket(self, socket, server_hostname=None, server_side=False):
warnings.warn(
'A true SSLContext object is not available. This prevents '
'urllib3 from configuring SSL appropriately and may cause '
'certain SSL connections to fail. You can upgrade to a newer '
'version of Python to solve this. For more information, see '
'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
'#ssl-warnings',
InsecurePlatformWarning
)
kwargs = {
'keyfile': self.keyfile,
'certfile': self.certfile,
'ca_certs': self.ca_certs,
'cert_reqs': self.verify_mode,
'ssl_version': self.protocol,
'server_side': server_side,
}
if self.supports_set_ciphers: # Platform-specific: Python 2.7+
return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
else: # Platform-specific: Python 2.6
return wrap_socket(socket, **kwargs)
def assert_fingerprint(cert, fingerprint):
"""
Checks if given fingerprint matches the supplied certificate.
:param cert:
Certificate as bytes object.
:param fingerprint:
Fingerprint as string of hexdigits, can be interspersed by colons.
"""
fingerprint = fingerprint.replace(':', '').lower()
digest_length = len(fingerprint)
hashfunc = HASHFUNC_MAP.get(digest_length)
if not hashfunc:
raise SSLError(
'Fingerprint of invalid length: {0}'.format(fingerprint))
    # We need encode() here for py32; works on py2 and py3.
fingerprint_bytes = unhexlify(fingerprint.encode())
cert_digest = hashfunc(cert).digest()
if not _const_compare_digest(cert_digest, fingerprint_bytes):
raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
.format(fingerprint, hexlify(cert_digest)))
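# Illustrative sketch (editorial addition, not part of the vendored module):
# shows how assert_fingerprint() is used for certificate pinning. The DER
# bytes below are a stand-in; real code would pass
# sock.getpeercert(binary_form=True).
def _example_fingerprint_pinning():
    fake_der_cert = b'not-a-real-der-certificate'
    pinned = sha256(fake_der_cert).hexdigest()  # 64 hex chars -> sha256 branch
    assert_fingerprint(fake_der_cert, pinned)   # silent when digests match
    try:
        assert_fingerprint(fake_der_cert, '00' * 32)  # deliberately wrong pin
    except SSLError:
        pass  # a mismatch raises SSLError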
def resolve_cert_reqs(candidate):
"""
Resolves the argument to a numeric constant, which can be passed to
the wrap_socket function/method from the ssl module.
Defaults to :data:`ssl.CERT_NONE`.
If given a string it is assumed to be the name of the constant in the
    :mod:`ssl` module or its abbreviation.
    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
If it's neither `None` nor a string we assume it is already the numeric
constant which can directly be passed to wrap_socket.
"""
if candidate is None:
return CERT_NONE
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'CERT_' + candidate)
return res
return candidate
def resolve_ssl_version(candidate):
"""
    Like resolve_cert_reqs(), but resolves the argument to an ssl protocol
    version constant (e.g. 'SSLv23' or 'PROTOCOL_SSLv23' -> ssl.PROTOCOL_SSLv23).
"""
if candidate is None:
return PROTOCOL_SSLv23
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'PROTOCOL_' + candidate)
return res
return candidate
def create_urllib3_context(ssl_version=None, cert_reqs=None,
options=None, ciphers=None):
"""All arguments have the same meaning as ``ssl_wrap_socket``.
By default, this function does a lot of the same work that
``ssl.create_default_context`` does on Python 3.4+. It:
- Disables SSLv2, SSLv3, and compression
- Sets a restricted set of server ciphers
If you wish to enable SSLv3, you can do::
from urllib3.util import ssl_
context = ssl_.create_urllib3_context()
context.options &= ~ssl_.OP_NO_SSLv3
You can do the same to enable compression (substituting ``COMPRESSION``
for ``SSLv3`` in the last line above).
:param ssl_version:
The desired protocol version to use. This will default to
PROTOCOL_SSLv23 which will negotiate the highest protocol that both
the server and your installation of OpenSSL support.
:param cert_reqs:
Whether to require the certificate verification. This defaults to
``ssl.CERT_REQUIRED``.
:param options:
Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
:param ciphers:
Which cipher suites to allow the server to select.
:returns:
Constructed SSLContext object with specified options
:rtype: SSLContext
"""
context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
# Setting the default here, as we may have no ssl module on import
cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
if options is None:
options = 0
# SSLv2 is easily broken and is considered harmful and dangerous
options |= OP_NO_SSLv2
# SSLv3 has several problems and is now dangerous
options |= OP_NO_SSLv3
# Disable compression to prevent CRIME attacks for OpenSSL 1.0+
# (issue #309)
options |= OP_NO_COMPRESSION
context.options |= options
if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6
context.set_ciphers(ciphers or DEFAULT_CIPHERS)
context.verify_mode = cert_reqs
if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
# We do our own verification, including fingerprints and alternative
# hostnames. So disable it here
context.check_hostname = False
return context
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None, ciphers=None, ssl_context=None,
ca_cert_dir=None):
"""
All arguments except for server_hostname, ssl_context, and ca_cert_dir have
the same meaning as they do when using :func:`ssl.wrap_socket`.
:param server_hostname:
When SNI is supported, the expected hostname of the certificate
:param ssl_context:
A pre-made :class:`SSLContext` object. If none is provided, one will
be created using :func:`create_urllib3_context`.
:param ciphers:
A string of ciphers we wish the client to support. This is not
supported on Python 2.6 as the ssl module does not support it.
:param ca_cert_dir:
A directory containing CA certificates in multiple separate files, as
supported by OpenSSL's -CApath flag or the capath argument to
SSLContext.load_verify_locations().
"""
context = ssl_context
if context is None:
# Note: This branch of code and all the variables in it are no longer
# used by urllib3 itself. We should consider deprecating and removing
# this code.
context = create_urllib3_context(ssl_version, cert_reqs,
ciphers=ciphers)
if ca_certs or ca_cert_dir:
try:
context.load_verify_locations(ca_certs, ca_cert_dir)
except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2
raise SSLError(e)
# Py33 raises FileNotFoundError which subclasses OSError
# These are not equivalent unless we check the errno attribute
except OSError as e: # Platform-specific: Python 3.3 and beyond
if e.errno == errno.ENOENT:
raise SSLError(e)
raise
elif getattr(context, 'load_default_certs', None) is not None:
# try to load OS default certs; works well on Windows (require Python3.4+)
context.load_default_certs()
if certfile:
context.load_cert_chain(certfile, keyfile)
if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
return context.wrap_socket(sock, server_hostname=server_hostname)
warnings.warn(
'An HTTPS request has been made, but the SNI (Subject Name '
'Indication) extension to TLS is not available on this platform. '
'This may cause the server to present an incorrect TLS '
'certificate, which can cause validation failures. You can upgrade to '
'a newer version of Python to solve this. For more information, see '
'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
'#ssl-warnings',
SNIMissingWarning
)
return context.wrap_socket(sock)
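# Illustrative sketch (editorial addition, not part of the vendored module):
# the typical combination of the two helpers above: build a hardened context
# once, then wrap an already-connected TCP socket with SNI. The socket,
# hostname and CA bundle path are placeholders supplied by the caller.
def _example_wrap_client_socket(sock, hostname, ca_bundle_path):
    context = create_urllib3_context(cert_reqs=ssl.CERT_REQUIRED)
    return ssl_wrap_socket(sock, ca_certs=ca_bundle_path,
                           server_hostname=hostname, ssl_context=context)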
|
poddus/gtrnkvrkf
|
refs/heads/master
|
alchemy/initialize_db.py
|
1
|
from config import *
from sqlalchemy import Table, Column, Integer, Float, String, DateTime, MetaData, join, ForeignKey
from sqlalchemy.orm import relationship
class Product(Base):
"""
Common base class for all products.
A 'unit' is the smallest deliverable unit.
"""
__tablename__ = "tblProducts"
artNum = Column(Integer, primary_key=True, autoincrement=False)
name = Column(String(32))
bottlesPerUnit = Column(Integer)
cratesPerUnit = Column(Integer)
bottlePfand = Column(Float)
def __repr__(self):
# print products as tabulate table
table = []
table.append(["Artikel#", self.artNum])
table.append(["Name", self.name])
table.append(["Fl pro E", self.bottlesPerUnit])
table.append(["Ka pro E", self.cratesPerUnit])
table.append(["Pfand pro Fl", self.bottlePfand])
return (tabulate(table, numalign="center"))
# unit cost no longer stored in Product class/tbl. define function anyway (db-query StockTake) for convenience?
# def get_cost_MwSt(self):
# return self.unitCost*1.19
#
# def get_bottle_price(self):
# return round(((self.get_cost_MwSt() / self.bottlesPerUnit) + 0.1), 2)
"""----------------------------------------------------------------------------------------------"""
class Order(Base):
__tablename__ = "tblOrder"
orderID = Column(Integer, primary_key=True)
timestamp = Column(Integer) # how does this one work?
note = Column(String)
def get_total(self):
# add up subtotals from OrderDetail.get_subtotals(), return a nice table
pass
# http://docs.sqlalchemy.org/en/latest/orm/tutorial.html#building-a-relationship
orderdetail = relationship("OrderDetail")
class OrderDetail(Base):
__tablename__ = "tblOrderDetail"
orderDetailID = Column(Integer, primary_key=True)
orderID = Column(Integer, ForeignKey('tblOrder.orderID'))
artNum = Column(Integer, ForeignKey('tblProducts.artNum'))
quantity = Column(Integer)
pfandCrates = Column(Float)
pfandBottles = Column(Integer)
def get_subtotals(self, orderID):
# query tblOrderDetail for all entries with given orderID
# query
pass
"""----------------------------------------------------------------------------------------------"""
class StockTake(Base):
__tablename__ = "tblStockTake"
stockTakeID = Column(Integer, primary_key=True)
timestamp = Column(Integer) # how does this one work?
note = Column(String)
stocktakedetail = relationship("StockTakeDetail", back_populates="stocktake")
    def get_inventory_value(self, StockTake):
        # should value be what we paid for it or what we get when selling?
        total = 0
        details = self.stocktakedetail
        for detail in details:
            total += detail.get_unit_price()
        return total
class StockTakeDetail(Base):
"""
'cost' is what we pay.
'price' is what the customer pays.
"""
__tablename__ = "tblStockTakeDetail"
stockTakeDetailID = Column(Integer, primary_key=True)
stockTakeID = Column(Integer, ForeignKey('tblStockTake.stockTakeID'))
artNum = Column(Integer, ForeignKey('tblProducts.artNum'))
quantity = Column(Integer)
unitCost = Column(Float) # pro Liefereinheit, also praktisch pro Kasten
bottleSurcharge = Column(Float)
pfandCrates = Column(Float)
pfandBottles = Column(Integer)
stocktake = relationship("StockTake", back_populates="stocktakedetail")
product = relationship("Product")
def get_unit_price(self):
return (self.unitCost*1.19 + (self.bottleSurcharge * self.product.bottlesPerUnit))
Base.metadata.create_all(engine)
"""----------------------------------------------------------------------------------------------"""
# If they don't already exist, add Products for Pfand crates/bottles to the database
#
# This is necessary in order to track additional Pfand Bottles without having to
# worry about what's on the label
# I need this and can't get it from config due to circular dependencies
def init_check_exists(input):
for instance in session.query(Product.artNum):
if instance.artNum == input:
return True
return False
crates = Product(
artNum = 10000,
name = "Pfandkasten",
bottlesPerUnit = 0,
cratesPerUnit = 0,
bottlePfand = 0.08,
)
bottle008 = Product(
artNum = 10001,
name = "0.08 Pfandflasche",
bottlesPerUnit = 0,
cratesPerUnit = 0,
bottlePfand = 0.08,
)
bottle015 = Product(
artNum = 10002,
name = "0.15 Pfandflasche",
bottlesPerUnit = 0,
cratesPerUnit = 0,
bottlePfand = 0.15,
)
if init_check_exists(crates.artNum) is False:
session.add(crates)
session.commit()
if init_check_exists(bottle008.artNum) is False:
session.add(bottle008)
session.commit()
if init_check_exists(bottle015.artNum) is False:
session.add(bottle015)
session.commit()
|
mrquim/mrquimrepo
|
refs/heads/master
|
repo/script.module.youtube.dl/lib/youtube_dl/extractor/rds.py
|
57
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
parse_duration,
parse_iso8601,
js_to_json,
)
from ..compat import compat_str
class RDSIE(InfoExtractor):
IE_DESC = 'RDS.ca'
_VALID_URL = r'https?://(?:www\.)?rds\.ca/vid(?:[eé]|%C3%A9)os/(?:[^/]+/)*(?P<id>[^/]+)-\d+\.\d+'
_TESTS = [{
'url': 'http://www.rds.ca/videos/football/nfl/fowler-jr-prend-la-direction-de-jacksonville-3.1132799',
'info_dict': {
'id': '604333',
'display_id': 'fowler-jr-prend-la-direction-de-jacksonville',
'ext': 'mp4',
'title': 'Fowler Jr. prend la direction de Jacksonville',
'description': 'Dante Fowler Jr. est le troisième choix du repêchage 2015 de la NFL. ',
'timestamp': 1430397346,
'upload_date': '20150430',
'duration': 154.354,
'age_limit': 0,
}
}, {
'url': 'http://www.rds.ca/vid%C3%A9os/un-voyage-positif-3.877934',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
item = self._parse_json(self._search_regex(r'(?s)itemToPush\s*=\s*({.+?});', webpage, 'item'), display_id, js_to_json)
video_id = compat_str(item['id'])
title = item.get('title') or self._og_search_title(webpage) or self._html_search_meta(
'title', webpage, 'title', fatal=True)
description = self._og_search_description(webpage) or self._html_search_meta(
'description', webpage, 'description')
thumbnail = item.get('urlImageBig') or self._og_search_thumbnail(webpage) or self._search_regex(
[r'<link[^>]+itemprop="thumbnailUrl"[^>]+href="([^"]+)"',
r'<span[^>]+itemprop="thumbnailUrl"[^>]+content="([^"]+)"'],
webpage, 'thumbnail', fatal=False)
timestamp = parse_iso8601(self._search_regex(
r'<span[^>]+itemprop="uploadDate"[^>]+content="([^"]+)"',
webpage, 'upload date', fatal=False))
duration = parse_duration(self._search_regex(
r'<span[^>]+itemprop="duration"[^>]+content="([^"]+)"',
webpage, 'duration', fatal=False))
age_limit = self._family_friendly_search(webpage)
return {
'_type': 'url_transparent',
'id': video_id,
'display_id': display_id,
'url': '9c9media:rds_web:%s' % video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'duration': duration,
'age_limit': age_limit,
'ie_key': 'NineCNineMedia',
}
|
PokemonGoF/PokemonGo-Bot-Desktop
|
refs/heads/development
|
build/pywin/Lib/encodings/mac_croatian.py
|
593
|
""" Python Character Mapping Codec mac_croatian generated from 'MAPPINGS/VENDORS/APPLE/CROATIAN.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-croatian',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\u2206' # 0xB4 -> INCREMENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\uf8ff' # 0xD8 -> Apple logo
u'\xa9' # 0xD9 -> COPYRIGHT SIGN
u'\u2044' # 0xDA -> FRACTION SLASH
u'\u20ac' # 0xDB -> EURO SIGN
u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\xc6' # 0xDE -> LATIN CAPITAL LETTER AE
u'\xbb' # 0xDF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2013' # 0xE0 -> EN DASH
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u03c0' # 0xF9 -> GREEK SMALL LETTER PI
u'\xcb' # 0xFA -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\xca' # 0xFD -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xe6' # 0xFE -> LATIN SMALL LETTER AE
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
Petrole/MaturePyRobots
|
refs/heads/master
|
WebPyRobot/backend/migrations/0008_notification.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-15 09:47
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('backend', '0007_battlehistory'),
]
operations = [
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.CharField(max_length=200)),
('is_read', models.BooleanField(default=False)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notifications', to=settings.AUTH_USER_MODEL)),
],
),
]
|
bashrc/zeronet-debian
|
refs/heads/master
|
src/update.py
|
1
|
#!/usr/bin/python
import urllib
import zipfile
import os
import ssl
import httplib
import socket
import re
import cStringIO as StringIO
from gevent import monkey
monkey.patch_all()
def update():
# Gevent https bug workaround (https://github.com/gevent/gevent/issues/477)
reload(socket)
reload(httplib)
reload(ssl)
print "Downloading.",
file = urllib.urlopen("https://github.com/HelloZeroNet/ZeroNet/archive/master.zip")
data = StringIO.StringIO()
while True:
buff = file.read(1024 * 16)
if not buff:
break
data.write(buff)
print ".",
print "Downloaded."
# Checking plugins
plugins_enabled = []
plugins_disabled = []
if os.path.isdir("plugins"):
for dir in os.listdir("plugins"):
if dir.startswith("disabled-"):
plugins_disabled.append(dir.replace("disabled-", ""))
else:
plugins_enabled.append(dir)
print "Plugins enabled:", plugins_enabled, "disabled:", plugins_disabled
print "Extracting...",
zip = zipfile.ZipFile(data)
for inner_path in zip.namelist():
if ".." in inner_path:
continue
inner_path = inner_path.replace("\\", "/") # Make sure we have unix path
print ".",
dest_path = inner_path.replace("ZeroNet-master/", "")
if not dest_path:
continue
# Keep plugin disabled/enabled status
match = re.match("plugins/([^/]+)", dest_path)
if match:
plugin_name = match.group(1).replace("disabled-", "")
if plugin_name in plugins_enabled: # Plugin was enabled
dest_path = dest_path.replace("plugins/disabled-" + plugin_name, "plugins/" + plugin_name)
elif plugin_name in plugins_disabled: # Plugin was disabled
dest_path = dest_path.replace("plugins/" + plugin_name, "plugins/disabled-" + plugin_name)
print "P",
dest_dir = os.path.dirname(dest_path)
if dest_dir and not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
if dest_dir != dest_path.strip("/"):
data = zip.read(inner_path)
try:
open(dest_path, 'wb').write(data)
except Exception, err:
print dest_path, err
print "Done."
if __name__ == "__main__":
try:
update()
except Exception, err:
print "Update error: %s" % err
raw_input("Press enter to exit")
|
ahamilton55/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/ovirt/ovirt_datacenters_facts.py
|
45
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_datacenters_facts
short_description: Retrieve facts about one or more oVirt/RHV datacenters
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt/RHV datacenters."
notes:
- "This module creates a new top-level C(ovirt_datacenters) fact, which
contains a list of datacenters."
options:
pattern:
description:
- "Search term which is accepted by oVirt/RHV search backend."
- "For example to search datacenter I(X) use following pattern: I(name=X)"
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all data centers which names start with C(production):
- ovirt_datacenters_facts:
pattern: name=production*
- debug:
var: ovirt_datacenters
'''
RETURN = '''
ovirt_datacenters:
description: "List of dictionaries describing the datacenters. Datacenter attribues are mapped to dictionary keys,
all datacenters attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/data_center."
returned: On success.
type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
argument_spec = ovirt_facts_full_argument_spec(
pattern=dict(default='', required=False),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
datacenters_service = connection.system_service().data_centers_service()
datacenters = datacenters_service.list(search=module.params['pattern'])
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_datacenters=[
get_dict_of_struct(
struct=d,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for d in datacenters
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
|
alisaifee/AutobahnPython
|
refs/heads/master
|
examples/asyncio/websocket/echo/server.py
|
13
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from autobahn.asyncio.websocket import WebSocketServerProtocol, \
WebSocketServerFactory
class MyServerProtocol(WebSocketServerProtocol):
def onConnect(self, request):
print("Client connecting: {0}".format(request.peer))
def onOpen(self):
print("WebSocket connection open.")
def onMessage(self, payload, isBinary):
if isBinary:
print("Binary message received: {0} bytes".format(len(payload)))
else:
print("Text message received: {0}".format(payload.decode('utf8')))
## echo back message verbatim
self.sendMessage(payload, isBinary)
def onClose(self, wasClean, code, reason):
print("WebSocket connection closed: {0}".format(reason))
if __name__ == '__main__':
import asyncio
factory = WebSocketServerFactory("ws://localhost:9000", debug = False)
factory.protocol = MyServerProtocol
loop = asyncio.get_event_loop()
coro = loop.create_server(factory, '127.0.0.1', 9000)
server = loop.run_until_complete(coro)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
server.close()
loop.close()
|
RachitKansal/scikit-learn
|
refs/heads/master
|
sklearn/random_projection.py
|
207
|
# -*- coding: utf8
"""Random Projection transformers
Random Projections are a simple and computationally efficient way to
reduce the dimensionality of the data by trading a controlled amount
of accuracy (as additional variance) for faster processing times and
smaller model sizes.
The dimensions and distribution of Random Projections matrices are
controlled so as to preserve the pairwise distances between any two
samples of the dataset.
The main theoretical result behind the efficiency of random projection is the
`Johnson-Lindenstrauss lemma (quoting Wikipedia)
<http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma>`_:
In mathematics, the Johnson-Lindenstrauss lemma is a result
concerning low-distortion embeddings of points from high-dimensional
into low-dimensional Euclidean space. The lemma states that a small set
of points in a high-dimensional space can be embedded into a space of
much lower dimension in such a way that distances between the points are
nearly preserved. The map used for the embedding is at least Lipschitz,
and can even be taken to be an orthogonal projection.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>,
# Arnaud Joly <a.joly@ulg.ac.be>
# License: BSD 3 clause
from __future__ import division
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.testing import assert_equal
import scipy.sparse as sp
from .base import BaseEstimator, TransformerMixin
from .externals import six
from .externals.six.moves import xrange
from .utils import check_random_state
from .utils.extmath import safe_sparse_dot
from .utils.random import sample_without_replacement
from .utils.validation import check_array, NotFittedError
from .utils import DataDimensionalityWarning
__all__ = ["SparseRandomProjection",
"GaussianRandomProjection",
"johnson_lindenstrauss_min_dim"]
def johnson_lindenstrauss_min_dim(n_samples, eps=0.1):
"""Find a 'safe' number of components to randomly project to
The distortion introduced by a random projection `p` only changes the
distance between two points by a factor (1 +- eps) in an euclidean space
with good probability. The projection `p` is an eps-embedding as defined
by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features], eps is in ]0, 1[ and p is a projection by a random Gaussian
N(0, 1) matrix with shape [n_components, n_features] (or a sparse
Achlioptas matrix).
The minimum number of components to guarantee the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
Note that the number of dimensions is independent of the original
number of features but instead depends on the size of the dataset:
the larger the dataset, the higher is the minimal dimensionality of
an eps-embedding.
Read more in the :ref:`User Guide <johnson_lindenstrauss>`.
Parameters
----------
n_samples : int or numpy array of int greater than 0,
Number of samples. If an array is given, it will compute
a safe number of components array-wise.
eps : float or numpy array of float in ]0,1[, optional (default=0.1)
Maximum distortion rate as defined by the Johnson-Lindenstrauss lemma.
If an array is given, it will compute a safe number of components
array-wise.
Returns
-------
n_components : int or numpy array of int,
The minimal number of components to guarantee with good probability
an eps-embedding with n_samples.
Examples
--------
>>> johnson_lindenstrauss_min_dim(1e6, eps=0.5)
663
>>> johnson_lindenstrauss_min_dim(1e6, eps=[0.5, 0.1, 0.01])
array([ 663, 11841, 1112658])
>>> johnson_lindenstrauss_min_dim([1e4, 1e5, 1e6], eps=0.1)
array([ 7894, 9868, 11841])
References
----------
.. [1] http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
.. [2] Sanjoy Dasgupta and Anupam Gupta, 1999,
"An elementary proof of the Johnson-Lindenstrauss Lemma."
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.45.3654
"""
eps = np.asarray(eps)
n_samples = np.asarray(n_samples)
if np.any(eps <= 0.0) or np.any(eps >= 1):
raise ValueError(
"The JL bound is defined for eps in ]0, 1[, got %r" % eps)
    if np.any(n_samples <= 0):
raise ValueError(
"The JL bound is defined for n_samples greater than zero, got %r"
% n_samples)
denominator = (eps ** 2 / 2) - (eps ** 3 / 3)
return (4 * np.log(n_samples) / denominator).astype(np.int)
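# Worked example (editorial addition): for n_samples = 1e6 and eps = 0.5 the
# bound above gives denominator = 0.5 ** 2 / 2 - 0.5 ** 3 / 3 = 0.125 - 0.0417
# = 0.0833, and 4 * log(1e6) / 0.0833 = 55.26 / 0.0833 ~ 663, matching the
# first doctest in the docstring.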
def _check_density(density, n_features):
"""Factorize density check according to Li et al."""
if density == 'auto':
density = 1 / np.sqrt(n_features)
elif density <= 0 or density > 1:
raise ValueError("Expected density in range ]0, 1], got: %r"
% density)
return density
def _check_input_size(n_components, n_features):
"""Factorize argument checking for random matrix generation"""
if n_components <= 0:
raise ValueError("n_components must be strictly positive, got %d" %
n_components)
if n_features <= 0:
raise ValueError("n_features must be strictly positive, got %d" %
                         n_features)
def gaussian_random_matrix(n_components, n_features, random_state=None):
""" Generate a dense Gaussian random matrix.
The components of the random matrix are drawn from
N(0, 1.0 / n_components).
Read more in the :ref:`User Guide <gaussian_random_matrix>`.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
random_state : int, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Returns
-------
components : numpy array of shape [n_components, n_features]
The generated Gaussian random matrix.
See Also
--------
GaussianRandomProjection
sparse_random_matrix
"""
_check_input_size(n_components, n_features)
rng = check_random_state(random_state)
components = rng.normal(loc=0.0,
scale=1.0 / np.sqrt(n_components),
size=(n_components, n_features))
return components
def sparse_random_matrix(n_components, n_features, density='auto',
random_state=None):
"""Generalized Achlioptas random sparse matrix for random projection
Setting density to 1 / 3 will yield the original matrix by Dimitris
Achlioptas while setting a lower value will yield the generalization
by Ping Li et al.
If we note :math:`s = 1 / density`, the components of the random matrix are
drawn from:
- -sqrt(s) / sqrt(n_components) with probability 1 / 2s
- 0 with probability 1 - 1 / s
- +sqrt(s) / sqrt(n_components) with probability 1 / 2s
Read more in the :ref:`User Guide <sparse_random_matrix>`.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
density : float in range ]0, 1] or 'auto', optional (default='auto')
Ratio of non-zero component in the random projection matrix.
If density = 'auto', the value is set to the minimum density
as recommended by Ping Li et al.: 1 / sqrt(n_features).
Use density = 1 / 3.0 if you want to reproduce the results from
Achlioptas, 2001.
random_state : integer, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Returns
-------
components: numpy array or CSR matrix with shape [n_components, n_features]
The generated Gaussian random matrix.
See Also
--------
SparseRandomProjection
gaussian_random_matrix
References
----------
.. [1] Ping Li, T. Hastie and K. W. Church, 2006,
"Very Sparse Random Projections".
http://www.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf
.. [2] D. Achlioptas, 2001, "Database-friendly random projections",
http://www.cs.ucsc.edu/~optas/papers/jl.pdf
"""
_check_input_size(n_components, n_features)
density = _check_density(density, n_features)
rng = check_random_state(random_state)
if density == 1:
# skip index generation if totally dense
components = rng.binomial(1, 0.5, (n_components, n_features)) * 2 - 1
return 1 / np.sqrt(n_components) * components
else:
# Generate location of non zero elements
indices = []
offset = 0
indptr = [offset]
for i in xrange(n_components):
# find the indices of the non-zero components for row i
n_nonzero_i = rng.binomial(n_features, density)
indices_i = sample_without_replacement(n_features, n_nonzero_i,
random_state=rng)
indices.append(indices_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
# Among non zero components the probability of the sign is 50%/50%
data = rng.binomial(1, 0.5, size=np.size(indices)) * 2 - 1
# build the CSR structure by concatenating the rows
components = sp.csr_matrix((data, indices, indptr),
shape=(n_components, n_features))
return np.sqrt(1 / density) / np.sqrt(n_components) * components
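# Illustrative sketch (editorial addition, not part of the original module):
# empirically checks the distribution documented above. With density = 1/3
# (the original Achlioptas construction) roughly one third of the entries are
# non-zero and the non-zero values are +/- sqrt(3 / n_components).
def _example_sparse_matrix_density():
    components = sparse_random_matrix(100, 1000, density=1 / 3.0,
                                      random_state=0)
    observed_density = components.nnz / float(100 * 1000)
    return observed_density  # expected to be close to 1 / 3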
class BaseRandomProjection(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class for random projections.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self, n_components='auto', eps=0.1, dense_output=False,
random_state=None):
self.n_components = n_components
self.eps = eps
self.dense_output = dense_output
self.random_state = random_state
self.components_ = None
self.n_components_ = None
@abstractmethod
def _make_random_matrix(n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
def fit(self, X, y=None):
"""Generate a sparse random projection matrix
Parameters
----------
X : numpy array or scipy.sparse of shape [n_samples, n_features]
Training set: only the shape is used to find optimal random
matrix dimensions based on the theory referenced in the
            aforementioned papers.
y : is not used: placeholder to allow for usage in a Pipeline.
Returns
-------
self
"""
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples, n_features = X.shape
if self.n_components == 'auto':
self.n_components_ = johnson_lindenstrauss_min_dim(
n_samples=n_samples, eps=self.eps)
if self.n_components_ <= 0:
raise ValueError(
'eps=%f and n_samples=%d lead to a target dimension of '
'%d which is invalid' % (
self.eps, n_samples, self.n_components_))
elif self.n_components_ > n_features:
raise ValueError(
'eps=%f and n_samples=%d lead to a target dimension of '
'%d which is larger than the original space with '
'n_features=%d' % (self.eps, n_samples, self.n_components_,
n_features))
else:
if self.n_components <= 0:
raise ValueError("n_components must be greater than 0, got %s"
                                 % self.n_components)
elif self.n_components > n_features:
warnings.warn(
"The number of components is higher than the number of"
" features: n_features < n_components (%s < %s)."
"The dimensionality of the problem will not be reduced."
% (n_features, self.n_components),
DataDimensionalityWarning)
self.n_components_ = self.n_components
# Generate a projection matrix of size [n_components, n_features]
self.components_ = self._make_random_matrix(self.n_components_,
n_features)
# Check contract
assert_equal(
self.components_.shape,
(self.n_components_, n_features),
            err_msg=('An error has occurred: the self.components_ matrix '
                     'does not have the proper shape.'))
return self
def transform(self, X, y=None):
"""Project the data by using matrix product with the random matrix
Parameters
----------
X : numpy array or scipy.sparse of shape [n_samples, n_features]
The input data to project into a smaller dimensional space.
y : is not used: placeholder to allow for usage in a Pipeline.
Returns
-------
X_new : numpy array or scipy sparse of shape [n_samples, n_components]
Projected array.
"""
X = check_array(X, accept_sparse=['csr', 'csc'])
if self.components_ is None:
raise NotFittedError('No random projection matrix had been fit.')
if X.shape[1] != self.components_.shape[1]:
raise ValueError(
'Impossible to perform projection:'
'X at fit stage had a different number of features. '
'(%s != %s)' % (X.shape[1], self.components_.shape[1]))
X_new = safe_sparse_dot(X, self.components_.T,
dense_output=self.dense_output)
return X_new
class GaussianRandomProjection(BaseRandomProjection):
"""Reduce dimensionality through Gaussian random projection
The components of the random matrix are drawn from N(0, 1 / n_components).
Read more in the :ref:`User Guide <gaussian_random_matrix>`.
Parameters
----------
n_components : int or 'auto', optional (default = 'auto')
Dimensionality of the target projection space.
n_components can be automatically adjusted according to the
number of samples in the dataset and the bound given by the
Johnson-Lindenstrauss lemma. In that case the quality of the
embedding is controlled by the ``eps`` parameter.
        It should be noted that the Johnson-Lindenstrauss lemma can yield
        very conservative estimates of the required number of components
as it makes no assumption on the structure of the dataset.
eps : strictly positive float, optional (default=0.1)
Parameter to control the quality of the embedding according to
the Johnson-Lindenstrauss lemma when n_components is set to
'auto'.
Smaller values lead to better embedding and higher number of
dimensions (n_components) in the target projection space.
random_state : integer, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Attributes
----------
    n_components_ : int
Concrete number of components computed when n_components="auto".
components_ : numpy array of shape [n_components, n_features]
Random matrix used for the projection.
See Also
--------
SparseRandomProjection
"""
def __init__(self, n_components='auto', eps=0.1, random_state=None):
super(GaussianRandomProjection, self).__init__(
n_components=n_components,
eps=eps,
dense_output=True,
random_state=random_state)
def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
random_state = check_random_state(self.random_state)
return gaussian_random_matrix(n_components,
n_features,
random_state=random_state)
class SparseRandomProjection(BaseRandomProjection):
"""Reduce dimensionality through sparse random projection
Sparse random matrix is an alternative to dense random
projection matrix that guarantees similar embedding quality while being
much more memory efficient and allowing faster computation of the
projected data.
If we note `s = 1 / density` the components of the random matrix are
drawn from:
- -sqrt(s) / sqrt(n_components) with probability 1 / 2s
- 0 with probability 1 - 1 / s
- +sqrt(s) / sqrt(n_components) with probability 1 / 2s
Read more in the :ref:`User Guide <sparse_random_matrix>`.
Parameters
----------
n_components : int or 'auto', optional (default = 'auto')
Dimensionality of the target projection space.
n_components can be automatically adjusted according to the
number of samples in the dataset and the bound given by the
Johnson-Lindenstrauss lemma. In that case the quality of the
embedding is controlled by the ``eps`` parameter.
        It should be noted that the Johnson-Lindenstrauss lemma can yield
        very conservative estimates of the required number of components
as it makes no assumption on the structure of the dataset.
density : float in range ]0, 1], optional (default='auto')
Ratio of non-zero component in the random projection matrix.
If density = 'auto', the value is set to the minimum density
as recommended by Ping Li et al.: 1 / sqrt(n_features).
Use density = 1 / 3.0 if you want to reproduce the results from
Achlioptas, 2001.
eps : strictly positive float, optional, (default=0.1)
Parameter to control the quality of the embedding according to
the Johnson-Lindenstrauss lemma when n_components is set to
'auto'.
Smaller values lead to better embedding and higher number of
dimensions (n_components) in the target projection space.
dense_output : boolean, optional (default=False)
If True, ensure that the output of the random projection is a
dense numpy array even if the input and random projection matrix
are both sparse. In practice, if the number of components is
small the number of zero components in the projected data will
be very small and it will be more CPU and memory efficient to
use a dense representation.
If False, the projected data uses a sparse representation if
the input is sparse.
random_state : integer, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Attributes
----------
    n_components_ : int
Concrete number of components computed when n_components="auto".
components_ : CSR matrix with shape [n_components, n_features]
Random matrix used for the projection.
density_ : float in range 0.0 - 1.0
        Concrete density computed when density = "auto".
See Also
--------
GaussianRandomProjection
References
----------
.. [1] Ping Li, T. Hastie and K. W. Church, 2006,
"Very Sparse Random Projections".
http://www.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf
.. [2] D. Achlioptas, 2001, "Database-friendly random projections",
http://www.cs.ucsc.edu/~optas/papers/jl.pdf
"""
def __init__(self, n_components='auto', density='auto', eps=0.1,
dense_output=False, random_state=None):
super(SparseRandomProjection, self).__init__(
n_components=n_components,
eps=eps,
dense_output=dense_output,
random_state=random_state)
self.density = density
self.density_ = None
def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
random_state = check_random_state(self.random_state)
self.density_ = _check_density(self.density, n_features)
return sparse_random_matrix(n_components,
n_features,
density=self.density_,
random_state=random_state)
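# Illustrative sketch (editorial addition, not part of the original module):
# minimal end-to-end use of a random projection transformer on synthetic data.
def _example_random_projection_usage():
    rng = np.random.RandomState(42)
    X = rng.rand(100, 10000)
    transformer = SparseRandomProjection(eps=0.5, random_state=rng)
    X_new = transformer.fit_transform(X)
    # n_components='auto' picks the target dimensionality from the
    # Johnson-Lindenstrauss bound, so X_new has shape
    # (100, transformer.n_components_).
    return X_new.shape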
|
deberon/ansible-modules-extras
|
refs/heads/devel
|
packaging/os/pkgin.py
|
43
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Shaun Zinck
# Written by Shaun Zinck <shaun.zinck at gmail.com>
# Based on pacman module written by Afterburn <http://github.com/afterburn>
# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: pkgin
short_description: Package manager for SmartOS
description:
- Manages SmartOS packages
version_added: "1.0"
options:
name:
description:
- name of package to install/remove
required: true
state:
description:
- state of the package
choices: [ 'present', 'absent' ]
required: false
default: present
author: Shaun Zinck
notes: []
'''
EXAMPLES = '''
# install package foo
- pkgin: name=foo state=present
# remove package foo
- pkgin: name=foo state=absent
# remove packages foo and bar
- pkgin: name=foo,bar state=absent
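# install packages foo and bar in one task (illustrative; name accepts a comma-separated list)
- pkgin: name=foo,bar state=present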
'''
import json
import shlex
import os
import sys
import pipes
def query_package(module, pkgin_path, name, state="present"):
if state == "present":
rc, out, err = module.run_command("%s -y list | grep ^%s" % (pipes.quote(pkgin_path), pipes.quote(name)), use_unsafe_shell=True)
if rc == 0:
# At least one package with a package name that starts with ``name``
# is installed. For some cases this is not sufficient to determine
# whether the queried package is installed.
#
# E.g. for ``name='gcc47'``, ``gcc47`` not being installed, but
# ``gcc47-libs`` being installed, ``out`` would be:
#
# gcc47-libs-4.7.2nb4 The GNU Compiler Collection (GCC) support shared libraries.
#
# Multiline output is also possible, for example with the same query
# and both ``gcc47`` and ``gcc47-libs`` being installed:
#
# gcc47-libs-4.7.2nb4 The GNU Compiler Collection (GCC) support shared libraries.
# gcc47-4.7.2nb3 The GNU Compiler Collection (GCC) - 4.7 Release Series
# Loop over lines in ``out``
for line in out.split('\n'):
# Strip description
# (results in sth. like 'gcc47-libs-4.7.2nb4')
pkgname_with_version = line.split(' ')[0]
# Strip version
# (results in sth like 'gcc47-libs')
pkgname_without_version = '-'.join(pkgname_with_version.split('-')[:-1])
if name == pkgname_without_version:
return True
return False
def remove_packages(module, pkgin_path, packages):
remove_c = 0
# Using a for loop so that, in case of an error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, pkgin_path, package):
continue
rc, out, err = module.run_command("%s -y remove %s" % (pkgin_path, package))
if query_package(module, pkgin_path, package):
module.fail_json(msg="failed to remove %s: %s" % (package, out))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, pkgin_path, packages):
install_c = 0
for package in packages:
if query_package(module, pkgin_path, package):
continue
rc, out, err = module.run_command("%s -y install %s" % (pkgin_path, package))
if not query_package(module, pkgin_path, package):
module.fail_json(msg="failed to install %s: %s" % (package, out))
install_c += 1
if install_c > 0:
module.exit_json(changed=True, msg="present %s package(s)" % (install_c))
module.exit_json(changed=False, msg="package(s) already present")
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default="present", choices=["present","absent"]),
name = dict(aliases=["pkg"], required=True)))
pkgin_path = module.get_bin_path('pkgin', True, ['/opt/local/bin'])
p = module.params
pkgs = p["name"].split(",")
if p["state"] == "present":
install_packages(module, pkgin_path, pkgs)
elif p["state"] == "absent":
remove_packages(module, pkgin_path, pkgs)
# import module snippets
from ansible.module_utils.basic import *
main()
|
enclose-io/compiler
|
refs/heads/master
|
current/tools/specialize_node_d.py
|
6
|
#!/usr/bin/env python
#
# specialize_node_d.py output_file src/node.d flavor arch
#
# Specialize node.d for a given flavor (`freebsd`) and arch (`x64` or `ia32`)
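#
# Example invocation (illustrative):
#   specialize_node_d.py out/node.d src/node.d freebsd x64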
#
from __future__ import print_function
import re
import sys
if len(sys.argv) != 5:
print("usage: specialize_node_d.py outfile src/node.d flavor arch")
sys.exit(2)
outfile = open(sys.argv[1], 'w')
infile = open(sys.argv[2], 'r')
flavor = sys.argv[3]
arch = sys.argv[4]
model = r'curpsinfo->pr_dmodel == PR_MODEL_ILP32'
for line in infile:
if flavor == 'freebsd':
line = re.sub('procfs.d', 'psinfo.d', line)
if arch == 'x64':
line = re.sub(model, '0', line)
else:
line = re.sub(model, '1', line)
outfile.write(line)
|