| repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M ⌀) |
|---|---|---|---|---|
hirokiky/gargant.dispatch
|
refs/heads/master
|
gargant/dispatch/__init__.py
|
1
|
"""
t = lambda condition: True
f = lambda condition: False
tree = Node((t,),
children=[
Node((t,),
case='posts',
name='posts',
children=[
Node((f,),
case='post_detail',
name='post_detail',
),
Node((t,),
case='post_create',
name='post_create'
),
]),
Node((f,),
case='about',
name='about'),
])
"""
from gargant.dispatch.matching import (
method_matching,
path_matching,
)
class NotMatched(Exception):
pass
class Node(object):
def __init__(self, matchings, case=None, name='', children=[], adapter_factory=lambda x: lambda x: x):
self.matchings = matchings
self.case = case
self.name = name
self.children = children
self.parent = None
for child in self.children:
child.parent = self
self.adapter_factory = adapter_factory
def __call__(self, condition):
matched = list(map(lambda x: x(condition), self.matchings))
if all(matched):
self.matched = matched
self.adapter = self.adapter_factory(matched)
if self.children:
for node in self.children:
try:
return node(condition)
except NotMatched:
continue
return self
raise NotMatched
def __iter__(self):
return self
def next(self):
if not hasattr(self, '_processing_node'):
self._processing_node = self
else:
self._processing_node = self._processing_node.parent
if self._processing_node:
return self._processing_node
else:
raise StopIteration
__next__ = next
ENVIRON_MATCHED_NODE_NAME = 'gargant.dispatch.matched_node'
def make_wsgi_app(tree):
def wsgi_app(environ, start_response):
node = tree(environ)
environ[ENVIRON_MATCHED_NODE_NAME] = node
return node.case(environ, start_response)
return wsgi_app
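# --- Usage sketch (illustrative; not part of the original module) ---
# Matchings are plain callables over the WSGI environ, as in the docstring
# above; a matched leaf's `case` is expected to be a WSGI application.
if __name__ == '__main__':
    def posts_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'posts']
    tree = Node((lambda environ: True,), children=[
        Node((lambda environ: environ.get('PATH_INFO') == '/posts',),
             case=posts_app, name='posts'),
    ])
    app = make_wsgi_app(tree)
    # app(environ, start_response) now dispatches to posts_app for '/posts'
    # and records the matched Node under ENVIRON_MATCHED_NODE_NAME.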
|
resmo/ansible
|
refs/heads/devel
|
test/units/modules/cloud/linode/test_linode.py
|
77
|
from __future__ import (absolute_import, division, print_function)
import pytest
from ansible.modules.cloud.linode import linode
from units.modules.utils import set_module_args
if not linode.HAS_LINODE:
pytestmark = pytest.mark.skip('test_linode.py requires the `linode-python` module')
def test_name_is_a_required_parameter(api_key, auth):
with pytest.raises(SystemExit):
set_module_args({})
linode.main()
|
elbow-jason/learning-goat-django
|
refs/heads/master
|
lists/views.py
|
1
|
from django.shortcuts import render, redirect
from lists.models import Item, List
def home_page(request):
if request.method == 'POST':
list_ = List.objects.create()
Item.objects.create(
text=request.POST['item_text'],
list=list_
)
return redirect('/lists/the-only-list-in-the-world')
return render(request, 'home.html')
def view_list(request):
items = Item.objects.all()
return render(request, 'list.html', {'items': items})
def new_list(request):
list_ = List.objects.create()
Item.objects.create(text=request.POST['item_text'], list=list_)
return redirect('/lists/the-only-list-in-the-world')
|
farseerfc/pacman-fc
|
refs/heads/master
|
test/pacman/tests/upgrade057.py
|
28
|
self.description = "Upgrade a package that both provides and is a dependency"
lp1 = pmpkg("pkg1")
lp1.depends = ["pkg2", "imag3"]
self.addpkg2db("local", lp1)
lp2 = pmpkg("pkg2")
lp2.provides = ["imag3"]
self.addpkg2db("local", lp2)
p = pmpkg("pkg2", "1.0-2")
p.provides = ["imag3"]
self.addpkg(p)
self.args = "-U %s" % p.filename()
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_EXIST=pkg1")
self.addrule("PKG_VERSION=pkg2|1.0-2")
self.addrule("PKG_DEPENDS=pkg1|pkg2")
self.addrule("PKG_DEPENDS=pkg1|imag3")
|
petertodd/bitcoin
|
refs/heads/master
|
qa/rpc-tests/listtransactions.py
|
145
|
#!/usr/bin/env python
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
import json
import shutil
import subprocess
import tempfile
import traceback
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def check_array_result(object_array, to_match, expected):
"""
Pass in an array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
"""
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0:
raise AssertionError("No objects matched %s"%(str(to_match)))
def run_test(nodes):
# Simple send, 0 to 1:
txid = nodes[0].sendtoaddress(nodes[1].getnewaddress(), 0.1)
sync_mempools(nodes)
check_array_result(nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
check_array_result(nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
# mine a block, confirmations should change:
nodes[0].setgenerate(True, 1)
sync_blocks(nodes)
check_array_result(nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
check_array_result(nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
# send-to-self:
txid = nodes[0].sendtoaddress(nodes[0].getnewaddress(), 0.2)
check_array_result(nodes[0].listtransactions(),
{"txid":txid, "category":"send"},
{"amount":Decimal("-0.2")})
check_array_result(nodes[0].listtransactions(),
{"txid":txid, "category":"receive"},
{"amount":Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
send_to = { nodes[0].getnewaddress() : 0.11, nodes[1].getnewaddress() : 0.22,
nodes[0].getaccountaddress("from1") : 0.33, nodes[1].getaccountaddress("toself") : 0.44 }
txid = nodes[1].sendmany("", send_to)
sync_mempools(nodes)
check_array_result(nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.11")},
{"txid":txid} )
check_array_result(nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.11")},
{"txid":txid} )
check_array_result(nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.22")},
{"txid":txid} )
check_array_result(nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.22")},
{"txid":txid} )
check_array_result(nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.33")},
{"txid":txid} )
check_array_result(nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.33")},
{"txid":txid, "account" : "from1"} )
check_array_result(nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.44")},
{"txid":txid, "account" : ""} )
check_array_result(nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.44")},
{"txid":txid, "account" : "toself"} )
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing bitcoind/bitcoin-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
(options, args) = parser.parse_args()
os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
nodes = []
try:
print("Initializing test directory "+options.tmpdir)
if not os.path.isdir(options.tmpdir):
os.makedirs(options.tmpdir)
initialize_chain(options.tmpdir)
nodes = start_nodes(2, options.tmpdir)
connect_nodes(nodes[1], 0)
sync_blocks(nodes)
run_test(nodes)
success = True
except AssertionError as e:
print("Assertion failed: "+e.message)
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
if not options.nocleanup:
print("Cleaning up")
stop_nodes(nodes)
wait_bitcoinds()
shutil.rmtree(options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
if __name__ == '__main__':
main()
|
ferranti/watchdog
|
refs/heads/master
|
import/parse/earmarks.py
|
3
|
#!/usr/bin/env python
"""
A parser for the 2008 earmarks data (XLS format) from http://taxpayer.net/
This script depends on xls2list which will convert the excel file to a 2d array.
It then does some trivial parsing of each field and outputs the data in a few ways.
"""
__author__ = ['Alex Gourley <acgourley@gmail.com>',
'Aaron Swartz <me@aaronsw.com>']
EARMARK_FILE = '../data/crawl/taxpayer/bigkahuna.xls'
import sys, re
import web
import xls2list
fmt = (
'id',
'house_request', 'senate_request',
'prereduction_amt', 'final_amt', 'budget_request',
'description', 'city', 'county', 'state',
'bill', 'bill_section', 'bill_subsection', 'project_heading',
'house_member', 'house_party', 'house_state', 'district',
'senate_member', 'senate_party', 'senate_state',
'presidential', 'undisclosed', 'intended_recipient',
'notes'
)
def parse_row(row):
out = web.storage()
for n, item in enumerate(fmt):
out[item] = row[n]
out.house_member = (out.house_member or []) and [x.strip() for x in out.house_member.split(';')]
out.senate_member = (out.senate_member or []) and [x.strip() for x in out.senate_member.split(';')]
#out.state = (out.state or []) and [x.strip() for x in out.state.split(';')]
return out
def parse_file(fn):
"""Break down the xls into a 2d data array, stripping off first rows which do not have data."""
data = xls2list.xls2list(fn)
for n, row in enumerate(data[3:]):
r = parse_row(row)
# All of the earmarks have a description, stop when we finish all
# earmarks
if not r.description: break
# The IDs aren't remotely unique; map to something that is
r.id = n+1 # Let's start at 1 instead of 0
yield r
if __name__ == "__main__":
import tools
tools.export(parse_file(EARMARK_FILE))
|
shikil/sympy
|
refs/heads/master
|
sympy/physics/optics/tests/test_medium.py
|
70
|
from __future__ import division
from sympy import sqrt, simplify
from sympy.physics.optics import Medium
from sympy.abc import epsilon, mu
from sympy.physics.units import c, u0, e0, m, kg, s, A
def test_medium():
m1 = Medium('m1')
assert m1.intrinsic_impedance == sqrt(u0/e0)
assert m1.speed == 1/sqrt(e0*u0)
assert m1.refractive_index == c*sqrt(e0*u0)
assert m1.permittivity == e0
assert m1.permeability == u0
m2 = Medium('m2', epsilon, mu)
assert m2.intrinsic_impedance == sqrt(mu/epsilon)
assert m2.speed == 1/sqrt(epsilon*mu)
assert m2.refractive_index == c*sqrt(epsilon*mu)
assert m2.permittivity == epsilon
assert m2.permeability == mu
# Increasing electric permittivity and magnetic permeability
# by a small amount from their values in vacuum.
m3 = Medium('m3', 9.0*10**(-12)*s**4*A**2/(m**3*kg), 1.45*10**(-6)*kg*m/(A**2*s**2))
assert m3.refractive_index > m1.refractive_index
assert m3 > m1
# Decreasing electric permittivity and magnetic permeability
# by a small amount from their values in vacuum.
m4 = Medium('m4', 7.0*10**(-12)*s**4*A**2/(m**3*kg), 1.15*10**(-6)*kg*m/(A**2*s**2))
assert m4.refractive_index < m1.refractive_index
assert m4 < m1
m5 = Medium('m5', permittivity=710*10**(-12)*s**4*A**2/(m**3*kg), n=1.33)
assert abs(m5.intrinsic_impedance - 6.24845417765552*kg*m**2/(A**2*s**3)) \
< 1e-12*kg*m**2/(A**2*s**3)
assert abs(m5.speed - 225407863.157895*m/s) < 1e-6*m/s
assert abs(m5.refractive_index - 1.33000000000000) < 1e-12
assert abs(m5.permittivity - 7.1e-10*A**2*s**4/(kg*m**3)) \
< 1e-20*A**2*s**4/(kg*m**3)
assert abs(m5.permeability - 2.77206575232851e-8*kg*m/(A**2*s**2)) \
< 1e-20*kg*m/(A**2*s**2)
|
rednach/krill
|
refs/heads/master
|
test/test_business_rules_with_bad_realm_conf.py
|
18
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from shinken_test import *
class TestBusinessRulesBadRealmConf(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_business_rules_bad_realm_conf.cfg')
def test_bad_conf(self):
self.assertFalse(self.conf.conf_is_correct)
if __name__ == '__main__':
unittest.main()
|
rayluo/boto
|
refs/heads/develop
|
boto/support/layer1.py
|
151
|
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.support import exceptions
class SupportConnection(AWSQueryConnection):
"""
AWS Support
The AWS Support API reference is intended for programmers who need
detailed information about the AWS Support operations and data
types. This service enables you to manage your AWS Support cases
programmatically. It uses HTTP methods that return results in JSON
format.
The AWS Support service also exposes a set of `Trusted Advisor`_
features. You can retrieve a list of checks and their
descriptions, get check results, specify checks to refresh, and
get the refresh status of checks.
The following list describes the AWS Support case management
operations:
+ **Service names, issue categories, and available severity
levels.** The DescribeServices and DescribeSeverityLevels
operations return AWS service names, service codes, service
categories, and problem severity levels. You use these values when
you call the CreateCase operation.
+ **Case creation, case details, and case resolution.** The
CreateCase, DescribeCases, DescribeAttachment, and ResolveCase
operations create AWS Support cases, retrieve information about
cases, and resolve cases.
+ **Case communication.** The DescribeCommunications,
AddCommunicationToCase, and AddAttachmentsToSet operations
retrieve and add communications and attachments to AWS Support
cases.
The following list describes the operations available from the AWS
Support service for Trusted Advisor:
+ DescribeTrustedAdvisorChecks returns the list of checks that run
against your AWS resources.
+ Using the `CheckId` for a specific check returned by
DescribeTrustedAdvisorChecks, you can call
DescribeTrustedAdvisorCheckResult to obtain the results for the
check you specified.
+ DescribeTrustedAdvisorCheckSummaries returns summarized results
for one or more Trusted Advisor checks.
+ RefreshTrustedAdvisorCheck requests that Trusted Advisor rerun a
specified check.
+ DescribeTrustedAdvisorCheckRefreshStatuses reports the refresh
status of one or more checks.
For authentication of requests, AWS Support uses `Signature
Version 4 Signing Process`_.
See `About the AWS Support API`_ in the AWS Support User Guide for
information about how to use this service to create and manage
your support cases, and how to call Trusted Advisor for results of
checks on your resources.
"""
APIVersion = "2013-04-15"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "support.us-east-1.amazonaws.com"
ServiceName = "Support"
TargetPrefix = "AWSSupport_20130415"
ResponseError = JSONResponseError
_faults = {
"CaseCreationLimitExceeded": exceptions.CaseCreationLimitExceeded,
"AttachmentLimitExceeded": exceptions.AttachmentLimitExceeded,
"CaseIdNotFound": exceptions.CaseIdNotFound,
"DescribeAttachmentLimitExceeded": exceptions.DescribeAttachmentLimitExceeded,
"AttachmentSetIdNotFound": exceptions.AttachmentSetIdNotFound,
"InternalServerError": exceptions.InternalServerError,
"AttachmentSetExpired": exceptions.AttachmentSetExpired,
"AttachmentIdNotFound": exceptions.AttachmentIdNotFound,
"AttachmentSetSizeLimitExceeded": exceptions.AttachmentSetSizeLimitExceeded,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(SupportConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def add_attachments_to_set(self, attachments, attachment_set_id=None):
"""
Adds one or more attachments to an attachment set. If an
`AttachmentSetId` is not specified, a new attachment set is
created, and the ID of the set is returned in the response. If
an `AttachmentSetId` is specified, the attachments are added
to the specified set, if it exists.
An attachment set is a temporary container for attachments
that are to be added to a case or case communication. The set
is available for one hour after it is created; the
`ExpiryTime` returned in the response indicates when the set
expires. The maximum number of attachments in a set is 3, and
the maximum size of any attachment in the set is 5 MB.
:type attachment_set_id: string
:param attachment_set_id: The ID of the attachment set. If an
`AttachmentSetId` is not specified, a new attachment set is
created, and the ID of the set is returned in the response. If an
`AttachmentSetId` is specified, the attachments are added to the
specified set, if it exists.
:type attachments: list
:param attachments: One or more attachments to add to the set. The
limit is 3 attachments per set, and the size limit is 5 MB per
attachment.
"""
params = {'attachments': attachments, }
if attachment_set_id is not None:
params['attachmentSetId'] = attachment_set_id
return self.make_request(action='AddAttachmentsToSet',
body=json.dumps(params))
def add_communication_to_case(self, communication_body, case_id=None,
cc_email_addresses=None,
attachment_set_id=None):
"""
Adds additional customer communication to an AWS Support case.
You use the `CaseId` value to identify the case to add
communication to. You can list a set of email addresses to
copy on the communication using the `CcEmailAddresses` value.
The `CommunicationBody` value contains the text of the
communication.
The response indicates the success or failure of the request.
This operation implements a subset of the behavior on the AWS
Support `Your Support Cases`_ web form.
:type case_id: string
:param case_id: The AWS Support case ID requested or returned in the
call. The case ID is an alphanumeric string formatted as shown in
this example: case-12345678910-2013-c4c1d2bf33c5cf47
:type communication_body: string
:param communication_body: The body of an email communication to add to
the support case.
:type cc_email_addresses: list
:param cc_email_addresses: The email addresses in the CC line of an
email to be added to the support case.
:type attachment_set_id: string
:param attachment_set_id: The ID of a set of one or more attachments
for the communication to add to the case. Create the set by calling
AddAttachmentsToSet
"""
params = {'communicationBody': communication_body, }
if case_id is not None:
params['caseId'] = case_id
if cc_email_addresses is not None:
params['ccEmailAddresses'] = cc_email_addresses
if attachment_set_id is not None:
params['attachmentSetId'] = attachment_set_id
return self.make_request(action='AddCommunicationToCase',
body=json.dumps(params))
def create_case(self, subject, communication_body, service_code=None,
severity_code=None, category_code=None,
cc_email_addresses=None, language=None, issue_type=None,
attachment_set_id=None):
"""
Creates a new case in the AWS Support Center. This operation
is modeled on the behavior of the AWS Support Center `Open a
new case`_ page. Its parameters require you to specify the
following information:
#. **IssueType.** The type of issue for the case. You can
specify either "customer-service" or "technical." If you do
not indicate a value, the default is "technical."
#. **ServiceCode.** The code for an AWS service. You obtain
the `ServiceCode` by calling DescribeServices.
#. **CategoryCode.** The category for the service defined for
the `ServiceCode` value. You also obtain the category code for
a service by calling DescribeServices. Each AWS service
defines its own set of category codes.
#. **SeverityCode.** A value that indicates the urgency of the
case, which in turn determines the response time according to
your service level agreement with AWS Support. You obtain the
SeverityCode by calling DescribeSeverityLevels.
#. **Subject.** The **Subject** field on the AWS Support
Center `Open a new case`_ page.
#. **CommunicationBody.** The **Description** field on the AWS
Support Center `Open a new case`_ page.
#. **AttachmentSetId.** The ID of a set of attachments that
has been created by using AddAttachmentsToSet.
#. **Language.** The human language in which AWS Support
handles the case. English and Japanese are currently
supported.
#. **CcEmailAddresses.** The AWS Support Center **CC** field
on the `Open a new case`_ page. You can list email addresses
to be copied on any correspondence about the case. The account
that opens the case is already identified by passing the AWS
Credentials in the HTTP POST method or in a method or function
call from one of the programming languages supported by an
`AWS SDK`_.
A successful CreateCase request returns an AWS Support case
number. Case numbers are used by the DescribeCases operation
to retrieve existing AWS Support cases.
:type subject: string
:param subject: The title of the AWS Support case.
:type service_code: string
:param service_code: The code for the AWS service returned by the call
to DescribeServices.
:type severity_code: string
:param severity_code: The code for the severity level returned by the
call to DescribeSeverityLevels.
:type category_code: string
:param category_code: The category of problem for the AWS Support case.
:type communication_body: string
:param communication_body: The communication body text when you create
an AWS Support case by calling CreateCase.
:type cc_email_addresses: list
:param cc_email_addresses: A list of email addresses that AWS Support
copies on case correspondence.
:type language: string
:param language: The ISO 639-1 code for the language in which AWS
provides support. AWS Support currently supports English ("en") and
Japanese ("ja"). Language parameters must be passed explicitly for
operations that take them.
:type issue_type: string
:param issue_type: The type of issue for the case. You can specify
either "customer-service" or "technical." If you do not indicate a
value, the default is "technical."
:type attachment_set_id: string
:param attachment_set_id: The ID of a set of one or more attachments
for the case. Create the set by using AddAttachmentsToSet.
"""
params = {
'subject': subject,
'communicationBody': communication_body,
}
if service_code is not None:
params['serviceCode'] = service_code
if severity_code is not None:
params['severityCode'] = severity_code
if category_code is not None:
params['categoryCode'] = category_code
if cc_email_addresses is not None:
params['ccEmailAddresses'] = cc_email_addresses
if language is not None:
params['language'] = language
if issue_type is not None:
params['issueType'] = issue_type
if attachment_set_id is not None:
params['attachmentSetId'] = attachment_set_id
return self.make_request(action='CreateCase',
body=json.dumps(params))
def describe_attachment(self, attachment_id):
"""
Returns the attachment that has the specified ID. Attachment
IDs are generated by the case management system when you add
an attachment to a case or case communication. Attachment IDs
are returned in the AttachmentDetails objects that are
returned by the DescribeCommunications operation.
:type attachment_id: string
:param attachment_id: The ID of the attachment to return. Attachment
IDs are returned by the DescribeCommunications operation.
"""
params = {'attachmentId': attachment_id, }
return self.make_request(action='DescribeAttachment',
body=json.dumps(params))
def describe_cases(self, case_id_list=None, display_id=None,
after_time=None, before_time=None,
include_resolved_cases=None, next_token=None,
max_results=None, language=None,
include_communications=None):
"""
Returns a list of cases that you specify by passing one or
more case IDs. In addition, you can filter the cases by date
by setting values for the `AfterTime` and `BeforeTime` request
parameters.
Case data is available for 12 months after creation. If a case
was created more than 12 months ago, a request for data might
cause an error.
The response returns the following in JSON format:
#. One or more CaseDetails data types.
#. One or more `NextToken` values, which specify where to
paginate the returned records represented by the `CaseDetails`
objects.
:type case_id_list: list
:param case_id_list: A list of ID numbers of the support cases you want
returned. The maximum number of cases is 100.
:type display_id: string
:param display_id: The ID displayed for a case in the AWS Support
Center user interface.
:type after_time: string
:param after_time: The start date for a filtered date search on support
case communications. Case communications are available for 12
months after creation.
:type before_time: string
:param before_time: The end date for a filtered date search on support
case communications. Case communications are available for 12
months after creation.
:type include_resolved_cases: boolean
:param include_resolved_cases: Specifies whether resolved support cases
should be included in the DescribeCases results. The default is
false .
:type next_token: string
:param next_token: A resumption point for pagination.
:type max_results: integer
:param max_results: The maximum number of results to return before
paginating.
:type language: string
:param language: The ISO 639-1 code for the language in which AWS
provides support. AWS Support currently supports English ("en") and
Japanese ("ja"). Language parameters must be passed explicitly for
operations that take them.
:type include_communications: boolean
:param include_communications: Specifies whether communications should
be included in the DescribeCases results. The default is true .
"""
params = {}
if case_id_list is not None:
params['caseIdList'] = case_id_list
if display_id is not None:
params['displayId'] = display_id
if after_time is not None:
params['afterTime'] = after_time
if before_time is not None:
params['beforeTime'] = before_time
if include_resolved_cases is not None:
params['includeResolvedCases'] = include_resolved_cases
if next_token is not None:
params['nextToken'] = next_token
if max_results is not None:
params['maxResults'] = max_results
if language is not None:
params['language'] = language
if include_communications is not None:
params['includeCommunications'] = include_communications
return self.make_request(action='DescribeCases',
body=json.dumps(params))
def describe_communications(self, case_id, before_time=None,
after_time=None, next_token=None,
max_results=None):
"""
Returns communications (and attachments) for one or more
support cases. You can use the `AfterTime` and `BeforeTime`
parameters to filter by date. You can use the `CaseId`
parameter to restrict the results to a particular case.
Case data is available for 12 months after creation. If a case
was created more than 12 months ago, a request for data might
cause an error.
You can use the `MaxResults` and `NextToken` parameters to
control the pagination of the result set. Set `MaxResults` to
the number of cases you want displayed on each page, and use
`NextToken` to specify the resumption of pagination.
:type case_id: string
:param case_id: The AWS Support case ID requested or returned in the
call. The case ID is an alphanumeric string formatted as shown in
this example: case-12345678910-2013-c4c1d2bf33c5cf47
:type before_time: string
:param before_time: The end date for a filtered date search on support
case communications. Case communications are available for 12
months after creation.
:type after_time: string
:param after_time: The start date for a filtered date search on support
case communications. Case communications are available for 12
months after creation.
:type next_token: string
:param next_token: A resumption point for pagination.
:type max_results: integer
:param max_results: The maximum number of results to return before
paginating.
"""
params = {'caseId': case_id, }
if before_time is not None:
params['beforeTime'] = before_time
if after_time is not None:
params['afterTime'] = after_time
if next_token is not None:
params['nextToken'] = next_token
if max_results is not None:
params['maxResults'] = max_results
return self.make_request(action='DescribeCommunications',
body=json.dumps(params))
def describe_services(self, service_code_list=None, language=None):
"""
Returns the current list of AWS services and a list of service
categories that applies to each one. You then use service
names and categories in your CreateCase requests. Each AWS
service has its own set of categories.
The service codes and category codes correspond to the values
that are displayed in the **Service** and **Category** drop-
down lists on the AWS Support Center `Open a new case`_ page.
The values in those fields, however, do not necessarily match
the service codes and categories returned by the
`DescribeServices` request. Always use the service codes and
categories obtained programmatically. This practice ensures
that you always have the most recent set of service and
category codes.
:type service_code_list: list
:param service_code_list: A JSON-formatted list of service codes
available for AWS services.
:type language: string
:param language: The ISO 639-1 code for the language in which AWS
provides support. AWS Support currently supports English ("en") and
Japanese ("ja"). Language parameters must be passed explicitly for
operations that take them.
"""
params = {}
if service_code_list is not None:
params['serviceCodeList'] = service_code_list
if language is not None:
params['language'] = language
return self.make_request(action='DescribeServices',
body=json.dumps(params))
def describe_severity_levels(self, language=None):
"""
Returns the list of severity levels that you can assign to an
AWS Support case. The severity level for a case is also a
field in the CaseDetails data type included in any CreateCase
request.
:type language: string
:param language: The ISO 639-1 code for the language in which AWS
provides support. AWS Support currently supports English ("en") and
Japanese ("ja"). Language parameters must be passed explicitly for
operations that take them.
"""
params = {}
if language is not None:
params['language'] = language
return self.make_request(action='DescribeSeverityLevels',
body=json.dumps(params))
def describe_trusted_advisor_check_refresh_statuses(self, check_ids):
"""
Returns the refresh status of the Trusted Advisor checks that
have the specified check IDs. Check IDs can be obtained by
calling DescribeTrustedAdvisorChecks.
:type check_ids: list
:param check_ids: The IDs of the Trusted Advisor checks.
"""
params = {'checkIds': check_ids, }
return self.make_request(action='DescribeTrustedAdvisorCheckRefreshStatuses',
body=json.dumps(params))
def describe_trusted_advisor_check_result(self, check_id, language=None):
"""
Returns the results of the Trusted Advisor check that has the
specified check ID. Check IDs can be obtained by calling
DescribeTrustedAdvisorChecks.
The response contains a TrustedAdvisorCheckResult object,
which contains these three objects:
+ TrustedAdvisorCategorySpecificSummary
+ TrustedAdvisorResourceDetail
+ TrustedAdvisorResourcesSummary
In addition, the response contains these fields:
+ **Status.** The alert status of the check: "ok" (green),
"warning" (yellow), "error" (red), or "not_available".
+ **Timestamp.** The time of the last refresh of the check.
+ **CheckId.** The unique identifier for the check.
:type check_id: string
:param check_id: The unique identifier for the Trusted Advisor check.
:type language: string
:param language: The ISO 639-1 code for the language in which AWS
provides support. AWS Support currently supports English ("en") and
Japanese ("ja"). Language parameters must be passed explicitly for
operations that take them.
"""
params = {'checkId': check_id, }
if language is not None:
params['language'] = language
return self.make_request(action='DescribeTrustedAdvisorCheckResult',
body=json.dumps(params))
def describe_trusted_advisor_check_summaries(self, check_ids):
"""
Returns the summaries of the results of the Trusted Advisor
checks that have the specified check IDs. Check IDs can be
obtained by calling DescribeTrustedAdvisorChecks.
The response contains an array of TrustedAdvisorCheckSummary
objects.
:type check_ids: list
:param check_ids: The IDs of the Trusted Advisor checks.
"""
params = {'checkIds': check_ids, }
return self.make_request(action='DescribeTrustedAdvisorCheckSummaries',
body=json.dumps(params))
def describe_trusted_advisor_checks(self, language):
"""
Returns information about all available Trusted Advisor
checks, including name, ID, category, description, and
metadata. You must specify a language code; English ("en") and
Japanese ("ja") are currently supported. The response contains
a TrustedAdvisorCheckDescription for each check.
:type language: string
:param language: The ISO 639-1 code for the language in which AWS
provides support. AWS Support currently supports English ("en") and
Japanese ("ja"). Language parameters must be passed explicitly for
operations that take them.
"""
params = {'language': language, }
return self.make_request(action='DescribeTrustedAdvisorChecks',
body=json.dumps(params))
def refresh_trusted_advisor_check(self, check_id):
"""
Requests a refresh of the Trusted Advisor check that has the
specified check ID. Check IDs can be obtained by calling
DescribeTrustedAdvisorChecks.
The response contains a RefreshTrustedAdvisorCheckResult
object, which contains these fields:
+ **Status.** The refresh status of the check: "none",
"enqueued", "processing", "success", or "abandoned".
+ **MillisUntilNextRefreshable.** The amount of time, in
milliseconds, until the check is eligible for refresh.
+ **CheckId.** The unique identifier for the check.
:type check_id: string
:param check_id: The unique identifier for the Trusted Advisor check.
"""
params = {'checkId': check_id, }
return self.make_request(action='RefreshTrustedAdvisorCheck',
body=json.dumps(params))
def resolve_case(self, case_id=None):
"""
Takes a `CaseId` and returns the initial state of the case
along with the state of the case after the call to ResolveCase
completed.
:type case_id: string
:param case_id: The AWS Support case ID requested or returned in the
call. The case ID is an alphanumeric string formatted as shown in
this example: case-12345678910-2013-c4c1d2bf33c5cf47
"""
params = {}
if case_id is not None:
params['caseId'] = case_id
return self.make_request(action='ResolveCase',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
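# Usage sketch (illustrative only; assumes AWS credentials are configured for boto):
#
#   from boto.support.layer1 import SupportConnection
#   conn = SupportConnection()
#   case = conn.create_case(subject='Example case',
#                           communication_body='Opened via the API',
#                           issue_type='customer-service')
#   conn.describe_cases(case_id_list=[case['caseId']])
#   conn.resolve_case(case_id=case['caseId'])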
|
amanikamail/flexx
|
refs/heads/master
|
make/__main__.py
|
21
|
# License: consider this public domain
from __future__ import absolute_import, division, print_function
import sys
import os
import os.path as op
THIS_DIR = op.dirname(op.abspath(__file__))
ROOT_DIR = op.dirname(THIS_DIR)
# Setup paths
os.chdir(ROOT_DIR)
sys.path.insert(0, ROOT_DIR)
if 'make' in sys.path:
sys.path.remove('make')
# Import __init__ with project specific dirs
import make
assert ROOT_DIR == make.ROOT_DIR
def run(command, *args):
""" Run command with specified args.
"""
# Import the module that defines the command
if not op.isfile(op.join(THIS_DIR, command + '.py')):
sys.exit('Invalid command: %r' % command)
makemodule = __import__('make.'+command)
# Get the corresponding function
m = getattr(makemodule, command)
f = getattr(m, command, None)
# Call or fail
if f is None:
sys.exit('Module %s.py does not contain function %s().' %
(command, command))
f(*args)
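# For example (illustrative): `python -m make help` ends up in run('help'), which
# imports make/help.py and calls its help() function.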
def main():
if len(sys.argv) == 1:
run('help')
else:
command = sys.argv[1].strip()
run(command, *sys.argv[2:])
# Put some funcs in make namespace
make.run = run
main()
|
wangscript/libjingle-1
|
refs/heads/master
|
trunk/tools/clang/scripts/test_tool.py
|
5
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test harness for chromium clang tools."""
import difflib
import glob
import json
import os
import os.path
import subprocess
import shutil
import sys
def _GenerateCompileCommands(files):
"""Returns a JSON string containing a compilation database for the input."""
return json.dumps([{'directory': '.',
'command': 'clang++ -fsyntax-only -c %s' % f,
'file': f} for f in files], indent=2)
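# For example (illustrative), _GenerateCompileCommands(['foo-actual.cc']) yields a
# JSON list like:
#   [{"directory": ".", "command": "clang++ -fsyntax-only -c foo-actual.cc",
#     "file": "foo-actual.cc"}]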
def _NumberOfTestsToString(tests):
"""Returns an English describing the number of tests."""
return "%d test%s" % (tests, 's' if tests != 1 else '')
def main(argv):
if len(argv) < 1:
print 'Usage: test_tool.py <clang tool>'
print ' <clang tool> is the clang tool to be tested.'
sys.exit(1)
tool_to_test = argv[0]
tools_clang_scripts_directory = os.path.dirname(os.path.realpath(__file__))
tools_clang_directory = os.path.dirname(tools_clang_scripts_directory)
test_directory_for_tool = os.path.join(
tools_clang_directory, tool_to_test, 'tests')
compile_database = os.path.join(test_directory_for_tool,
'compile_commands.json')
source_files = glob.glob(os.path.join(test_directory_for_tool,
'*-original.cc'))
actual_files = ['-'.join([source_file.rsplit('-', 2)[0], 'actual.cc'])
for source_file in source_files]
expected_files = ['-'.join([source_file.rsplit('-', 2)[0], 'expected.cc'])
for source_file in source_files]
try:
# Set up the test environment.
for source, actual in zip(source_files, actual_files):
shutil.copyfile(source, actual)
# Stage the test files in the git index. If they aren't staged, then
# run_tools.py will skip them when applying replacements.
args = ['git', 'add']
args.extend(actual_files)
subprocess.check_call(args)
# Generate a temporary compilation database to run the tool over.
with open(compile_database, 'w') as f:
f.write(_GenerateCompileCommands(actual_files))
args = ['python',
os.path.join(tools_clang_scripts_directory, 'run_tool.py'),
tool_to_test,
test_directory_for_tool]
args.extend(actual_files)
run_tool = subprocess.Popen(args, stdout=subprocess.PIPE)
stdout, _ = run_tool.communicate()
if run_tool.returncode != 0:
print 'run_tool failed:\n%s' % stdout
sys.exit(1)
passed = 0
failed = 0
for expected, actual in zip(expected_files, actual_files):
print '[ RUN ] %s' % os.path.relpath(actual)
expected_output = actual_output = None
with open(expected, 'r') as f:
expected_output = f.readlines()
with open(actual, 'r') as f:
actual_output = f.readlines()
if actual_output != expected_output:
print '[ FAILED ] %s' % os.path.relpath(actual)
failed += 1
for line in difflib.unified_diff(expected_output, actual_output,
fromfile=os.path.relpath(expected),
tofile=os.path.relpath(actual)):
sys.stdout.write(line)
# Don't clean up the file on failure, so the results can be referenced
# more easily.
continue
print '[ OK ] %s' % os.path.relpath(actual)
passed += 1
os.remove(actual)
if failed == 0:
os.remove(compile_database)
print '[==========] %s ran.' % _NumberOfTestsToString(len(source_files))
if passed > 0:
print '[ PASSED ] %s.' % _NumberOfTestsToString(passed)
if failed > 0:
print '[ FAILED ] %s.' % _NumberOfTestsToString(failed)
finally:
# No matter what, unstage the git changes we made earlier to avoid polluting
# the index.
args = ['git', 'reset', '--quiet', 'HEAD']
args.extend(actual_files)
subprocess.call(args)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
victorywang80/Maintenance
|
refs/heads/master
|
saltstack/src/tests/integration/modules/decorators.py
|
1
|
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
class DecoratorTest(integration.ModuleCase):
def test_module(self):
self.assertTrue(
self.run_function(
'runtests_decorators.working_function'
)
)
def not_test_depends(self):
ret = self.run_function('runtests_decorators.depends')
self.assertTrue(ret['ret'])
self.assertTrue(type(ret['time']) == float)
def test_missing_depends(self):
self.assertIn(
'is not available',
self.run_function('runtests_decorators.missing_depends'
)
)
def not_test_depends_will_fallback(self):
ret = self.run_function('runtests_decorators.depends_will_fallback')
self.assertTrue(ret['ret'])
self.assertTrue(type(ret['time']) == float)
def test_missing_depends_again(self):
self.assertIn(
'fallback',
self.run_function(
'runtests_decorators.missing_depends_will_fallback'
)
)
if __name__ == '__main__':
from integration import run_tests
run_tests(DecoratorTest)
|
jflamant/sphericalEMC
|
refs/heads/master
|
dataGeneration/generateSamplesShell.py
|
1
|
# file: generateSamplesShell.py
#
# This code generates a given number of patterns, on a specified shell. The
# samples are obtained by Poisson realizations of the underlying intensity.
# The theoretical intensity on the shell (continuous) is to be calculated
# using 'compute3Dintensityshell.py'.
#
# Copyright (c) J. Flamant, April 2016.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, please visit http://www.gnu.org
import numpy as np
import healpy as hp
import matplotlib.pyplot as plt
from math import pi
def sph2cart(qi, theta, phi):
xi = qi*np.sin(theta)*np.cos(phi)
yi = qi*np.sin(theta)*np.sin(phi)
zi = qi*np.cos(theta)
return xi, yi, zi
def cart2sph(xi, yi, zi):
qi = np.sqrt(xi**2+yi**2+zi**2)
theta = np.arccos(zi/qi)
phi = np.arctan2(yi, xi)
return qi, theta, phi
def randS3(N=100):
'''
Generates N samples from the uniform distribution on S^3
Input :
- N: size of the sample
Output:
- X is a 4 x N array with columns being samples from Unif. dist on S^3
See Shoemake, K., 1992, July. Uniform random rotations. In Graphics Gems
III (pp. 124-132). Academic Press Professional, Inc..
'''
X = np.zeros((4, N))
x0 = np.random.rand(1, N)
theta1 = np.random.rand(1, N)*2*pi
theta2 = np.random.rand(1, N)*2*pi
X[0, :] = np.sin(theta1)*np.sqrt(1-x0)
X[1, :] = np.cos(theta1)*np.sqrt(1-x0)
X[2, :] = np.sin(theta2)*np.sqrt(x0)
X[3, :] = np.cos(theta2)*np.sqrt(x0)
return X
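# Sanity check (illustrative): every column is a unit quaternion, because the
# squared norm is (1-x0)*(sin^2+cos^2) + x0*(sin^2+cos^2) = 1, e.g.:
#   assert np.allclose(np.linalg.norm(randS3(10), axis=0), 1.0)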
def quaternionMatrix(quat):
''' Converts a quaternion to a 3x3 matrix'''
q0 = quat[0]
q1 = quat[1]
q2 = quat[2]
q3 = quat[3]
R = np.array([[1-2*q1**2-2*q2**2, 2*(q0*q1 - q2*q3), 2*(q0*q2+q1*q3)],
[2*(q0*q1 + q2*q3) ,1-2*q0**2-2*q2**2, 2*(q1*q2-q0*q3)],
[2*(q0*q2 - q1*q3),2*(q0*q3 + q2*q1),1-2*q0**2-2*q1**2]])
return R
def rotate(x, y, z, q):
R = quaternionMatrix(q)
xr = R[0, 0]*x+R[0, 1]*y+R[0, 2]*z
yr = R[1, 0]*x+R[1, 1]*y+R[1, 2]*z
zr = R[2, 0]*x+R[2, 1]*y+R[2, 2]*z
return xr, yr, zr
def generateDataFromShell(shellIndex, nbSamples):
'''
This function generates nbSamples Poisson samples from the intensity
defined on the shellIndex-th shell.
Default is the 1REI biomolecule. Intensities were calculated numerically
on a HEALPix grid of parameter nside = 128.
Parameters:
- shellIndex: int, shell index of the considered shell
- nbSamples: int, number of desired samples
Return: Npix_dec x nbSamples real matrix.
'''
# load intensity function
IShell = np.load('path_to_shells_intensities/'+str(shellIndex)+'.npy')
nside = 128
# define detector characteristics and experimental conditions
wavelength = 2 # in Angstroms
zD = 1e-2 # detector distance, in meters
pw = 3.38e-5 # pixel size, in meters
qmax = 4*pi/wavelength
gridsize = 512 # number of pixels in one direction (total size of the detector 1024*1024)
qmax_grid = qmax*np.sin(0.5*np.arctan(pw*gridsize/zD)) #largest value of q on the detector
Deltaq = qmax*np.sin(0.5*np.arctan(pw*1/zD)) # pixel spacing at the origin
q_shell = qmax*np.sin(0.5*np.arctan(pw*shellIndex/zD)) # q value at the shellIndex given
Npix_dec = int(np.ceil(2*pi*q_shell/Deltaq)) # Corresponding number of pixels
# coords detector in reciprocal space
qdec = np.ones(Npix_dec)*q_shell
thetadec = pi/2-np.arcsin(qdec/qmax)
phidec = np.linspace(0, 2*pi, Npix_dec+1)[:Npix_dec]
xdec, ydec, zdec = sph2cart(qdec, thetadec, phidec)
# create output array
Myk = np.zeros((Npix_dec, nbSamples))
# generate nbSamples uniform rotations in SO(3)
quatList = randS3(nbSamples)
# Get the samples
for k in range(nbSamples):
xr, yr, zr = rotate(xdec, ydec, zdec, quatList[:, k])
q, theta, phi = cart2sph(xr, yr, zr)
sampledI = hp.get_interp_val(IShell, theta, phi)
Myk[:, k] = np.random.poisson(sampledI)
return Myk
np.random.seed(1) # for reproducibility
# generate 1000 'patterns', from shellIndex=29
Myk = generateDataFromShell(29, 1000)
plt.plot(Myk)
plt.show()
#np.save('shell7Patterns1000.npy', Myk)
|
lmprice/ansible
|
refs/heads/devel
|
lib/ansible/modules/remote_management/manageiq/manageiq_provider.py
|
53
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Daniel Korn <korndaniel1@gmail.com>
# (c) 2017, Yaacov Zamir <yzamir@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
module: manageiq_provider
short_description: Management of providers in ManageIQ.
extends_documentation_fragment: manageiq
version_added: '2.4'
author: Daniel Korn (@dkorn)
description:
- The manageiq_provider module supports adding, updating, and deleting providers in ManageIQ.
options:
state:
description:
- absent - provider should not exist, present - provider should be present, refresh - provider will be refreshed
choices: ['absent', 'present', 'refresh']
default: 'present'
name:
description: The provider's name.
required: true
type:
description: The provider's type.
required: true
choices: ['Openshift', 'Amazon', 'oVirt', 'VMware', 'Azure', 'Director', 'OpenStack', 'GCE']
zone:
description: The ManageIQ zone name that will manage the provider.
default: 'default'
provider_region:
description: The provider region name to connect to (e.g. AWS region for Amazon).
host_default_vnc_port_start:
description: The first port in the host VNC range. defaults to None.
version_added: "2.5"
host_default_vnc_port_end:
description: The last port in the host VNC range. defaults to None.
version_added: "2.5"
subscription:
description: Microsoft Azure subscription ID. defaults to None.
version_added: "2.5"
project:
description: Google Compute Engine Project ID. defaults to None.
version_added: "2.5"
azure_tenant_id:
description: Tenant ID. defaults to None.
version_added: "2.5"
aliases: [ keystone_v3_domain_id ]
tenant_mapping_enabled:
type: bool
default: 'no'
description: Whether to enable mapping of existing tenants. defaults to False.
version_added: "2.5"
api_version:
description: The OpenStack Keystone API version. defaults to None.
choices: ['v2', 'v3']
version_added: "2.5"
provider:
description: Default endpoint connection information, required if state is true.
suboptions:
hostname:
description: The provider's api hostname.
required: true
port:
description: The provider's api port.
userid:
description: Provider's api endpoint authentication userid. defaults to None.
password:
description: Provider's api endpoint authentication password. defaults to None.
auth_key:
description: Provider's api endpoint authentication bearer token. defaults to None.
verify_ssl:
description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True.
type: bool
default: 'yes'
security_protocol:
description: How SSL certificates should be used for HTTPS requests. defaults to None.
choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl']
certificate_authority:
description: The CA bundle string with custom certificates. defaults to None.
metrics:
description: Metrics endpoint connection information.
suboptions:
hostname:
description: The provider's api hostname.
required: true
port:
description: The provider's api port.
userid:
description: Provider's api endpoint authentication userid. defaults to None.
password:
description: Provider's api endpoint authentication password. defaults to None.
auth_key:
description: Provider's api endpoint authentication bearer token. defaults to None.
verify_ssl:
description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True.
type: bool
default: 'yes'
security_protocol:
choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl']
description: How SSL certificates should be used for HTTPS requests. defaults to None.
certificate_authority:
description: The CA bundle string with custom certificates. defaults to None.
path:
description: Database name for oVirt metrics. Defaults to ovirt_engine_history.
default: ovirt_engine_history
alerts:
description: Alerts endpoint connection information.
suboptions:
hostname:
description: The provider's api hostname.
required: true
port:
description: The provider's api port.
userid:
description: Provider's api endpoint authentication userid. defaults to None.
password:
description: Provider's api endpoint authentication password. defaults to None.
auth_key:
description: Provider's api endpoint authentication bearer token. defaults to None.
verify_ssl:
description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True.
default: true
security_protocol:
choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation']
description: How SSL certificates should be used for HTTPS requests. defaults to None.
certificate_authority:
description: The CA bundle string with custom certificates. defaults to None.
ssh_keypair:
description: SSH key pair used for SSH connections to all hosts in this provider.
version_added: "2.5"
suboptions:
hostname:
description: Director hostname.
required: true
userid:
description: SSH username.
auth_key:
description: SSH private key.
'''
EXAMPLES = '''
- name: Create a new provider in ManageIQ ('Hawkular' metrics)
manageiq_provider:
name: 'EngLab'
type: 'Openshift'
state: 'present'
provider:
auth_key: 'topSecret'
hostname: 'example.com'
port: 8443
verify_ssl: true
security_protocol: 'ssl-with-validation-custom-ca'
certificate_authority: |
-----BEGIN CERTIFICATE-----
FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
-----END CERTIFICATE-----
metrics:
auth_key: 'topSecret'
role: 'hawkular'
hostname: 'example.com'
port: 443
verify_ssl: true
security_protocol: 'ssl-with-validation-custom-ca'
certificate_authority: |
-----BEGIN CERTIFICATE-----
FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
-----END CERTIFICATE-----
manageiq_connection:
url: 'https://127.0.0.1:80'
username: 'admin'
password: 'password'
verify_ssl: true
- name: Update an existing provider named 'EngLab' (defaults to 'Prometheus' metrics)
manageiq_provider:
name: 'EngLab'
type: 'Openshift'
state: 'present'
provider:
auth_key: 'topSecret'
hostname: 'next.example.com'
port: 8443
verify_ssl: true
security_protocol: 'ssl-with-validation-custom-ca'
certificate_authority: |
-----BEGIN CERTIFICATE-----
FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
-----END CERTIFICATE-----
metrics:
auth_key: 'topSecret'
hostname: 'next.example.com'
port: 443
verify_ssl: true
security_protocol: 'ssl-with-validation-custom-ca'
certificate_authority: |
-----BEGIN CERTIFICATE-----
FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
-----END CERTIFICATE-----
manageiq_connection:
url: 'https://127.0.0.1'
username: 'admin'
password: 'password'
verify_ssl: true
- name: Delete a provider in ManageIQ
manageiq_provider:
name: 'EngLab'
type: 'Openshift'
state: 'absent'
manageiq_connection:
url: 'https://127.0.0.1'
username: 'admin'
password: 'password'
verify_ssl: true
- name: Create a new Amazon provider in ManageIQ using token authentication
manageiq_provider:
name: 'EngAmazon'
type: 'Amazon'
state: 'present'
provider:
hostname: 'amazon.example.com'
userid: 'hello'
password: 'world'
manageiq_connection:
url: 'https://127.0.0.1'
token: 'VeryLongToken'
verify_ssl: true
- name: Create a new oVirt provider in ManageIQ
manageiq_provider:
name: 'RHEV'
type: 'oVirt'
state: 'present'
provider:
hostname: 'rhev01.example.com'
userid: 'admin@internal'
password: 'password'
verify_ssl: true
certificate_authority: |
-----BEGIN CERTIFICATE-----
FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
-----END CERTIFICATE-----
metrics:
hostname: 'metrics.example.com'
path: 'ovirt_engine_history'
userid: 'user_id_metrics'
password: 'password_metrics'
verify_ssl: true
certificate_authority: |
-----BEGIN CERTIFICATE-----
FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
-----END CERTIFICATE-----
manageiq_connection:
url: 'https://127.0.0.1'
username: 'admin'
password: 'password'
verify_ssl: true
- name: Create a new VMware provider in ManageIQ
manageiq_provider:
name: 'EngVMware'
type: 'VMware'
state: 'present'
provider:
hostname: 'vcenter.example.com'
host_default_vnc_port_start: 5800
host_default_vnc_port_end: 5801
userid: 'root'
password: 'password'
manageiq_connection:
url: 'https://127.0.0.1'
token: 'VeryLongToken'
verify_ssl: true
- name: Create a new Azure provider in ManageIQ
manageiq_provider:
name: 'EngAzure'
type: 'Azure'
provider_region: 'northeurope'
subscription: 'e272bd74-f661-484f-b223-88dd128a4049'
azure_tenant_id: 'e272bd74-f661-484f-b223-88dd128a4048'
state: 'present'
provider:
hostname: 'azure.example.com'
userid: 'e272bd74-f661-484f-b223-88dd128a4049'
password: 'password'
manageiq_connection:
url: 'https://cf-6af0.rhpds.opentlc.com'
username: 'admin'
password: 'password'
verify_ssl: false
- name: Create a new OpenStack Director provider in ManageIQ with rsa keypair
manageiq_provider:
name: 'EngDirector'
type: 'Director'
api_version: 'v3'
state: 'present'
provider:
hostname: 'director.example.com'
userid: 'admin'
password: 'password'
security_protocol: 'ssl-with-validation'
verify_ssl: 'true'
certificate_authority: |
-----BEGIN CERTIFICATE-----
FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
-----END CERTIFICATE-----
ssh_keypair:
hostname: director.example.com
userid: heat-admin
auth_key: 'SecretSSHPrivateKey'
- name: Create a new OpenStack provider in ManageIQ with amqp metrics
manageiq_provider:
name: 'EngOpenStack'
type: 'OpenStack'
api_version: 'v3'
state: 'present'
provider_region: 'europe'
tenant_mapping_enabled: 'False'
keystone_v3_domain_id: 'mydomain'
provider:
hostname: 'openstack.example.com'
userid: 'admin'
password: 'password'
security_protocol: 'ssl-with-validation'
verify_ssl: 'true'
certificate_authority: |
-----BEGIN CERTIFICATE-----
FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
-----END CERTIFICATE-----
metrics:
role: amqp
hostname: 'amqp.example.com'
security_protocol: 'non-ssl'
port: 5666
userid: admin
password: password
- name: Create a new GCE provider in ManageIQ
manageiq_provider:
name: 'EngGoogle'
type: 'GCE'
provider_region: 'europe-west1'
project: 'project1'
state: 'present'
provider:
hostname: 'gce.example.com'
auth_key: 'google_json_key'
verify_ssl: 'false'
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.manageiq import ManageIQ, manageiq_argument_spec
def supported_providers():
return dict(
Openshift=dict(
class_name='ManageIQ::Providers::Openshift::ContainerManager',
authtype='bearer',
default_role='default',
metrics_role='prometheus',
alerts_role='prometheus_alerts',
),
Amazon=dict(
class_name='ManageIQ::Providers::Amazon::CloudManager',
),
oVirt=dict(
class_name='ManageIQ::Providers::Redhat::InfraManager',
default_role='default',
metrics_role='metrics',
),
VMware=dict(
class_name='ManageIQ::Providers::Vmware::InfraManager',
),
Azure=dict(
class_name='ManageIQ::Providers::Azure::CloudManager',
),
Director=dict(
class_name='ManageIQ::Providers::Openstack::InfraManager',
ssh_keypair_role="ssh_keypair"
),
OpenStack=dict(
class_name='ManageIQ::Providers::Openstack::CloudManager',
),
GCE=dict(
class_name='ManageIQ::Providers::Google::CloudManager',
),
)
def endpoint_list_spec():
return dict(
provider=dict(type='dict', options=endpoint_argument_spec()),
metrics=dict(type='dict', options=endpoint_argument_spec()),
alerts=dict(type='dict', options=endpoint_argument_spec()),
ssh_keypair=dict(type='dict', options=endpoint_argument_spec()),
)
def endpoint_argument_spec():
return dict(
role=dict(),
hostname=dict(required=True),
port=dict(type='int'),
verify_ssl=dict(default=True, type='bool'),
certificate_authority=dict(),
security_protocol=dict(
choices=[
'ssl-with-validation',
'ssl-with-validation-custom-ca',
'ssl-without-validation',
'non-ssl',
],
),
userid=dict(),
password=dict(no_log=True),
auth_key=dict(no_log=True),
subscription=dict(no_log=True),
project=dict(),
uid_ems=dict(),
path=dict(),
)
def delete_nulls(h):
""" Remove null entries from a hash
Returns:
a hash without nulls
"""
if isinstance(h, list):
        return [delete_nulls(i) for i in h]
if isinstance(h, dict):
return dict((k, delete_nulls(v)) for k, v in h.items() if v is not None)
return h
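# Illustrative sketch (values are hypothetical, not part of the module): the
# helper strips None entries recursively before a payload is posted, e.g.
#   delete_nulls({'name': 'EngLab', 'port': None, 'zone': {'id': 1, 'name': None}})
#   => {'name': 'EngLab', 'zone': {'id': 1}}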
class ManageIQProvider(object):
"""
Object to execute provider management operations in manageiq.
"""
def __init__(self, manageiq):
self.manageiq = manageiq
self.module = self.manageiq.module
self.api_url = self.manageiq.api_url
self.client = self.manageiq.client
def class_name_to_type(self, class_name):
""" Convert class_name to type
Returns:
the type
"""
out = [k for k, v in supported_providers().items() if v['class_name'] == class_name]
if len(out) == 1:
return out[0]
return None
def zone_id(self, name):
""" Search for zone id by zone name.
Returns:
            the zone id, or fail the module if the zone is not found.
"""
zone = self.manageiq.find_collection_resource_by('zones', name=name)
if not zone: # zone doesn't exist
self.module.fail_json(
msg="zone %s does not exist in manageiq" % (name))
return zone['id']
def provider(self, name):
""" Search for provider object by name.
Returns:
the provider, or None if provider not found.
"""
return self.manageiq.find_collection_resource_by('providers', name=name)
def build_connection_configurations(self, provider_type, endpoints):
""" Build "connection_configurations" objects from
requested endpoints provided by user
Returns:
            the list of connection_configuration objects built from the requested endpoints
"""
connection_configurations = []
endpoint_keys = endpoint_list_spec().keys()
provider_defaults = supported_providers().get(provider_type, {})
# get endpoint defaults
endpoint = endpoints.get('provider')
default_auth_key = endpoint.get('auth_key')
# build a connection_configuration object for each endpoint
for endpoint_key in endpoint_keys:
endpoint = endpoints.get(endpoint_key)
if endpoint:
# get role and authtype
role = endpoint.get('role') or provider_defaults.get(endpoint_key + '_role', 'default')
if role == 'default':
authtype = provider_defaults.get('authtype') or role
else:
authtype = role
# set a connection_configuration
connection_configurations.append({
'endpoint': {
'role': role,
'hostname': endpoint.get('hostname'),
'port': endpoint.get('port'),
'verify_ssl': [0, 1][endpoint.get('verify_ssl', True)],
'security_protocol': endpoint.get('security_protocol'),
'certificate_authority': endpoint.get('certificate_authority'),
'path': endpoint.get('path'),
},
'authentication': {
'authtype': authtype,
'userid': endpoint.get('userid'),
'password': endpoint.get('password'),
'auth_key': endpoint.get('auth_key') or default_auth_key,
}
})
return connection_configurations
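    # Illustrative sketch (hypothetical values): for an Openshift 'provider'
    # endpoint such as
    #   {'hostname': 'example.com', 'port': 8443, 'auth_key': 'topSecret'}
    # this method emits one entry shaped like
    #   {'endpoint': {'role': 'default', 'hostname': 'example.com', 'port': 8443, ...},
    #    'authentication': {'authtype': 'bearer', 'auth_key': 'topSecret', ...}},
    # while a 'metrics' endpoint defaults to role/authtype 'prometheus'. Any None
    # values left in the structure are removed later by delete_nulls().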
def delete_provider(self, provider):
""" Deletes a provider from manageiq.
Returns:
a short message describing the operation executed.
"""
try:
url = '%s/providers/%s' % (self.api_url, provider['id'])
result = self.client.post(url, action='delete')
except Exception as e:
self.module.fail_json(msg="failed to delete provider %s: %s" % (provider['name'], str(e)))
return dict(changed=True, msg=result['message'])
def edit_provider(self, provider, name, provider_type, endpoints, zone_id, provider_region,
host_default_vnc_port_start, host_default_vnc_port_end,
subscription, project, uid_ems, tenant_mapping_enabled, api_version):
""" Edit a provider from manageiq.
Returns:
a short message describing the operation executed.
"""
url = '%s/providers/%s' % (self.api_url, provider['id'])
resource = dict(
name=name,
zone={'id': zone_id},
provider_region=provider_region,
connection_configurations=endpoints,
host_default_vnc_port_start=host_default_vnc_port_start,
host_default_vnc_port_end=host_default_vnc_port_end,
subscription=subscription,
project=project,
uid_ems=uid_ems,
tenant_mapping_enabled=tenant_mapping_enabled,
api_version=api_version,
)
        # NOTE: we do not check for diffs between the requested and the current
        #       provider. Endpoints are always submitted with their password or
        #       auth_key, since we cannot read back the current password or
        #       auth_key to compare against; every edit request is therefore sent
        #       to the ManageIQ API without comparing it to the current state.
        # clean nulls; we do not send nulls to the API
resource = delete_nulls(resource)
# try to update provider
try:
result = self.client.post(url, action='edit', resource=resource)
except Exception as e:
self.module.fail_json(msg="failed to update provider %s: %s" % (provider['name'], str(e)))
return dict(
changed=True,
msg="successfully updated the provider %s: %s" % (provider['name'], result))
def create_provider(self, name, provider_type, endpoints, zone_id, provider_region,
host_default_vnc_port_start, host_default_vnc_port_end,
subscription, project, uid_ems, tenant_mapping_enabled, api_version):
""" Creates the provider in manageiq.
Returns:
a short message describing the operation executed.
"""
resource = dict(
name=name,
zone={'id': zone_id},
provider_region=provider_region,
host_default_vnc_port_start=host_default_vnc_port_start,
host_default_vnc_port_end=host_default_vnc_port_end,
subscription=subscription,
project=project,
uid_ems=uid_ems,
tenant_mapping_enabled=tenant_mapping_enabled,
api_version=api_version,
connection_configurations=endpoints,
)
        # clean nulls; we do not send nulls to the API
resource = delete_nulls(resource)
# try to create a new provider
try:
url = '%s/providers' % (self.api_url)
result = self.client.post(url, type=supported_providers()[provider_type]['class_name'], **resource)
except Exception as e:
self.module.fail_json(msg="failed to create provider %s: %s" % (name, str(e)))
return dict(
changed=True,
msg="successfully created the provider %s: %s" % (name, result['results']))
def refresh(self, provider, name):
""" Trigger provider refresh.
Returns:
a short message describing the operation executed.
"""
try:
url = '%s/providers/%s' % (self.api_url, provider['id'])
result = self.client.post(url, action='refresh')
except Exception as e:
self.module.fail_json(msg="failed to refresh provider %s: %s" % (name, str(e)))
return dict(
changed=True,
msg="refreshing provider %s" % name)
def main():
zone_id = None
endpoints = []
argument_spec = dict(
state=dict(choices=['absent', 'present', 'refresh'], default='present'),
name=dict(required=True),
zone=dict(default='default'),
provider_region=dict(),
host_default_vnc_port_start=dict(),
host_default_vnc_port_end=dict(),
subscription=dict(),
project=dict(),
azure_tenant_id=dict(aliases=['keystone_v3_domain_id']),
tenant_mapping_enabled=dict(default=False, type='bool'),
api_version=dict(choices=['v2', 'v3']),
type=dict(choices=supported_providers().keys()),
)
# add the manageiq connection arguments to the arguments
argument_spec.update(manageiq_argument_spec())
# add the endpoint arguments to the arguments
argument_spec.update(endpoint_list_spec())
module = AnsibleModule(
argument_spec=argument_spec,
required_if=[
('state', 'present', ['provider']),
('state', 'refresh', ['name'])],
required_together=[
['host_default_vnc_port_start', 'host_default_vnc_port_end']
],
)
name = module.params['name']
zone_name = module.params['zone']
provider_type = module.params['type']
raw_endpoints = module.params
provider_region = module.params['provider_region']
host_default_vnc_port_start = module.params['host_default_vnc_port_start']
host_default_vnc_port_end = module.params['host_default_vnc_port_end']
subscription = module.params['subscription']
uid_ems = module.params['azure_tenant_id']
project = module.params['project']
tenant_mapping_enabled = module.params['tenant_mapping_enabled']
api_version = module.params['api_version']
state = module.params['state']
manageiq = ManageIQ(module)
manageiq_provider = ManageIQProvider(manageiq)
provider = manageiq_provider.provider(name)
# provider should not exist
if state == "absent":
# if we have a provider, delete it
if provider:
res_args = manageiq_provider.delete_provider(provider)
# if we do not have a provider, nothing to do
else:
res_args = dict(
changed=False,
msg="provider %s: does not exist in manageiq" % (name))
# provider should exist
if state == "present":
        # look up data the user did not explicitly give
if zone_name:
zone_id = manageiq_provider.zone_id(zone_name)
# if we do not have a provider_type, use the current provider_type
if provider and not provider_type:
provider_type = manageiq_provider.class_name_to_type(provider['type'])
        # fail if we still do not have a provider_type
if not provider_type:
manageiq_provider.module.fail_json(
msg="missing required argument: provider_type")
# check supported_providers types
if provider_type not in supported_providers().keys():
manageiq_provider.module.fail_json(
msg="provider_type %s is not supported" % (provider_type))
# build "connection_configurations" objects from user requsted endpoints
# "provider" is a required endpoint, if we have it, we have endpoints
if raw_endpoints.get("provider"):
endpoints = manageiq_provider.build_connection_configurations(provider_type, raw_endpoints)
# if we have a provider, edit it
if provider:
res_args = manageiq_provider.edit_provider(provider, name, provider_type, endpoints, zone_id, provider_region,
host_default_vnc_port_start, host_default_vnc_port_end,
subscription, project, uid_ems, tenant_mapping_enabled, api_version)
# if we do not have a provider, create it
else:
res_args = manageiq_provider.create_provider(name, provider_type, endpoints, zone_id, provider_region,
host_default_vnc_port_start, host_default_vnc_port_end,
subscription, project, uid_ems, tenant_mapping_enabled, api_version)
# refresh provider (trigger sync)
if state == "refresh":
if provider:
res_args = manageiq_provider.refresh(provider, name)
else:
res_args = dict(
changed=False,
msg="provider %s: does not exist in manageiq" % (name))
module.exit_json(**res_args)
if __name__ == "__main__":
main()
|
eduardolujan/django_project_template
|
refs/heads/master
|
django_project_template/django_project_template/urls.py
|
1
|
from django.conf.urls.static import static
from django.conf.urls import patterns, url, include
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'', include('django_project_template.apps.app.urls')),
(r'^admin/doc/', include('django.contrib.admindocs.urls')),
(r'^admin/', include(admin.site.urls)),
)
if settings.DEBUG and settings.MEDIA_ROOT:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
|
indictranstech/phr-frappe
|
refs/heads/develop
|
frappe/core/doctype/property_setter/__init__.py
|
2292
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
|
dendisuhubdy/tensorflow
|
refs/heads/master
|
tensorflow/contrib/crf/python/kernel_tests/crf_test.py
|
10
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CRF."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.contrib.crf.python.ops import crf
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class CrfTest(test.TestCase):
def testCrfSequenceScore(self):
transition_params = np.array(
[[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32)
# Test both the length-1 and regular cases.
sequence_lengths_list = [
np.array(3, dtype=np.int32),
np.array(1, dtype=np.int32)
]
inputs_list = [
np.array([[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]],
dtype=np.float32),
np.array([[4, 5, -3]],
dtype=np.float32),
]
tag_indices_list = [
np.array([1, 2, 1, 0], dtype=np.int32),
np.array([1], dtype=np.int32)
]
for sequence_lengths, inputs, tag_indices in zip(sequence_lengths_list,
inputs_list,
tag_indices_list):
with self.test_session() as sess:
sequence_score = crf.crf_sequence_score(
inputs=array_ops.expand_dims(inputs, 0),
tag_indices=array_ops.expand_dims(tag_indices, 0),
sequence_lengths=array_ops.expand_dims(sequence_lengths, 0),
transition_params=constant_op.constant(transition_params))
sequence_score = array_ops.squeeze(sequence_score, [0])
tf_sequence_score = sess.run(sequence_score)
expected_unary_score = sum(inputs[i][tag_indices[i]]
for i in range(sequence_lengths))
expected_binary_score = sum(
transition_params[tag_indices[i], tag_indices[i + 1]]
for i in range(sequence_lengths - 1))
expected_sequence_score = expected_unary_score + expected_binary_score
self.assertAllClose(tf_sequence_score, expected_sequence_score)
def testCrfUnaryScore(self):
inputs = np.array(
[[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32)
for dtype in (np.int32, np.int64):
tag_indices = np.array([1, 2, 1, 0], dtype=dtype)
sequence_lengths = np.array(3, dtype=np.int32)
with self.test_session() as sess:
unary_score = crf.crf_unary_score(
tag_indices=array_ops.expand_dims(tag_indices, 0),
sequence_lengths=array_ops.expand_dims(sequence_lengths, 0),
inputs=array_ops.expand_dims(inputs, 0))
unary_score = array_ops.squeeze(unary_score, [0])
tf_unary_score = sess.run(unary_score)
expected_unary_score = sum(inputs[i][tag_indices[i]]
for i in range(sequence_lengths))
self.assertAllClose(tf_unary_score, expected_unary_score)
def testCrfBinaryScore(self):
tag_indices = np.array([1, 2, 1, 0], dtype=np.int32)
transition_params = np.array(
[[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32)
sequence_lengths = np.array(3, dtype=np.int32)
with self.test_session() as sess:
binary_score = crf.crf_binary_score(
tag_indices=array_ops.expand_dims(tag_indices, 0),
sequence_lengths=array_ops.expand_dims(sequence_lengths, 0),
transition_params=constant_op.constant(transition_params))
binary_score = array_ops.squeeze(binary_score, [0])
tf_binary_score = sess.run(binary_score)
expected_binary_score = sum(
transition_params[tag_indices[i], tag_indices[i + 1]]
for i in range(sequence_lengths - 1))
self.assertAllClose(tf_binary_score, expected_binary_score)
def testCrfLogNorm(self):
transition_params = np.array(
[[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32)
# Test both the length-1 and regular cases.
sequence_lengths_list = [
np.array(3, dtype=np.int32),
np.array(1, dtype=np.int32)
]
inputs_list = [
np.array([[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]],
dtype=np.float32),
np.array([[3, -1, 3]],
dtype=np.float32),
]
tag_indices_list = [
np.array([1, 2, 1, 0], dtype=np.int32),
np.array([2], dtype=np.int32)
]
for sequence_lengths, inputs, tag_indices in zip(sequence_lengths_list,
inputs_list,
tag_indices_list):
num_words = inputs.shape[0]
num_tags = inputs.shape[1]
with self.test_session() as sess:
all_sequence_scores = []
# Compare the dynamic program with brute force computation.
for tag_indices in itertools.product(
range(num_tags), repeat=sequence_lengths):
tag_indices = list(tag_indices)
tag_indices.extend([0] * (num_words - sequence_lengths))
all_sequence_scores.append(
crf.crf_sequence_score(
inputs=array_ops.expand_dims(inputs, 0),
tag_indices=array_ops.expand_dims(tag_indices, 0),
sequence_lengths=array_ops.expand_dims(sequence_lengths, 0),
transition_params=constant_op.constant(transition_params)))
brute_force_log_norm = math_ops.reduce_logsumexp(all_sequence_scores)
log_norm = crf.crf_log_norm(
inputs=array_ops.expand_dims(inputs, 0),
sequence_lengths=array_ops.expand_dims(sequence_lengths, 0),
transition_params=constant_op.constant(transition_params))
log_norm = array_ops.squeeze(log_norm, [0])
tf_brute_force_log_norm, tf_log_norm = sess.run(
[brute_force_log_norm, log_norm])
self.assertAllClose(tf_log_norm, tf_brute_force_log_norm)
def testCrfLogNormZeroSeqLength(self):
"""
Test `crf_log_norm` when `sequence_lengths` contains one or more zeros.
"""
with self.test_session() as sess:
inputs = constant_op.constant(np.ones([2, 10, 5],
dtype=np.float32))
transition_params = constant_op.constant(np.ones([5, 5],
dtype=np.float32))
sequence_lengths = constant_op.constant(np.zeros([2],
dtype=np.int32))
expected_log_norm = np.zeros([2], dtype=np.float32)
log_norm = crf.crf_log_norm(inputs, sequence_lengths, transition_params)
tf_log_norm = sess.run(log_norm)
self.assertAllClose(tf_log_norm, expected_log_norm)
def testCrfLogLikelihood(self):
inputs = np.array(
[[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32)
transition_params = np.array(
[[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32)
sequence_lengths = np.array(3, dtype=np.int32)
num_words = inputs.shape[0]
num_tags = inputs.shape[1]
with self.test_session() as sess:
all_sequence_log_likelihoods = []
# Make sure all probabilities sum to 1.
for tag_indices in itertools.product(
range(num_tags), repeat=sequence_lengths):
tag_indices = list(tag_indices)
tag_indices.extend([0] * (num_words - sequence_lengths))
sequence_log_likelihood, _ = crf.crf_log_likelihood(
inputs=array_ops.expand_dims(inputs, 0),
tag_indices=array_ops.expand_dims(tag_indices, 0),
sequence_lengths=array_ops.expand_dims(sequence_lengths, 0),
transition_params=constant_op.constant(transition_params))
all_sequence_log_likelihoods.append(sequence_log_likelihood)
total_log_likelihood = math_ops.reduce_logsumexp(
all_sequence_log_likelihoods)
tf_total_log_likelihood = sess.run(total_log_likelihood)
self.assertAllClose(tf_total_log_likelihood, 0.0)
def testViterbiDecode(self):
inputs = np.array(
[[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32)
transition_params = np.array(
[[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32)
sequence_lengths = np.array(3, dtype=np.int32)
num_words = inputs.shape[0]
num_tags = inputs.shape[1]
with self.test_session() as sess:
all_sequence_scores = []
all_sequences = []
# Compare the dynamic program with brute force computation.
for tag_indices in itertools.product(
range(num_tags), repeat=sequence_lengths):
tag_indices = list(tag_indices)
tag_indices.extend([0] * (num_words - sequence_lengths))
all_sequences.append(tag_indices)
sequence_score = crf.crf_sequence_score(
inputs=array_ops.expand_dims(inputs, 0),
tag_indices=array_ops.expand_dims(tag_indices, 0),
sequence_lengths=array_ops.expand_dims(sequence_lengths, 0),
transition_params=constant_op.constant(transition_params))
sequence_score = array_ops.squeeze(sequence_score, [0])
all_sequence_scores.append(sequence_score)
tf_all_sequence_scores = sess.run(all_sequence_scores)
expected_max_sequence_index = np.argmax(tf_all_sequence_scores)
expected_max_sequence = all_sequences[expected_max_sequence_index]
expected_max_score = tf_all_sequence_scores[expected_max_sequence_index]
actual_max_sequence, actual_max_score = crf.viterbi_decode(
inputs[:sequence_lengths], transition_params)
self.assertAllClose(actual_max_score, expected_max_score)
self.assertEqual(actual_max_sequence,
expected_max_sequence[:sequence_lengths])
def testCrfDecode(self):
transition_params = np.array(
[[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32)
# Test both the length-1 and regular cases.
sequence_lengths_list = [
np.array(3, dtype=np.int32),
np.array(1, dtype=np.int32)
]
inputs_list = [
np.array([[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]],
dtype=np.float32),
np.array([[-1, 2, 1]],
dtype=np.float32),
]
tag_indices_list = [
np.array([1, 2, 1, 0], dtype=np.int32),
np.array([2], dtype=np.int32)
]
for sequence_lengths, inputs, tag_indices in zip(sequence_lengths_list,
inputs_list,
tag_indices_list):
num_words = inputs.shape[0]
num_tags = inputs.shape[1]
with self.test_session() as sess:
all_sequence_scores = []
all_sequences = []
# Compare the dynamic program with brute force computation.
for tag_indices in itertools.product(
range(num_tags), repeat=sequence_lengths):
tag_indices = list(tag_indices)
tag_indices.extend([0] * (num_words - sequence_lengths))
all_sequences.append(tag_indices)
sequence_score = crf.crf_sequence_score(
inputs=array_ops.expand_dims(inputs, 0),
tag_indices=array_ops.expand_dims(tag_indices, 0),
sequence_lengths=array_ops.expand_dims(sequence_lengths, 0),
transition_params=constant_op.constant(transition_params))
sequence_score = array_ops.squeeze(sequence_score, [0])
all_sequence_scores.append(sequence_score)
tf_all_sequence_scores = sess.run(all_sequence_scores)
expected_max_sequence_index = np.argmax(tf_all_sequence_scores)
expected_max_sequence = all_sequences[expected_max_sequence_index]
expected_max_score = tf_all_sequence_scores[expected_max_sequence_index]
actual_max_sequence, actual_max_score = crf.crf_decode(
array_ops.expand_dims(inputs, 0),
constant_op.constant(transition_params),
array_ops.expand_dims(sequence_lengths, 0))
actual_max_sequence = array_ops.squeeze(actual_max_sequence, [0])
actual_max_score = array_ops.squeeze(actual_max_score, [0])
tf_actual_max_sequence, tf_actual_max_score = sess.run(
[actual_max_sequence, actual_max_score])
self.assertAllClose(tf_actual_max_score, expected_max_score)
self.assertEqual(list(tf_actual_max_sequence[:sequence_lengths]),
expected_max_sequence[:sequence_lengths])
def testCrfDecodeZeroSeqLength(self):
"""
Test that crf_decode works when sequence_length contains one or more zeros.
"""
with self.test_session() as sess:
inputs = constant_op.constant(np.ones([2, 10, 5],
dtype=np.float32))
transition_params = constant_op.constant(np.ones([5, 5],
dtype=np.float32))
sequence_lengths = constant_op.constant(np.zeros([2],
dtype=np.int32))
tags, scores = crf.crf_decode(inputs, transition_params, sequence_lengths)
tf_tags, tf_scores = sess.run([tags, scores])
self.assertEqual(len(tf_tags.shape), 2)
self.assertEqual(len(tf_scores.shape), 1)
if __name__ == "__main__":
test.main()
|
MalloyPower/parsing-python
|
refs/heads/master
|
front-end/testsuite-python-lib/Python-2.7.2/Lib/test/test_binhex.py
|
193
|
#! /usr/bin/env python
"""Test script for the binhex C module
Uses the mechanism of the python binhex module
Based on an original test by Roger E. Masse.
"""
import binhex
import os
import unittest
from test import test_support
class BinHexTestCase(unittest.TestCase):
def setUp(self):
self.fname1 = test_support.TESTFN + "1"
self.fname2 = test_support.TESTFN + "2"
def tearDown(self):
try: os.unlink(self.fname1)
except OSError: pass
try: os.unlink(self.fname2)
except OSError: pass
DATA = 'Jack is my hero'
def test_binhex(self):
f = open(self.fname1, 'w')
f.write(self.DATA)
f.close()
binhex.binhex(self.fname1, self.fname2)
binhex.hexbin(self.fname2, self.fname1)
f = open(self.fname1, 'r')
finish = f.readline()
f.close()
self.assertEqual(self.DATA, finish)
def test_main():
test_support.run_unittest(BinHexTestCase)
if __name__ == "__main__":
test_main()
|
c86j224s/snippet
|
refs/heads/master
|
Python_asyncio_binary_echo/pyclient2/Lib/site-packages/pkg_resources/__init__.py
|
20
|
# coding: utf-8
"""
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import operator
import platform
import collections
import plistlib
import email.parser
import errno
import tempfile
import textwrap
import itertools
import inspect
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
from pkg_resources.extern import six
from pkg_resources.extern.six.moves import urllib, map, filter
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
try:
import importlib.machinery as importlib_machinery
# access attribute to force import under delayed import mechanisms.
importlib_machinery.__name__
except ImportError:
importlib_machinery = None
from . import py31compat
from pkg_resources.extern import appdirs
from pkg_resources.extern import packaging
__import__('pkg_resources.extern.packaging.version')
__import__('pkg_resources.extern.packaging.specifiers')
__import__('pkg_resources.extern.packaging.requirements')
__import__('pkg_resources.extern.packaging.markers')
if (3, 0) < sys.version_info < (3, 3):
raise RuntimeError("Python 3.3 or later is required")
if six.PY2:
# Those builtin exceptions are only defined in Python 3
PermissionError = None
NotADirectoryError = None
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
add_activation_listener = None
resources_stream = None
cleanup_resources = None
resource_dir = None
resource_stream = None
set_extraction_path = None
resource_isdir = None
resource_string = None
iter_entry_points = None
resource_listdir = None
resource_filename = None
resource_exists = None
_distribution_finders = None
_namespace_handlers = None
_namespace_packages = None
class PEP440Warning(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
def parse_version(v):
try:
return packaging.version.Version(v)
except packaging.version.InvalidVersion:
return packaging.version.LegacyVersion(v)
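# Illustrative sketch (hypothetical version strings): parse_version() gives
# PEP 440 ordering and falls back to LegacyVersion instead of raising, e.g.
#   parse_version('1.2') < parse_version('1.10')    # True (numeric, not lexical)
#   type(parse_version('not!a!version')).__name__   # 'LegacyVersion'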
_state_vars = {}
def _declare_state(vartype, **kw):
globals().update(kw)
_state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_' + v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_' + _state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
# not Mac OS X
pass
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info',
'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError', 'VersionConflict', 'DistributionNotFound',
'UnknownExtra', 'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__ + repr(self.args)
class VersionConflict(ResolutionError):
"""
An already-installed version conflicts with the requested version.
Should be initialized with the installed Distribution and the requested
Requirement.
"""
_template = "{self.dist} is installed but {self.req} is required"
@property
def dist(self):
return self.args[0]
@property
def req(self):
return self.args[1]
def report(self):
return self._template.format(**locals())
def with_context(self, required_by):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args)
class ContextualVersionConflict(VersionConflict):
"""
A VersionConflict that accepts a third parameter, the set of the
requirements that required the installed Distribution.
"""
_template = VersionConflict._template + ' by {self.required_by}'
@property
def required_by(self):
return self.args[2]
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
_template = ("The '{self.req}' distribution was not found "
"and is required by {self.requirers_str}")
@property
def req(self):
return self.args[0]
@property
def requirers(self):
return self.args[1]
@property
def requirers_str(self):
if not self.requirers:
return 'the application'
return ', '.join(self.requirers)
def report(self):
return self._template.format(**locals())
def __str__(self):
return self.report()
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
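# Illustrative usage (assumed, mirroring registrations made elsewhere in this
# module): modules loaded by zipimporter get their resources served by ZipProvider:
#   register_loader_type(zipimport.zipimporter, ZipProvider)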
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
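# Illustrative usage (assumed): both a module name and a Requirement are accepted:
#   get_provider('pkg_resources')                   # provider for an importable module
#   get_provider(Requirement.parse('setuptools'))   # active distribution for a project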
def _macosx_vers(_cache=[]):
if not _cache:
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
try:
# Python 2.7 or >=3.2
from sysconfig import get_platform
except ImportError:
from distutils.util import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (
int(version[0]), int(version[1]),
_macosx_arch(machine),
)
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided == required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
return True
# egg isn't macosx or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
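# Illustrative sketch (hypothetical platform strings), following the Mac OS X
# special-casing above:
#   compatible_platforms('macosx-10.9-x86_64', 'macosx-10.12-x86_64')   # True
#   compatible_platforms('macosx-10.12-x86_64', 'macosx-10.9-x86_64')   # False
#   compatible_platforms(None, 'linux-x86_64')                          # True (easy case)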
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, six.string_types):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if not isinstance(dist, Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
@classmethod
def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__
except ImportError:
# The main program does not list any requirements
return ws
# ensure the requirements are met
try:
ws.require(__requires__)
except VersionConflict:
return cls._build_from_requirements(__requires__)
return ws
@classmethod
def _build_from_requirements(cls, req_spec):
"""
Build a working set from a requirement spec. Rewrites sys.path.
"""
# try it without defaults already on sys.path
# by starting with an empty path
ws = cls([])
reqs = parse_requirements(req_spec)
dists = ws.resolve(reqs, Environment())
for dist in dists:
ws.add(dist)
# add any missing entries from sys.path
for entry in sys.path:
if entry not in ws.entries:
ws.add_entry(entry)
# then copy back to sys.path
sys.path[:] = ws.entries
return ws
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self, dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
for dist in self:
entries = dist.get_entry_map(group)
if name is None:
for ep in entries.values():
yield ep
elif name in entries:
yield entries[name]
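    # Illustrative usage (assumed entry point group): list the console scripts
    # visible to the global working set, in working-set order:
    #   for ep in working_set.iter_entry_points('console_scripts'):
    #       print(ep.name, ep.module_name)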
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key] = 1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry, replace=replace)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry, [])
keys2 = self.entry_keys.setdefault(dist.location, [])
if not replace and dist.key in self.by_key:
# ignore hidden distros
return
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None,
replace_conflicting=False, extras=None):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
        Unless `replace_conflicting=True`, raises a VersionConflict exception
        if any requirements are found on the path that have the correct name
        but the wrong version. Otherwise, if an `installer` is supplied it will be
invoked to obtain the correct version of the requirement and activate
it.
`extras` is a list of the extras to be used with these requirements.
This is important because extra requirements may look like `my_req;
extra = "my_extra"`, which would otherwise be interpreted as a purely
optional requirement. Instead, we want to be able to assert that these
requirements are truly required.
"""
# set up the stack
requirements = list(requirements)[::-1]
# set of processed requirements
processed = {}
# key -> dist
best = {}
to_activate = []
req_extras = _ReqExtras()
# Mapping of requirement to set of distributions that required it;
# useful for reporting info about conflicts.
required_by = collections.defaultdict(set)
while requirements:
# process dependencies breadth-first
req = requirements.pop(0)
if req in processed:
# Ignore cyclic or redundant dependencies
continue
if not req_extras.markers_pass(req, extras):
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(
req, ws, installer,
replace_conflicting=replace_conflicting
)
if dist is None:
requirers = required_by.get(req, None)
raise DistributionNotFound(req, requirers)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
dependent_req = required_by[req]
raise VersionConflict(dist, req).with_context(dependent_req)
# push the new requirements onto the stack
new_requirements = dist.requires(req.extras)[::-1]
requirements.extend(new_requirements)
# Register the new requirements needed by req
for new_requirement in new_requirements:
required_by[new_requirement].add(req.project_name)
req_extras[new_requirement] = req.extras
processed[req] = True
# return list of distros to activate
return to_activate
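    # Illustrative usage (hypothetical requirement string): resolve() only computes
    # the distributions to activate; activation is the caller's job:
    #   for dist in working_set.resolve(parse_requirements('setuptools>=30')):
    #       working_set.add(dist)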
def find_plugins(
self, plugin_env, full_env=None, installer=None, fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
# add plugins+libs to sys.path
map(working_set.add, distributions)
# display errors
print('Could not load', errors)
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        that contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
# scan project names in alphabetic order
plugin_projects.sort()
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
# put all our entries in shadow_set
list(map(shadow_set.add, self))
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError as v:
# save error info
error_info[dist] = v
if fallback:
# try the next older version of project
continue
else:
# give up on this project, keep going
break
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
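    # Usage sketch for ``require`` (illustrative; "example-pkg" is a
    # hypothetical distribution name):
    #
    #     activated = working_set.require('example-pkg>=1.0')
    #     for dist in activated:
    #         print(dist.project_name, dist.version)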
def subscribe(self, callback, existing=True):
"""Invoke `callback` for all distributions
If `existing=True` (default),
call on all existing ones, as well.
"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
if not existing:
return
for dist in self:
callback(dist)
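    # Usage sketch for ``subscribe`` (illustrative):
    #
    #     def on_activate(dist):
    #         print('activated', dist)
    #
    #     # with the default existing=True the callback is also replayed
    #     # for every distribution already in the working set
    #     working_set.subscribe(on_activate)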
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
class _ReqExtras(dict):
"""
Map each requirement to the extras that demanded it.
"""
def markers_pass(self, req, extras=None):
"""
Evaluate markers for req against each extra that
demanded it.
Return False if the req has a marker and fails
evaluation. Otherwise, return True.
"""
extra_evals = (
req.marker.evaluate({'extra': extra})
for extra in self.get(req, ()) + (extras or (None,))
)
return not req.marker or any(extra_evals)
class Environment(object):
"""Searchable snapshot of distributions on a search path"""
def __init__(
self, search_path=None, platform=get_supported_platform(),
python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.3'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
        The distribution must match the platform and python version
        requirements specified when this environment was created; otherwise,
        False is returned.
"""
py_compat = (
self.python is None
or dist.py_version is None
or dist.py_version == self.python
)
return py_compat and compatible_platforms(dist.platform, self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self, project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
Uses case-insensitive `project_name` comparison, assuming all the
project's distributions use their project's name converted to all
lowercase as their key.
"""
distribution_key = project_name.lower()
return self._distmap.get(distribution_key, [])
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added
"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
def best_match(
self, req, working_set, installer=None, replace_conflicting=False):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
try:
dist = working_set.find(req)
except VersionConflict:
if not replace_conflicting:
raise
dist = None
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
# try to download/install
return self.obtain(req, installer)
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]:
yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other, Distribution):
self.add(other)
elif isinstance(other, Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
# XXX backward compatibility
AvailableDistributions = Environment
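# Usage sketch for ``Environment`` (illustrative; the directory names are
# hypothetical):
#
#     env = Environment(['/opt/plugins'])         # snapshot one directory
#     env += Environment(['/usr/local/plugins'])  # merge another snapshot
#     for project in env:                         # unique project keys
#         newest = env[project][0]                # newest-to-oldest order
#         print(project, newest.version)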
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
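    # Usage sketch for the resource accessors above (illustrative; "mypkg"
    # and "data/defaults.cfg" are hypothetical package/resource names):
    #
    #     manager = ResourceManager()
    #     if manager.resource_exists('mypkg', 'data/defaults.cfg'):
    #         raw = manager.resource_string('mypkg', 'data/defaults.cfg')
    #         path = manager.resource_filename('mypkg', 'data/defaults.cfg')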
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
tmpl = textwrap.dedent("""
Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s)
to the Python egg cache:
{old_exc}
The Python egg cache directory is currently set to:
{cache_path}
Perhaps your account does not have write access to this directory?
You can change the cache directory by setting the PYTHON_EGG_CACHE
environment variable to point to an accessible directory.
""").lstrip()
err = ExtractionError(tmpl.format(**locals()))
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
try:
_bypass_ensure_directory(target_path)
except Exception:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = (
"%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path
)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
"""
Return the ``PYTHON_EGG_CACHE`` environment variable
or a platform-relevant user cache dir for an app
named "Python-Eggs".
"""
return (
os.environ.get('PYTHON_EGG_CACHE')
or appdirs.user_cache_dir(appname='Python-Eggs')
)
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""
Convert an arbitrary string to a standard version string
"""
try:
# normalize the version
return str(packaging.version.Version(version))
except packaging.version.InvalidVersion:
version = version.replace(' ', '.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-', '_')
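# Illustrative round-trips of the name helpers above:
#
#     safe_name('my_project')      -> 'my-project'
#     to_filename('my-project')    -> 'my_project'
#     safe_extra('Extra Feature')  -> 'extra_feature'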
def invalid_marker(text):
"""
Validate text as a PEP 508 environment marker; return an exception
if invalid or False otherwise.
"""
try:
evaluate_marker(text)
except SyntaxError as e:
e.filename = None
e.lineno = None
return e
return False
def evaluate_marker(text, extra=None):
"""
Evaluate a PEP 508 environment marker.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
This implementation uses the 'pyparsing' module.
"""
try:
marker = packaging.markers.Marker(text)
return marker.evaluate()
except packaging.markers.InvalidMarker as e:
raise SyntaxError(e)
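# Usage sketch for the marker helpers (illustrative):
#
#     evaluate_marker('python_version >= "2.7"')  # -> True or False
#     invalid_marker('python_version >=')         # -> SyntaxError instance
#     invalid_marker('os_name == "posix"')        # -> False (marker is valid)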
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return io.BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def has_metadata(self, name):
return self.egg_info and self._has(self._fn(self.egg_info, name))
def get_metadata(self, name):
if not self.egg_info:
return ""
value = self._get(self._fn(self.egg_info, name))
return value.decode('utf-8') if six.PY3 else value
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self, resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self, name):
return self.egg_info and self._isdir(self._fn(self.egg_info, name))
def resource_listdir(self, resource_name):
return self._listdir(self._fn(self.module_path, resource_name))
def metadata_listdir(self, name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info, name))
return []
def run_script(self, script_name, namespace):
script = 'scripts/' + script_name
if not self.has_metadata(script):
raise ResolutionError(
"Script {script!r} not found in metadata at {self.egg_info!r}"
.format(**locals()),
)
script_text = self.get_metadata(script).replace('\r\n', '\n')
script_text = script_text.replace('\r', '\n')
script_filename = self._fn(self.egg_info, script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
source = open(script_filename).read()
code = compile(source, script_filename, 'exec')
exec(code, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text, script_filename, 'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module):
NullProvider.__init__(self, module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path != old:
if _is_egg_path(path):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self, path):
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
with open(path, 'rb') as stream:
return stream.read()
@classmethod
def _register(cls):
loader_cls = getattr(
importlib_machinery,
'SourceFileLoader',
type(None),
)
register_loader_type(loader_cls, cls)
DefaultProvider._register()
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
module_path = None
_isdir = _has = lambda self, path: False
def _get(self, path):
return ''
def _listdir(self, path):
return []
def __init__(self):
pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
"""
zip manifest builder
"""
@classmethod
def build(cls, path):
"""
Build a dictionary similar to the zipimport directory
caches, except instead of tuples, store ZipInfo objects.
Use a platform-specific path separator (os.sep) for the path keys
for compatibility with pypy on Windows.
"""
with zipfile.ZipFile(path) as zfile:
items = (
(
name.replace('/', os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
class MemoizedZipManifests(ZipManifests):
"""
Memoized zipfile manifests.
"""
manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
def load(self, path):
"""
Load a manifest at path or return a suitable manifest already loaded.
"""
path = os.path.normpath(path)
mtime = os.stat(path).st_mtime
if path not in self or self[path].mtime != mtime:
manifest = self.build(path)
self[path] = self.manifest_mod(manifest, mtime)
return self[path].manifest
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
_zip_manifests = MemoizedZipManifests()
def __init__(self, module):
EggProvider.__init__(self, module)
self.zip_pre = self.loader.archive + os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
fspath = fspath.rstrip(os.sep)
if fspath == self.loader.archive:
return ''
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.zip_pre)
)
def _parts(self, zip_path):
# Convert a zipfile subpath into an egg-relative path part list.
# pseudo-fs path
fspath = self.zip_pre + zip_path
if fspath.startswith(self.egg_root + os.sep):
return fspath[len(self.egg_root) + 1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.egg_root)
)
@property
def zipinfo(self):
return self._zip_manifests.load(self.loader.archive)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
# ymdhms+wday, yday, dst
date_time = zip_stat.date_time + (0, 0, -1)
# 1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
# return the extracted directory name
return os.path.dirname(last)
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(
".$extract",
dir=os.path.dirname(real_path),
)
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp, timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
# Windows, del old file and retry
elif os.name == 'nt':
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
# report a user-friendly error
manager.extraction_error()
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size != size or stat.st_mtime != timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
with open(file_path, 'rb') as f:
file_contents = f.read()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self, fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self, fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.egg_root, resource_name))
def _resource_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self, path):
self.path = path
def has_metadata(self, name):
return name == 'PKG-INFO' and os.path.isfile(self.path)
def get_metadata(self, name):
if name != 'PKG-INFO':
raise KeyError("No metadata except PKG-INFO is available")
with io.open(self.path, encoding='utf-8', errors="replace") as f:
metadata = f.read()
self._warn_on_replacement(metadata)
return metadata
def _warn_on_replacement(self, metadata):
# Python 2.7 compat for: replacement_char = '�'
replacement_char = b'\xef\xbf\xbd'.decode('utf-8')
if replacement_char in metadata:
tmpl = "{self.path} could not be properly decoded in UTF-8"
msg = tmpl.format(**locals())
warnings.warn(msg)
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zip_pre = importer.archive + os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders={})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
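# Usage sketch for ``find_distributions`` (illustrative; the path is
# hypothetical):
#
#     for dist in find_distributions('/opt/plugins'):
#         print(dist.project_name, dist.version, dist.location)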
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
# don't yield nested distros
return
for subitem in metadata.resource_listdir('/'):
if _is_egg_path(subitem):
subpath = os.path.join(path_item, subitem)
dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
for dist in dists:
yield dist
elif subitem.lower().endswith('.dist-info'):
subpath = os.path.join(path_item, subitem)
submeta = EggMetadata(zipimport.zipimporter(subpath))
submeta.egg_info = subpath
yield Distribution.from_location(path_item, subitem, submeta)
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object, find_nothing)
def _by_version_descending(names):
"""
Given a list of filenames, return them in descending order
by version number.
>>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
>>> _by_version_descending(names)
['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
"""
def _by_version(name):
"""
Parse each component of the filename
"""
name, ext = os.path.splitext(name)
parts = itertools.chain(name.split('-'), [ext])
return [packaging.version.parse(part) for part in parts]
return sorted(names, key=_by_version, reverse=True)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if _is_unpacked_egg(path_item):
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item, 'EGG-INFO')
)
)
return
entries = safe_listdir(path_item)
# for performance, before sorting by version,
# screen entries for only those that will yield
# distributions
filtered = (
entry
for entry in entries
if dist_factory(path_item, entry, only)
)
# scan for .egg and .egg-info in directory
path_item_entries = _by_version_descending(filtered)
for entry in path_item_entries:
fullpath = os.path.join(path_item, entry)
factory = dist_factory(path_item, entry, only)
for dist in factory(fullpath):
yield dist
def dist_factory(path_item, entry, only):
"""
Return a dist_factory for a path_item and entry
"""
lower = entry.lower()
is_meta = any(map(lower.endswith, ('.egg-info', '.dist-info')))
return (
distributions_from_metadata
if is_meta else
find_distributions
if not only and _is_egg_path(entry) else
resolve_egg_link
if not only and lower.endswith('.egg-link') else
NoDists()
)
class NoDists:
"""
>>> bool(NoDists())
False
>>> list(NoDists()('anything'))
[]
"""
def __bool__(self):
return False
if six.PY2:
__nonzero__ = __bool__
def __call__(self, fullpath):
return iter(())
def safe_listdir(path):
"""
Attempt to list contents of path, but suppress some exceptions.
"""
try:
return os.listdir(path)
except (PermissionError, NotADirectoryError):
pass
except OSError as e:
        # Ignore the directory if it does not exist, is not a directory, or
        # if permission is denied
ignorable = (
e.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT)
# Python 2 on Windows needs to be handled this way :(
or getattr(e, "winerror", None) == 267
)
if not ignorable:
raise
return ()
def distributions_from_metadata(path):
root = os.path.dirname(path)
if os.path.isdir(path):
if len(os.listdir(path)) == 0:
# empty metadata dir; skip
return
metadata = PathMetadata(root, path)
else:
metadata = FileMetadata(path)
entry = os.path.basename(path)
yield Distribution.from_location(
root, entry, metadata, precedence=DEVELOP_DIST,
)
def non_empty_lines(path):
"""
Yield non-empty lines from file at path
"""
with open(path) as f:
for line in f:
line = line.strip()
if line:
yield line
def resolve_egg_link(path):
"""
Given a path to an .egg-link, resolve distributions
present in the referenced path.
"""
referenced_paths = non_empty_lines(path)
resolved_paths = (
os.path.join(os.path.dirname(path), ref)
for ref in referenced_paths
)
dist_groups = map(find_distributions, resolved_paths)
return next(dist_groups, ())
register_finder(pkgutil.ImpImporter, find_on_path)
if hasattr(importlib_machinery, 'FileFinder'):
register_finder(importlib_machinery.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer, path_entry, moduleName, module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = types.ModuleType(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module, '__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
_rebuild_mod_path(path, packageName, module)
return subpath
def _rebuild_mod_path(orig_path, package_name, module):
"""
Rebuild module.__path__ ensuring that all entries are ordered
corresponding to their sys.path order
"""
sys_path = [_normalize_cached(p) for p in sys.path]
def safe_sys_path_index(entry):
"""
Workaround for #520 and #513.
"""
try:
return sys_path.index(entry)
except ValueError:
return float('inf')
def position_in_sys_path(path):
"""
Return the ordinal of the path based on its position in sys.path
"""
path_parts = path.split(os.sep)
module_parts = package_name.count('.') + 1
parts = path_parts[:-module_parts]
return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
if not isinstance(orig_path, list):
# Is this behavior useful when module.__path__ is not a list?
return
orig_path.sort(key=position_in_sys_path)
module.__path__[:] = [_normalize_cached(p) for p in orig_path]
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
_imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path, parent = sys.path, None
if '.' in packageName:
parent = '.'.join(packageName.split('.')[:-1])
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent, []).append(packageName)
_namespace_packages.setdefault(packageName, [])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
_imp.release_lock()
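# Usage sketch: a namespace package traditionally invokes this from its
# __init__.py (illustrative):
#
#     __import__('pkg_resources').declare_namespace(__name__)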
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
_imp.acquire_lock()
try:
for package in _namespace_packages.get(parent, ()):
subpath = _handle_ns(package, path_item)
if subpath:
fixup_namespace_packages(subpath, package)
finally:
_imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item) == normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if hasattr(importlib_machinery, 'FileFinder'):
register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(filename))
def _normalize_cached(filename, _cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _is_egg_path(path):
"""
Determine if given path appears to be an egg.
"""
return path.lower().endswith('.egg')
def _is_unpacked_egg(path):
"""
Determine if given path appears to be an unpacked egg.
"""
return (
_is_egg_path(path) and
os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))
)
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a string or sequence"""
if isinstance(strs, six.string_types):
for s in strs.splitlines():
s = s.strip()
# skip blank lines/comments
if s and not s.startswith('#'):
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
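# Illustrative parse of an egg filename with EGG_NAME (the filename is
# hypothetical):
#
#     match = EGG_NAME('FooPkg-1.2-py2.7-linux_x86_64')
#     match.group('name', 'ver', 'pyver', 'plat')
#     # -> ('FooPkg', '1.2', '2.7', 'linux_x86_64')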
class EntryPoint(object):
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = tuple(extras)
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, *args, **kwargs):
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
DeprecationWarning,
stacklevel=2,
)
if require:
self.require(*args, **kwargs)
return self.resolve()
def resolve(self):
"""
Resolve the entry point from its module and attrs.
"""
module = __import__(self.module_name, fromlist=['__name__'], level=0)
try:
return functools.reduce(getattr, self.attrs, module)
except AttributeError as exc:
raise ImportError(str(exc))
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
# Get the requirements for this entry point with all its extras and
# then resolve them. We have to pass `extras` along when resolving so
# that the working set knows what extras we want. Otherwise, for
# dist-info distributions, the working set will assume that the
# requirements for that extra are purely optional and skip over them.
reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer, extras=self.extras)
list(map(working_set.add, items))
pattern = re.compile(
r'\s*'
r'(?P<name>.+?)\s*'
r'=\s*'
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1, extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
m = cls.pattern.match(src)
if not m:
msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
raise ValueError(msg, src)
res = m.groupdict()
extras = cls._parse_extras(res['extras'])
attrs = res['attr'].split('.') if res['attr'] else ()
return cls(res['name'], res['module'], attrs, extras, dist)
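    # Usage sketch for ``parse``/``resolve`` (illustrative; "mypkg.cli:main"
    # is a hypothetical entry point target):
    #
    #     ep = EntryPoint.parse('console = mypkg.cli:main')
    #     ep.name, ep.module_name, ep.attrs
    #     # -> ('console', 'mypkg.cli', ('main',))
    #     func = ep.resolve()   # imports mypkg.cli and returns its ``main``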
@classmethod
def _parse_extras(cls, extras_spec):
if not extras_spec:
return ()
req = Requirement.parse('x' + extras_spec)
if req.specs:
raise ValueError()
return req.extras
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name] = ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data, dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urllib.parse.urlparse(location)
if parsed[-1].startswith('md5='):
return urllib.parse.urlunparse(parsed[:-1] + ('',))
return location
def _version_from_file(lines):
"""
Given an iterable of lines from a Metadata file, return
the value of the Version field, if present, or None otherwise.
"""
def is_version_line(line):
return line.lower().startswith('version:')
version_lines = filter(is_version_line, lines)
line = next(iter(version_lines), '')
_, _, value = line.partition(':')
return safe_version(value.strip()) or None
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(
self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls, location, basename, metadata=None, **kw):
project_name, version, py_version, platform = [None] * 4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
cls = _distributionImpl[ext.lower()]
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name', 'ver', 'pyver', 'plat'
)
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)._reload_version()
def _reload_version(self):
return self
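    # Usage sketch for ``from_filename`` (illustrative; the path is
    # hypothetical):
    #
    #     dist = Distribution.from_filename('/plugins/FooPkg-1.2-py2.7.egg')
    #     dist.project_name, dist.version, dist.py_version
    #     # -> ('FooPkg', '1.2', '2.7')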
@property
def hashcmp(self):
return (
self.parsed_version,
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version or '',
self.platform or '',
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
self._parsed_version = parse_version(self.version)
return self._parsed_version
def _warn_legacy_version(self):
LV = packaging.version.LegacyVersion
is_legacy = isinstance(self._parsed_version, LV)
if not is_legacy:
return
# While an empty version is technically a legacy version and
# is not a valid PEP 440 version, it's also unlikely to
# actually come from someone and instead it is more likely that
# it comes from setuptools attempting to parse a filename and
# including it in the list. So for that we'll gate this warning
# on if the version is anything at all or not.
if not self.version:
return
tmpl = textwrap.dedent("""
'{project_name} ({version})' is being parsed as a legacy,
non PEP 440,
version. You may find odd behavior and sort order.
In particular it will be sorted as less than 0.0. It
is recommended to migrate to PEP 440 compatible
versions.
""").strip().replace('\n', ' ')
warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
@property
def version(self):
try:
return self._version
except AttributeError:
version = _version_from_file(self._get_metadata(self.PKG_INFO))
if version is None:
tmpl = "Missing 'Version:' header and/or %s file"
raise ValueError(tmpl % self.PKG_INFO, self)
return version
@property
def _dep_map(self):
"""
A map of extra to its list of (direct) requirements
for this distribution, including the null extra.
"""
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._filter_extras(self._build_dep_map())
return self.__dep_map
@staticmethod
def _filter_extras(dm):
"""
Given a mapping of extras to dependencies, strip off
environment markers and filter out any dependencies
not matching the markers.
"""
for extra in list(filter(None, dm)):
new_extra = extra
reqs = dm.pop(extra)
new_extra, _, marker = extra.partition(':')
fails_marker = marker and (
invalid_marker(marker)
or not evaluate_marker(marker)
)
if fails_marker:
reqs = []
new_extra = safe_extra(new_extra) or None
dm.setdefault(new_extra, []).extend(reqs)
return dm
def _build_dep_map(self):
dm = {}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
dm.setdefault(extra, []).extend(parse_requirements(reqs))
return dm
def requires(self, extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata(self, name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def activate(self, path=None, replace=False):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path, replace=replace)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-' + self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group, {})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
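    # Usage sketch for the entry point accessors above (illustrative; the
    # group is a real convention, but "mytool" is a hypothetical entry name):
    #
    #     dist.get_entry_map('console_scripts')   # -> {name: EntryPoint, ...}
    #     ep = dist.get_entry_info('console_scripts', 'mytool')
    #     main = dist.load_entry_point('console_scripts', 'mytool')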
def insert_on(self, path, loc=None, replace=False):
"""Ensure self.location is on path
If replace=False (default):
- If location is already in path anywhere, do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent.
- Else: add to the end of path.
If replace=True:
- If location is already on path anywhere (not eggs)
or higher priority than its parent (eggs)
do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent,
removing any lower-priority entries.
- Else: add it to the front of path.
"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath = [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
if replace:
break
else:
# don't modify path (even removing duplicates) if
# found and not replace
return
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
# UNLESS it's already been added to sys.path and replace=False
if (not replace) and nloc in npath[p:]:
return
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
if replace:
path.insert(0, loc)
else:
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p + 1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
def check_version_conflict(self):
if self.key == 'setuptools':
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
return True
def clone(self, **kw):
"""Copy this distribution, substituting in any changed keyword args"""
names = 'project_name version py_version platform location precedence'
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class EggInfoDistribution(Distribution):
def _reload_version(self):
"""
        Packages installed by distutils (e.g. numpy or scipy), which uses an
        older safe_version, can have their version numbers mangled when
        converted to filenames (e.g., 1.11.0.dev0+2329eae becomes
        1.11.0.dev0_2329eae). Such distributions will not be parsed properly
        downstream by Distribution and safe_version, so take the extra step of
        reading the version number from the metadata file itself instead of
        the filename.
"""
md_version = _version_from_file(self._get_metadata(self.PKG_INFO))
if md_version:
self._version = md_version
return self
class DistInfoDistribution(Distribution):
"""
Wrap an actual or potential sys.path entry
w/metadata, .dist-info style.
"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
metadata = self.get_metadata(self.PKG_INFO)
self._pkg_info = email.parser.Parser().parsestr(metadata)
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
reqs.extend(parse_requirements(req))
def reqs_for_extra(extra):
for req in reqs:
if not req.marker or req.marker.evaluate({'extra': extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
s_extra = safe_extra(extra.strip())
dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': EggInfoDistribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args, **kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
def __str__(self):
return ' '.join(self.args)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be a string, or a (possibly-nested) iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
for line in lines:
# Drop comments -- a hash without a space may be in a URL.
if ' #' in line:
line = line[:line.find(' #')]
# If there is a line continuation, drop it, and append the next line.
if line.endswith('\\'):
line = line[:-2].strip()
try:
line += next(lines)
except StopIteration:
return
yield Requirement(line)
class Requirement(packaging.requirements.Requirement):
def __init__(self, requirement_string):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
try:
super(Requirement, self).__init__(requirement_string)
except packaging.requirements.InvalidRequirement as e:
raise RequirementParseError(str(e))
self.unsafe_name = self.name
project_name = safe_name(self.name)
self.project_name, self.key = project_name, project_name.lower()
self.specs = [
(spec.operator, spec.version) for spec in self.specifier]
self.extras = tuple(map(safe_extra, self.extras))
self.hashCmp = (
self.key,
self.specifier,
frozenset(self.extras),
str(self.marker) if self.marker else None,
)
self.__hash = hash(self.hashCmp)
def __eq__(self, other):
return (
isinstance(other, Requirement) and
self.hashCmp == other.hashCmp
)
def __ne__(self, other):
return not self == other
def __contains__(self, item):
if isinstance(item, Distribution):
if item.key != self.key:
return False
item = item.version
# Allow prereleases always in order to match the previous behavior of
# this method. In the future this should be smarter and follow PEP 440
# more accurately.
return self.specifier.contains(item, prereleases=True)
def __hash__(self):
return self.__hash
def __repr__(self):
return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
req, = parse_requirements(s)
return req
def _always_object(classes):
"""
Ensure object appears in the mro even
for old-style classes.
"""
if object not in classes:
return classes + (object,)
return classes
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
for t in types:
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
py31compat.makedirs(dirname, exist_ok=True)
def _bypass_ensure_directory(path):
"""Sandbox-bypassing version of ensure_directory()"""
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(path)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
mkdir(dirname, 0o755)
def split_sections(s):
"""Split a string or iterable thereof into (section, content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
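# Illustrative sketch (not part of the original module) of split_sections on an
# entry_points-style string; any lines before the first header would be reported
# under a None section.
# >>> list(split_sections("[a]\nx = 1\n[b]\ny = 2"))
# [('a', ['x = 1']), ('b', ['y = 2'])]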
def _mkstemp(*args, **kw):
old_open = os.open
try:
# temporarily bypass sandboxing
os.open = os_open
return tempfile.mkstemp(*args, **kw)
finally:
# and then put it back
os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
f(*args, **kwargs)
return f
@_call_aside
def _initialize(g=globals()):
"Set up global resource manager (deliberately not state-saved)"
manager = ResourceManager()
g['_manager'] = manager
g.update(
(name, getattr(manager, name))
for name in dir(manager)
if not name.startswith('_')
)
@_call_aside
def _initialize_master_working_set():
"""
Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk.
"""
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path with replace=False and
# ensure that all distributions added to the working set in the future
# (e.g. by calling ``require()``) will get activated as well,
# with higher priority (replace=True).
tuple(
dist.activate(replace=False)
for dist in working_set
)
add_activation_listener(
lambda dist: dist.activate(replace=True),
existing=False,
)
working_set.entries = []
# match order
list(map(working_set.add_entry, sys.path))
globals().update(locals())
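# Illustrative usage sketch (not part of the original module): after the two
# _call_aside initializers above run at import time, the master WorkingSet's
# bound methods are exported as module-level functions.
# >>> import pkg_resources
# >>> dists = pkg_resources.require("setuptools")  # any installed project name
# >>> dists[0].project_name
# 'setuptools'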
|
athompso/ansible
|
refs/heads/devel
|
contrib/inventory/proxmox.py
|
43
|
#!/usr/bin/env python
# Copyright (C) 2014 Mathieu GAUTHIER-LAFAYE <gauthierl@lapth.cnrs.fr>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import urllib
import urllib2
try:
import json
except ImportError:
import simplejson as json
import os
import sys
from optparse import OptionParser
class ProxmoxNodeList(list):
def get_names(self):
return [node['node'] for node in self]
class ProxmoxQemu(dict):
def get_variables(self):
variables = {}
for key, value in self.iteritems():
variables['proxmox_' + key] = value
return variables
class ProxmoxQemuList(list):
def __init__(self, data=[]):
for item in data:
self.append(ProxmoxQemu(item))
def get_names(self):
return [qemu['name'] for qemu in self if qemu['template'] != 1]
def get_by_name(self, name):
results = [qemu for qemu in self if qemu['name'] == name]
return results[0] if len(results) > 0 else None
def get_variables(self):
variables = {}
for qemu in self:
variables[qemu['name']] = qemu.get_variables()
return variables
class ProxmoxPoolList(list):
def get_names(self):
return [pool['poolid'] for pool in self]
class ProxmoxPool(dict):
def get_members_name(self):
return [member['name'] for member in self['members'] if member['template'] != 1]
class ProxmoxAPI(object):
def __init__(self, options):
self.options = options
self.credentials = None
if not options.url:
raise Exception('Missing mandatory parameter --url (or PROXMOX_URL).')
elif not options.username:
raise Exception('Missing mandatory parameter --username (or PROXMOX_USERNAME).')
elif not options.password:
raise Exception('Missing mandatory parameter --password (or PROXMOX_PASSWORD).')
def auth(self):
request_path = '{}api2/json/access/ticket'.format(self.options.url)
request_params = urllib.urlencode({
'username': self.options.username,
'password': self.options.password,
})
data = json.load(urllib2.urlopen(request_path, request_params))
self.credentials = {
'ticket': data['data']['ticket'],
'CSRFPreventionToken': data['data']['CSRFPreventionToken'],
}
def get(self, url, data=None):
opener = urllib2.build_opener()
opener.addheaders.append(('Cookie', 'PVEAuthCookie={}'.format(self.credentials['ticket'])))
request_path = '{}{}'.format(self.options.url, url)
request = opener.open(request_path, data)
response = json.load(request)
return response['data']
def nodes(self):
return ProxmoxNodeList(self.get('api2/json/nodes'))
def node_qemu(self, node):
return ProxmoxQemuList(self.get('api2/json/nodes/{}/qemu'.format(node)))
def pools(self):
return ProxmoxPoolList(self.get('api2/json/pools'))
def pool(self, poolid):
return ProxmoxPool(self.get('api2/json/pools/{}'.format(poolid)))
def main_list(options):
results = {
'all': {
'hosts': [],
},
'_meta': {
'hostvars': {},
}
}
proxmox_api = ProxmoxAPI(options)
proxmox_api.auth()
for node in proxmox_api.nodes().get_names():
qemu_list = proxmox_api.node_qemu(node)
results['all']['hosts'] += qemu_list.get_names()
results['_meta']['hostvars'].update(qemu_list.get_variables())
# pools
for pool in proxmox_api.pools().get_names():
results[pool] = {
'hosts': proxmox_api.pool(pool).get_members_name(),
}
return results
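# Illustrative shape of the inventory structure built above (host and pool
# names are made up; the proxmox_* hostvars mirror whatever fields the Proxmox
# API returns for each VM):
#
# {
#     "all": {"hosts": ["vm-web01", "vm-db01"]},
#     "_meta": {"hostvars": {"vm-web01": {"proxmox_status": "running", ...}}},
#     "production": {"hosts": ["vm-web01"]}
# }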
def main_host(options):
proxmox_api = ProxmoxAPI(options)
proxmox_api.auth()
for node in proxmox_api.nodes().get_names():
qemu_list = proxmox_api.node_qemu(node)
qemu = qemu_list.get_by_name(options.host)
if qemu:
return qemu.get_variables()
return {}
def main():
parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME')
parser.add_option('--list', action="store_true", default=False, dest="list")
parser.add_option('--host', dest="host")
parser.add_option('--url', default=os.environ.get('PROXMOX_URL'), dest='url')
parser.add_option('--username', default=os.environ.get('PROXMOX_USERNAME'), dest='username')
parser.add_option('--password', default=os.environ.get('PROXMOX_PASSWORD'), dest='password')
parser.add_option('--pretty', action="store_true", default=False, dest='pretty')
(options, args) = parser.parse_args()
if options.list:
data = main_list(options)
elif options.host:
data = main_host(options)
else:
parser.print_help()
sys.exit(1)
indent = None
if options.pretty:
indent = 2
print json.dumps(data, indent=indent)
if __name__ == '__main__':
main()
|
richardnpaul/FWL-Website
|
refs/heads/master
|
lib/python2.7/site-packages/setuptools/tests/test_sandbox.py
|
204
|
"""develop tests
"""
import sys
import os
import shutil
import unittest
import tempfile
from setuptools.sandbox import DirectorySandbox, SandboxViolation
def has_win32com():
"""
Run this to determine if the local machine has win32com, and if it
does, include additional tests.
"""
if not sys.platform.startswith('win32'):
return False
try:
mod = __import__('win32com')
except ImportError:
return False
return True
class TestSandbox(unittest.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.dir)
def test_devnull(self):
if sys.version < '2.4':
return
sandbox = DirectorySandbox(self.dir)
sandbox.run(self._file_writer(os.devnull))
def _file_writer(path):
def do_write():
f = open(path, 'w')
f.write('xxx')
f.close()
return do_write
_file_writer = staticmethod(_file_writer)
if has_win32com():
def test_win32com(self):
"""
win32com should not be prevented from caching COM interfaces
in gen_py.
"""
import win32com
gen_py = win32com.__gen_path__
target = os.path.join(gen_py, 'test_write')
sandbox = DirectorySandbox(self.dir)
try:
try:
sandbox.run(self._file_writer(target))
except SandboxViolation:
self.fail("Could not create gen_py file due to SandboxViolation")
finally:
if os.path.exists(target): os.remove(target)
if __name__ == '__main__':
unittest.main()
|
pilou-/ansible
|
refs/heads/devel
|
lib/ansible/modules/system/hostname.py
|
11
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Hiroaki Nakamura <hnakamur@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: hostname
author:
- Adrian Likins (@alikins)
- Hideki Saito (@saito-hideki)
version_added: "1.4"
short_description: Manage hostname
requirements: [ hostname ]
description:
- Set system's hostname, supports most OSs/Distributions, including those using systemd.
- Note, this module does *NOT* modify C(/etc/hosts). You need to modify it yourself using other modules like template or replace.
- Windows, HP-UX and AIX are not currently supported.
options:
name:
description:
- Name of the host
required: true
'''
EXAMPLES = '''
- hostname:
name: web01
'''
import os
import socket
import traceback
from ansible.module_utils.basic import (
AnsibleModule,
get_distribution,
get_distribution_version,
get_platform,
load_platform_subclass,
)
from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector
from ansible.module_utils._text import to_native
class UnimplementedStrategy(object):
def __init__(self, module):
self.module = module
def update_current_and_permanent_hostname(self):
self.unimplemented_error()
def update_current_hostname(self):
self.unimplemented_error()
def update_permanent_hostname(self):
self.unimplemented_error()
def get_current_hostname(self):
self.unimplemented_error()
def set_current_hostname(self, name):
self.unimplemented_error()
def get_permanent_hostname(self):
self.unimplemented_error()
def set_permanent_hostname(self, name):
self.unimplemented_error()
def unimplemented_error(self):
platform = get_platform()
distribution = get_distribution()
if distribution is not None:
msg_platform = '%s (%s)' % (platform, distribution)
else:
msg_platform = platform
self.module.fail_json(
msg='hostname module cannot be used on platform %s' % msg_platform)
class Hostname(object):
"""
This is a generic Hostname manipulation class that is subclassed
based on platform.
    A subclass may wish to assign a different strategy instance to self.strategy.
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
strategy_class = UnimplementedStrategy
def __new__(cls, *args, **kwargs):
return load_platform_subclass(Hostname, args, kwargs)
def __init__(self, module):
self.module = module
self.name = module.params['name']
if self.platform == 'Linux' and ServiceMgrFactCollector.is_systemd_managed(module):
self.strategy = SystemdStrategy(module)
else:
self.strategy = self.strategy_class(module)
def update_current_and_permanent_hostname(self):
return self.strategy.update_current_and_permanent_hostname()
def get_current_hostname(self):
return self.strategy.get_current_hostname()
def set_current_hostname(self, name):
self.strategy.set_current_hostname(name)
def get_permanent_hostname(self):
return self.strategy.get_permanent_hostname()
def set_permanent_hostname(self, name):
self.strategy.set_permanent_hostname(name)
class GenericStrategy(object):
"""
This is a generic Hostname manipulation strategy class.
A subclass may wish to override some or all of these methods.
- get_current_hostname()
- get_permanent_hostname()
- set_current_hostname(name)
- set_permanent_hostname(name)
"""
def __init__(self, module):
self.module = module
self.hostname_cmd = self.module.get_bin_path('hostname', True)
self.changed = False
def update_current_and_permanent_hostname(self):
self.update_current_hostname()
self.update_permanent_hostname()
return self.changed
def update_current_hostname(self):
name = self.module.params['name']
current_name = self.get_current_hostname()
if current_name != name:
if not self.module.check_mode:
self.set_current_hostname(name)
self.changed = True
def update_permanent_hostname(self):
name = self.module.params['name']
permanent_name = self.get_permanent_hostname()
if permanent_name != name:
if not self.module.check_mode:
self.set_permanent_hostname(name)
self.changed = True
def get_current_hostname(self):
cmd = [self.hostname_cmd]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_current_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
def get_permanent_hostname(self):
return None
def set_permanent_hostname(self, name):
pass
class DebianStrategy(GenericStrategy):
"""
This is a Debian family Hostname manipulation strategy class - it edits
the /etc/hostname file.
"""
HOSTNAME_FILE = '/etc/hostname'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError as e:
self.module.fail_json(msg="failed to write file: %s" %
to_native(e), exception=traceback.format_exc())
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to read hostname: %s" %
to_native(e), exception=traceback.format_exc())
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to update hostname: %s" %
to_native(e), exception=traceback.format_exc())
class SLESStrategy(GenericStrategy):
"""
This is a SLES Hostname strategy class - it edits the
/etc/HOSTNAME file.
"""
HOSTNAME_FILE = '/etc/HOSTNAME'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError as e:
self.module.fail_json(msg="failed to write file: %s" %
to_native(e), exception=traceback.format_exc())
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to read hostname: %s" %
to_native(e), exception=traceback.format_exc())
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to update hostname: %s" %
to_native(e), exception=traceback.format_exc())
class RedHatStrategy(GenericStrategy):
"""
This is a Redhat Hostname strategy class - it edits the
/etc/sysconfig/network file.
"""
NETWORK_FILE = '/etc/sysconfig/network'
def get_permanent_hostname(self):
try:
            f = open(self.NETWORK_FILE)
try:
for line in f.readlines():
if line.startswith('HOSTNAME'):
k, v = line.split('=')
return v.strip()
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to read hostname: %s" %
to_native(e), exception=traceback.format_exc())
def set_permanent_hostname(self, name):
try:
lines = []
found = False
            f = open(self.NETWORK_FILE)
try:
for line in f.readlines():
if line.startswith('HOSTNAME'):
lines.append("HOSTNAME=%s\n" % name)
found = True
else:
lines.append(line)
finally:
f.close()
if not found:
lines.append("HOSTNAME=%s\n" % name)
f = open(self.NETWORK_FILE, 'w+')
try:
f.writelines(lines)
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to update hostname: %s" %
to_native(e), exception=traceback.format_exc())
class AlpineStrategy(GenericStrategy):
"""
This is a Alpine Linux Hostname manipulation strategy class - it edits
the /etc/hostname file then run hostname -F /etc/hostname.
"""
HOSTNAME_FILE = '/etc/hostname'
def update_current_and_permanent_hostname(self):
self.update_permanent_hostname()
self.update_current_hostname()
return self.changed
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError as e:
self.module.fail_json(msg="failed to write file: %s" %
to_native(e), exception=traceback.format_exc())
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to read hostname: %s" %
to_native(e), exception=traceback.format_exc())
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to update hostname: %s" %
to_native(e), exception=traceback.format_exc())
def set_current_hostname(self, name):
cmd = [self.hostname_cmd, '-F', self.HOSTNAME_FILE]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
class SystemdStrategy(GenericStrategy):
"""
This is a Systemd hostname manipulation strategy class - it uses
the hostnamectl command.
"""
def get_current_hostname(self):
cmd = ['hostname']
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_current_hostname(self, name):
if len(name) > 64:
self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
cmd = ['hostnamectl', '--transient', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
def get_permanent_hostname(self):
cmd = ['hostnamectl', '--static', 'status']
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_permanent_hostname(self, name):
if len(name) > 64:
self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
cmd = ['hostnamectl', '--pretty', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
cmd = ['hostnamectl', '--static', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
class OpenRCStrategy(GenericStrategy):
"""
This is a Gentoo (OpenRC) Hostname manipulation strategy class - it edits
the /etc/conf.d/hostname file.
"""
HOSTNAME_FILE = '/etc/conf.d/hostname'
def get_permanent_hostname(self):
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
for line in f:
line = line.strip()
if line.startswith('hostname='):
return line[10:].strip('"')
except Exception as e:
self.module.fail_json(msg="failed to read hostname: %s" %
to_native(e), exception=traceback.format_exc())
finally:
f.close()
return None
def set_permanent_hostname(self, name):
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
lines = [x.strip() for x in f]
for i, line in enumerate(lines):
if line.startswith('hostname='):
lines[i] = 'hostname="%s"' % name
break
f.close()
f = open(self.HOSTNAME_FILE, 'w')
f.write('\n'.join(lines) + '\n')
except Exception as e:
self.module.fail_json(msg="failed to update hostname: %s" %
to_native(e), exception=traceback.format_exc())
finally:
f.close()
class OpenBSDStrategy(GenericStrategy):
"""
    This is an OpenBSD family Hostname manipulation strategy class - it edits
the /etc/myname file.
"""
HOSTNAME_FILE = '/etc/myname'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError as e:
self.module.fail_json(msg="failed to write file: %s" %
to_native(e), exception=traceback.format_exc())
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to read hostname: %s" %
to_native(e), exception=traceback.format_exc())
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to update hostname: %s" %
to_native(e), exception=traceback.format_exc())
class SolarisStrategy(GenericStrategy):
"""
    This is a Solaris 11 or later Hostname manipulation strategy class - it
    executes the hostname command.
"""
def set_current_hostname(self, name):
cmd_option = '-t'
cmd = [self.hostname_cmd, cmd_option, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
def get_permanent_hostname(self):
fmri = 'svc:/system/identity:node'
pattern = 'config/nodename'
cmd = '/usr/sbin/svccfg -s %s listprop -o value %s' % (fmri, pattern)
rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_permanent_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
class FreeBSDStrategy(GenericStrategy):
"""
This is a FreeBSD hostname manipulation strategy class - it edits
the /etc/rc.conf.d/hostname file.
"""
HOSTNAME_FILE = '/etc/rc.conf.d/hostname'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("hostname=temporarystub\n")
except IOError as e:
self.module.fail_json(msg="failed to write file: %s" %
to_native(e), exception=traceback.format_exc())
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
for line in f:
line = line.strip()
if line.startswith('hostname='):
return line[10:].strip('"')
except Exception as e:
self.module.fail_json(msg="failed to read hostname: %s" %
to_native(e), exception=traceback.format_exc())
finally:
f.close()
return None
def set_permanent_hostname(self, name):
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
lines = [x.strip() for x in f]
for i, line in enumerate(lines):
if line.startswith('hostname='):
lines[i] = 'hostname="%s"' % name
break
f.close()
f = open(self.HOSTNAME_FILE, 'w')
f.write('\n'.join(lines) + '\n')
except Exception as e:
self.module.fail_json(msg="failed to update hostname: %s" %
to_native(e), exception=traceback.format_exc())
finally:
f.close()
class FedoraHostname(Hostname):
platform = 'Linux'
distribution = 'Fedora'
strategy_class = SystemdStrategy
class SLESHostname(Hostname):
platform = 'Linux'
distribution = 'Sles'
try:
distribution_version = get_distribution_version()
# cast to float may raise ValueError on non SLES, we use float for a little more safety over int
if distribution_version and 10 <= float(distribution_version) <= 12:
strategy_class = SLESStrategy
else:
raise ValueError()
except ValueError:
strategy_class = UnimplementedStrategy
class OpenSUSEHostname(Hostname):
platform = 'Linux'
distribution = 'Opensuse'
strategy_class = SystemdStrategy
class ArchHostname(Hostname):
platform = 'Linux'
distribution = 'Arch'
strategy_class = SystemdStrategy
class RHELHostname(Hostname):
platform = 'Linux'
distribution = 'Redhat'
strategy_class = RedHatStrategy
class CentOSHostname(Hostname):
platform = 'Linux'
distribution = 'Centos'
strategy_class = RedHatStrategy
class CloudlinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Cloudlinux'
strategy_class = RedHatStrategy
class ScientificHostname(Hostname):
platform = 'Linux'
distribution = 'Scientific'
strategy_class = RedHatStrategy
class OracleLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Oracle'
strategy_class = RedHatStrategy
class VirtuozzoLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Virtuozzo'
strategy_class = RedHatStrategy
class AmazonLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Amazon'
strategy_class = RedHatStrategy
class DebianHostname(Hostname):
platform = 'Linux'
distribution = 'Debian'
strategy_class = DebianStrategy
class KaliHostname(Hostname):
platform = 'Linux'
distribution = 'Kali'
strategy_class = DebianStrategy
class UbuntuHostname(Hostname):
platform = 'Linux'
distribution = 'Ubuntu'
strategy_class = DebianStrategy
class LinuxmintHostname(Hostname):
platform = 'Linux'
distribution = 'Linuxmint'
strategy_class = DebianStrategy
class LinaroHostname(Hostname):
platform = 'Linux'
distribution = 'Linaro'
strategy_class = DebianStrategy
class DevuanHostname(Hostname):
platform = 'Linux'
distribution = 'Devuan'
strategy_class = DebianStrategy
class RaspbianHostname(Hostname):
platform = 'Linux'
distribution = 'Raspbian'
strategy_class = DebianStrategy
class GentooHostname(Hostname):
platform = 'Linux'
distribution = 'Gentoo'
strategy_class = OpenRCStrategy
class ALTLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Altlinux'
strategy_class = RedHatStrategy
class AlpineLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Alpine'
strategy_class = AlpineStrategy
class OpenBSDHostname(Hostname):
platform = 'OpenBSD'
distribution = None
strategy_class = OpenBSDStrategy
class SolarisHostname(Hostname):
platform = 'SunOS'
distribution = None
strategy_class = SolarisStrategy
class FreeBSDHostname(Hostname):
platform = 'FreeBSD'
distribution = None
strategy_class = FreeBSDStrategy
class NetBSDHostname(Hostname):
platform = 'NetBSD'
distribution = None
strategy_class = FreeBSDStrategy
class NeonHostname(Hostname):
platform = 'Linux'
distribution = 'Neon'
strategy_class = DebianStrategy
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True)
),
supports_check_mode=True,
)
hostname = Hostname(module)
name = module.params['name']
current_hostname = hostname.get_current_hostname()
permanent_hostname = hostname.get_permanent_hostname()
changed = hostname.update_current_and_permanent_hostname()
if name != current_hostname:
name_before = current_hostname
elif name != permanent_hostname:
name_before = permanent_hostname
kw = dict(changed=changed, name=name,
ansible_facts=dict(ansible_hostname=name.split('.')[0],
ansible_nodename=name,
ansible_fqdn=socket.getfqdn(),
ansible_domain='.'.join(socket.getfqdn().split('.')[1:])))
if changed:
kw['diff'] = {'after': 'hostname = ' + name + '\n',
'before': 'hostname = ' + name_before + '\n'}
module.exit_json(**kw)
if __name__ == '__main__':
main()
|
gtest-org/test10
|
refs/heads/master
|
jenkins_jobs/modules/parameters.py
|
8
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The Parameters module allows you to specify build parameters for a job.
**Component**: parameters
:Macro: parameter
:Entry Point: jenkins_jobs.parameters
Example::
job:
name: test_job
parameters:
- string:
name: FOO
default: bar
description: "A parameter named FOO, defaults to 'bar'."
"""
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
def base_param(parser, xml_parent, data, do_default, ptype):
pdef = XML.SubElement(xml_parent, ptype)
XML.SubElement(pdef, 'name').text = data['name']
XML.SubElement(pdef, 'description').text = data.get('description', '')
if do_default:
default = data.get('default', None)
if default:
XML.SubElement(pdef, 'defaultValue').text = default
else:
XML.SubElement(pdef, 'defaultValue')
return pdef
def string_param(parser, xml_parent, data):
"""yaml: string
A string parameter.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
Example::
parameters:
- string:
name: FOO
default: bar
description: "A parameter named FOO, defaults to 'bar'."
"""
base_param(parser, xml_parent, data, True,
'hudson.model.StringParameterDefinition')
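# For reference, the element produced for the example above (derived from
# base_param; indentation added for readability) looks like:
#
#   <hudson.model.StringParameterDefinition>
#     <name>FOO</name>
#     <description>A parameter named FOO, defaults to 'bar'.</description>
#     <defaultValue>bar</defaultValue>
#   </hudson.model.StringParameterDefinition>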
def password_param(parser, xml_parent, data):
"""yaml: password
A password parameter.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
Example::
parameters:
- password:
name: FOO
default: 1HSC0Ts6E161FysGf+e1xasgsHkgleLh09JUTYnipPvw=
description: "A parameter named FOO."
"""
base_param(parser, xml_parent, data, True,
'hudson.model.PasswordParameterDefinition')
def bool_param(parser, xml_parent, data):
"""yaml: bool
A boolean parameter.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
Example::
parameters:
- bool:
name: FOO
default: false
description: "A parameter named FOO, defaults to 'false'."
"""
data['default'] = str(data.get('default', False)).lower()
base_param(parser, xml_parent, data, True,
'hudson.model.BooleanParameterDefinition')
def file_param(parser, xml_parent, data):
"""yaml: file
A file parameter.
:arg str name: the target location for the file upload
:arg str description: a description of the parameter (optional)
Example::
parameters:
- file:
name: test.txt
description: "Upload test.txt."
"""
base_param(parser, xml_parent, data, False,
'hudson.model.FileParameterDefinition')
def text_param(parser, xml_parent, data):
"""yaml: text
A text parameter.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
Example::
parameters:
- text:
name: FOO
default: bar
description: "A parameter named FOO, defaults to 'bar'."
"""
base_param(parser, xml_parent, data, True,
'hudson.model.TextParameterDefinition')
def label_param(parser, xml_parent, data):
"""yaml: label
A node label parameter.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
Example::
parameters:
- label:
name: node
default: precise
description: "The node on which to run the job"
"""
base_param(parser, xml_parent, data, True,
'org.jvnet.jenkins.plugins.nodelabelparameter.'
'LabelParameterDefinition')
def choice_param(parser, xml_parent, data):
"""yaml: choice
A single selection parameter.
:arg str name: the name of the parameter
:arg list choices: the available choices
:arg str description: a description of the parameter (optional)
Example::
parameters:
- choice:
name: project
choices:
- nova
- glance
description: "On which project to run?"
"""
pdef = base_param(parser, xml_parent, data, False,
'hudson.model.ChoiceParameterDefinition')
choices = XML.SubElement(pdef, 'choices',
{'class': 'java.util.Arrays$ArrayList'})
a = XML.SubElement(choices, 'a', {'class': 'string-array'})
for choice in data['choices']:
XML.SubElement(a, 'string').text = choice
def validating_string_param(parser, xml_parent, data):
"""yaml: validating-string
A validating string parameter
Requires the Jenkins `Validating String Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/
Validating+String+Parameter+Plugin>`_
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
:arg str regex: a regular expression to validate the string
:arg str msg: a message to display upon failed validation
Example::
parameters:
- validating-string:
name: FOO
default: bar
description: "A parameter named FOO, defaults to 'bar'."
regex: [A-Za-z]*
msg: Your entered value failed validation
"""
pdef = base_param(parser, xml_parent, data, True,
'hudson.plugins.validating__string__parameter.'
'ValidatingStringParameterDefinition')
XML.SubElement(pdef, 'regex').text = data['regex']
XML.SubElement(pdef, 'failedValidationMessage').text = data['msg']
def svn_tags_param(parser, xml_parent, data):
"""yaml: svn-tags
    An SVN tag parameter
Requires the Jenkins `Parameterized Trigger Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/
Parameterized+Trigger+Plugin>`_
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
:arg str url: the url to list tags from
:arg str filter: the regular expression to filter tags
Example::
parameters:
- svn-tags:
name: BRANCH_NAME
default: release
description: A parameter named BRANCH_NAME default is release
url: http://svn.example.com/repo
filter: [A-za-z0-9]*
"""
pdef = base_param(parser, xml_parent, data, True,
'hudson.scm.listtagsparameter.'
'ListSubversionTagsParameterDefinition')
XML.SubElement(pdef, 'tagsDir').text = data['url']
XML.SubElement(pdef, 'tagsFilter').text = data.get('filter', None)
XML.SubElement(pdef, 'reverseByDate').text = "true"
XML.SubElement(pdef, 'reverseByName').text = "false"
XML.SubElement(pdef, 'maxTags').text = "100"
XML.SubElement(pdef, 'uuid').text = "1-1-1-1-1"
def dynamic_choice_param(parser, xml_parent, data):
"""yaml: dynamic-choice
Dynamic Choice Parameter
Requires the Jenkins `Jenkins Dynamic Parameter Plug-in.
<https://wiki.jenkins-ci.org/display/JENKINS/
Jenkins+Dynamic+Parameter+Plug-in>`_
:arg str name: the name of the parameter
:arg str description: a description of the parameter (optional)
:arg str script: Groovy expression which generates the potential choices.
:arg bool remote: the script will be executed on the slave where the build
is started (default is false)
:arg str classpath: class path for script (optional)
:arg bool read-only: user can't modify parameter once populated
(default is false)
Example::
parameters:
- dynamic-choice:
name: OPTIONS
description: "Available options"
script: "['optionA', 'optionB']"
remote: false
read-only: false
"""
dynamic_param_common(parser, xml_parent, data, 'ChoiceParameterDefinition')
def dynamic_string_param(parser, xml_parent, data):
"""yaml: dynamic-string
Dynamic Parameter
Requires the Jenkins `Jenkins Dynamic Parameter Plug-in.
<https://wiki.jenkins-ci.org/display/JENKINS/
Jenkins+Dynamic+Parameter+Plug-in>`_
:arg str name: the name of the parameter
:arg str description: a description of the parameter (optional)
:arg str script: Groovy expression which generates the potential choices
:arg bool remote: the script will be executed on the slave where the build
is started (default is false)
:arg str classpath: class path for script (optional)
:arg bool read-only: user can't modify parameter once populated
(default is false)
Example::
parameters:
- dynamic-string:
name: FOO
description: "A parameter named FOO, defaults to 'bar'."
script: "bar"
remote: false
read-only: false
"""
dynamic_param_common(parser, xml_parent, data, 'StringParameterDefinition')
def dynamic_choice_scriptler_param(parser, xml_parent, data):
"""yaml: dynamic-choice-scriptler
Dynamic Choice Parameter (Scriptler)
Requires the Jenkins `Jenkins Dynamic Parameter Plug-in.
<https://wiki.jenkins-ci.org/display/JENKINS/
Jenkins+Dynamic+Parameter+Plug-in>`_
:arg str name: the name of the parameter
:arg str description: a description of the parameter (optional)
:arg str script-id: Groovy script which generates the default value
:arg list parameters: parameters to corresponding script
:Parameter: * **name** (`str`) Parameter name
* **value** (`str`) Parameter value
:arg bool remote: the script will be executed on the slave where the build
is started (default is false)
:arg bool read-only: user can't modify parameter once populated
(default is false)
Example::
parameters:
- dynamic-choice-scriptler:
name: OPTIONS
description: "Available options"
script-id: "scriptid.groovy"
parameters:
- name: param1
value: value1
- name: param2
value: value2
remote: false
read-only: false
"""
dynamic_scriptler_param_common(parser, xml_parent, data,
'ScriptlerChoiceParameterDefinition')
def dynamic_string_scriptler_param(parser, xml_parent, data):
"""yaml: dynamic-string-scriptler
Dynamic Parameter (Scriptler)
Requires the Jenkins `Jenkins Dynamic Parameter Plug-in.
<https://wiki.jenkins-ci.org/display/JENKINS/
Jenkins+Dynamic+Parameter+Plug-in>`_
:arg str name: the name of the parameter
:arg str description: a description of the parameter (optional)
:arg str script-id: Groovy script which generates the default value
:arg list parameters: parameters to corresponding script
:Parameter: * **name** (`str`) Parameter name
* **value** (`str`) Parameter value
:arg bool remote: the script will be executed on the slave where the build
is started (default is false)
:arg bool read-only: user can't modify parameter once populated
(default is false)
Example::
parameters:
- dynamic-string-scriptler:
name: FOO
description: "A parameter named FOO, defaults to 'bar'."
script-id: "scriptid.groovy"
parameters:
- name: param1
value: value1
- name: param2
value: value2
remote: false
read-only: false
"""
dynamic_scriptler_param_common(parser, xml_parent, data,
'ScriptlerStringParameterDefinition')
def dynamic_param_common(parser, xml_parent, data, ptype):
pdef = base_param(parser, xml_parent, data, False,
'com.seitenbau.jenkins.plugins.dynamicparameter.'
+ ptype)
XML.SubElement(pdef, '__remote').text = str(
data.get('remote', False)).lower()
XML.SubElement(pdef, '__script').text = data.get('script', None)
localBaseDir = XML.SubElement(pdef, '__localBaseDirectory',
{'serialization': 'custom'})
filePath = XML.SubElement(localBaseDir, 'hudson.FilePath')
default = XML.SubElement(filePath, 'default')
XML.SubElement(filePath, 'boolean').text = "true"
XML.SubElement(default, 'remote').text = \
"/var/lib/jenkins/dynamic_parameter/classpath"
XML.SubElement(pdef, '__remoteBaseDirectory').text = \
"dynamic_parameter_classpath"
XML.SubElement(pdef, '__classPath').text = data.get('classpath', None)
XML.SubElement(pdef, 'readonlyInputField').text = str(
data.get('read-only', False)).lower()
def dynamic_scriptler_param_common(parser, xml_parent, data, ptype):
pdef = base_param(parser, xml_parent, data, False,
'com.seitenbau.jenkins.plugins.dynamicparameter.'
'scriptler.' + ptype)
XML.SubElement(pdef, '__remote').text = str(
data.get('remote', False)).lower()
XML.SubElement(pdef, '__scriptlerScriptId').text = data.get(
'script-id', None)
parametersXML = XML.SubElement(pdef, '__parameters')
parameters = data.get('parameters', [])
if parameters:
for parameter in parameters:
parameterXML = XML.SubElement(parametersXML,
'com.seitenbau.jenkins.plugins.'
'dynamicparameter.scriptler.'
'ScriptlerParameterDefinition_'
'-ScriptParameter')
XML.SubElement(parameterXML, 'name').text = parameter['name']
XML.SubElement(parameterXML, 'value').text = parameter['value']
XML.SubElement(pdef, 'readonlyInputField').text = str(data.get(
'read-only', False)).lower()
class Parameters(jenkins_jobs.modules.base.Base):
sequence = 21
component_type = 'parameter'
component_list_type = 'parameters'
def gen_xml(self, parser, xml_parent, data):
properties = xml_parent.find('properties')
if properties is None:
properties = XML.SubElement(xml_parent, 'properties')
parameters = data.get('parameters', [])
if parameters:
pdefp = XML.SubElement(properties,
'hudson.model.ParametersDefinitionProperty')
pdefs = XML.SubElement(pdefp, 'parameterDefinitions')
for param in parameters:
self.registry.dispatch('parameter',
parser, pdefs, param)
|
mt2d2/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/html5lib/html5lib/tokenizer.py
|
1710
|
from __future__ import absolute_import, division, unicode_literals
try:
chr = unichr # flake8: noqa
except NameError:
pass
from collections import deque
from .constants import spaceCharacters
from .constants import entities
from .constants import asciiLetters, asciiUpper2Lower
from .constants import digits, hexDigits, EOF
from .constants import tokenTypes, tagTokenTypes
from .constants import replacementCharacters
from .inputstream import HTMLInputStream
from .trie import Trie
entitiesTrie = Trie(entities)
class HTMLTokenizer(object):
""" This class takes care of tokenizing HTML.
* self.currentToken
Holds the token that is currently being processed.
* self.state
      Holds a reference to the bound method implementing the current tokenizer state.
    * self.stream
      Points to the HTMLInputStream object.
"""
def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
lowercaseElementName=True, lowercaseAttrName=True, parser=None):
self.stream = HTMLInputStream(stream, encoding, parseMeta, useChardet)
self.parser = parser
# Perform case conversions?
self.lowercaseElementName = lowercaseElementName
self.lowercaseAttrName = lowercaseAttrName
# Setup the initial tokenizer state
self.escapeFlag = False
self.lastFourChars = []
self.state = self.dataState
self.escape = False
# The current token being created
self.currentToken = None
super(HTMLTokenizer, self).__init__()
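    # Illustrative usage sketch (not part of the original class): the tokenizer
    # is consumed by iterating it; each item is a token dict keyed by "type"
    # plus "name"/"data" where applicable.
    # >>> tokens = list(HTMLTokenizer("<p class=a>hi</p>"))
    # # yields a StartTag token for "p" (carrying its attributes), a Characters
    # # token for "hi", and an EndTag token for "p".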
def __iter__(self):
""" This is where the magic happens.
        We do our usual processing through the states, and when we have a token
        to return we yield it, which pauses processing until the next token
        is requested.
"""
self.tokenQueue = deque([])
# Start processing. When EOF is reached self.state will return False
# instead of True and the loop will terminate.
while self.state():
while self.stream.errors:
yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
while self.tokenQueue:
yield self.tokenQueue.popleft()
def consumeNumberEntity(self, isHex):
"""This function returns either U+FFFD or the character based on the
decimal or hexadecimal representation. It also discards ";" if present.
        If it is not present, a ParseError token is appended to self.tokenQueue.
"""
allowed = digits
radix = 10
if isHex:
allowed = hexDigits
radix = 16
charStack = []
# Consume all the characters that are in range while making sure we
# don't hit an EOF.
c = self.stream.char()
while c in allowed and c is not EOF:
charStack.append(c)
c = self.stream.char()
# Convert the set of characters consumed to an int.
charAsInt = int("".join(charStack), radix)
# Certain characters get replaced with others
if charAsInt in replacementCharacters:
char = replacementCharacters[charAsInt]
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
elif ((0xD800 <= charAsInt <= 0xDFFF) or
(charAsInt > 0x10FFFF)):
char = "\uFFFD"
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
else:
# Should speed up this check somehow (e.g. move the set to a constant)
if ((0x0001 <= charAsInt <= 0x0008) or
(0x000E <= charAsInt <= 0x001F) or
(0x007F <= charAsInt <= 0x009F) or
(0xFDD0 <= charAsInt <= 0xFDEF) or
charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
0xFFFFF, 0x10FFFE, 0x10FFFF])):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
try:
# Try/except needed as UCS-2 Python builds' unichar only works
# within the BMP.
char = chr(charAsInt)
except ValueError:
v = charAsInt - 0x10000
char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF))
# Discard the ; if present. Otherwise, put it back on the queue and
# invoke parseError on parser.
if c != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"numeric-entity-without-semicolon"})
self.stream.unget(c)
return char
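    # For example, with the stream positioned just after "&#x" (so isHex is
    # True), the input "41;" is consumed and "A" (U+0041) is returned, while a
    # code point in the surrogate range or above U+10FFFF yields "\uFFFD" plus
    # a ParseError token, as handled above.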
def consumeEntity(self, allowedChar=None, fromAttribute=False):
# Initialise to the default output for when no entity is matched
output = "&"
charStack = [self.stream.char()]
if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&")
or (allowedChar is not None and allowedChar == charStack[0])):
self.stream.unget(charStack[0])
elif charStack[0] == "#":
# Read the next character to see if it's hex or decimal
hex = False
charStack.append(self.stream.char())
if charStack[-1] in ("x", "X"):
hex = True
charStack.append(self.stream.char())
# charStack[-1] should be the first digit
if (hex and charStack[-1] in hexDigits) \
or (not hex and charStack[-1] in digits):
# At least one digit found, so consume the whole number
self.stream.unget(charStack[-1])
output = self.consumeNumberEntity(hex)
else:
# No digits found
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "expected-numeric-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
            # At this point in the process we might have a named entity. Entities
            # are stored in the global variable "entities".
            #
            # Consume characters and compare them to a substring of the
            # entity names in the list until the substring no longer matches.
while (charStack[-1] is not EOF):
if not entitiesTrie.has_keys_with_prefix("".join(charStack)):
break
charStack.append(self.stream.char())
# At this point we have a string that starts with some characters
# that may match an entity
            # Try to find the longest entity the string will match to take care
            # of &noti for instance.
try:
entityName = entitiesTrie.longest_prefix("".join(charStack[:-1]))
entityLength = len(entityName)
except KeyError:
entityName = None
if entityName is not None:
if entityName[-1] != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"named-entity-without-semicolon"})
if (entityName[-1] != ";" and fromAttribute and
(charStack[entityLength] in asciiLetters or
charStack[entityLength] in digits or
charStack[entityLength] == "=")):
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
output = entities[entityName]
self.stream.unget(charStack.pop())
output += "".join(charStack[entityLength:])
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-named-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
if fromAttribute:
self.currentToken["data"][-1][1] += output
else:
if output in spaceCharacters:
tokenType = "SpaceCharacters"
else:
tokenType = "Characters"
self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output})
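    # For example, consuming "amp;" after an "&" appends a Characters token
    # with data "&". For "&notin;" the longest-prefix lookup resolves the full
    # "notin;" entity, whereas "&noti" falls back to the shorter legacy "not"
    # entity (emitting a missing-semicolon ParseError) -- the case the comment
    # above calls out.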
def processEntityInAttribute(self, allowedChar):
"""This method replaces the need for "entityInAttributeValueState".
"""
self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
def emitCurrentToken(self):
"""This method is a generic handler for emitting the tags. It also sets
the state to "data" because that's what's needed after a token has been
emitted.
"""
token = self.currentToken
# Add token to the queue to be yielded
if (token["type"] in tagTokenTypes):
if self.lowercaseElementName:
token["name"] = token["name"].translate(asciiUpper2Lower)
if token["type"] == tokenTypes["EndTag"]:
if token["data"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "attributes-in-end-tag"})
if token["selfClosing"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "self-closing-flag-on-end-tag"})
self.tokenQueue.append(token)
self.state = self.dataState
# Below are the various tokenizer states worked out.
def dataState(self):
data = self.stream.char()
if data == "&":
self.state = self.entityDataState
elif data == "<":
self.state = self.tagOpenState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\u0000"})
elif data is EOF:
# Tokenization ends.
return False
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def entityDataState(self):
self.consumeEntity()
self.state = self.dataState
return True
def rcdataState(self):
data = self.stream.char()
if data == "&":
self.state = self.characterReferenceInRcdata
elif data == "<":
self.state = self.rcdataLessThanSignState
elif data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def characterReferenceInRcdata(self):
self.consumeEntity()
self.state = self.rcdataState
return True
def rawtextState(self):
data = self.stream.char()
if data == "<":
self.state = self.rawtextLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataState(self):
data = self.stream.char()
if data == "<":
self.state = self.scriptDataLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def plaintextState(self):
data = self.stream.char()
if data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + self.stream.charsUntil("\u0000")})
return True
def tagOpenState(self):
data = self.stream.char()
if data == "!":
self.state = self.markupDeclarationOpenState
elif data == "/":
self.state = self.closeTagOpenState
elif data in asciiLetters:
self.currentToken = {"type": tokenTypes["StartTag"],
"name": data, "data": [],
"selfClosing": False,
"selfClosingAcknowledged": False}
self.state = self.tagNameState
elif data == ">":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-right-bracket"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"})
self.state = self.dataState
elif data == "?":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-question-mark"})
self.stream.unget(data)
self.state = self.bogusCommentState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.dataState
return True
def closeTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
"data": [], "selfClosing": False}
self.state = self.tagNameState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-right-bracket"})
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-eof"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.state = self.dataState
else:
# XXX data can be _'_...
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-char",
"datavars": {"data": data}})
self.stream.unget(data)
self.state = self.bogusCommentState
return True
def tagNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-tag-name"})
self.state = self.dataState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
else:
self.currentToken["name"] += data
# (Don't use charsUntil here, because tag names are
# very short and it's faster to not do anything fancy)
return True
def rcdataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rcdataEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rcdataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rawtextLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rawtextEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rawtextEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rawtextState
return True
def scriptDataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEndTagOpenState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"})
self.state = self.scriptDataEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.scriptDataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapeStartDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.state = self.dataState
else:
chars = self.stream.charsUntil(("<", "-", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEscapedEndTagOpenState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data})
self.temporaryBuffer = data
self.state = self.scriptDataDoubleEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer = data
self.state = self.scriptDataEscapedEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapeStartState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataDoubleEscapedState
else:
self.state = self.scriptDataEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
return True
def scriptDataDoubleEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"})
self.temporaryBuffer = ""
self.state = self.scriptDataDoubleEscapeEndState
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapeEndState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataEscapedState
else:
self.state = self.scriptDataDoubleEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def beforeAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data in ("'", '"', "=", "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-in-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-name-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def attributeNameState(self):
data = self.stream.char()
leavingThisState = True
emitToken = False
if data == "=":
self.state = self.beforeAttributeValueState
elif data in asciiLetters:
self.currentToken["data"][-1][0] += data +\
self.stream.charsUntil(asciiLetters, True)
leavingThisState = False
elif data == ">":
# XXX If we emit here the attributes are converted to a dict
# without being checked and when the code below runs we error
# because data is a dict not a list
emitToken = True
elif data in spaceCharacters:
self.state = self.afterAttributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][0] += "\uFFFD"
leavingThisState = False
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"invalid-character-in-attribute-name"})
self.currentToken["data"][-1][0] += data
leavingThisState = False
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-attribute-name"})
self.state = self.dataState
else:
self.currentToken["data"][-1][0] += data
leavingThisState = False
if leavingThisState:
# Attributes are not dropped at this stage. That happens when the
# start tag token is emitted so values can still be safely appended
# to attributes, but we do want to report the parse error in time.
if self.lowercaseAttrName:
self.currentToken["data"][-1][0] = (
self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
for name, value in self.currentToken["data"][:-1]:
if self.currentToken["data"][-1][0] == name:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"duplicate-attribute"})
break
# XXX Fix for above XXX
if emitToken:
self.emitCurrentToken()
return True
def afterAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "=":
self.state = self.beforeAttributeValueState
elif data == ">":
self.emitCurrentToken()
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-after-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-end-of-tag-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def beforeAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "\"":
self.state = self.attributeValueDoubleQuotedState
elif data == "&":
self.state = self.attributeValueUnQuotedState
self.stream.unget(data)
elif data == "'":
self.state = self.attributeValueSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-right-bracket"})
self.emitCurrentToken()
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
self.state = self.attributeValueUnQuotedState
elif data in ("=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"equals-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
return True
def attributeValueDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute('"')
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-double-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("\"", "&", "\u0000"))
return True
def attributeValueSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute("'")
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-single-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("'", "&", "\u0000"))
return True
def attributeValueUnQuotedState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == "&":
self.processEntityInAttribute(">")
elif data == ">":
self.emitCurrentToken()
elif data in ('"', "'", "=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-no-quotes"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters)
return True
def afterAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-EOF-after-attribute-value"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-attribute-value"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def selfClosingStartTagState(self):
data = self.stream.char()
if data == ">":
self.currentToken["selfClosing"] = True
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"unexpected-EOF-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def bogusCommentState(self):
# Make a new comment token and give it as value all the characters
# until the first > or EOF (charsUntil checks for EOF automatically)
# and emit it.
data = self.stream.charsUntil(">")
data = data.replace("\u0000", "\uFFFD")
self.tokenQueue.append(
{"type": tokenTypes["Comment"], "data": data})
# Eat the character directly after the bogus comment which is either a
# ">" or an EOF.
self.stream.char()
self.state = self.dataState
return True
def markupDeclarationOpenState(self):
charStack = [self.stream.char()]
if charStack[-1] == "-":
charStack.append(self.stream.char())
if charStack[-1] == "-":
self.currentToken = {"type": tokenTypes["Comment"], "data": ""}
self.state = self.commentStartState
return True
elif charStack[-1] in ('d', 'D'):
matched = True
for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'),
('y', 'Y'), ('p', 'P'), ('e', 'E')):
charStack.append(self.stream.char())
if charStack[-1] not in expected:
matched = False
break
if matched:
self.currentToken = {"type": tokenTypes["Doctype"],
"name": "",
"publicId": None, "systemId": None,
"correct": True}
self.state = self.doctypeState
return True
elif (charStack[-1] == "[" and
self.parser is not None and
self.parser.tree.openElements and
self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
matched = True
for expected in ["C", "D", "A", "T", "A", "["]:
charStack.append(self.stream.char())
if charStack[-1] != expected:
matched = False
break
if matched:
self.state = self.cdataSectionState
return True
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-dashes-or-doctype"})
while charStack:
self.stream.unget(charStack.pop())
self.state = self.bogusCommentState
return True
def commentStartState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentStartDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data
self.state = self.commentState
return True
def commentStartDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data + \
self.stream.charsUntil(("-", "\u0000"))
return True
def commentEndDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentEndState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--\uFFFD"
self.state = self.commentState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-bang-after-double-dash-in-comment"})
self.state = self.commentEndBangState
elif data == "-":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-dash-after-double-dash-in-comment"})
self.currentToken["data"] += data
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-double-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-comment"})
self.currentToken["data"] += "--" + data
self.state = self.commentState
return True
def commentEndBangState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "-":
self.currentToken["data"] += "--!"
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--!\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-bang-state"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "--!" + data
self.state = self.commentState
return True
def doctypeState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"need-space-after-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeNameState
return True
def beforeDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-right-bracket"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] = "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] = data
self.state = self.doctypeNameState
return True
def doctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.state = self.afterDoctypeNameState
elif data == ">":
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype-name"})
self.currentToken["correct"] = False
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] += data
return True
def afterDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.currentToken["correct"] = False
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
if data in ("p", "P"):
matched = True
for expected in (("u", "U"), ("b", "B"), ("l", "L"),
("i", "I"), ("c", "C")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypePublicKeywordState
return True
elif data in ("s", "S"):
matched = True
for expected in (("y", "Y"), ("s", "S"), ("t", "T"),
("e", "E"), ("m", "M")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypeSystemKeywordState
return True
# All the characters read before the current 'data' will be
# [a-zA-Z], so they're garbage in the bogus doctype and can be
# discarded; only the latest character might be '>' or EOF
# and needs to be ungetted
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-space-or-right-bracket-in-doctype", "datavars":
{"data": data}})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypePublicKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypePublicIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
return True
def beforeDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypePublicIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def doctypePublicIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def afterDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.betweenDoctypePublicAndSystemIdentifiersState
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def betweenDoctypePublicAndSystemIdentifiersState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypeSystemKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeSystemIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
return True
def beforeDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypeSystemIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def doctypeSystemIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def afterDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.state = self.bogusDoctypeState
return True
def bogusDoctypeState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
# XXX EMIT
self.stream.unget(data)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
pass
return True
def cdataSectionState(self):
data = []
while True:
data.append(self.stream.charsUntil("]"))
data.append(self.stream.charsUntil(">"))
char = self.stream.char()
if char == EOF:
break
else:
assert char == ">"
if data[-1][-2:] == "]]":
data[-1] = data[-1][:-2]
break
else:
data.append(char)
data = "".join(data)
# Deal with null here rather than in the parser
nullCount = data.count("\u0000")
if nullCount > 0:
for i in range(nullCount):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
data = data.replace("\u0000", "\uFFFD")
if data:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": data})
self.state = self.dataState
return True
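# Note: every state handler above follows the same contract -- it reads from
# self.stream, may append token dicts to self.tokenQueue, points self.state at
# the next handler, and returns True so the tokenizer's outer loop keeps going.
# A rough sketch (illustrative only, not the actual driver) of that loop:
#
#     while self.state():
#         while self.tokenQueue:
#             yield self.tokenQueue.popleft()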
|
crossbario/crossbar-examples
|
refs/heads/master
|
demos/votes/kivy/kivy/main.py
|
3
|
"""
A Kivy front end component for the WAMP Votes demo application.
The Votes application collects votes for our favourite flavours of ice cream.
It is powered by a back end WAMP component that runs at the address
defined in the start_wamp_component() function.
See
https://github.com/crossbario/crossbarexamples/tree/master/votes
on how to start up your own back end component and Crossbar router.
In this front end component, click a button with an image background to vote
for the flavour it denotes.
Reset votes to zero by clicking the bottom button.
Open up a browser window with another front end component to see the numbers
change.
"""
# copyright Roger Erens, the Apache 2.0 license applies
# Kivy's install_twisted_reactor MUST be called early on!
from kivy.support import install_twisted_reactor
install_twisted_reactor()
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.uix.boxlayout import BoxLayout
from autobahn.twisted.wamp import ApplicationSession
from autobahn.twisted.wamp import ApplicationRunner
from twisted.internet.defer import inlineCallbacks
class VotesWampComponent(ApplicationSession):
"""
A WAMP application component which is run from the Kivy UI.
"""
def onJoin(self, details):
# print("session ready", self.config.extra)
# get the Kivy UI component this session was started from
ui = self.config.extra['ui']
ui.on_session(self)
# subscribe to WAMP PubSub events and call the Kivy UI component's
# function when such an event is received
self.subscribe(ui.on_vote_message, 'io.crossbar.demo.vote.onvote')
self.subscribe(ui.on_reset_message, 'io.crossbar.demo.vote.onreset')
class VotesRoot(BoxLayout):
"""
The Root widget, defined in conjunction with the rule in votes.kv.
"""
votes_container = ObjectProperty(None) # Used to refer to the widget that
# holds the VoteWidgets
votes = {} # votes_container keeps its children in a plain list, so we need a
# dictionary for bookkeeping, connecting each flavour to its VoteWidget
def add_vote_widget(self, name):
"""
Dynamically create a VoteWidget and add it to the container in the Root widget.
Updates the container's child list and the dictionary self.votes.
"""
vote_widget = Factory.VoteWidget()
vote_widget.name = name
self.votes_container.add_widget(vote_widget)
self.votes[name] = self.votes_container.children[0]  # add_widget inserts at index 0, so this is the widget just added
@inlineCallbacks
def on_session(self, session):
"""
Called from the WAMP session once it has attached to the Crossbar router.
"""
self.session = session
# obtain a list of dictionaries with number of votes for each
# subject (i.e. flavour)
votes_results = yield self.session.call('io.crossbar.demo.vote.get')
for vote in votes_results:
self.votes[vote['subject']].amount = vote['votes']
def send_vote(self, name):
"""
Called from VoteWidget's top button.
Only send the name of the flavour that gets an extra vote: the back end
will update its number of votes and publish the updated number.
"""
if self.session:
self.session.call('io.crossbar.demo.vote.vote', name)
def send_reset(self):
"""
Called from VotesRoot bottom button.
"""
if self.session:
self.session.call('io.crossbar.demo.vote.reset')
def on_vote_message(self, vote_result):
"""
Called from VotesWampComponent when the Crossbar router publishes a vote event.
"""
self.votes[vote_result['subject']].amount = vote_result['votes']
def on_reset_message(self):
"""
Called from VotesWampComponent when the Crossbar router publishes a reset event.
"""
for vote_widget in self.votes_container.children:
vote_widget.amount = 0
class VotesApp(App):
def build(self):
self.root = VotesRoot()
flavours = ['Banana', 'Chocolate', 'Lemon'] # If you adapt this
# list, also adapt it
# in the back end
for flavour in flavours:
self.root.add_vote_widget(name=flavour)
self.start_wamp_component()
return self.root
def start_wamp_component(self):
"""
Create a WAMP session and start the WAMP component
"""
self.session = None
# adapt to fit the Crossbar.io instance you're using
url, realm = "ws://localhost:8080/ws", "crossbardemo"
# Create our WAMP application component
runner = ApplicationRunner(url=url,
realm=realm,
extra=dict(ui=self.root))
# Start our WAMP application component without starting the reactor because
# that was already started by kivy
runner.run(VotesWampComponent, start_reactor=False)
if __name__ == '__main__':
VotesApp().run()
|
qedi-r/home-assistant
|
refs/heads/dev
|
tests/components/homematicip_cloud/conftest.py
|
1
|
"""Initializer helpers for HomematicIP fake server."""
from asynctest import MagicMock, Mock, patch
from homematicip.aio.auth import AsyncAuth
from homematicip.aio.connection import AsyncConnection
from homematicip.aio.home import AsyncHome
import pytest
from homeassistant import config_entries
from homeassistant.components.homematicip_cloud import (
DOMAIN as HMIPC_DOMAIN,
async_setup as hmip_async_setup,
const as hmipc,
hap as hmip_hap,
)
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .helper import AUTH_TOKEN, HAPID, HAPPIN, HomeTemplate
from tests.common import MockConfigEntry, mock_coro
@pytest.fixture(name="mock_connection")
def mock_connection_fixture() -> AsyncConnection:
"""Return a mocked connection."""
connection = MagicMock(spec=AsyncConnection)
def _rest_call_side_effect(path, body=None):
return path, body
connection._restCall.side_effect = _rest_call_side_effect # pylint: disable=W0212
connection.api_call.return_value = mock_coro(True)
connection.init.side_effect = mock_coro(True)
return connection
@pytest.fixture(name="hmip_config_entry")
def hmip_config_entry_fixture() -> config_entries.ConfigEntry:
"""Create a mock config entriy for homematic ip cloud."""
entry_data = {
hmipc.HMIPC_HAPID: HAPID,
hmipc.HMIPC_AUTHTOKEN: AUTH_TOKEN,
hmipc.HMIPC_NAME: "",
hmipc.HMIPC_PIN: HAPPIN,
}
config_entry = MockConfigEntry(
version=1,
domain=HMIPC_DOMAIN,
title=HAPID,
data=entry_data,
source="import",
connection_class=config_entries.CONN_CLASS_CLOUD_PUSH,
system_options={"disable_new_entities": False},
)
return config_entry
@pytest.fixture(name="default_mock_home")
def default_mock_home_fixture(mock_connection) -> AsyncHome:
"""Create a fake homematic async home."""
return HomeTemplate(connection=mock_connection).init_home().get_async_home_mock()
@pytest.fixture(name="default_mock_hap")
async def default_mock_hap_fixture(
hass: HomeAssistantType, mock_connection, hmip_config_entry
) -> hmip_hap.HomematicipHAP:
"""Create a mocked homematic access point."""
return await get_mock_hap(hass, mock_connection, hmip_config_entry)
async def get_mock_hap(
hass: HomeAssistantType,
mock_connection,
hmip_config_entry: config_entries.ConfigEntry,
) -> hmip_hap.HomematicipHAP:
"""Create a mocked homematic access point."""
hass.config.components.add(HMIPC_DOMAIN)
hap = hmip_hap.HomematicipHAP(hass, hmip_config_entry)
home_name = hmip_config_entry.data["name"]
mock_home = (
HomeTemplate(connection=mock_connection, home_name=home_name)
.init_home()
.get_async_home_mock()
)
with patch.object(hap, "get_hap", return_value=mock_coro(mock_home)):
assert await hap.async_setup()
mock_home.on_update(hap.async_update)
mock_home.on_create(hap.async_create_entity)
hass.data[HMIPC_DOMAIN] = {HAPID: hap}
await hass.async_block_till_done()
return hap
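# A minimal sketch of how the fixtures above are typically consumed in a test;
# the test name and assertion are illustrative, not taken from a real test module:
#
#     async def test_hap_is_registered(hass, default_mock_hap):
#         assert hass.data[HMIPC_DOMAIN][HAPID] is default_mock_hap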
@pytest.fixture(name="hmip_config")
def hmip_config_fixture() -> ConfigType:
"""Create a config for homematic ip cloud."""
entry_data = {
hmipc.HMIPC_HAPID: HAPID,
hmipc.HMIPC_AUTHTOKEN: AUTH_TOKEN,
hmipc.HMIPC_NAME: "",
hmipc.HMIPC_PIN: HAPPIN,
}
return {HMIPC_DOMAIN: [entry_data]}
@pytest.fixture(name="dummy_config")
def dummy_config_fixture() -> ConfigType:
"""Create a dummy config."""
return {"blabla": None}
@pytest.fixture(name="mock_hap_with_service")
async def mock_hap_with_service_fixture(
hass: HomeAssistantType, default_mock_hap, dummy_config
) -> hmip_hap.HomematicipHAP:
"""Create a fake homematic access point with hass services."""
await hmip_async_setup(hass, dummy_config)
await hass.async_block_till_done()
hass.data[HMIPC_DOMAIN] = {HAPID: default_mock_hap}
return default_mock_hap
@pytest.fixture(name="simple_mock_home")
def simple_mock_home_fixture() -> AsyncHome:
"""Return a simple AsyncHome Mock."""
return Mock(
spec=AsyncHome,
devices=[],
groups=[],
location=Mock(),
weather=Mock(create=True),
id=42,
dutyCycle=88,
connected=True,
)
@pytest.fixture(name="simple_mock_auth")
def simple_mock_auth_fixture() -> AsyncAuth:
"""Return a simple AsyncAuth Mock."""
return Mock(spec=AsyncAuth, pin=HAPPIN, create=True)
|
KimNorgaard/ansible-modules-extras
|
refs/heads/devel
|
network/a10/a10_service_group.py
|
117
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to manage A10 Networks slb service-group objects
(c) 2014, Mischa Peters <mpeters@a10networks.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: a10_service_group
version_added: 1.8
short_description: Manage A10 Networks devices' service groups
description:
- Manage slb service-group objects on A10 Networks devices via aXAPI
author: "Mischa Peters (@mischapeters)"
notes:
- Requires A10 Networks aXAPI 2.1
- When a server doesn't exist and is added to the service-group, the server will be created
options:
host:
description:
- hostname or ip of your A10 Networks device
required: true
default: null
aliases: []
choices: []
username:
description:
- admin account of your A10 Networks device
required: true
default: null
aliases: ['user', 'admin']
choices: []
password:
description:
- admin password of your A10 Networks device
required: true
default: null
aliases: ['pass', 'pwd']
choices: []
service_group:
description:
- slb service-group name
required: true
default: null
aliases: ['service', 'pool', 'group']
choices: []
service_group_protocol:
description:
- slb service-group protocol
required: false
default: tcp
aliases: ['proto', 'protocol']
choices: ['tcp', 'udp']
service_group_method:
description:
- slb service-group loadbalancing method
required: false
default: round-robin
aliases: ['method']
choices: ['round-robin', 'weighted-rr', 'least-connection', 'weighted-least-connection', 'service-least-connection', 'service-weighted-least-connection', 'fastest-response', 'least-request', 'round-robin-strict', 'src-ip-only-hash', 'src-ip-hash']
servers:
description:
- A list of servers to add to the service group. Each list item should be a
dictionary which specifies the C(server:) and C(port:), but can also optionally
specify the C(status:). See the examples below for details.
required: false
default: null
aliases: []
choices: []
write_config:
description:
- If C(yes), any changes will cause a write of the running configuration
to non-volatile memory. This will save I(all) configuration changes,
including those that may have been made manually or through other modules,
so care should be taken when specifying C(yes).
required: false
default: "no"
choices: ["yes", "no"]
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled devices using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
EXAMPLES = '''
# Create a new service-group
- a10_service_group:
host: a10.mydomain.com
username: myadmin
password: mypassword
service_group: sg-80-tcp
servers:
- server: foo1.mydomain.com
port: 8080
- server: foo2.mydomain.com
port: 8080
- server: foo3.mydomain.com
port: 8080
- server: foo4.mydomain.com
port: 8080
status: disabled
'''
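# Not shown in DOCUMENTATION above, the argument_spec below also accepts
# state with choices 'present' (default) and 'absent'. As an illustrative
# sketch, removing the group from the example above would use:
#
#   - a10_service_group:
#       host: a10.mydomain.com
#       username: myadmin
#       password: mypassword
#       service_group: sg-80-tcp
#       state: absent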
VALID_SERVICE_GROUP_FIELDS = ['name', 'protocol', 'lb_method']
VALID_SERVER_FIELDS = ['server', 'port', 'status']
def validate_servers(module, servers):
for item in servers:
for key in item:
if key not in VALID_SERVER_FIELDS:
module.fail_json(msg="invalid server field (%s), must be one of: %s" % (key, ','.join(VALID_SERVER_FIELDS)))
# validate the server name is present
if 'server' not in item:
module.fail_json(msg="server definitions must define the server field")
# validate the port number is present and an integer
if 'port' in item:
try:
item['port'] = int(item['port'])
except:
module.fail_json(msg="server port definitions must be integers")
else:
module.fail_json(msg="server definitions must define the port field")
# convert the status to the internal API integer value
if 'status' in item:
item['status'] = axapi_enabled_disabled(item['status'])
else:
item['status'] = 1
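# For example, validate_servers normalizes
# [{'server': 'foo1.mydomain.com', 'port': '8080'}] in place into
# [{'server': 'foo1.mydomain.com', 'port': 8080, 'status': 1}]: the port is
# coerced to an int and a missing status defaults to 1.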
def main():
argument_spec = a10_argument_spec()
argument_spec.update(url_argument_spec())
argument_spec.update(
dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
service_group=dict(type='str', aliases=['service', 'pool', 'group'], required=True),
service_group_protocol=dict(type='str', default='tcp', aliases=['proto', 'protocol'], choices=['tcp', 'udp']),
service_group_method=dict(type='str', default='round-robin',
aliases=['method'],
choices=['round-robin',
'weighted-rr',
'least-connection',
'weighted-least-connection',
'service-least-connection',
'service-weighted-least-connection',
'fastest-response',
'least-request',
'round-robin-strict',
'src-ip-only-hash',
'src-ip-hash']),
servers=dict(type='list', aliases=['server', 'member'], default=[]),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=False
)
host = module.params['host']
username = module.params['username']
password = module.params['password']
state = module.params['state']
write_config = module.params['write_config']
slb_service_group = module.params['service_group']
slb_service_group_proto = module.params['service_group_protocol']
slb_service_group_method = module.params['service_group_method']
slb_servers = module.params['servers']
if slb_service_group is None:
module.fail_json(msg='service_group is required')
axapi_base_url = 'https://' + host + '/services/rest/V2.1/?format=json'
load_balancing_methods = {'round-robin': 0,
'weighted-rr': 1,
'least-connection': 2,
'weighted-least-connection': 3,
'service-least-connection': 4,
'service-weighted-least-connection': 5,
'fastest-response': 6,
'least-request': 7,
'round-robin-strict': 8,
'src-ip-only-hash': 14,
'src-ip-hash': 15}
if not slb_service_group_proto or slb_service_group_proto.lower() == 'tcp':
protocol = 2
else:
protocol = 3
# validate the server data list structure
validate_servers(module, slb_servers)
json_post = {
'service_group': {
'name': slb_service_group,
'protocol': protocol,
'lb_method': load_balancing_methods[slb_service_group_method],
}
}
# first we authenticate to get a session id
session_url = axapi_authenticate(module, axapi_base_url, username, password)
# then we check to see if the specified group exists
slb_result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group}))
slb_service_group_exist = not axapi_failure(slb_result)
changed = False
if state == 'present':
# before creating/updating we need to validate that servers
# defined in the servers list exist to prevent errors
checked_servers = []
for server in slb_servers:
result = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': server['server']}))
if axapi_failure(result):
module.fail_json(msg="the server %s specified in the servers list does not exist" % server['server'])
checked_servers.append(server['server'])
if not slb_service_group_exist:
result = axapi_call(module, session_url + '&method=slb.service_group.create', json.dumps(json_post))
if axapi_failure(result):
module.fail_json(msg=result['response']['err']['msg'])
changed = True
else:
# check to see if the service group definition without the
# server members is different, and update that individually
# if it needs it
do_update = False
for field in VALID_SERVICE_GROUP_FIELDS:
if json_post['service_group'][field] != slb_result['service_group'][field]:
do_update = True
break
if do_update:
result = axapi_call(module, session_url + '&method=slb.service_group.update', json.dumps(json_post))
if axapi_failure(result):
module.fail_json(msg=result['response']['err']['msg'])
changed = True
# next we pull the defined list of servers out of the returned
# results to make it a bit easier to iterate over
defined_servers = slb_result.get('service_group', {}).get('member_list', [])
# next we add/update new member servers from the user-specified
# list if they're different or not on the target device
for server in slb_servers:
found = False
different = False
for def_server in defined_servers:
if server['server'] == def_server['server']:
found = True
for valid_field in VALID_SERVER_FIELDS:
if server[valid_field] != def_server[valid_field]:
different = True
break
if found or different:
break
# add or update as required
server_data = {
"name": slb_service_group,
"member": server,
}
if not found:
result = axapi_call(module, session_url + '&method=slb.service_group.member.create', json.dumps(server_data))
changed = True
elif different:
result = axapi_call(module, session_url + '&method=slb.service_group.member.update', json.dumps(server_data))
changed = True
# finally, remove any servers that are on the target
# device but were not specified in the list given
for server in defined_servers:
found = False
for slb_server in slb_servers:
if server['server'] == slb_server['server']:
found = True
break
# remove if not found
server_data = {
"name": slb_service_group,
"member": server,
}
if not found:
result = axapi_call(module, session_url + '&method=slb.service_group.member.delete', json.dumps(server_data))
changed = True
# if we changed things, get the full info regarding
# the service group for the return data below
if changed:
result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group}))
else:
result = slb_result
elif state == 'absent':
if slb_service_group_exist:
result = axapi_call(module, session_url + '&method=slb.service_group.delete', json.dumps({'name': slb_service_group}))
changed = True
else:
result = dict(msg="the service group was not present")
# if the config has changed, save the config unless otherwise requested
if changed and write_config:
write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
if axapi_failure(write_result):
module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
# log out of the session nicely and exit
axapi_call(module, session_url + '&method=session.close')
module.exit_json(changed=changed, content=result)
# standard ansible module imports
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.a10 import *
main()
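# Example playbook task (illustrative values only; the option names follow the
# argument_spec above and the server entries mirror what validate_servers()
# checks for):
#
# - a10_service_group:
#     host: a10.example.com
#     username: admin
#     password: secret
#     service_group: sg-80-tcp
#     servers:
#       - server: web01
#         port: 80
#       - server: web02
#         port: 80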
|
fitermay/intellij-community
|
refs/heads/master
|
python/testData/formatter/forceSpacesAroundEqualSignInAnnotatedParameter.py
|
70
|
def test(x = 1, y: int = 2, z: int=3):
pass
|
hkchenhongyi/django
|
refs/heads/master
|
tests/model_validation/models.py
|
260
|
from django.db import models
class ThingItem(object):
def __init__(self, value, display):
self.value = value
self.display = display
def __iter__(self):
return (x for x in [self.value, self.display])
def __len__(self):
return 2
class Things(object):
def __iter__(self):
return (x for x in [ThingItem(1, 2), ThingItem(3, 4)])
class ThingWithIterableChoices(models.Model):
# Testing choices= Iterable of Iterables
# See: https://code.djangoproject.com/ticket/20430
thing = models.CharField(max_length=100, blank=True, choices=Things())
class Meta:
# Models created as unmanaged as these aren't ever queried
managed = False
class ManyToManyRel(models.Model):
thing1 = models.ManyToManyField(ThingWithIterableChoices, related_name='+')
thing2 = models.ManyToManyField(ThingWithIterableChoices, related_name='+')
class Meta:
# Models created as unmanaged as these aren't ever queried
managed = False
class FKRel(models.Model):
thing1 = models.ForeignKey(ThingWithIterableChoices, models.CASCADE, related_name='+')
thing2 = models.ForeignKey(ThingWithIterableChoices, models.CASCADE, related_name='+')
class Meta:
# Models created as unmanaged as these aren't ever queried
managed = False
|
TeamWin/android_kernel_samsung_zerolteeu
|
refs/heads/android-5.1
|
tools/perf/scripts/python/netdev-times.py
|
11271
|
# Display the processing path of packets and the time spent at each stage.
# It helps us to investigate networking or network devices.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only things related to the specified device
# debug: work in debug mode. It shows buffer status.
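#
# Typical workflow (a sketch; assumes the record/report wrappers that perf
# ships for the scripts under scripts/python are available):
#   perf script record netdev-times
#   perf script report netdev-times tx rx dev=eth0 debug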
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint events related to this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value includes time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which includes a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which are freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display the processing of received packets and the interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
	# check if this hunk should be shown
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
	# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
|
nikesh-mahalka/nova
|
refs/heads/master
|
nova/db/sqlalchemy/migrate_repo/versions/227_fix_project_user_quotas_resource_length.py
|
81
|
# Copyright 2013 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, String, Table
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
table = Table('project_user_quotas', meta, autoload=True)
col_resource = getattr(table.c, 'resource')
if col_resource.type.length == 25:
        # The resource column of the project_user_quotas table had been changed
        # to an invalid length (25) since I56ad98d3702f53fe8cfa94093fea89074f7a5e90.
# The following code fixes the length for the environments which are
# deployed after I56ad98d3702f53fe8cfa94093fea89074f7a5e90.
col_resource.alter(type=String(255))
table.update().where(table.c.resource == 'injected_file_content_byt')\
.values(resource='injected_file_content_bytes').execute()
|
asedunov/intellij-community
|
refs/heads/master
|
python/testData/refactoring/makeFunctionTopLevel/methodImportUpdates/before/other.py
|
44
|
from main import C
inst = C()
inst.method(42)
|
dennisobrien/bokeh
|
refs/heads/master
|
examples/embed/embed_responsive_width_height.py
|
19
|
""" This example shows how a Bokeh plot can be embedded in an HTML
document, in a way that the plot resizes to make use of the available
width and height (while keeping the aspect ratio fixed).
To make this work well, the plot should be placed in a container that
*has* a certain width and height (i.e. non-scrollable), which is the
body element in this case. A more realistic example might be embedding
a plot in a Phosphor widget.
"""
import random
from bokeh.io import output_file, show
from bokeh.plotting import figure
PLOT_OPTIONS = dict(plot_width=600, plot_height=400)
SCATTER_OPTIONS = dict(size=12, alpha=0.5)
data = lambda: [random.choice([i for i in range(100)]) for r in range(10)]
red = figure(sizing_mode='scale_both', tools='pan', **PLOT_OPTIONS)
red.scatter(data(), data(), color="red", **SCATTER_OPTIONS)
output_file('embed_responsive_width_height.html')
show(red)
|
jkramarz/zuombot
|
refs/heads/zuombot
|
plugins/books.py
|
31
|
import requests
from cloudbot import hook
from cloudbot.util import formatting, web
base_url = 'https://www.googleapis.com/books/v1/'
book_search_api = base_url + 'volumes?'
@hook.on_start()
def load_key(bot):
global dev_key
dev_key = bot.config.get("api_keys", {}).get("google_dev_key", None)
@hook.command("books", "gbooks")
def books(text):
"""books <query> -- Searches Google Books for <query>."""
if not dev_key:
return "This command requires a Google Developers Console API key."
json = requests.get(book_search_api, params={"q": text, "key": dev_key, "country": "US"}).json()
if json.get('error'):
if json['error']['code'] == 403:
print(json['error']['message'])
return "The Books API is off in the Google Developers Console (or check the console)."
else:
return 'Error performing search.'
if json['totalItems'] == 0:
return 'No results found.'
book = json['items'][0]['volumeInfo']
title = book['title']
try:
author = book['authors'][0]
except KeyError:
try:
author = book['publisher']
except KeyError:
author = "Unknown Author"
try:
description = formatting.truncate_str(book['description'], 130)
except KeyError:
description = "No description available."
try:
year = book['publishedDate'][:4]
except KeyError:
year = "No Year"
try:
page_count = book['pageCount']
pages = ' - \x02{:,}\x02 page{}'.format(page_count, "s"[page_count == 1:])
except KeyError:
pages = ''
link = web.shorten(book['infoLink'], service="goo.gl", key=dev_key)
return "\x02{}\x02 by \x02{}\x02 ({}){} - {} - {}".format(title, author, year, pages, description, link)
|
shinate/three.js
|
refs/heads/master
|
utils/exporters/blender/addons/io_three/logger.py
|
121
|
import os
import logging
import tempfile
from . import constants
LOG_FILE = None
LOGGER = None
LEVELS = {
constants.DEBUG: logging.DEBUG,
constants.INFO: logging.INFO,
constants.WARNING: logging.WARNING,
constants.ERROR: logging.ERROR,
constants.CRITICAL: logging.CRITICAL
}
def init(filename, level=constants.DEBUG):
"""Initialize the logger.
:param filename: base name of the log file
:param level: logging level (Default value = DEBUG)
"""
global LOG_FILE
LOG_FILE = os.path.join(tempfile.gettempdir(), filename)
with open(LOG_FILE, 'w'):
pass
global LOGGER
LOGGER = logging.getLogger('Three.Export')
LOGGER.setLevel(LEVELS[level])
if not LOGGER.handlers:
stream = logging.StreamHandler()
stream.setLevel(LEVELS[level])
format_ = '%(asctime)s - %(name)s - %(levelname)s: %(message)s'
formatter = logging.Formatter(format_)
stream.setFormatter(formatter)
file_handler = logging.FileHandler(LOG_FILE)
file_handler.setLevel(LEVELS[level])
file_handler.setFormatter(formatter)
LOGGER.addHandler(stream)
LOGGER.addHandler(file_handler)
def _logger(func):
def inner(*args):
if LOGGER is not None:
func(*args)
return inner
@_logger
def info(*args):
LOGGER.info(*args)
@_logger
def debug(*args):
LOGGER.debug(*args)
@_logger
def warning(*args):
LOGGER.warning(*args)
@_logger
def error(*args):
LOGGER.error(*args)
@_logger
def critical(*args):
LOGGER.critical(*args)
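# Minimal usage sketch (the exporter normally drives this module; the calls
# below only exercise the helpers defined above):
#   init('io_three.export.log', level=constants.DEBUG)
#   info('exporting %s', 'scene')
#   error('export failed')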
|
google-research/falken
|
refs/heads/main
|
service/generated_flatbuffers/tflite/DimensionMetadata.py
|
1
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class DimensionMetadata(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsDimensionMetadata(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = DimensionMetadata()
x.Init(buf, n + offset)
return x
@classmethod
def DimensionMetadataBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# DimensionMetadata
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# DimensionMetadata
def Format(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# DimensionMetadata
def DenseSize(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# DimensionMetadata
def ArraySegmentsType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# DimensionMetadata
def ArraySegments(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
# DimensionMetadata
def ArrayIndicesType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# DimensionMetadata
def ArrayIndices(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
def DimensionMetadataStart(builder): builder.StartObject(6)
def DimensionMetadataAddFormat(builder, format): builder.PrependInt8Slot(0, format, 0)
def DimensionMetadataAddDenseSize(builder, denseSize): builder.PrependInt32Slot(1, denseSize, 0)
def DimensionMetadataAddArraySegmentsType(builder, arraySegmentsType): builder.PrependUint8Slot(2, arraySegmentsType, 0)
def DimensionMetadataAddArraySegments(builder, arraySegments): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(arraySegments), 0)
def DimensionMetadataAddArrayIndicesType(builder, arrayIndicesType): builder.PrependUint8Slot(4, arrayIndicesType, 0)
def DimensionMetadataAddArrayIndices(builder, arrayIndices): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(arrayIndices), 0)
def DimensionMetadataEnd(builder): return builder.EndObject()
import tflite.Int32Vector
import tflite.SparseIndexVector
import tflite.Uint16Vector
import tflite.Uint8Vector
try:
from typing import Union
except:
pass
class DimensionMetadataT(object):
# DimensionMetadataT
def __init__(self):
self.format = 0 # type: int
self.denseSize = 0 # type: int
self.arraySegmentsType = 0 # type: int
self.arraySegments = None # type: Union[None, tflite.Int32Vector.Int32VectorT, tflite.Uint16Vector.Uint16VectorT, tflite.Uint8Vector.Uint8VectorT]
self.arrayIndicesType = 0 # type: int
self.arrayIndices = None # type: Union[None, tflite.Int32Vector.Int32VectorT, tflite.Uint16Vector.Uint16VectorT, tflite.Uint8Vector.Uint8VectorT]
@classmethod
def InitFromBuf(cls, buf, pos):
dimensionMetadata = DimensionMetadata()
dimensionMetadata.Init(buf, pos)
return cls.InitFromObj(dimensionMetadata)
@classmethod
def InitFromObj(cls, dimensionMetadata):
x = DimensionMetadataT()
x._UnPack(dimensionMetadata)
return x
# DimensionMetadataT
def _UnPack(self, dimensionMetadata):
if dimensionMetadata is None:
return
self.format = dimensionMetadata.Format()
self.denseSize = dimensionMetadata.DenseSize()
self.arraySegmentsType = dimensionMetadata.ArraySegmentsType()
self.arraySegments = tflite.SparseIndexVector.SparseIndexVectorCreator(self.arraySegmentsType, dimensionMetadata.ArraySegments())
self.arrayIndicesType = dimensionMetadata.ArrayIndicesType()
self.arrayIndices = tflite.SparseIndexVector.SparseIndexVectorCreator(self.arrayIndicesType, dimensionMetadata.ArrayIndices())
# DimensionMetadataT
def Pack(self, builder):
if self.arraySegments is not None:
arraySegments = self.arraySegments.Pack(builder)
if self.arrayIndices is not None:
arrayIndices = self.arrayIndices.Pack(builder)
DimensionMetadataStart(builder)
DimensionMetadataAddFormat(builder, self.format)
DimensionMetadataAddDenseSize(builder, self.denseSize)
DimensionMetadataAddArraySegmentsType(builder, self.arraySegmentsType)
if self.arraySegments is not None:
DimensionMetadataAddArraySegments(builder, arraySegments)
DimensionMetadataAddArrayIndicesType(builder, self.arrayIndicesType)
if self.arrayIndices is not None:
DimensionMetadataAddArrayIndices(builder, arrayIndices)
dimensionMetadata = DimensionMetadataEnd(builder)
return dimensionMetadata
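# Hypothetical usage sketch (not part of the generated API; it only exercises
# the builder helpers defined above together with the standard flatbuffers
# Builder):
#   builder = flatbuffers.Builder(0)
#   DimensionMetadataStart(builder)
#   DimensionMetadataAddFormat(builder, 0)      # e.g. DENSE
#   DimensionMetadataAddDenseSize(builder, 4)
#   dm = DimensionMetadataEnd(builder)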
|
MarcJoan/django
|
refs/heads/master
|
tests/field_subclassing/tests.py
|
129
|
from __future__ import unicode_literals
from django.db import connection
from django.test import SimpleTestCase
from .fields import CustomTypedField
class TestDbType(SimpleTestCase):
def test_db_parameters_respects_db_type(self):
f = CustomTypedField()
self.assertEqual(f.db_parameters(connection)['type'], 'custom_field')
|
ProjectSWGCore/NGECore2
|
refs/heads/master
|
scripts/mobiles/tatooine/pirate_leader.py
|
2
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('mission_pirate_leader')
mobileTemplate.setLevel(23)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(True)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("pirate")
mobileTemplate.setAssistRange(10)
mobileTemplate.setStalker(False)
templates = Vector()
templates.add('object/mobile/shared_dressed_pirate_leader_of_tatooine.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/ranged/carbine/shared_carbine_cdef.iff', WeaponType.CARBINE, 1.0, 15, 'energy')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('rangedShot')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('pirate_leader', mobileTemplate)
return
|
joansmith/openmicroscopy
|
refs/heads/develop
|
components/tools/OmeroPy/src/omero/plugins/import.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2014 Glencoe Software, Inc. All Rights Reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Startup plugin for command-line importer.
"""
import os
import sys
from omero.cli import BaseControl, CLI
import omero.java
from omero_ext.argparse import SUPPRESS
from path import path
START_CLASS = "ome.formats.importer.cli.CommandLineImporter"
TEST_CLASS = "ome.formats.test.util.TestEngine"
HELP = """Run the Java-based command-line importer
This is a Python wrapper around the Java importer. Login is handled by Python
OMERO.cli. To see more options, use "--javahelp".
Options marked with "**" are passed strictly to Java. If they interfere with
any of the Python arguments, you may need to precede your arguments with a
"--".
"""
EXAMPLES = """
Examples:
# Display help
$ bin/omero import -h
# Import a file using the current login
$ bin/omero import ~/Data/my_file.dv
# Import foo.tiff using input credentials
$ bin/omero import -s localhost -u user -w password foo.tiff
# Set Java debugging level to ALL
$ bin/omero import foo.tiff -- --debug=ALL
# Display used files for importing foo.tiff
$ bin/omero import foo.tiff -f
# Limit debugging output
$ bin/omero import -- --debug=ERROR foo.tiff
For additional information, see:
http://www.openmicroscopy.org/site/support/omero5.2/users/cli/import.html
Report bugs to <ome-users@lists.openmicroscopy.org.uk>
"""
TESTHELP = """Run the Importer TestEngine suite (devs-only)"""
DEBUG_CHOICES = ["ALL", "DEBUG", "ERROR", "FATAL", "INFO", "TRACE", "WARN"]
SKIP_CHOICES = ['all', 'checksum', 'minmax', 'thumbnails', 'upgrade']
class ImportControl(BaseControl):
COMMAND = [START_CLASS]
def _configure(self, parser):
parser.add_login_arguments()
parser.add_argument(
"--javahelp", "--java-help",
action="store_true", help="Show the Java help text")
parser.add_argument(
"--advanced-help", action="store_true", dest="java_advanced_help",
help="Show the advanced help text")
parser.add_argument(
"---file", nargs="?",
help="File for storing the standard out of the Java process")
parser.add_argument(
"---errs", nargs="?",
help="File for storing the standard err of the Java process")
parser.add_argument(
"--clientdir", type=str,
help="Path to the directory containing the client JARs. "
" Default: lib/client")
# The following arguments are strictly passed to Java
name_group = parser.add_argument_group(
'Naming arguments', 'Optional arguments passed strictly to Java.')
name_group.add_argument(
"-n", "--name", dest="java_name",
help="Image or plate name to use (**)",
metavar="NAME")
name_group.add_argument(
"-x", "--description", dest="java_description",
help="Image or plate description to use (**)",
metavar="DESCRIPTION")
# Deprecated naming arguments
name_group.add_argument(
"--plate_name", dest="java_plate_name",
help=SUPPRESS)
name_group.add_argument(
"--plate_description", dest="java_plate_description",
help=SUPPRESS)
# Feedback options
feedback_group = parser.add_argument_group(
'Feedback arguments',
            'Optional arguments passed strictly to Java for reporting'
' errors to the OME team.')
feedback_group.add_argument(
"--report", action="store_true", dest="java_report",
help="Report errors to the OME team (**)")
feedback_group.add_argument(
"--upload", action="store_true", dest="java_upload",
help=("Upload broken files and log file (if any) with report."
" Required --report (**)"))
feedback_group.add_argument(
"--logs", action="store_true", dest="java_logs",
help=("Upload log file (if any) with report."
" Required --report (**)"))
feedback_group.add_argument(
"--email", dest="java_email",
help="Email for reported errors. Required --report (**)",
metavar="EMAIL")
feedback_group.add_argument(
"--qa-baseurl", dest="java_qa_baseurl",
help=SUPPRESS)
# Annotation options
annotation_group = parser.add_argument_group(
'Annotation arguments',
            'Optional arguments passed strictly to Java for annotating'
' imports.')
annotation_group.add_argument(
"--annotation-ns", dest="java_ns", metavar="ANNOTATION_NS",
help="Namespace to use for subsequent annotation (**)")
annotation_group.add_argument(
"--annotation-text", dest="java_text", metavar="ANNOTATION_TEXT",
help="Content for a text annotation (requires namespace) (**)")
annotation_group.add_argument(
"--annotation-link", dest="java_link",
metavar="ANNOTATION_LINK",
help="Comment annotation ID to link all images to (**)")
annotation_group.add_argument(
"--annotation_ns", dest="java_ns", metavar="ANNOTATION_NS",
help=SUPPRESS)
annotation_group.add_argument(
"--annotation_text", dest="java_text", metavar="ANNOTATION_TEXT",
help=SUPPRESS)
annotation_group.add_argument(
"--annotation_link", dest="java_link", metavar="ANNOTATION_LINK",
help=SUPPRESS)
java_group = parser.add_argument_group(
'Java arguments', 'Optional arguments passed strictly to Java')
java_group.add_argument(
"-f", dest="java_f", action="store_true",
help="Display the used files and exit (**)")
java_group.add_argument(
"-c", dest="java_c", action="store_true",
help="Continue importing after errors (**)")
java_group.add_argument(
"-l", dest="java_l",
help="Use the list of readers rather than the default (**)",
metavar="READER_FILE")
java_group.add_argument(
"-d", dest="java_d",
help="OMERO dataset ID to import image into (**)",
metavar="DATASET_ID")
java_group.add_argument(
"-r", dest="java_r",
help="OMERO screen ID to import plate into (**)",
metavar="SCREEN_ID")
java_group.add_argument(
"--debug", choices=DEBUG_CHOICES, dest="java_debug",
help="Turn debug logging on (**)",
metavar="LEVEL")
parser.add_argument(
"--depth", default=4, type=int,
help="Number of directories to scan down for files")
parser.add_argument(
"--skip", type=str, choices=SKIP_CHOICES, action='append',
help="Optional step to skip during import")
parser.add_argument(
"path", nargs="*",
help="Path to be passed to the Java process")
parser.set_defaults(func=self.importer)
def set_login_arguments(self, args):
"""Set the connection arguments"""
# Connection is required unless help arguments or -f is passed
connection_required = ("-h" not in self.command_args and
not args.java_f and
not args.java_advanced_help)
if connection_required:
client = self.ctx.conn(args)
self.command_args.extend(["-s", client.getProperty("omero.host")])
self.command_args.extend(["-p", client.getProperty("omero.port")])
self.command_args.extend(["-k", client.getSessionId()])
def set_skip_arguments(self, args):
"""Set the arguments to skip steps during import"""
if not args.skip:
return
if ('all' in args.skip or 'checksum' in args.skip):
self.command_args.append("--checksum-algorithm=File-Size-64")
if ('all' in args.skip or 'thumbnails' in args.skip):
self.command_args.append("--no-thumbnails")
if ('all' in args.skip or 'minmax' in args.skip):
self.command_args.append("--no-stats-info")
if ('all' in args.skip or 'upgrade' in args.skip):
self.command_args.append("--no-upgrade-check")
def set_java_arguments(self, args):
"""Set the arguments passed to Java"""
# Due to the use of "--" some of these like debug
# will never be filled out. But for completeness
# sake, we include them here.
java_args = {
"java_f": "-f",
"java_c": "-c",
"java_l": "-l",
"java_d": "-d",
"java_r": "-r",
"java_name": ("--name",),
"java_description": ("--description",),
"java_plate_name": ("--plate_name",),
"java_plate_description": ("--plate_description",),
"java_report": ("--report"),
"java_upload": ("--upload"),
"java_logs": ("--logs"),
"java_email": ("--email"),
"java_debug": ("--debug",),
"java_qa_baseurl": ("--qa-baseurl",),
"java_ns": "--annotation-ns",
"java_text": "--annotation-text",
"java_link": "--annotation-link",
"java_advanced_help": "--advanced-help",
}
for attr_name, arg_name in java_args.items():
arg_value = getattr(args, attr_name)
if arg_value:
if isinstance(arg_name, tuple):
arg_name = arg_name[0]
self.command_args.append(
"%s=%s" % (arg_name, arg_value))
else:
self.command_args.append(arg_name)
if isinstance(arg_value, (str, unicode)):
self.command_args.append(arg_value)
def importer(self, args):
if args.clientdir:
client_dir = path(args.clientdir)
else:
client_dir = self.ctx.dir / "lib" / "client"
etc_dir = self.ctx.dir / "etc"
xml_file = etc_dir / "logback-cli.xml"
logback = "-Dlogback.configurationFile=%s" % xml_file
try:
classpath = [file.abspath() for file in client_dir.files("*.jar")]
except OSError as e:
self.ctx.die(102, "Cannot get JAR files from '%s' (%s)"
% (client_dir, e.strerror))
if not classpath:
self.ctx.die(103, "No JAR files found under '%s'" % client_dir)
xargs = [logback, "-Xmx1024M", "-cp", os.pathsep.join(classpath)]
# Create import command to be passed to Java
self.command_args = []
if args.javahelp:
self.command_args.append("-h")
self.set_login_arguments(args)
self.set_skip_arguments(args)
self.set_java_arguments(args)
xargs.append("-Domero.import.depth=%s" % args.depth)
import_command = self.COMMAND + self.command_args + args.path
try:
# Open file handles for stdout/stderr if applicable
out = args.file
err = args.errs
if out:
out = open(out, "w")
if err:
err = open(err, "w")
p = omero.java.popen(
import_command, debug=False, xargs=xargs, stdout=out,
stderr=err)
self.ctx.rv = p.wait()
finally:
# Make sure file handles are closed
if out:
out.close()
if err:
err.close()
class TestEngine(ImportControl):
COMMAND = [TEST_CLASS]
try:
register("import", ImportControl, HELP, epilog=EXAMPLES)
register("testengine", TestEngine, TESTHELP)
except NameError:
if __name__ == "__main__":
cli = CLI()
cli.register("import", ImportControl, HELP, epilog=EXAMPLES)
cli.register("testengine", TestEngine, TESTHELP)
cli.invoke(sys.argv[1:])
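# Further illustrative invocations (assumptions, not part of the upstream
# EXAMPLES text above; they only combine options defined in _configure):
#   bin/omero import --skip checksum --skip thumbnails foo.tiff
#   bin/omero import --depth 2 /data/acquired/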
|
pombredanne/pytest_django
|
refs/heads/master
|
pytest_django_test/settings_sqlite.py
|
13
|
from .settings_base import * # noqa
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
},
}
|
hiteshagrawal/python
|
refs/heads/master
|
mbox-short-spam.py
|
1
|
#!/usr/bin/python
my_list = []
with open('mbox-short.txt') as fh:
for line in fh:
if line.startswith('X-DSPAM-Confidence:'):
line = line.strip()
#print line
stpos = line.find(' ')
number = line[stpos+1:len(line)]
my_list += [float(number)]
print my_list
#print sum(my_list)
avg = sum(my_list)/len(my_list)
print("This avg spam confidence is:"),str(avg)
|
Big-B702/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/internet/test/reactormixins.py
|
49
|
# Copyright (c) 2008-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorTime}.
"""
__metaclass__ = type
import signal
from twisted.internet.defer import TimeoutError
from twisted.trial.unittest import TestCase, SkipTest
from twisted.python.runtime import platformType
from twisted.python.reflect import namedAny
from twisted.python import log
from twisted.python.failure import Failure
# Access private APIs.
if platformType == 'posix':
from twisted.internet import process
else:
process = None
class ReactorBuilder:
"""
L{TestCase} mixin which provides a reactor-creation API. This mixin
defines C{setUp} and C{tearDown}, so mix it in before L{TestCase} or call
its methods from the overridden ones in the subclass.
@cvar skippedReactors: A dict mapping FQPN strings of reactors for
which the tests defined by this class will be skipped to strings
giving the skip message.
@cvar requiredInterfaces: A C{list} of interfaces which the reactor must
provide or these tests will be skipped. The default, C{None}, means
that no interfaces are required.
@ivar reactorFactory: A no-argument callable which returns the reactor to
use for testing.
@ivar originalHandler: The SIGCHLD handler which was installed when setUp
ran and which will be re-installed when tearDown runs.
@ivar _reactors: A list of FQPN strings giving the reactors for which
TestCases will be created.
"""
_reactors = ["twisted.internet.selectreactor.SelectReactor",
"twisted.internet.pollreactor.PollReactor",
"twisted.internet.epollreactor.EPollReactor",
"twisted.internet.glib2reactor.Glib2Reactor",
"twisted.internet.gtk2reactor.Gtk2Reactor",
"twisted.internet.kqreactor.KQueueReactor",
"twisted.internet.win32eventreactor.Win32Reactor",
"twisted.internet.iocpreactor.reactor.IOCPReactor",
"twisted.internet.cfreactor.CFReactor"]
reactorFactory = None
originalHandler = None
requiredInterfaces = None
skippedReactors = {}
def setUp(self):
"""
Clear the SIGCHLD handler, if there is one, to ensure an environment
like the one which exists prior to a call to L{reactor.run}.
"""
if platformType == 'posix':
self.originalHandler = signal.signal(signal.SIGCHLD, signal.SIG_DFL)
def tearDown(self):
"""
Restore the original SIGCHLD handler and reap processes as long as
there seem to be any remaining.
"""
if self.originalHandler is not None:
signal.signal(signal.SIGCHLD, self.originalHandler)
if process is not None:
while process.reapProcessHandlers:
log.msg(
"ReactorBuilder.tearDown reaping some processes %r" % (
process.reapProcessHandlers,))
process.reapAllProcesses()
def unbuildReactor(self, reactor):
"""
Clean up any resources which may have been allocated for the given
reactor by its creation or by a test which used it.
"""
# Chris says:
#
# XXX These explicit calls to clean up the waker (and any other
# internal readers) should become obsolete when bug #3063 is
# fixed. -radix, 2008-02-29. Fortunately it should probably cause an
# error when bug #3063 is fixed, so it should be removed in the same
# branch that fixes it.
#
# -exarkun
reactor._uninstallHandler()
if getattr(reactor, '_internalReaders', None) is not None:
for reader in reactor._internalReaders:
reactor.removeReader(reader)
reader.connectionLost(None)
reactor._internalReaders.clear()
# Here's an extra thing unrelated to wakers but necessary for
# cleaning up after the reactors we make. -exarkun
reactor.disconnectAll()
# It would also be bad if any timed calls left over were allowed to
# run.
calls = reactor.getDelayedCalls()
for c in calls:
c.cancel()
def buildReactor(self):
"""
Create and return a reactor using C{self.reactorFactory}.
"""
try:
from twisted.internet.cfreactor import CFReactor
from twisted.internet import reactor as globalReactor
except ImportError:
pass
else:
if (isinstance(globalReactor, CFReactor)
and self.reactorFactory is CFReactor):
raise SkipTest(
"CFReactor uses APIs which manipulate global state, "
"so it's not safe to run its own reactor-builder tests "
"under itself")
try:
reactor = self.reactorFactory()
except:
# Unfortunately, not all errors which result in a reactor
# being unusable are detectable without actually
# instantiating the reactor. So we catch some more here
# and skip the test if necessary. We also log it to aid
# with debugging, but flush the logged error so the test
# doesn't fail.
log.err(None, "Failed to install reactor")
self.flushLoggedErrors()
raise SkipTest(Failure().getErrorMessage())
else:
if self.requiredInterfaces is not None:
missing = filter(
lambda required: not required.providedBy(reactor),
self.requiredInterfaces)
if missing:
self.unbuildReactor(reactor)
raise SkipTest("%r does not provide %s" % (
reactor, ",".join([repr(x) for x in missing])))
self.addCleanup(self.unbuildReactor, reactor)
return reactor
def runReactor(self, reactor, timeout=None):
"""
Run the reactor for at most the given amount of time.
@param reactor: The reactor to run.
@type timeout: C{int} or C{float}
@param timeout: The maximum amount of time, specified in seconds, to
allow the reactor to run. If the reactor is still running after
this much time has elapsed, it will be stopped and an exception
raised. If C{None}, the default test method timeout imposed by
Trial will be used. This depends on the L{IReactorTime}
implementation of C{reactor} for correct operation.
@raise TimeoutError: If the reactor is still running after C{timeout}
seconds.
"""
if timeout is None:
timeout = self.getTimeout()
timedOut = []
def stop():
timedOut.append(None)
reactor.stop()
reactor.callLater(timeout, stop)
reactor.run()
if timedOut:
raise TimeoutError(
"reactor still running after %s seconds" % (timeout,))
def makeTestCaseClasses(cls):
"""
Create a L{TestCase} subclass which mixes in C{cls} for each known
reactor and return a dict mapping their names to them.
"""
classes = {}
for reactor in cls._reactors:
shortReactorName = reactor.split(".")[-1]
name = (cls.__name__ + "." + shortReactorName).replace(".", "_")
class testcase(cls, TestCase):
__module__ = cls.__module__
if reactor in cls.skippedReactors:
skip = cls.skippedReactors[reactor]
try:
reactorFactory = namedAny(reactor)
except:
skip = Failure().getErrorMessage()
testcase.__name__ = name
classes[testcase.__name__] = testcase
return classes
makeTestCaseClasses = classmethod(makeTestCaseClasses)
__all__ = ['ReactorBuilder']
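# Usage sketch (mirrors how Twisted's reactor tests consume this mixin; the
# class and test names below are illustrative):
#   class StopTestsBuilder(ReactorBuilder):
#       def test_stop(self):
#           reactor = self.buildReactor()
#           reactor.callWhenRunning(reactor.stop)
#           self.runReactor(reactor)
#   globals().update(StopTestsBuilder.makeTestCaseClasses())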
|
atalax/libsigrokdecode
|
refs/heads/qi
|
decoders/pan1321/pd.py
|
13
|
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2012-2013 Uwe Hermann <uwe@hermann-uwe.de>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
import sigrokdecode as srd
# ...
RX = 0
TX = 1
class Decoder(srd.Decoder):
api_version = 2
id = 'pan1321'
name = 'PAN1321'
longname = 'Panasonic PAN1321'
desc = 'Bluetooth RF module with Serial Port Profile (SPP).'
license = 'gplv2+'
inputs = ['uart']
outputs = ['pan1321']
annotations = (
('text-verbose', 'Human-readable text (verbose)'),
('text', 'Human-readable text'),
('warnings', 'Human-readable warnings'),
)
def __init__(self, **kwargs):
self.cmd = ['', '']
self.ss_block = None
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
def putx(self, data):
self.put(self.ss_block, self.es_block, self.out_ann, data)
def handle_host_command(self, rxtx, s):
if s.startswith('AT+JAAC'):
# AT+JAAC=<auto_accept> (0 or 1)
p = s[s.find('=') + 1:]
if p not in ('0', '1'):
self.putx([2, ['Warning: Invalid JAAC parameter "%s"' % p]])
return
x = 'Auto' if (p == '1') else 'Don\'t auto'
self.putx([0, ['%s-accept new connections' % x]])
self.putx([1, ['%s-accept connections' % x]])
elif s.startswith('AT+JPRO'):
# AT+JPRO=<mode> (0 or 1)
p = s[s.find('=') + 1:]
if p not in ('0', '1'):
self.putx([2, ['Warning: Invalid JPRO parameter "%s"' % p]])
return
onoff = 'off' if (p == '0') else 'on'
x = 'Leaving' if (p == '0') else 'Entering'
self.putx([0, ['%s production mode' % x]])
self.putx([1, ['Production mode = %s' % onoff]])
elif s.startswith('AT+JRES'):
# AT+JRES
if s != 'AT+JRES': # JRES has no params.
self.putx([2, ['Warning: Invalid JRES usage.']])
return
self.putx([0, ['Triggering a software reset']])
self.putx([1, ['Reset']])
elif s.startswith('AT+JSDA'):
# AT+JSDA=<l>,<d> (l: length in bytes, d: data)
# l is (max?) 3 decimal digits and ranges from 1 to MTU size.
# Data can be ASCII or binary values (l bytes total).
l, d = s[s.find('=') + 1:].split(',')
if not l.isnumeric():
self.putx([2, ['Warning: Invalid data length "%s".' % l]])
if int(l) != len(d):
self.putx([2, ['Warning: Data length mismatch (%d != %d).' % \
(int(l), len(d))]])
# TODO: Warn if length > MTU size (which is firmware-dependent
# and is negotiated by both Bluetooth devices upon connection).
b = ''.join(['%02x ' % ord(c) for c in d])[:-1]
self.putx([0, ['Sending %d data bytes: %s' % (int(l), b)]])
self.putx([1, ['Send %d = %s' % (int(l), b)]])
elif s.startswith('AT+JSEC'):
# AT+JSEC=<secmode>,<linkkey_info>,<pintype>,<pinlen>,<pin>
# secmode: Security mode 1 or 3 (default).
# linkkey_info: Must be 1 or 2. Has no function according to docs.
# pintype: 1: variable pin (default), 2: fixed pin.
# pinlen: PIN length (2 decimal digits). Max. PIN length is 16.
# pin: The Bluetooth PIN ('pinlen' chars). Used if pintype=2.
# Note: AT+JSEC (if used) must be the first command after reset.
# TODO: Parse all the other parameters.
pin = s[-4:]
self.putx([0, ['Host set the Bluetooth PIN to "' + pin + '"']])
self.putx([1, ['PIN = ' + pin]])
elif s.startswith('AT+JSLN'):
# AT+JSLN=<namelen>,<name>
# namelen: Friendly name length (2 decimal digits). Max. len is 18.
# name: The Bluetooth "friendly name" ('namelen' ASCII characters).
name = s[s.find(',') + 1:]
self.putx([0, ['Host set the Bluetooth name to "' + name + '"']])
self.putx([1, ['BT name = ' + name]])
else:
self.putx([0, ['Host sent unsupported command: %s' % s]])
self.putx([1, ['Unsupported command: %s' % s]])
def handle_device_reply(self, rxtx, s):
if s == 'ROK':
self.putx([0, ['Device initialized correctly']])
self.putx([1, ['Init']])
elif s == 'OK':
self.putx([0, ['Device acknowledged last command']])
self.putx([1, ['ACK']])
elif s.startswith('ERR'):
error = s[s.find('=') + 1:]
self.putx([0, ['Device sent error code ' + error]])
self.putx([1, ['ERR = ' + error]])
else:
self.putx([0, ['Device sent an unknown reply: %s' % s]])
self.putx([1, ['Unknown reply: %s' % s]])
def decode(self, ss, es, data):
ptype, rxtx, pdata = data
# For now, ignore all UART packets except the actual data packets.
if ptype != 'DATA':
return
# We're only interested in the byte value (not individual bits).
pdata = pdata[0]
# If this is the start of a command/reply, remember the start sample.
if self.cmd[rxtx] == '':
self.ss_block = ss
# Append a new (ASCII) byte to the currently built/parsed command.
self.cmd[rxtx] += chr(pdata)
# Get packets/bytes until an \r\n sequence is found (end of command).
if self.cmd[rxtx][-2:] != '\r\n':
return
# Handle host commands and device replies.
# We remove trailing \r\n from the strings before handling them.
self.es_block = es
if rxtx == RX:
self.handle_device_reply(rxtx, self.cmd[rxtx][:-2])
elif rxtx == TX:
self.handle_host_command(rxtx, self.cmd[rxtx][:-2])
self.cmd[rxtx] = ''
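# Usage sketch (assumed sigrok-cli invocation; this decoder stacks on top of
# the 'uart' decoder, so the UART options must match the capture):
#   sigrok-cli -i capture.sr -P uart:baudrate=115200,pan1321 -A pan1321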
|
leighpauls/k2cro4
|
refs/heads/master
|
chrome/test/functional/infobars.py
|
4
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import re
import pyauto_functional # Must be imported before pyauto
import pyauto
import test_utils
class InfobarTest(pyauto.PyUITest):
"""TestCase for Infobars."""
def Debug(self):
"""Test method for experimentation.
This method will not run automatically.
To run:
python chrome/test/functional/infobars.py infobars.InfobarTest.Debug
"""
while True:
raw_input('Hit <enter> to dump info.. ')
info = self.GetBrowserInfo()
for window in info['windows']:
for tab in window['tabs']:
print 'Window', window['index'], 'tab', tab['index']
self.pprint(tab['infobars'])
def setUp(self):
pyauto.PyUITest.setUp(self)
self._flash_plugin_type = 'Plug-in'
if self.GetBrowserInfo()['properties']['branding'] == 'Google Chrome':
self._flash_plugin_type = 'Pepper Plugin'
# Forcibly trigger all plugins to get registered. crbug.com/94123
    # Sometimes flash files loaded too quickly after launching the browser
    # end up getting downloaded, which seems to indicate that the plugin
# hasn't been registered yet.
self.GetPluginsInfo()
def _GetTabInfo(self, windex=0, tab_index=0):
"""Helper to return info for the given tab in the given window.
Defaults to first tab in first window.
"""
return self.GetBrowserInfo()['windows'][windex]['tabs'][tab_index]
def testPluginCrashInfobar(self):
"""Verify the "plugin crashed" infobar."""
flash_url = self.GetFileURLForContentDataPath('plugin', 'flash.swf')
# Trigger flash plugin
self.NavigateToURL(flash_url)
child_processes = self.GetBrowserInfo()['child_processes']
flash = [x for x in child_processes if
x['type'] == self._flash_plugin_type and
x['name'] == 'Shockwave Flash'][0]
self.assertTrue(flash)
logging.info('Killing flash plugin. pid %d' % flash['pid'])
self.Kill(flash['pid'])
self.assertTrue(self.WaitForInfobarCount(1))
crash_infobar = self._GetTabInfo()['infobars']
self.assertTrue(crash_infobar)
self.assertEqual(1, len(crash_infobar))
self.assertTrue('crashed' in crash_infobar[0]['text'])
self.assertEqual('confirm_infobar', crash_infobar[0]['type'])
# Dismiss the infobar
self.PerformActionOnInfobar('dismiss', infobar_index=0)
self.assertFalse(self._GetTabInfo()['infobars'])
def _VerifyGeolocationInfobar(self, windex, tab_index):
"""Verify geolocation infobar properties.
Assumes that geolocation infobar is showing up in the given tab in the
given window.
"""
# TODO(dyu): Remove this helper function when a function to identify
# infobar_type and index of the type is implemented.
tab_info = self._GetTabInfo(windex, tab_index)
geolocation_infobar = tab_info['infobars']
self.assertTrue(geolocation_infobar)
self.assertEqual(1, len(geolocation_infobar))
self.assertEqual('Learn more', geolocation_infobar[0]['link_text'])
self.assertEqual(2, len(geolocation_infobar[0]['buttons']))
self.assertEqual('Allow', geolocation_infobar[0]['buttons'][0])
self.assertEqual('Deny', geolocation_infobar[0]['buttons'][1])
def testGeolocationInfobar(self):
"""Verify geoLocation infobar."""
url = self.GetHttpURLForDataPath('geolocation', 'geolocation_on_load.html')
self.NavigateToURL(url)
self.assertTrue(self.WaitForInfobarCount(1))
self._VerifyGeolocationInfobar(windex=0, tab_index=0)
# Accept, and verify that the infobar went away
self.PerformActionOnInfobar('accept', infobar_index=0)
self.assertFalse(self._GetTabInfo()['infobars'])
def testGeolocationInfobarInMultipleTabsAndWindows(self):
"""Verify GeoLocation inforbar in multiple tabs."""
url = self.GetFileURLForDataPath( # triggers geolocation
'geolocation', 'geolocation_on_load.html')
for tab_index in range(1, 2):
self.AppendTab(pyauto.GURL(url))
self.assertTrue(
self.WaitForInfobarCount(1, windex=0, tab_index=tab_index))
self._VerifyGeolocationInfobar(windex=0, tab_index=tab_index)
# Try in a new window
self.OpenNewBrowserWindow(True)
self.NavigateToURL(url, 1, 0)
self.assertTrue(self.WaitForInfobarCount(1, windex=1, tab_index=0))
self._VerifyGeolocationInfobar(windex=1, tab_index=0)
# Incognito window
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.NavigateToURL(url, 2, 0)
self.assertTrue(self.WaitForInfobarCount(1, windex=2, tab_index=0))
self._VerifyGeolocationInfobar(windex=2, tab_index=0)
def testMultipleDownloadsInfobar(self):
"""Verify the mutiple downloads infobar."""
zip_files = ['a_zip_file.zip']
zip_files.append(zip_files[0].replace('.', ' (1).'))
html_file = 'download-a_zip_file.html'
assert pyauto.PyUITest.IsEnUS()
file_url = self.GetFileURLForDataPath('downloads', html_file)
match_text = 'This site is attempting to download multiple files. ' \
'Do you want to allow this?'
self.NavigateToURL('chrome://downloads') # trigger download manager
for zip_file in zip_files:
test_utils.RemoveDownloadedTestFile(self, zip_file)
self.DownloadAndWaitForStart(file_url)
self.assertTrue(self.WaitForInfobarCount(1))
tab_info = self._GetTabInfo(0, 0)
infobars = tab_info['infobars']
self.assertTrue(infobars, 'Expected the multiple downloads infobar')
self.assertEqual(1, len(infobars))
self.assertEqual(match_text, infobars[0]['text'])
self.assertEqual(2, len(infobars[0]['buttons']))
self.assertEqual('Allow', infobars[0]['buttons'][0])
self.assertEqual('Deny', infobars[0]['buttons'][1])
self.WaitForAllDownloadsToComplete()
for zip_file in zip_files:
test_utils.RemoveDownloadedTestFile(self, zip_file)
def _GetFlashCrashInfobarCount(self, windex=0, tab_index=0):
"""Returns the count of 'Shockwave Flash has crashed' infobars."""
browser_window = self.GetBrowserInfo()['windows'][windex]
infobars = browser_window['tabs'][tab_index]['infobars']
flash_crash_infobar_count = 0
for infobar in infobars:
if (('text' in infobar) and
infobar['text'].startswith('Shockwave Flash has crashed')):
flash_crash_infobar_count += 1
return flash_crash_infobar_count
def testPluginCrashForMultiTabs(self):
"""Verify plugin crash infobar shows up only on the tabs using plugin."""
non_flash_url = self.GetFileURLForDataPath('english_page.html')
flash_url = self.GetFileURLForContentDataPath('plugin', 'FlashSpin.swf')
# False = non-flash url, True = flash url
# This list mirrors the tab order so we can compare flash and non-flash pages
urls_type = [False, True, False, True, False]
for _ in range(2):
self.AppendTab(pyauto.GURL(flash_url))
self.AppendTab(pyauto.GURL(non_flash_url))
# Killing flash process
child_processes = self.GetBrowserInfo()['child_processes']
flash = [x for x in child_processes if
x['type'] == self._flash_plugin_type and
x['name'] == 'Shockwave Flash'][0]
self.assertTrue(flash)
self.Kill(flash['pid'])
# The crashed-plugin infobar should show up in the second tab of this window,
# so we pass the window and tab arguments when waiting for the infobar.
self.assertTrue(self.WaitForInfobarCount(1, windex=0, tab_index=1))
for i in range(len(urls_type)):
# Verify that if page doesn't have flash plugin,
# it should not have infobar popped-up
self.ActivateTab(i)
if not urls_type[i]:
self.assertEqual(
self._GetFlashCrashInfobarCount(0, i), 0,
msg='Did not expect crash infobar in tab at index %d' % i)
elif urls_type[i]:
self.assertEqual(
self._GetFlashCrashInfobarCount(0, i), 1,
msg='Expected crash infobar in tab at index %d' % i)
infobar = self.GetBrowserInfo()['windows'][0]['tabs'][i]['infobars']
self.assertEqual(infobar[0]['type'], 'confirm_infobar')
self.assertEqual(len(infobar), 1)
class OneClickInfobarTest(pyauto.PyUITest):
"""Tests for one-click sign in infobar."""
BLOCK_COOKIE_PATTERN = {'https://accounts.google.com/': {'cookies': 2}}
OC_INFOBAR_TYPE = 'oneclicklogin_infobar'
PW_INFOBAR_TYPE = 'password_infobar'
URL = 'https://www.google.com/accounts/ServiceLogin'
URL_LOGIN = 'https://www.google.com/accounts/Login'
URL_LOGOUT = 'https://www.google.com/accounts/Logout'
def setUp(self):
pyauto.PyUITest.setUp(self)
self._driver = self.NewWebDriver()
def _LogIntoGoogleAccount(self, tab_index=0, windex=0):
"""Log into Google account.
Args:
tab_index: The tab index, default is 0.
windex: The window index, default is 0.
"""
creds = self.GetPrivateInfo()['test_google_account']
username = creds['username']
password = creds['password']
test_utils.GoogleAccountsLogin(self, username, password, tab_index, windex)
# TODO(dyu): Use WaitUntilNavigationCompletes after investigating
# crbug.com/124877
self.WaitUntil(
lambda: self.GetDOMValue('document.readyState'),
expect_retval='complete')
def _PerformActionOnInfobar(self, action):
"""Perform an action on the infobar: accept, cancel, or dismiss.
The one-click sign in infobar must show in the first tab of the first
window. If action is 'accept' then the account is synced. If the action is
'cancel' then the infobar should be dismissed and never shown again. The
account will not be synced. If the action is 'dismiss' then the infobar will
be shown again after the next login.
Args:
action: The action to perform on the infobar.
"""
infobar_index = test_utils.WaitForInfobarTypeAndGetIndex(
self, self.OC_INFOBAR_TYPE)
self.PerformActionOnInfobar(action, infobar_index)
def _DisplayOneClickInfobar(self, tab_index=0, windex=0):
"""One-click sign in infobar appears after logging into google account.
Args:
tab_index: The tab index, default is 0.
windex: The window index, default is 0.
"""
self._LogIntoGoogleAccount(tab_index=tab_index, windex=windex)
self.assertTrue(self.WaitUntil(
lambda: test_utils.GetInfobarIndexByType(
self, self.OC_INFOBAR_TYPE,
tab_index=tab_index, windex=windex) is not None),
msg='The one-click login infobar did not appear.')
def testDisplayOneClickInfobar(self):
"""Verify one-click infobar appears after login into google account.
One-click infobar should appear after signing into a google account
for the first time using a clean profile.
"""
self._DisplayOneClickInfobar()
def testNoOneClickInfobarAfterCancel(self):
"""Verify one-click infobar does not appear again after clicking cancel.
The one-click infobar should not display again after logging into an
account and declining to sync the first time. The test covers
restarting the browser with the same profile and verifying the one-click
infobar does not show after login.
This test also verifies that the password infobar displays.
"""
self._DisplayOneClickInfobar()
self._PerformActionOnInfobar(action='cancel') # Click 'No thanks' button.
self.NavigateToURL(self.URL_LOGOUT)
self._LogIntoGoogleAccount()
test_utils.WaitForInfobarTypeAndGetIndex(self, self.PW_INFOBAR_TYPE)
test_utils.AssertInfobarTypeDoesNotAppear(self, self.OC_INFOBAR_TYPE)
# Restart browser with the same profile.
self.RestartBrowser(clear_profile=False)
self.NavigateToURL(self.URL_LOGOUT)
self._LogIntoGoogleAccount()
test_utils.AssertInfobarTypeDoesNotAppear(self, self.OC_INFOBAR_TYPE)
def testDisplayOneClickInfobarAfterDismiss(self):
"""Verify one-click infobar appears again after clicking dismiss button.
The one-click infobar should display again after logging into an
account and clicking to dismiss the infobar the first time.
This test also verifies that the password infobar does not display.
The one-click infobar should supersede the password infobar.
"""
self._DisplayOneClickInfobar()
self._PerformActionOnInfobar(action='dismiss') # Click 'x' button.
self.NavigateToURL(self.URL_LOGOUT)
self._LogIntoGoogleAccount()
test_utils.WaitForInfobarTypeAndGetIndex(self, self.OC_INFOBAR_TYPE)
test_utils.AssertInfobarTypeDoesNotAppear(self, self.PW_INFOBAR_TYPE)
def _OpenSecondProfile(self):
"""Create a second profile."""
self.OpenNewBrowserWindowWithNewProfile()
self.assertEqual(2, len(self.GetMultiProfileInfo()['profiles']),
msg='The second profile was not created.')
def testDisplayOneClickInfobarPerProfile(self):
"""Verify one-click infobar appears for each profile after sign-in."""
# Default profile.
self._DisplayOneClickInfobar()
self._OpenSecondProfile()
self._DisplayOneClickInfobar(windex=1)
def testNoSameIDSigninForTwoProfiles(self):
"""Verify two profiles cannot be signed in with same ID.
Make sure that the one-click sign in infobar does not appear for two
profiles trying to sign in with the same ID. This test creates a profile
and connects it to a Google account. Another new profile is created and
tries to login with the connected account from the first profile.
This test verifies the following bug: crbug.com/122975
"""
test_utils.SignInToSyncAndVerifyState(self, 'test_google_account')
self._OpenSecondProfile()
self._LogIntoGoogleAccount(tab_index=0, windex=1)
self.assertTrue(test_utils.GetInfobarIndexByType(
self, self.OC_INFOBAR_TYPE, tab_index=0, windex=1) is None)
def testNoOneClickInfobarWhenCookiesBlocked(self):
"""Verify one-click infobar does not show when cookies are blocked.
One-click sign in should not be enabled if cookies are blocked for Google
accounts domain.
This test verifies the following bug: crbug.com/117841
"""
# Block cookies for Google accounts domain.
self.SetPrefs(pyauto.kContentSettingsPatternPairs,
self.BLOCK_COOKIE_PATTERN)
self._LogIntoGoogleAccount()
test_utils.AssertInfobarTypeDoesNotAppear(self, self.OC_INFOBAR_TYPE)
def testOneClickInfobarShownWhenWinLoseFocus(self):
"""Verify one-click infobar still shows when window loses focus.
This test verifies the following bug: crbug.com/121739
"""
self._LogIntoGoogleAccount()
test_utils.WaitForInfobarTypeAndGetIndex(self, self.OC_INFOBAR_TYPE)
# Open new window to shift focus away.
self.OpenNewBrowserWindow(True)
test_utils.GetInfobarIndexByType(self, self.OC_INFOBAR_TYPE)
def testNoOneClickInfobarInIncognito(self):
"""Verify that one-click infobar does not show up in incognito mode."""
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self._LogIntoGoogleAccount(windex=1)
test_utils.AssertInfobarTypeDoesNotAppear(
self, self.OC_INFOBAR_TYPE, windex=1)
if __name__ == '__main__':
pyauto_functional.Main()
|
jmerkow/VTK
|
refs/heads/master
|
Common/DataModel/Testing/Python/ImplicitSum.py
|
20
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# This example demonstrates adding two implicit models
# to produce an (unexpected!) result
# The standard VTK packages were imported above.
geomObject1 = vtk.vtkCone()
geomObject2 = vtk.vtkSphere()
geomObject2.SetRadius(0.5)
geomObject2.SetCenter(0.5,0,0)
sum = vtk.vtkImplicitSum()
sum.SetNormalizeByWeight(1)
sum.AddFunction(geomObject1,2)
sum.AddFunction(geomObject2,1)
sample = vtk.vtkSampleFunction()
sample.SetImplicitFunction(sum)
sample.SetSampleDimensions(60,60,60)
sample.ComputeNormalsOn()
surface = vtk.vtkContourFilter()
surface.SetInputConnection(sample.GetOutputPort())
surface.SetValue(0,0.0)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(surface.GetOutputPort())
mapper.ScalarVisibilityOff()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetDiffuseColor(0.2,0.4,0.6)
actor.GetProperty().SetSpecular(0.4)
actor.GetProperty().SetDiffuse(0.7)
actor.GetProperty().SetSpecularPower(40)
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(actor)
ren1.SetBackground(1,1,1)
renWin.SetSize(300,300)
ren1.ResetCamera()
ren1.GetActiveCamera().Azimuth(60)
ren1.GetActiveCamera().Elevation(-10)
ren1.GetActiveCamera().Dolly(1.5)
ren1.ResetCameraClippingRange()
iren.Initialize()
# prevent the tk window from showing up then start the event loop
# render the image
#
renWin.Render()
# --- end of script --
|
VladimirVystupkin/AMRParsing1.x
|
refs/heads/master
|
stanfordnlp/unidecode/x061.py
|
252
|
data = (
'Qiao ', # 0x00
'Chou ', # 0x01
'Bei ', # 0x02
'Xuan ', # 0x03
'Wei ', # 0x04
'Ge ', # 0x05
'Qian ', # 0x06
'Wei ', # 0x07
'Yu ', # 0x08
'Yu ', # 0x09
'Bi ', # 0x0a
'Xuan ', # 0x0b
'Huan ', # 0x0c
'Min ', # 0x0d
'Bi ', # 0x0e
'Yi ', # 0x0f
'Mian ', # 0x10
'Yong ', # 0x11
'Kai ', # 0x12
'Dang ', # 0x13
'Yin ', # 0x14
'E ', # 0x15
'Chen ', # 0x16
'Mou ', # 0x17
'Ke ', # 0x18
'Ke ', # 0x19
'Yu ', # 0x1a
'Ai ', # 0x1b
'Qie ', # 0x1c
'Yan ', # 0x1d
'Nuo ', # 0x1e
'Gan ', # 0x1f
'Yun ', # 0x20
'Zong ', # 0x21
'Sai ', # 0x22
'Leng ', # 0x23
'Fen ', # 0x24
'[?] ', # 0x25
'Kui ', # 0x26
'Kui ', # 0x27
'Que ', # 0x28
'Gong ', # 0x29
'Yun ', # 0x2a
'Su ', # 0x2b
'Su ', # 0x2c
'Qi ', # 0x2d
'Yao ', # 0x2e
'Song ', # 0x2f
'Huang ', # 0x30
'Ji ', # 0x31
'Gu ', # 0x32
'Ju ', # 0x33
'Chuang ', # 0x34
'Ni ', # 0x35
'Xie ', # 0x36
'Kai ', # 0x37
'Zheng ', # 0x38
'Yong ', # 0x39
'Cao ', # 0x3a
'Sun ', # 0x3b
'Shen ', # 0x3c
'Bo ', # 0x3d
'Kai ', # 0x3e
'Yuan ', # 0x3f
'Xie ', # 0x40
'Hun ', # 0x41
'Yong ', # 0x42
'Yang ', # 0x43
'Li ', # 0x44
'Sao ', # 0x45
'Tao ', # 0x46
'Yin ', # 0x47
'Ci ', # 0x48
'Xu ', # 0x49
'Qian ', # 0x4a
'Tai ', # 0x4b
'Huang ', # 0x4c
'Yun ', # 0x4d
'Shen ', # 0x4e
'Ming ', # 0x4f
'[?] ', # 0x50
'She ', # 0x51
'Cong ', # 0x52
'Piao ', # 0x53
'Mo ', # 0x54
'Mu ', # 0x55
'Guo ', # 0x56
'Chi ', # 0x57
'Can ', # 0x58
'Can ', # 0x59
'Can ', # 0x5a
'Cui ', # 0x5b
'Min ', # 0x5c
'Te ', # 0x5d
'Zhang ', # 0x5e
'Tong ', # 0x5f
'Ao ', # 0x60
'Shuang ', # 0x61
'Man ', # 0x62
'Guan ', # 0x63
'Que ', # 0x64
'Zao ', # 0x65
'Jiu ', # 0x66
'Hui ', # 0x67
'Kai ', # 0x68
'Lian ', # 0x69
'Ou ', # 0x6a
'Song ', # 0x6b
'Jin ', # 0x6c
'Yin ', # 0x6d
'Lu ', # 0x6e
'Shang ', # 0x6f
'Wei ', # 0x70
'Tuan ', # 0x71
'Man ', # 0x72
'Qian ', # 0x73
'She ', # 0x74
'Yong ', # 0x75
'Qing ', # 0x76
'Kang ', # 0x77
'Di ', # 0x78
'Zhi ', # 0x79
'Lou ', # 0x7a
'Juan ', # 0x7b
'Qi ', # 0x7c
'Qi ', # 0x7d
'Yu ', # 0x7e
'Ping ', # 0x7f
'Liao ', # 0x80
'Cong ', # 0x81
'You ', # 0x82
'Chong ', # 0x83
'Zhi ', # 0x84
'Tong ', # 0x85
'Cheng ', # 0x86
'Qi ', # 0x87
'Qu ', # 0x88
'Peng ', # 0x89
'Bei ', # 0x8a
'Bie ', # 0x8b
'Chun ', # 0x8c
'Jiao ', # 0x8d
'Zeng ', # 0x8e
'Chi ', # 0x8f
'Lian ', # 0x90
'Ping ', # 0x91
'Kui ', # 0x92
'Hui ', # 0x93
'Qiao ', # 0x94
'Cheng ', # 0x95
'Yin ', # 0x96
'Yin ', # 0x97
'Xi ', # 0x98
'Xi ', # 0x99
'Dan ', # 0x9a
'Tan ', # 0x9b
'Duo ', # 0x9c
'Dui ', # 0x9d
'Dui ', # 0x9e
'Su ', # 0x9f
'Jue ', # 0xa0
'Ce ', # 0xa1
'Xiao ', # 0xa2
'Fan ', # 0xa3
'Fen ', # 0xa4
'Lao ', # 0xa5
'Lao ', # 0xa6
'Chong ', # 0xa7
'Han ', # 0xa8
'Qi ', # 0xa9
'Xian ', # 0xaa
'Min ', # 0xab
'Jing ', # 0xac
'Liao ', # 0xad
'Wu ', # 0xae
'Can ', # 0xaf
'Jue ', # 0xb0
'Cu ', # 0xb1
'Xian ', # 0xb2
'Tan ', # 0xb3
'Sheng ', # 0xb4
'Pi ', # 0xb5
'Yi ', # 0xb6
'Chu ', # 0xb7
'Xian ', # 0xb8
'Nao ', # 0xb9
'Dan ', # 0xba
'Tan ', # 0xbb
'Jing ', # 0xbc
'Song ', # 0xbd
'Han ', # 0xbe
'Jiao ', # 0xbf
'Wai ', # 0xc0
'Huan ', # 0xc1
'Dong ', # 0xc2
'Qin ', # 0xc3
'Qin ', # 0xc4
'Qu ', # 0xc5
'Cao ', # 0xc6
'Ken ', # 0xc7
'Xie ', # 0xc8
'Ying ', # 0xc9
'Ao ', # 0xca
'Mao ', # 0xcb
'Yi ', # 0xcc
'Lin ', # 0xcd
'Se ', # 0xce
'Jun ', # 0xcf
'Huai ', # 0xd0
'Men ', # 0xd1
'Lan ', # 0xd2
'Ai ', # 0xd3
'Lin ', # 0xd4
'Yan ', # 0xd5
'Gua ', # 0xd6
'Xia ', # 0xd7
'Chi ', # 0xd8
'Yu ', # 0xd9
'Yin ', # 0xda
'Dai ', # 0xdb
'Meng ', # 0xdc
'Ai ', # 0xdd
'Meng ', # 0xde
'Dui ', # 0xdf
'Qi ', # 0xe0
'Mo ', # 0xe1
'Lan ', # 0xe2
'Men ', # 0xe3
'Chou ', # 0xe4
'Zhi ', # 0xe5
'Nuo ', # 0xe6
'Nuo ', # 0xe7
'Yan ', # 0xe8
'Yang ', # 0xe9
'Bo ', # 0xea
'Zhi ', # 0xeb
'Kuang ', # 0xec
'Kuang ', # 0xed
'You ', # 0xee
'Fu ', # 0xef
'Liu ', # 0xf0
'Mie ', # 0xf1
'Cheng ', # 0xf2
'[?] ', # 0xf3
'Chan ', # 0xf4
'Meng ', # 0xf5
'Lan ', # 0xf6
'Huai ', # 0xf7
'Xuan ', # 0xf8
'Rang ', # 0xf9
'Chan ', # 0xfa
'Ji ', # 0xfb
'Ju ', # 0xfc
'Huan ', # 0xfd
'She ', # 0xfe
'Yi ', # 0xff
)
|
sleshepic/l900_MC2_Kernel
|
refs/heads/master
|
tools/perf/scripts/python/sctop.py
|
11180
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
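#
# Illustrative invocations (arguments assumed, not part of the original header):
#   perf script -s sctop.py            # all processes, refreshed every 3 seconds
#   perf script -s sctop.py firefox 5  # only syscalls from 'firefox', every 5 seconds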
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
frankvdp/django
|
refs/heads/master
|
tests/migrations/test_migrations_squashed_complex/6_auto.py
|
266
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("migrations", "5_auto")]
operations = [
migrations.RunPython(migrations.RunPython.noop)
]
|
jaruba/chromium.src
|
refs/heads/nw12
|
chrome/test/telemetry/telemetry_lib.py
|
133
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import os
import sys
path = os.path.join(os.path.dirname(__file__),
'..', '..', '..', 'tools', 'telemetry')
sys.path.append(path)
import telemetry
sys.path.pop()
|
cogeorg/black_rhino
|
refs/heads/master
|
examples/degroot/networkx/utils/random_sequence.py
|
20
|
"""
Utilities for generating random numbers, random sequences, and
random selections.
"""
# Copyright (C) 2004-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import random
import sys
import networkx as nx
__author__ = '\n'.join(['Aric Hagberg (hagberg@lanl.gov)',
'Dan Schult(dschult@colgate.edu)',
'Ben Edwards(bedwards@cs.unm.edu)'])
def create_degree_sequence(n, sfunction=None, max_tries=50, **kwds):
""" Attempt to create a valid degree sequence of length n using
specified function sfunction(n,**kwds).
Parameters
----------
n : int
Length of degree sequence = number of nodes
sfunction: function
Function which returns a list of n real or integer values.
Called as "sfunction(n,**kwds)".
max_tries: int
Max number of attempts at creating valid degree sequence.
Notes
-----
Repeatedly create a degree sequence by calling sfunction(n,**kwds)
until achieving a valid degree sequence. If unsuccessful after
max_tries attempts, raise an exception.
For examples of sfunctions that return sequences of random numbers,
see networkx.Utils.
Examples
--------
>>> from networkx.utils import uniform_sequence, create_degree_sequence
>>> seq=create_degree_sequence(10,uniform_sequence)
"""
tries=0
max_deg=n
while tries < max_tries:
trialseq=sfunction(n,**kwds)
# round to integer values in the range [0,max_deg]
seq=[min(max_deg, max( int(round(s)),0 )) for s in trialseq]
# if graphical return, else throw away and try again
if nx.is_valid_degree_sequence(seq):
return seq
tries+=1
raise nx.NetworkXError(\
"Exceeded max (%d) attempts at a valid sequence."%max_tries)
# The following helpers for choosing random sequences from distributions
# use Python's random module
# http://www.python.org/doc/current/lib/module-random.html
def pareto_sequence(n,exponent=1.0):
"""
Return sample sequence of length n from a Pareto distribution.
"""
return [random.paretovariate(exponent) for i in range(n)]
def powerlaw_sequence(n,exponent=2.0):
"""
Return sample sequence of length n from a power law distribution.
"""
return [random.paretovariate(exponent-1) for i in range(n)]
def zipf_rv(alpha, xmin=1, seed=None):
r"""Return a random value chosen from the Zipf distribution.
The return value is an integer drawn from the probability distribution
::math::
p(x)=\frac{x^{-\alpha}}{\zeta(\alpha,x_{min})},
where `\zeta(\alpha,x_{min})` is the Hurwitz zeta function.
Parameters
----------
alpha : float
Exponent value of the distribution
xmin : int
Minimum value
seed : int
Seed value for random number generator
Returns
-------
x : int
Random value from Zipf distribution
Raises
------
ValueError:
If xmin < 1 or
If alpha <= 1
Notes
-----
The rejection algorithm generates random values for the power-law
distribution in uniformly bounded expected time dependent on
parameters. See [1] for details on its operation.
Examples
--------
>>> nx.zipf_rv(alpha=2, xmin=3, seed=42) # doctest: +SKIP
References
----------
..[1] Luc Devroye, Non-Uniform Random Variate Generation,
Springer-Verlag, New York, 1986.
"""
if xmin < 1:
raise ValueError("xmin < 1")
if alpha <= 1:
raise ValueError("a <= 1.0")
if not seed is None:
random.seed(seed)
a1 = alpha - 1.0
b = 2**a1
while True:
u = 1.0 - random.random() # u in (0,1]
v = random.random() # v in [0,1)
x = int(xmin*u**-(1.0/a1))
t = (1.0+(1.0/x))**a1
if v*x*(t-1.0)/(b-1.0) <= t/b:
break
return x
def zipf_sequence(n, alpha=2.0, xmin=1):
"""Return a sample sequence of length n from a Zipf distribution with
exponent parameter alpha and minimum value xmin.
See Also
--------
zipf_rv
"""
return [ zipf_rv(alpha,xmin) for _ in range(n)]
def uniform_sequence(n):
"""
Return sample sequence of length n from a uniform distribution.
"""
return [ random.uniform(0,n) for i in range(n)]
def cumulative_distribution(distribution):
"""Return normalized cumulative distribution from discrete distribution."""
cdf=[]
cdf.append(0.0)
psum=float(sum(distribution))
for i in range(0,len(distribution)):
cdf.append(cdf[i]+distribution[i]/psum)
return cdf
def discrete_sequence(n, distribution=None, cdistribution=None):
"""
Return sample sequence of length n from a given discrete distribution
or discrete cumulative distribution.
One of the following must be specified.
distribution = histogram of values, will be normalized
cdistribution = normalized discrete cumulative distribution
"""
import bisect
if cdistribution is not None:
cdf=cdistribution
elif distribution is not None:
cdf=cumulative_distribution(distribution)
else:
raise nx.NetworkXError(
"discrete_sequence: distribution or cdistribution missing")
# get a uniform random number
inputseq=[random.random() for i in range(n)]
# choose from CDF
seq=[bisect.bisect_left(cdf,s)-1 for s in inputseq]
return seq
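# Illustrative usage of discrete_sequence (values assumed, not in the original module):
# cumulative_distribution([10, 30, 60]) yields [0.0, 0.1, 0.4, 1.0], so
# discrete_sequence(5, distribution=[10, 30, 60]) returns five values drawn
# from {0, 1, 2} with probabilities 0.1, 0.3 and 0.6 respectively.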
def random_weighted_sample(mapping, k):
"""Return k items without replacement from a weighted sample.
The input is a dictionary of items with weights as values.
"""
if k > len(mapping):
raise ValueError("sample larger than population")
sample = set()
while len(sample) < k:
sample.add(weighted_choice(mapping))
return list(sample)
def weighted_choice(mapping):
"""Return a single element from a weighted sample.
The input is a dictionary of items with weights as values.
"""
# use roulette method
rnd = random.random() * sum(mapping.values())
for k, w in mapping.items():
rnd -= w
if rnd < 0:
return k
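# Illustrative usage of the weighted helpers (values assumed, not in the original module):
# random_weighted_sample({'a': 10, 'b': 1, 'c': 1}, 2) returns two distinct keys,
# with 'a' the most likely to be included, since weighted_choice() picks keys in
# proportion to their weights.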
|
liunian/closure-linter
|
refs/heads/master
|
closure_linter/runner.py
|
102
|
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main lint function. Tokenizes file, runs passes, and feeds to checker."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = 'nnaze@google.com (Nathan Naze)'
import traceback
import gflags as flags
from closure_linter import checker
from closure_linter import ecmalintrules
from closure_linter import ecmametadatapass
from closure_linter import error_check
from closure_linter import errors
from closure_linter import javascriptstatetracker
from closure_linter import javascripttokenizer
from closure_linter.common import error
from closure_linter.common import htmlutil
from closure_linter.common import tokens
flags.DEFINE_list('limited_doc_files', ['dummy.js', 'externs.js'],
'List of files with relaxed documentation checks. Will not '
'report errors for missing documentation, some missing '
'descriptions, or methods whose @return tags don\'t have a '
'matching return statement.')
flags.DEFINE_boolean('error_trace', False,
'Whether to show error exceptions.')
flags.ADOPT_module_key_flags(checker)
flags.ADOPT_module_key_flags(ecmalintrules)
flags.ADOPT_module_key_flags(error_check)
def _GetLastNonWhiteSpaceToken(start_token):
"""Get the last non-whitespace token in a token stream."""
ret_token = None
whitespace_tokens = frozenset([
tokens.TokenType.WHITESPACE, tokens.TokenType.BLANK_LINE])
for t in start_token:
if t.type not in whitespace_tokens:
ret_token = t
return ret_token
def _IsHtml(filename):
return filename.endswith('.html') or filename.endswith('.htm')
def _Tokenize(fileobj):
"""Tokenize a file.
Args:
fileobj: file-like object (or iterable lines) with the source.
Returns:
The first token in the token stream and the ending mode of the tokenizer.
"""
tokenizer = javascripttokenizer.JavaScriptTokenizer()
start_token = tokenizer.TokenizeFile(fileobj)
return start_token, tokenizer.mode
def _IsLimitedDocCheck(filename, limited_doc_files):
"""Whether this this a limited-doc file.
Args:
filename: The filename.
limited_doc_files: Iterable of strings. Suffixes of filenames that should
get a limited doc check.
Returns:
Whether the file should get a limited doc check.
"""
for limited_doc_filename in limited_doc_files:
if filename.endswith(limited_doc_filename):
return True
return False
def Run(filename, error_handler, source=None):
"""Tokenize, run passes, and check the given file.
Args:
filename: The path of the file to check
error_handler: The error handler to report errors to.
source: A file-like object with the file source. If omitted, the file will
be read from the filename path.
"""
if not source:
try:
source = open(filename)
except IOError:
error_handler.HandleFile(filename, None)
error_handler.HandleError(
error.Error(errors.FILE_NOT_FOUND, 'File not found'))
error_handler.FinishFile()
return
if _IsHtml(filename):
source_file = htmlutil.GetScriptLines(source)
else:
source_file = source
token, tokenizer_mode = _Tokenize(source_file)
error_handler.HandleFile(filename, token)
# If we did not end in the basic mode, this is a failed parse.
if tokenizer_mode is not javascripttokenizer.JavaScriptModes.TEXT_MODE:
error_handler.HandleError(
error.Error(errors.FILE_IN_BLOCK,
'File ended in mode "%s".' % tokenizer_mode,
_GetLastNonWhiteSpaceToken(token)))
# Run the ECMA pass
error_token = None
ecma_pass = ecmametadatapass.EcmaMetaDataPass()
error_token = RunMetaDataPass(token, ecma_pass, error_handler, filename)
is_limited_doc_check = (
_IsLimitedDocCheck(filename, flags.FLAGS.limited_doc_files))
_RunChecker(token, error_handler,
is_limited_doc_check,
is_html=_IsHtml(filename),
stop_token=error_token)
error_handler.FinishFile()
def RunMetaDataPass(start_token, metadata_pass, error_handler, filename=''):
"""Run a metadata pass over a token stream.
Args:
start_token: The first token in a token stream.
metadata_pass: Metadata pass to run.
error_handler: The error handler to report errors to.
filename: Filename of the source.
Returns:
The token where the error occurred (if any).
"""
try:
metadata_pass.Process(start_token)
except ecmametadatapass.ParseError, parse_err:
if flags.FLAGS.error_trace:
traceback.print_exc()
error_token = parse_err.token
error_msg = str(parse_err)
error_handler.HandleError(
error.Error(errors.FILE_DOES_NOT_PARSE,
('Error parsing file at token "%s". Unable to '
'check the rest of file.'
'\nError "%s"' % (error_token, error_msg)), error_token))
return error_token
except Exception: # pylint: disable=broad-except
traceback.print_exc()
error_handler.HandleError(
error.Error(
errors.FILE_DOES_NOT_PARSE,
'Internal error in %s' % filename))
def _RunChecker(start_token, error_handler,
limited_doc_checks, is_html,
stop_token=None):
state_tracker = javascriptstatetracker.JavaScriptStateTracker()
style_checker = checker.JavaScriptStyleChecker(
state_tracker=state_tracker,
error_handler=error_handler)
style_checker.Check(start_token,
is_html=is_html,
limited_doc_checks=limited_doc_checks,
stop_token=stop_token)
|
aldefalco/flasky
|
refs/heads/master
|
migrations/versions/190163627111_account_confirmation.py
|
144
|
"""account confirmation
Revision ID: 190163627111
Revises: 456a945560f6
Create Date: 2013-12-29 02:58:45.577428
"""
# revision identifiers, used by Alembic.
revision = '190163627111'
down_revision = '456a945560f6'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('confirmed', sa.Boolean(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'confirmed')
### end Alembic commands ###
|
faust64/ansible
|
refs/heads/devel
|
test/units/modules/network/ios/ios_module.py
|
59
|
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
def set_module_args(args):
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args)
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except:
pass
fixture_data[path] = data
return data
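# Illustrative usage (fixture name assumed, not part of the original tests):
# load_fixture('show_version.txt') reads fixtures/show_version.txt, caches it,
# and returns the contents parsed as JSON when possible, otherwise as text.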
class AnsibleExitJson(Exception):
pass
class AnsibleFailJson(Exception):
pass
class TestIosModule(unittest.TestCase):
def execute_module(self, failed=False, changed=False, commands=None,
sort=True, defaults=False):
self.load_fixtures(commands)
if failed:
result = self.failed()
self.assertTrue(result['failed'], result)
else:
result = self.changed(changed)
self.assertEqual(result['changed'], changed, result)
if commands:
if sort:
self.assertEqual(sorted(commands), sorted(result['commands']), result['commands'])
else:
self.assertEqual(commands, result['commands'], result['commands'])
return result
def failed(self):
def fail_json(*args, **kwargs):
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
with patch.object(basic.AnsibleModule, 'fail_json', fail_json):
with self.assertRaises(AnsibleFailJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertTrue(result['failed'], result)
return result
def changed(self, changed=False):
def exit_json(*args, **kwargs):
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
with patch.object(basic.AnsibleModule, 'exit_json', exit_json):
with self.assertRaises(AnsibleExitJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertEqual(result['changed'], changed, result)
return result
def load_fixtures(self, commands=None):
pass
|
cluck/mt940
|
refs/heads/develop
|
mt940/processors.py
|
1
|
import calendar
import re
def add_currency_pre_processor(currency, overwrite=True):
def _add_currency_pre_processor(transactions, tag, tag_dict, *args):
if 'currency' not in tag_dict or overwrite: # pragma: no branch
tag_dict['currency'] = currency
return tag_dict
return _add_currency_pre_processor
def date_fixup_pre_processor(transactions, tag, tag_dict, *args):
"""
Replace illegal February 30 dates with the last day of February.
German banks use a variant of the 30/360 interest rate calculation,
where every month always has 30 days, even February. Python's datetime
module won't accept such dates.
"""
if tag_dict['day'] == '30' and tag_dict['month'] == '02':
year = int(tag_dict['year'], 10)
tag_dict['day'] = str(calendar.monthrange(year, 2)[1])
tag_dict['month'] = '02'
return tag_dict
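# Illustrative example (values assumed): a tag_dict of
# {'year': '2015', 'month': '02', 'day': '30'} is fixed up to day '28',
# while a leap year such as '2016' would yield '29' via calendar.monthrange().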
def date_cleanup_post_processor(transactions, tag, tag_dict, result):
for k in ('day', 'month', 'year', 'entry_day', 'entry_month'):
result.pop(k, None)
return result
def mBank_set_transaction_code(transactions, tag, tag_dict, *args):
"""
mBank Collect uses transaction code 911 to distinguish incoming mass
payment transactions; adding transaction_code may be helpful in further
processing.
"""
tag_dict['transaction_code'] = int(
tag_dict[tag.slug].split(';')[0].split(' ', 1)[0])
return tag_dict
iph_id_re = re.compile(' ID IPH: X*(?P<iph_id>\d{0,14});')
def mBank_set_iph_id(transactions, tag, tag_dict, *args):
"""
mBank Collect uses ID IPH to distinguish between virtual accounts;
adding iph_id may be helpful in further processing.
"""
matches = iph_id_re.search(tag_dict[tag.slug])
if matches: # pragma no branch
tag_dict['iph_id'] = matches.groupdict()['iph_id']
return tag_dict
tnr_re = re.compile('TNR:[ \n](?P<tnr>\d+\.\d+)', flags=re.MULTILINE)
def mBank_set_tnr(transactions, tag, tag_dict, *args):
"""
mBank Collect states TNR in transaction details as a unique id for
transactions, which may be used to identify the same transaction in
different statement files, e.g. a partial mt942 and a full mt940.
Information about TNR uniqueness has been obtained from mBank support;
it is not covered by the mt940 mBank specification.
"""
matches = tnr_re.search(tag_dict[tag.slug])
if matches: # pragma no branch
tag_dict['tnr'] = matches.groupdict()['tnr']
return tag_dict
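# Illustrative example (value assumed): transaction details containing
# "TNR: 179901.000001" result in tag_dict['tnr'] == '179901.000001'.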
|
BozhkoAlexander/mtasa-blue
|
refs/heads/master
|
vendor/google-breakpad/src/tools/gyp/test/msvs/external_builder/external_builder.py
|
260
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
with open('external_builder.out', 'w') as f:
f.write(' '.join(sys.argv))
|
thomasrotter/sublimetext-cfml
|
refs/heads/master
|
src/component_index/documentation.py
|
2
|
import sublime
from functools import partial
from .. import utils, minihtml, documentation_helpers
SIDE_COLOR = "color(var(--bluish) blend(var(--background) 60%))"
def get_documentation(view, extended_metadata, file_path, class_name):
model_doc = build_documentation(extended_metadata, file_path, class_name)
callback = partial(
on_navigate, view, file_path, extended_metadata["function_file_map"]
)
return model_doc, callback
def get_method_documentation(
view,
extended_metadata,
file_path,
function_name,
class_name,
method_name,
method_preview,
):
model_doc = build_method_documentation(
extended_metadata, function_name, class_name, method_name, method_preview
)
callback = partial(
on_navigate, view, file_path, extended_metadata["function_file_map"]
)
return model_doc, callback
def get_method_preview(
view, extended_metadata, file_path, function_name, method_preview
):
model_doc = build_method_preview_doc(
extended_metadata, function_name, method_preview
)
callback = partial(
on_navigate, view, file_path, extended_metadata["function_file_map"]
)
return model_doc, callback
def get_function_call_params_doc(
extended_metadata, function_call_params, class_name, method_name
):
model_doc = build_function_call_params_doc(
extended_metadata, function_call_params, class_name, method_name
)
return model_doc, None
def cached_method_preview(view, cache, function_file_path, function_name):
current_color_scheme = view.settings().get("color_scheme")
if (
function_name not in cache
or cache[function_name]["color_scheme"] != current_color_scheme
):
with open(function_file_path, "r", encoding="utf-8") as f:
file_string = f.read()
cfml_minihtml_view = view.window().create_output_panel("cfml_minihtml")
cfml_minihtml_view.assign_syntax(
"Packages/" + utils.get_plugin_name() + "/syntaxes/cfml.sublime-syntax"
)
cfml_minihtml_view.run_command(
"append", {"characters": file_string, "force": True, "scroll_to_end": True}
)
cache[function_name] = {
"color_scheme": current_color_scheme,
"preview": build_method_preview(cfml_minihtml_view, function_name),
}
view.window().destroy_output_panel("cfml_minihtml")
return cache[function_name]["preview"]
def on_navigate(view, file_path, function_file_map, href):
if href == "__go_to_component":
if file_path[1] == ":":
file_path = "/" + file_path[0] + file_path[2:]
view.window().open_file(file_path)
else:
file_path = function_file_map[href.lower()]
if file_path[1] == ":":
file_path = "/" + file_path[0] + file_path[2:]
view.window().run_command(
"cfml_navigate_to_method", {"file_path": file_path, "href": href}
)
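# Illustrative note (path assumed, not from the original module): the drive-letter
# rewrite above turns a Windows path such as "C:/path/model.cfc" into
# "/C/path/model.cfc" before it is handed to open_file or cfml_navigate_to_method.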
def build_documentation(extended_metadata, file_path, class_name):
model_doc = {"side_color": SIDE_COLOR}
model_doc["html"] = {
"header": documentation_helpers.span_wrap(class_name, "entity.name.class"),
"body": "",
"links": [],
}
if file_path:
model_doc["html"][
"body"
] += """
<div class="path">
<strong>path</strong>: <a href="__go_to_component">{}</a>
</div>
""".strip().format(
file_path
)
if "hint" in extended_metadata and extended_metadata["hint"]:
model_doc["html"]["body"] += documentation_helpers.card(
body=extended_metadata["hint"]
)
if "entityname" in extended_metadata and extended_metadata["entityname"]:
header = documentation_helpers.header(
"entityname", extended_metadata["entityname"], "string.quoted"
)
model_doc["html"]["body"] += documentation_helpers.card(header)
if "extends" in extended_metadata and extended_metadata["extends"]:
header = documentation_helpers.header(
"extends", extended_metadata["extends"], "entity.other.inherited-class"
)
model_doc["html"]["body"] += documentation_helpers.card(header)
for key in ["accessors", "persistent"]:
if extended_metadata[key]:
header = documentation_helpers.header(key, "true", "constant.language")
model_doc["html"]["body"] += documentation_helpers.card(header)
if len(extended_metadata["properties"]) > 0:
properties = parse_properties(file_path, extended_metadata)
if len(properties) > 0:
model_doc["html"]["body"] += "<h2>Properties</h2>"
for h, b in properties:
model_doc["html"]["body"] += documentation_helpers.card(h, b)
if len(extended_metadata["functions"]) > 0:
functions = parse_functions(file_path, extended_metadata)
if "constructor" in functions:
# we have a constructor
model_doc["html"]["body"] += "<h2>Constructor</h2>"
model_doc["html"]["body"] += documentation_helpers.card(
*functions["constructor"]
)
if len(functions["public"]) > 0:
model_doc["html"]["body"] += "<h2>Public Methods</h2>"
for h, b in functions["public"]:
model_doc["html"]["body"] += documentation_helpers.card(h, b)
if len(functions["private"]) > 0:
model_doc["html"]["body"] += "<h2>Private Methods</h2>"
for h, b in functions["private"]:
model_doc["html"]["body"] += documentation_helpers.card(h, b)
return model_doc
def build_method_preview(cfml_minihtml_view, function_name):
function_region = get_function_region(cfml_minihtml_view, function_name)
css, html = minihtml.from_view(cfml_minihtml_view, function_region)
return {"css": css, "html": html}
def build_method_documentation(
extended_metadata, function_name, class_name, method_name, method_preview=None
):
function_file_path = extended_metadata["function_file_map"][function_name]
funct = extended_metadata["functions"][function_name]
model_doc = {"side_color": SIDE_COLOR, "html": {}}
model_doc["html"]["links"] = []
model_doc["html"]["header"] = ""
if class_name:
model_doc["html"]["header"] += documentation_helpers.span_wrap(
class_name, "entity.name.class"
)
if method_name:
model_doc["html"]["header"] += "."
if method_name:
model_doc["html"]["header"] += (
documentation_helpers.span_wrap(method_name, "entity.name.function") + "()"
)
if funct["meta"]["access"] and len(funct["meta"]["access"]) > 0:
model_doc["html"]["header"] = (
documentation_helpers.span_wrap(funct["meta"]["access"], "storage.modifier")
+ " "
+ model_doc["html"]["header"]
)
if funct["meta"]["returntype"] and len(funct["meta"]["returntype"]) > 0:
model_doc["html"]["header"] += ": " + documentation_helpers.span_wrap(
funct["meta"]["returntype"], "storage.type"
)
model_doc["html"][
"body"
] = """
<div class="path">
<strong>path</strong>: <a href="{}">{}</a>
</div>
""".strip().format(
funct["name"], function_file_path
)
if "hint" in funct["meta"] and funct["meta"]["hint"]:
model_doc["html"]["body"] += documentation_helpers.card(
body=funct["meta"]["hint"]
)
if len(funct["meta"]["parameters"]) > 0:
model_doc["html"]["body"] += "<h2>ARGUMENT REFERENCE</h2>"
for arg in funct["meta"]["parameters"]:
header = documentation_helpers.param_header(arg)
body = ""
if arg["default"]:
body += (
'<p><em>Default:</em> <span class="code">'
+ str(arg["default"])
+ "</span></p>"
)
if "hint" in arg and arg["hint"]:
body += arg["hint"]
model_doc["html"]["body"] += documentation_helpers.card(header, body)
model_doc["html"]["body"] += "\n"
if method_preview:
css = method_preview["css"].replace("<style>", "").replace("</style>", "")
model_doc["html"]["styles"] = css
model_doc["html"]["body"] += method_preview["html"]
return model_doc
def build_method_preview_doc(extended_metadata, function_name, method_preview):
function_file_path = extended_metadata["function_file_map"][function_name]
funct = extended_metadata["functions"][function_name]
preview = {"side_color": SIDE_COLOR, "html": {}}
preview["html"]["link"] = (
'<strong>path</strong>: <a class="plain-link" href="'
+ funct["name"]
+ '">'
+ function_file_path
+ "</a>"
)
css = method_preview["css"].replace("<style>", "").replace("</style>", "")
preview["html"]["styles"] = css
preview["html"]["body"] = method_preview["html"]
return preview
def build_function_call_params_doc(
extended_metadata, function_call_params, class_name, method_name
):
model_doc = {"side_color": SIDE_COLOR, "html": {}}
funct = extended_metadata["functions"][function_call_params.function_name]
model_doc["html"]["header"] = ""
if class_name:
model_doc["html"]["header"] += documentation_helpers.span_wrap(
class_name, "entity.name.class"
)
if method_name:
model_doc["html"]["header"] += "."
if method_name:
model_doc["html"]["header"] += (
documentation_helpers.span_wrap(method_name, "entity.name.function") + "()"
)
if funct["meta"]["access"] and len(funct["meta"]["access"]) > 0:
model_doc["html"]["header"] = (
documentation_helpers.span_wrap(funct["meta"]["access"], "storage.modifier")
+ " "
+ model_doc["html"]["header"]
)
if funct["meta"]["returntype"] and len(funct["meta"]["returntype"]) > 0:
model_doc["html"]["header"] += ": " + documentation_helpers.span_wrap(
funct["meta"]["returntype"], "storage.type"
)
model_doc["html"]["arguments"] = ""
model_doc["html"]["body"] = ""
description_args = []
if len(funct["meta"]["parameters"]) > 0:
for index, arg in enumerate(funct["meta"]["parameters"]):
if function_call_params.named_params:
active_name = (
function_call_params.params[function_call_params.current_index][0]
or ""
)
is_active = active_name.lower() == arg["name"].lower()
else:
is_active = index == function_call_params.current_index
if is_active:
model_doc["html"]["body"] += (
"type: "
+ documentation_helpers.span_wrap(
(arg["type"] if arg["type"] else "any"), "storage.type"
)
+ "<br>"
)
model_doc["html"]["body"] += "required: " + (
"true<br>" if arg["required"] else "false<br>"
)
if arg["default"]:
model_doc["html"]["body"] += "default: " + arg["default"] + "<br>"
if "hint" in arg and arg["hint"]:
model_doc["html"]["body"] += "<p>" + arg["hint"] + "</p>"
description_args.append(
'<span class="active">' + arg["name"] + "</span>"
)
elif arg["required"]:
description_args.append(
'<span class="required">' + arg["name"] + "</span>"
)
else:
description_args.append(
'<span class="optional">' + arg["name"] + "</span>"
)
model_doc["html"]["arguments"] = "(" + ", ".join(description_args) + ")"
return model_doc
def parse_functions(file_path, metadata):
result = {}
constructor = metadata["initmethod"].lower() if metadata["initmethod"] else "init"
functions = metadata["functions"]
function_file_map = metadata["function_file_map"]
public_functions = [
(functions[key], function_file_map[key])
for key in sorted(functions)
if key != constructor and is_public_function(functions[key])
]
private_functions = [
(functions[key], function_file_map[key])
for key in sorted(functions)
if key != constructor and not is_public_function(functions[key])
]
result["public"] = [
parse_function(function, funct_file_path, file_path)
for function, funct_file_path in public_functions
]
result["private"] = [
parse_function(function, funct_file_path, file_path)
for function, funct_file_path in private_functions
]
if constructor in functions:
result["constructor"] = parse_function(
functions[constructor], function_file_map[constructor], file_path
)
return result
def is_public_function(function):
if function["meta"]["access"] and function["meta"]["access"] == "private":
return False
return True
def parse_function(function, funct_file_path, file_path):
header = '<a class="plain-link" href="' + function["name"] + '">'
header += (
documentation_helpers.span_wrap(function["name"], "entity.name.function")
+ "("
+ ("..." if function["meta"]["parameters"] else "")
+ ")"
)
if function["meta"]["returntype"]:
header += ": " + documentation_helpers.span_wrap(
function["meta"]["returntype"], "storage.type"
)
header += "</a>"
body = ""
if funct_file_path != file_path:
body += " <small><em>(from " + funct_file_path.split("/")[-1] + ")</em></small>"
if "hint" in function["meta"]:
body += '<div class="doc-box">' + function["meta"]["hint"] + "</div>"
arg_strings = []
for arg in function["meta"]["parameters"]:
arg_string = documentation_helpers.param_header(arg)
if arg["default"]:
arg_string += "<br>Default: " + arg["default"]
if "hint" in arg and arg["hint"]:
arg_string += "<div>" + arg["hint"] + "</div>"
arg_strings.append(arg_string)
if len(arg_strings) > 0:
body += "<ul><li>" + "</li><li>".join(arg_strings) + "</li></ul>"
return header, body
def parse_properties(file_path, metadata):
properties = metadata["properties"]
property_file_map = metadata["property_file_map"]
sorted_properties = [
(properties[key], property_file_map[key]) for key in sorted(properties)
]
return [
parse_property(prop, prop_file_path, file_path)
for prop, prop_file_path in sorted_properties
]
def parse_property(prop, prop_file_path, file_path):
header = (
"<strong>"
+ prop["name"]
+ ": "
+ documentation_helpers.span_wrap(prop["meta"]["type"], "storage.type")
+ "</strong>"
)
if prop_file_path != file_path:
header += (
" <small><em>(from " + prop_file_path.split("/")[-1] + ")</em></small>"
)
body = ""
accessors = [key for key in ["setter", "getter"] if prop["meta"][key]]
if accessors:
body += (
"<small><strong>accessors</strong>: <em>"
+ ", ".join(accessors)
+ "</em></small>"
)
if "inject" in prop["meta"] and prop["meta"]["inject"]:
body += "inject: " + prop["meta"]["inject"]
return header, body
def get_function_region(view, function_name):
functions = view.find_by_selector("entity.name.function -meta.function.body")
for funct_region in functions:
if view.substr(funct_region).lower() == function_name:
pt = funct_region.begin()
break
else:
return None
if view.match_selector(pt, "meta.function.cfml"):
# tag function
decl = utils.get_scope_region_containing_point(view, pt, "meta.function.cfml")
body = utils.get_scope_region_containing_point(
view, decl.end() + 1, "meta.function.body.tag.cfml"
)
end = utils.get_scope_region_containing_point(
view, body.end() + 1, "meta.tag.cfml"
)
return sublime.Region(decl.begin(), end.end())
else:
# script function
decl = utils.get_scope_region_containing_point(
view, pt, "meta.function.declaration.cfml"
)
body = utils.get_scope_region_containing_point(
view, decl.end() + 1, "meta.function.body.cfml"
)
return sublime.Region(decl.begin(), body.end())
|
jcai19/smm_gem5
|
refs/heads/master
|
src/arch/x86/isa/insts/simd128/integer/shift/__init__.py
|
91
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
categories = ["left_logical_shift",
"right_logical_shift",
"right_arithmetic_shift"]
microcode = '''
# 128 bit multimedia and scientific instructions
'''
for category in categories:
exec "import %s as cat" % category
microcode += cat.microcode
|
prodigeni/unbound-dns64
|
refs/heads/master
|
libunbound/python/doc/conf.py
|
26
|
# -*- coding: utf-8 -*-
#
# Unbound documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),'../')))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),'../../../')))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),'../../../.libs/')))
#print sys.path
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'pyUnbound'
copyright = '2009, Zdenek Vasicek, Marek Vavrusa'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
#exclude_dirs = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Unbounddoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'Unbound.tex', 'Unbound Documentation',
'Zdenek Vasicek, Marek Vavrusa', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
willhardy/django
|
refs/heads/master
|
tests/empty/models.py
|
542
|
"""
Empty model tests
These test that things behave sensibly for the rare corner-case of a model with
no fields.
"""
from django.db import models
class Empty(models.Model):
pass
|
openstack/senlin-dashboard
|
refs/heads/master
|
senlin_dashboard/cluster/receivers/tests.py
|
2
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.urls import reverse
from senlin_dashboard import api
from senlin_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:cluster:receivers:index')
CREATE_URL = reverse('horizon:cluster:receivers:create')
class ReceiversTest(test.TestCase):
@test.create_mocks({api.senlin: ('receiver_list',)})
def test_index(self):
receivers = self.receivers.list()
self.mock_receiver_list.return_value = receivers
res = self.client.get(INDEX_URL)
self.assertContains(res, '<h1>Receivers</h1>')
self.assertTemplateUsed(res, 'cluster/receivers/index.html')
self.assertEqual(1, len(receivers))
self.mock_receiver_list.assert_called_once_with(
test.IsHttpRequest(), filters={}, marker=None,
paginate=True, reversed_order=False)
@test.create_mocks({api.senlin: ('receiver_list',)})
def test_index_receiver_list_exception(self):
self.mock_receiver_list.side_effect = (
self.exceptions.senlin)
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'cluster/receivers/index.html')
self.assertEqual(0, len(res.context['receivers_table'].data))
self.assertMessageCount(res, error=1)
self.mock_receiver_list.assert_called_once_with(
test.IsHttpRequest(), filters={}, marker=None,
paginate=True, reversed_order=False)
@test.create_mocks({api.senlin: ('receiver_list',)})
def test_index_no_receiver(self):
self.mock_receiver_list.return_value = []
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'cluster/receivers/index.html')
self.assertContains(res, 'No items to display')
self.assertEqual(0, len(res.context['receivers_table'].data))
self.mock_receiver_list.assert_called_once_with(
test.IsHttpRequest(), filters={}, marker=None,
paginate=True, reversed_order=False)
@test.create_mocks({api.senlin: ('receiver_create',
'cluster_list')})
def test_create_receiver(self):
clusters = self.clusters.list()
data = {
'name': 'test-receiver',
'type': 'webhook',
'cluster_id': '123456',
'action': 'CLUSTER_SCALE_IN',
'params': ''
}
formdata = {
'name': 'test-receiver',
'type': 'webhook',
'cluster_id': '123456',
'action': 'CLUSTER_SCALE_IN',
'params': ''
}
self.mock_cluster_list.return_value = (
(clusters, False, False))
self.mock_receiver_create.return_value = data
res = self.client.post(CREATE_URL, formdata)
self.assertNoFormErrors(res)
|
1065865483/0python_script
|
refs/heads/master
|
four/Webdriver/scroll_window.py
|
1
|
from selenium import webdriver
from time import sleep
driver=webdriver.Firefox()
driver.get('http://www.51zxw.net/')
sleep(2)
# Actions that WebDriver cannot perform directly can be done through JavaScript
# Drag the scrollbar to the bottom of the page
js="var action=document.documentElement.scrollTop=10000"
driver.execute_script(js)
sleep(2)
# Drag the scrollbar back to the top of the page
js="var action=document.documentElement.scrollTop=0"
driver.execute_script(js)
sleep(2)
driver.quit()
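# A minimal usage sketch, not part of the original script: the same
# execute_script hook can also scroll a specific element into view. The helper
# below is illustrative; 'element' is assumed to be a WebElement located
# elsewhere in a real script.
def scroll_element_into_view(driver, element):
    # ask the browser to bring the element into the visible viewport
    driver.execute_script("arguments[0].scrollIntoView();", element)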
|
songfj/calibre
|
refs/heads/master
|
src/calibre/gui2/preferences/saving.py
|
14
|
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from calibre.gui2.preferences import ConfigWidgetBase, test_widget, \
AbortCommit
from calibre.gui2.preferences.saving_ui import Ui_Form
from calibre.utils.config import ConfigProxy
from calibre.library.save_to_disk import config
from calibre.gui2 import gprefs
class ConfigWidget(ConfigWidgetBase, Ui_Form):
def genesis(self, gui):
self.gui = gui
self.proxy = ConfigProxy(config())
r = self.register
for x in ('asciiize', 'update_metadata', 'save_cover', 'write_opf',
'replace_whitespace', 'to_lowercase', 'formats', 'timefmt'):
r(x, self.proxy)
r('show_files_after_save', gprefs)
self.save_template.changed_signal.connect(self.changed_signal.emit)
def initialize(self):
ConfigWidgetBase.initialize(self)
self.save_template.blockSignals(True)
self.save_template.initialize('save_to_disk', self.proxy['template'],
self.proxy.help('template'),
self.gui.library_view.model().db.field_metadata)
self.save_template.blockSignals(False)
def restore_defaults(self):
ConfigWidgetBase.restore_defaults(self)
self.save_template.set_value(self.proxy.defaults['template'])
def commit(self):
if not self.save_template.validate():
raise AbortCommit('abort')
self.save_template.save_settings(self.proxy, 'template')
return ConfigWidgetBase.commit(self)
def refresh_gui(self, gui):
gui.iactions['Save To Disk'].reread_prefs()
# Ensure worker process reads updated settings
gui.spare_pool().shutdown()
if __name__ == '__main__':
from PyQt5.Qt import QApplication
app = QApplication([])
test_widget('Import/Export', 'Saving')
|
cvalenzu/acalib
|
refs/heads/master
|
acalib/algorithms/stacking.py
|
1
|
import acalib
from .algorithm import Algorithm
from numpy import mean
from scipy.stats import signaltonoise
import scipy.ndimage as scnd
from astropy.nddata import NDData,NDDataRef
class Stacking(Algorithm):
"""
Create a stacked image using a template image and a set of different images from the same object.
"""
def default_params(self):
pass
def run(self, template_data, images):
"""
Run the stacking algorithm given a template image and a container of images.
Parameters
----------
template_data : (M,N) numpy.ndarray
Astronomical image.
images : list of (M,N) numpy.ndarray
A list of images.
Returns
-------
result : (M,N) numpy.ndarray
Stacked image.
"""
if type(template_data) is NDData or type(template_data) is NDDataRef:
template_data = template_data.data
for i in range(len(images)):
if type(images[i]) is not NDData and type(images[i]) is not NDDataRef:
images[i] = NDDataRef(images[i])
tprops = acalib.core.transform.fits_props(template_data)
# TODO: Replace with core.transform.scale once it stops using
# an acalib.container.
majorAxisTemplate = tprops['major']
scaledData = []
for i in range(len(images)):
prop = acalib.core.transform.fits_props(images[i].data)
sc = majorAxisTemplate / prop['major']
scaledData.append(scnd.zoom(prop['orig'], sc))
scaled = scaledData
rotated, angles = acalib.core.transform.rotate(scaled, tprops['angle'])
aligned = acalib.core.transform.crop_and_align(rotated, angles)
result = mean(aligned,axis=0)
return result
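# Usage sketch (illustrative only, not part of the original module). It assumes
# 'template' and 'frames' are astronomical images (numpy arrays or NDData
# objects) loaded elsewhere, e.g. from FITS files:
#
#   stacker = Stacking()
#   stacked = stacker.run(template, frames)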
|
alebcay/android_kernel_oneplus_msm8974
|
refs/heads/m-caf
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
|
12527
|
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
|
EarthTeam/earthteam.net
|
refs/heads/master
|
sites/all/libraries/geos-3.4.2/swig/python/tests/example.py
|
29
|
import os
import sys
import distutils.util
import math
# Put local build directory on head of python path
platformdir = '-'.join((distutils.util.get_platform(),
'.'.join(map(str, sys.version_info[0:2]))))
sys.path.insert(0, os.path.join('build', 'lib.' + platformdir))
# import geos from the local build directory
import geos
pm = geos.PrecisionModel(2.0,0,0)
global_factory = geos.GeometryFactory(pm,-1)
def wkt_print_geoms(geoms):
wkt = geos.WKTWriter()
size = len(geoms)
for i in range(size):
tmp = wkt.write(geoms[i])
print "[%d]" % i, tmp
def create_point(x,y):
c = geos.Coordinate(x,y)
p = global_factory.createPoint(c)
return p
def create_ushaped_linestring(xoffset, yoffset, side):
cl = geos.DefaultCoordinateSequence()
cl.add(geos.Coordinate(xoffset, yoffset))
cl.add(geos.Coordinate(xoffset, yoffset+side))
cl.add(geos.Coordinate(xoffset+side, yoffset+side))
cl.add(geos.Coordinate(xoffset+side, yoffset))
ls = global_factory.createLineString(cl)
return ls
def create_square_linearring(xoffset,yoffset,side):
cl = geos.DefaultCoordinateSequence()
cl.add(geos.Coordinate(xoffset,yoffset))
cl.add(geos.Coordinate(xoffset,yoffset+side))
cl.add(geos.Coordinate(xoffset+side,yoffset+side))
cl.add(geos.Coordinate(xoffset+side,yoffset))
cl.add(geos.Coordinate(xoffset,yoffset))
lr = global_factory.createLinearRing(cl)
return lr
def create_square_polygon(xoffset,yoffset,side):
outer = create_square_linearring(xoffset,yoffset,side)
inner = create_square_linearring(xoffset+(side/3.),yoffset+(side/3.),(side/3.))
holes = geos.vector_GeometryP()
holes.push_back(inner)
poly = global_factory.createPolygon(outer,holes)
return poly
def create_simple_collection(geoms):
collect = geos.vector_GeometryP()
for i in geoms:
collect.push_back(i)
return global_factory.createGeometryCollection(collect)
def create_circle(centerX,centerY,radius):
shapefactory = geos.GeometricShapeFactory(global_factory)
shapefactory.setCentre(geos.Coordinate(centerX, centerY))
shapefactory.setSize(radius)
return shapefactory.createCircle()
def create_ellipse(centerX,centerY,width,height):
shapefactory = geos.GeometricShapeFactory(global_factory)
shapefactory.setCentre(geos.Coordinate(centerX, centerY))
shapefactory.setHeight(height)
shapefactory.setWidth(width)
return shapefactory.createCircle()
def create_rectangle(llX,llY,width,height):
shapefactory = geos.GeometricShapeFactory(global_factory)
shapefactory.setBase(geos.Coordinate(llX, llY))
shapefactory.setHeight(height)
shapefactory.setWidth(width)
shapefactory.setNumPoints(4)
return shapefactory.createRectangle()
def create_arc(llX,llY,width,height,startang,endang):
shapefactory = geos.GeometricShapeFactory(global_factory)
shapefactory.setBase(geos.Coordinate(llX, llY))
shapefactory.setHeight(height)
shapefactory.setWidth(width)
#shapefactory.setNumPoints(100) #the default (100 pts)
return shapefactory.createArc(startang, endang)
def do_all():
geoms = []
geoms.append(create_point(150, 350))
geoms.append(create_ushaped_linestring(60,60,100))
geoms.append(create_square_linearring(0,0,100))
geoms.append(create_square_polygon(0,200,300))
geoms.append(create_square_polygon(0,250,300))
geoms.append(create_simple_collection(geoms))
# These ones use a GeometricShapeFactory
geoms.append(create_circle(0, 0, 10))
geoms.append(create_ellipse(0, 0, 8, 12))
geoms.append(create_rectangle(-5, -5, 10, 10)) # a square
geoms.append(create_rectangle(-5, -5, 10, 20)) # a rectangle
# The upper-right quarter of a vertical ellipse
geoms.append(create_arc(0, 0, 10, 20, 0, math.pi/2))
print "--------HERE ARE THE BASE GEOMS ----------"
wkt_print_geoms(geoms)
####################
# UNARY OPERATIONS #
####################
################
#CENTROID #
################
# Find centroid of each base geometry
newgeoms = []
for i in range(len(geoms)):
newgeoms.append(geoms[i].getCentroid())
print "\n","------- AND HERE ARE THEIR CENTROIDS -----"
wkt_print_geoms(newgeoms)
################
# BUFFER #
################
newgeoms = []
for i in range(len(geoms)):
try:
newgeoms.append(geoms[i].buffer(10))
except geos.GEOSException():
exc = geos.GEOSException()
print "GEOS Exception: geometry ",geoms[i],"->buffer(10): ",exc.toString()
print "\n","--------HERE COMES THE BUFFERED GEOMS ----------"
wkt_print_geoms(newgeoms)
################
# CONVEX HULL #
################
newgeoms = []
for i in range(len(geoms)):
newgeoms.append(geoms[i].convexHull())
print "\n","--------HERE COMES THE HULLS----------"
wkt_print_geoms(newgeoms)
####################
# RELATIONAL OPERATORS #
####################
print "-------------------------------------------------------------------------------"
print "RELATIONAL OPERATORS"
print "-------------------------------------------------------------------------------"
size = len(geoms)
################
# DISJOINT #
################
print
print "\t".join([" DISJOINT "]+["[%d]" % i for i in range(size)])
for i in range(size):
print " [%d]\t" % i,
for j in range(size):
try:
if geoms[i].disjoint(geoms[j]):
print " 1\t",
else:
print " 0\t",
except geos.GEOSException():
exc = geos.GEOSException()
print exc.toString()
except:
print " X\t",
print
################
# TOUCHES #
################
print
print "\t".join([" TOUCHES "]+["[%d]" % i for i in range(size)])
for i in range(size):
print " [%d]\t" % i,
for j in range(size):
try:
if geoms[i].touches(geoms[j]):
print " 1\t",
else:
print " 0\t",
except geos.GEOSException():
exc = geos.GEOSException()
print exc.toString()
except:
print " X\t",
print
################
# INTERSECTS #
################
print
print "\t".join([" INTERSECTS "]+["[%d]" % i for i in range(size)])
for i in range(size):
print " [%d]\t" % i,
for j in range(size):
try:
if geoms[i].intersects(geoms[j]):
print " 1\t",
else:
print " 0\t",
except geos.GEOSException():
exc = geos.GEOSException()
print exc.toString()
except:
print " X\t",
print
################
# CROSSES #
################
print
print "\t".join([" CROSSES "]+["[%d]" % i for i in range(size)])
for i in range(size):
print " [%d]\t" % i,
for j in range(size):
try:
if geoms[i].crosses(geoms[j]):
print " 1\t",
else:
print " 0\t",
except geos.GEOSException():
exc = geos.GEOSException()
print exc.toString()
except:
print " X\t",
print
################
# WITHIN #
################
print
print "\t".join([" WITHIN "]+["[%d]" % i for i in range(size)])
for i in range(size):
print " [%d]\t" % i,
for j in range(size):
try:
if geoms[i].within(geoms[j]):
print " 1\t",
else:
print " 0\t",
except geos.GEOSException():
exc = geos.GEOSException()
print exc.toString()
except:
print " X\t",
print
################
# CONTAINS #
################
print
print "\t".join([" CONTAINS "]+["[%d]" % i for i in range(size)])
for i in range(size):
print " [%d]\t" % i,
for j in range(size):
try:
if geoms[i].contains(geoms[j]):
print " 1\t",
else:
print " 0\t",
except geos.GEOSException():
exc = geos.GEOSException()
print exc.toString()
except:
print " X\t",
print
################
# OVERLAPS #
################
print
print "\t".join([" OVERLAPS "]+["[%d]" % i for i in range(size)])
for i in range(size):
print " [%d]\t" % i,
for j in range(size):
try:
if geoms[i].overlaps(geoms[j]):
print " 1\t",
else:
print " 0\t",
except geos.GEOSException():
exc = geos.GEOSException()
print exc.toString()
except:
print " X\t",
print
################
# RELATE #
################
print
print "\t".join([" RELATE "]+["[%d]" % i for i in range(size)])
for i in range(size):
print " [%d]\t" % i,
for j in range(size):
im = geos.IntersectionMatrix('')
try:
if geoms[i].relate(geoms[j],"212101212"):
print " 1\t",
else:
print " 0\t",
im=geoms[i].relate(geoms[j])
except geos.GEOSException():
exc = geos.GEOSException()
print exc.toString()
except:
print " X\t",
print
################
# EQUALS #
################
print
print "\t".join([" EQUALS "]+["[%d]" % i for i in range(size)])
for i in range(size):
print " [%d]\t" % i,
for j in range(size):
try:
if geoms[i].equals(geoms[j]):
print " 1\t",
else:
print " 0\t",
except geos.GEOSException():
exc = geos.GEOSException()
print exc.toString()
except:
print " X\t",
print
################
# EQUALS_EXACT #
################
print
print "\t".join(["EQUALS_EXACT "]+["[%d]" % i for i in range(size)])
for i in range(size):
print " [%d]\t" % i,
for j in range(size):
try:
if geoms[i].equalsExact(geoms[j],0.5):
print " 1\t",
else:
print " 0\t",
except geos.GEOSException():
exc = geos.GEOSException()
print exc.toString()
except:
print " X\t",
print
################
# IS_WITHIN_DISTANCE #
################
print
print "\t".join(["IS_WITHIN_DIST"]+["[%d]" % i for i in range(size)])
for i in range(size):
print " [%d]\t" % i,
for j in range(size):
try:
if geoms[i].isWithinDistance(geoms[j],2):
print " 1\t",
else:
print " 0\t",
except geos.GEOSException():
exc = geos.GEOSException()
print exc.toString()
except:
print " X\t",
print
####################
# COMBINATIONS
####################
print
print "-------------------------------------------------------------------------------"
print "COMBINATIONS"
print "-------------------------------------------------------------------------------"
################
# UNION
################
newgeoms = []
for i in range(size-1):
for j in range(i+1,size):
try:
newgeoms.append(geoms[i].Union(geoms[j]))
except geos.GEOSException():
exc = geos.GEOSException()
print exc.toString()
except:
pass
print "\n", "----- AND HERE ARE SOME UNION COMBINATIONS ------"
wkt_print_geoms(newgeoms)
################
# INTERSECTION
################
newgeoms = []
for i in range(size-1):
for j in range(i+1,size):
try:
newgeoms.append(geoms[i].intersection(geoms[j]))
except geos.GEOSException():
exc = geos.GEOSException()
print exc.toString()
except:
pass
print "\n", "----- HERE ARE SOME INTERSECTIONS COMBINATIONS ------"
wkt_print_geoms(newgeoms)
################
# DIFFERENCE
################
newgeoms = []
for i in range(size-1):
for j in range(i+1,size):
try:
newgeoms.append(geoms[i].difference(geoms[j]))
except geos.GEOSException():
exc = geos.GEOSException()
print exc.toString()
except:
pass
print "\n", "----- HERE ARE SOME DIFFERENCE COMBINATIONS ------"
wkt_print_geoms(newgeoms)
################
# SYMMETRIC DIFFERENCE
################
newgeoms = []
for i in range(size-1):
for j in range(i+1,size):
try:
newgeoms.append(geoms[i].symDifference(geoms[j]))
except geos.GEOSException():
exc = geos.GEOSException()
print exc.toString()
except:
pass
print "\n", "----- HERE ARE SYMMETRIC DIFFERENCES ------"
wkt_print_geoms(newgeoms)
################
# LINEMERGE
################
temp = geos.vector_GeometryP()
for g in geoms:
temp.push_back(g)
lm = geos.LineMerger()
lm.add(temp)
mls = lm.getMergedLineStrings()
newgeoms = []
for i in range(mls.size()):
newgeoms.append(mls[i])
del mls
print "\n", "----- HERE IS THE LINEMERGE OUTPUT ------"
wkt_print_geoms(newgeoms)
################
# POLYGONIZE
################
temp = geos.vector_GeometryP()
for g in geoms:
temp.push_back(g)
plgnzr = geos.Polygonizer()
plgnzr.add(temp)
polys = plgnzr.getPolygons()
newgeoms = []
for i in range(polys.size()):
newgeoms.append(polys[i])
del polys
print "\n", "----- HERE IS POLYGONIZE OUTPUT ------"
wkt_print_geoms(newgeoms)
print "GEOS", geos.geosversion(), "ported from JTS", geos.jtsport()
do_all()
|
hardboiled65/Seshat
|
refs/heads/master
|
configure.py
|
1
|
#!/usr/bin/env python3
# Seshat config tool.
# This is not a part of autotools.
import sys
import os
import platform
import shutil
import xml.etree.ElementTree
import subprocess
ICU_MIN_VERSION = "59.1"
UNICODE_VERSION = "10.0"
SESHAT_VERSION_MAJOR = 0
SESHAT_VERSION_MINOR = 1
SESHAT_VERSION_PATCH = 0
SESHAT_VERSION = "{}.{}.{}".format(
SESHAT_VERSION_MAJOR,
SESHAT_VERSION_MINOR,
SESHAT_VERSION_PATCH)
options = {
'SESHAT_ICU_BACKEND': False,
'SESHAT_IGNORE_ICU_VERSION': False,
'SESHAT_INFO_FLAGS': '-DSESHAT_BUILD_DATE=\\"`date -u +%Y-%m-%dT%H:%M:%SZ`\\"',
'CXXFLAGS': '-Wall',
}
makefile_template = '''# This file is generated by configure.py
VERSION = {seshat_version}
VERSION_MAJOR = {seshat_version_major}
VERSION_MINOR = {seshat_version_minor}
VERSION_PATCH = {seshat_version_patch}
OBJ = {m_OBJ_LIST}
CXX = {m_CXX}
CXXFLAGS = {m_CXXFLAGS}
SESHAT_INFO_FLAGS = {m_SESHAT_INFO_FLAGS}
export CXX
default: $(OBJ)
\tmkdir -p lib
\t$(CXX) -std=c++11 -shared \
-o lib/libseshat.so.$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_PATCH) \
-Wl,-soname,libseshat.so.$(VERSION) \
$^ -Iinclude
\trm -f lib/libseshat.so.$(VERSION_MAJOR).$(VERSION_MINOR)
\trm -f lib/libseshat.so.$(VERSION_MAJOR)
\trm -f lib/libseshat.so
\tln -s libseshat.so.$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_PATCH) \
lib/libseshat.so.$(VERSION_MAJOR).$(VERSION_MINOR)
\tln -s libseshat.so.$(VERSION_MAJOR).$(VERSION_MINOR) \
lib/libseshat.so.$(VERSION_MAJOR)
\tln -s libseshat.so.$(VERSION_MAJOR) \
lib/libseshat.so
\t$(MAKE) -C tools/
test:
\t$(MAKE) -C tests/
static: $(OBJ)
\tmkdir -p lib
\tar rcs lib/libseshat.a $^
src/info.o: src/info.cpp
\t$(CXX) -std=c++11 $(CXXFLAGS) $(SESHAT_INFO_FLAGS) -c -Iinclude -o $@ $<
src/%.o: src/%.cpp
\t$(CXX) -std=c++11 $(CXXFLAGS) -c -Iinclude -o $@ $<
install:
\tcp -P lib/libseshat.so* /usr/local/lib/
clean:
\trm -f src/*.o
\trm -f src/ucd/*.o
\trm -f src/emoji/*.o
\trm -f src/icu/*.o
\trm -rf lib
\t$(MAKE) -C tools/ -f Makefile clean
\t$(MAKE) -C tests/ -f Makefile clean
'''
obj_list = []
# Object file append functions
def append_obj(cpp_path):
for fname in os.listdir(cpp_path):
(name, ext) = os.path.splitext(fname)
if ext == '.cpp':
obj_list.append(os.path.normpath(cpp_path) + '/' + name + '.o')
def append_src():
append_obj('./src')
def append_ucd():
append_obj('./src/ucd')
append_obj('./src/emoji')
def append_icu():
append_obj('./src/icu')
obj_list.append('src/ucd/normalization_props.o')
obj_list.append('src/ucd/dm.o')
obj_list.append('src/emoji/data.o')
# Detect platform
def detect_platform():
# Determine whether the OS is 32-bit or 64-bit.
# Note that platform.architecture() reports the Python interpreter's
# architecture, not the OS's.
arch = platform.architecture()[0]
os_bit = 0
if arch == '64bit':
os_bit = 64
else:
os_bit = 32
if os_bit == 64:
# Exception for cygwin
if sys.platform == 'cygwin':
return
options['CXXFLAGS'] += ' -fPIC'
# Detect compiler
def detect_compiler():
# Get CXX environment variable.
cxx_env = os.getenv('CXX')
if cxx_env != None:
print('CXX environment variable is set as "{}".'.format(cxx_env))
cxx = cxx_env
elif shutil.which('clang++') != None:
cxx = 'clang++'
elif shutil.which('g++') != None:
cxx = 'g++'
else:
print('It seems no C++ compiler is installed on this system.')
exit(1)
options['CXX'] = cxx
# Detect ICU version
def detect_icu():
icu_info = {}
if shutil.which('icuinfo') != None:
icuinfo = subprocess.check_output('icuinfo').decode()
icuinfo = icuinfo[:icuinfo.rindex('>')+1]
icuinfo = icuinfo.replace('&', '&')
tree = xml.etree.ElementTree.fromstring(icuinfo)
for e in tree:
icu_info[e.get('name')] = e.text
icu_version = icu_info['version'].split('.')
min_version = ICU_MIN_VERSION.split('.')
uni_version = icu_info['version.unicode'].split('.')
min_uni_version = UNICODE_VERSION.split('.')
if icu_version < min_version:
if options['SESHAT_IGNORE_ICU_VERSION']:
return
print('Seshat requires ICU version {} or later, but the version installed on your system is {}'.format(ICU_MIN_VERSION, icu_info['version']))
exit(1)
if uni_version < min_uni_version:
if options['SESHAT_IGNORE_ICU_VERSION']:
return
print('Seshat requires an ICU that supports Unicode version {} or later, but the ICU installed on your system only supports up to {}'.format(UNICODE_VERSION, icu_info['version.unicode']))
else:
print('icuinfo: command not found.')
exit(1)
# Print options
def print_options():
for k, v in options.items():
print('{}={}'.format(k, v))
def print_help():
print('Usage: ./configure.py [--help] <arguments>')
print('Arguments')
print(' --help print this help')
print(' --icu-backend use ICU as backend instead of seshat')
print(' implementation')
print(' --ignore-icu-version')
print(' ignore ICU version check')
exit()
if __name__ == '__main__':
if len(sys.argv) > 1:
if sys.argv[1] == '--help':
print_help()
if '--icu-backend' in sys.argv:
options['SESHAT_ICU_BACKEND'] = True
options['CXXFLAGS'] += ' -DSESHAT_ICU_BACKEND'
if '--ignore-icu-version' in sys.argv:
options['SESHAT_IGNORE_ICU_VERSION'] = True
append_src()
if options['SESHAT_ICU_BACKEND'] == True:
detect_icu()
append_icu()
else:
append_ucd()
detect_platform()
detect_compiler()
print_options()
output = makefile_template.format(
seshat_version=SESHAT_VERSION,
seshat_version_major=SESHAT_VERSION_MAJOR,
seshat_version_minor=SESHAT_VERSION_MINOR,
seshat_version_patch=SESHAT_VERSION_PATCH,
m_OBJ_LIST=' '.join(obj_list),
m_CXXFLAGS=options['CXXFLAGS'],
m_CXX=options['CXX'],
m_SESHAT_INFO_FLAGS=options['SESHAT_INFO_FLAGS'])
f = open('Makefile', 'w')
f.write(output)
f.close()
|
habeanf/Open-Knesset
|
refs/heads/upmaster
|
apis/urls.py
|
11
|
from django.conf.urls import url, patterns, include
from resources import v2_api
urlpatterns = patterns(
'',
(r'^', include(v2_api.urls)),
url(
r'^v2/doc/',
include('tastypie_swagger.urls', namespace='tastypie_swagger'),
kwargs={
"tastypie_api_module": "apis.resources.v2_api",
"namespace": "tastypie_swagger", "version": "2.0"
}
)
)
|
finger563/learningTF
|
refs/heads/master
|
src/intro/mnist_softmax.py
|
30
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A very simple MNIST classifier.
See extensive documentation at
http://tensorflow.org/tutorials/mnist/beginners/index.md
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
FLAGS = None
def main(_):
# Import data
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
# Create the model
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.matmul(x, W) + b
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10])
# The raw formulation of cross-entropy,
#
# tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
# reduction_indices=[1]))
#
# can be numerically unstable.
#
# So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
# outputs of 'y', and then average across the batch.
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
# Train
for _ in range(1000):
batch_xs, batch_ys = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
# Test trained model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images,
y_: mnist.test.labels}))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='/tmp/tensorflow/mnist/input_data',
help='Directory for storing input data')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
hottwaj/django
|
refs/heads/master
|
django/contrib/admin/models.py
|
184
|
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.admin.utils import quote
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.utils.translation import ugettext, ugettext_lazy as _
ADDITION = 1
CHANGE = 2
DELETION = 3
class LogEntryManager(models.Manager):
use_in_migrations = True
def log_action(self, user_id, content_type_id, object_id, object_repr, action_flag, change_message=''):
self.model.objects.create(
user_id=user_id,
content_type_id=content_type_id,
object_id=smart_text(object_id),
object_repr=object_repr[:200],
action_flag=action_flag,
change_message=change_message,
)
@python_2_unicode_compatible
class LogEntry(models.Model):
action_time = models.DateTimeField(
_('action time'),
default=timezone.now,
editable=False,
)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
models.CASCADE,
verbose_name=_('user'),
)
content_type = models.ForeignKey(
ContentType,
models.SET_NULL,
verbose_name=_('content type'),
blank=True, null=True,
)
object_id = models.TextField(_('object id'), blank=True, null=True)
object_repr = models.CharField(_('object repr'), max_length=200)
action_flag = models.PositiveSmallIntegerField(_('action flag'))
change_message = models.TextField(_('change message'), blank=True)
objects = LogEntryManager()
class Meta:
verbose_name = _('log entry')
verbose_name_plural = _('log entries')
db_table = 'django_admin_log'
ordering = ('-action_time',)
def __repr__(self):
return smart_text(self.action_time)
def __str__(self):
if self.is_addition():
return ugettext('Added "%(object)s".') % {'object': self.object_repr}
elif self.is_change():
return ugettext('Changed "%(object)s" - %(changes)s') % {
'object': self.object_repr,
'changes': self.change_message,
}
elif self.is_deletion():
return ugettext('Deleted "%(object)s."') % {'object': self.object_repr}
return ugettext('LogEntry Object')
def is_addition(self):
return self.action_flag == ADDITION
def is_change(self):
return self.action_flag == CHANGE
def is_deletion(self):
return self.action_flag == DELETION
def get_edited_object(self):
"Returns the edited object represented by this log entry"
return self.content_type.get_object_for_this_type(pk=self.object_id)
def get_admin_url(self):
"""
Returns the admin URL to edit the object represented by this log entry.
"""
if self.content_type and self.object_id:
url_name = 'admin:%s_%s_change' % (self.content_type.app_label, self.content_type.model)
try:
return reverse(url_name, args=(quote(self.object_id),))
except NoReverseMatch:
pass
return None
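# Usage sketch (illustrative only, not part of django.contrib.admin): recording a
# manual addition from a custom view, where 'request' and 'obj' are assumed to
# come from that view:
#
#   LogEntry.objects.log_action(
#       user_id=request.user.pk,
#       content_type_id=ContentType.objects.get_for_model(obj).pk,
#       object_id=obj.pk,
#       object_repr=smart_text(obj),
#       action_flag=ADDITION,
#       change_message='Added via custom view.',
#   )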
|
kaustubhhiware/hiPy
|
refs/heads/master
|
think_python/birthdays.py
|
1
|
import random
def length(listed):
"""
return length of list
"""
count = 0
for item in listed:
count += 1
return count
def has_duplicates(listed):
"""
check for duplicates
Birthday paradox!
"""
for index in range(length(listed)):
j = index+1
while j!=length(listed):
if listed[j]==listed[index]:
#duplicate spotted!
return True
j += 1
return False
def random_bdays(n):
"""
Returns a list of integers between 1 and 365, with length (n)
"""
t = []
for i in range(n):
bday = random.randint(1, 365)
t.append(bday)
return t
def count_matches(students, samples):
"""
check how many samples have repeated birthdays
"""
count = 0
for i in range(samples):
t = random_bdays(students)
if has_duplicates(t):
count += 1
return count
#23 is a great number for birthday paradox
num_students = raw_input("Enter the number of students: ")
num_students = int(num_students)
num_times = raw_input("How many simulations: ")
num_times = int(num_times)
count = count_matches(num_students,num_times)
chance = 100.0*count/num_times
print 'Chances of 2 people sharing their birthdays : ',chance
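# A simpler duplicate check, shown only as an illustrative alternative to the
# O(n^2) has_duplicates above (assumption: the items are hashable):
def has_duplicates_set(listed):
    # a set drops repeated values, so a length mismatch means duplicates exist
    return len(set(listed)) != len(listed)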
|
jlongstaf/f5-openstack-lbaasv2-driver
|
refs/heads/master
|
test/dev/testgetservice.py
|
5
|
import os
import sys
import time
from neutron.common import rpc as q_rpc
from neutron import context
from neutronclient.v2_0 import client as q_client
from oslo_config import cfg
import oslo_messaging as messaging
from f5lbaasdriver.v2.bigip import constants_v2
lb_dict = {
'loadbalancer': {
'vip_subnet_id': '85540bed-ea58-478f-b408-b51ff5c9e95e',
'tenant_id': 'b8b1cb597c8b4cc9b452625c1c6d7da2',
'name': 'lb1'
}
}
def main():
username = ""
password = ""
auth_url = ""
if 'OS_USERNAME' in os.environ:
username = os.environ['OS_USERNAME']
else:
print("OS_USERNAME not defined in environment")
sys.exit(1)
if 'OS_PASSWORD' in os.environ:
password = os.environ['OS_PASSWORD']
else:
print("OS_PASSWORD not defined in environment")
sys.exit(1)
if 'OS_TENANT_NAME' in os.environ:
tenant_name = os.environ['OS_TENANT_NAME']
else:
print("OS_TENANT_NAME not defined in environment")
sys.exit(1)
if 'OS_AUTH_URL' in os.environ:
auth_url = os.environ['OS_AUTH_URL']
else:
print("OS_AUTH_URL not defined in environment")
sys.exit(1)
neutron = q_client.Client(username=username,
password=password,
tenant_name=tenant_name,
auth_url=auth_url)
subnets = neutron.list_subnets()['subnets']
for subnet in subnets:
if subnet['name'] == 'private-subnet':
lb_dict['loadbalancer']['vip_subnet_id'] = subnet['id']
lb_dict['loadbalancer']['tenant_id'] = subnet['tenant_id']
neutron.create_loadbalancer(lb_dict)
loadbalancers = neutron.list_loadbalancers()['loadbalancers']
for loadbalancer in loadbalancers:
if loadbalancer['name'] == lb_dict['loadbalancer']['name']:
break
environment_prefix = 'Test'
topic = '%s_%s'\
% (constants_v2.TOPIC_PROCESS_ON_HOST_V2, environment_prefix)
print(topic)
q_rpc.init(cfg.CONF)
transport = messaging.get_transport(cfg.CONF)
target = messaging.Target(topic=topic)
rpc_client = messaging.RPCClient(transport, target)
ctxt = context.get_admin_context().to_dict()
print(loadbalancer['id'])
time.sleep(5)
service = rpc_client.call(ctxt, 'get_service_by_loadbalancer_id',
loadbalancer_id=loadbalancer['id'],
global_routed_mode=True,
host=None)
print(service)
neutron.delete_loadbalancer(loadbalancer['id'])
if __name__ == '__main__':
main()
|
yt752/theano_exercises
|
refs/heads/master
|
02_advanced/01_symbolic/04_two_step_backprop_soln.py
|
13
|
import numpy as np
from theano import config
from theano import function
from theano import shared
from theano import tensor as T
from theano.compat.python2x import OrderedDict
num_vis = 2
class SimpleMLP(object):
"""
An MLP with one sigmoid hidden layer and one linear output layer
(for solving regression problems).
"""
def __init__(self):
rng = np.random.RandomState([1, 2, 3])
self.num_hid = 3
self.W_hid = shared(rng.randn(num_vis, self.num_hid).astype(
config.floatX))
self.w_out = shared(rng.randn(self.num_hid).astype(config.floatX))
def fprop(self, X):
"""
X : A Theano matrix of input examples.
Each row is an example.
Each column is a feature.
Returns:
H: A Theano matrix of hidden unit values
y_hat: A Theano vector of outputs. Output i is the predicted
value for example i.
"""
H = T.nnet.sigmoid(T.dot(X, self.W_hid))
y_hat = T.dot(H, self.w_out)
return H, y_hat
def loss(y_hat, y):
"""
y_hat : A minibatch of predictions
y : A minibatch of targets
Returns an expression for the loss on this minibatch
"""
return T.sqr(y_hat - y).mean()
def two_step_backprop(mlp):
"""
mlp: A SimpleMLP instance
Returns:
f1: a theano function
Takes two arguments: a minibatch of examples and a minibatch of
targets.
Returns two values:
1) The gradient of the loss on mlp.w_out
2) An auxiliary value of your choosing
f2: Takes two arguments: a minibatch of examples, and the auxiliary
value returned by f1.
Returns the gradient of the loss on mlp.W_hid
Should not make use of mlp.w_out at all!
"""
# Run fprop
X = T.matrix()
y = T.vector()
H, y_hat = mlp.fprop(X)
l = loss(y_hat, y)
g_w, g_H = T.grad(l, [mlp.w_out, H])
f1 = function([X, y], [g_w, g_H])
known_grads = OrderedDict()
known_grads[H] = g_H
g_W = T.grad(None, mlp.W_hid, known_grads=known_grads)
f2 = function([X, g_H], g_W)
return f1, f2
if __name__ == "__main__":
mlp = SimpleMLP()
X = T.matrix()
y = T.vector()
H, y_hat = mlp.fprop(X)
l = loss(y_hat, y)
g_W, g_w = T.grad(l, [mlp.W_hid, mlp.w_out])
rng = np.random.RandomState([1, 2, 3])
m = 5
f = function([X, y], [g_W, g_w])
X = rng.randn(m, num_vis).astype(X.dtype)
y = rng.randn(m).astype(y.dtype)
g_W, g_w = f(X, y)
f1, f2 = two_step_backprop(mlp)
g_w2, aux = f1(X, y)
assert np.allclose(g_w, g_w2)
# Give w_out the wrong size to make sure f2 can't use it
mlp.w_out.set_value(np.ones(1).astype(mlp.w_out.dtype))
g_W2 = f2(X, aux)
assert np.allclose(g_W, g_W2)
print "SUCCESS!"
|
wartman4404/servo
|
refs/heads/master
|
components/script/dom/bindings/codegen/parser/tests/test_union_nullable.py
|
276
|
def WebIDLTest(parser, harness):
threw = False
try:
parser.parse("""
interface OneNullableInUnion {
void foo((object? or DOMString?) arg);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Two nullable member types of a union should have thrown.")
parser.reset()
threw = False
try:
parser.parse("""
interface NullableInNullableUnion {
void foo((object? or DOMString)? arg);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"A nullable union type with a nullable member type should have "
"thrown.")
parser.reset()
threw = False
try:
parser.parse("""
interface NullableInUnionNullableUnionHelper {
};
interface NullableInUnionNullableUnion {
void foo(((object? or DOMString) or NullableInUnionNullableUnionHelper)? arg);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"A nullable union type with a nullable member type should have "
"thrown.")
|
manishpatell/erpcustomizationssaiimpex123qwe
|
refs/heads/master
|
addons/payment_paypal/controllers/main.py
|
260
|
# -*- coding: utf-8 -*-
try:
import simplejson as json
except ImportError:
import json
import logging
import pprint
import urllib2
import werkzeug
from openerp import http, SUPERUSER_ID
from openerp.http import request
_logger = logging.getLogger(__name__)
class PaypalController(http.Controller):
_notify_url = '/payment/paypal/ipn/'
_return_url = '/payment/paypal/dpn/'
_cancel_url = '/payment/paypal/cancel/'
def _get_return_url(self, **post):
""" Extract the return URL from the data coming from paypal. """
return_url = post.pop('return_url', '')
if not return_url:
custom = json.loads(post.pop('custom', False) or '{}')
return_url = custom.get('return_url', '/')
return return_url
def paypal_validate_data(self, **post):
""" Paypal IPN: three steps validation to ensure data correctness
- step 1: return an empty HTTP 200 response -> will be done at the end
by returning ''
- step 2: POST the complete, unaltered message back to Paypal (preceded
by cmd=_notify-validate), with same encoding
- step 3: paypal sends either VERIFIED or INVALID (single word)
Once data is validated, process it. """
res = False
new_post = dict(post, cmd='_notify-validate')
cr, uid, context = request.cr, request.uid, request.context
reference = post.get('item_number')
tx = None
if reference:
tx_ids = request.registry['payment.transaction'].search(cr, uid, [('reference', '=', reference)], context=context)
if tx_ids:
tx = request.registry['payment.transaction'].browse(cr, uid, tx_ids[0], context=context)
paypal_urls = request.registry['payment.acquirer']._get_paypal_urls(cr, uid, tx and tx.acquirer_id and tx.acquirer_id.environment or 'prod', context=context)
validate_url = paypal_urls['paypal_form_url']
urequest = urllib2.Request(validate_url, werkzeug.url_encode(new_post))
uopen = urllib2.urlopen(urequest)
resp = uopen.read()
if resp == 'VERIFIED':
_logger.info('Paypal: validated data')
res = request.registry['payment.transaction'].form_feedback(cr, SUPERUSER_ID, post, 'paypal', context=context)
elif resp == 'INVALID':
_logger.warning('Paypal: answered INVALID on data verification')
else:
_logger.warning('Paypal: unrecognized paypal answer, received %s instead of VERIFIED or INVALID' % resp)
return res
@http.route('/payment/paypal/ipn/', type='http', auth='none', methods=['POST'])
def paypal_ipn(self, **post):
""" Paypal IPN. """
_logger.info('Beginning Paypal IPN form_feedback with post data %s', pprint.pformat(post)) # debug
self.paypal_validate_data(**post)
return ''
@http.route('/payment/paypal/dpn', type='http', auth="none", methods=['POST'])
def paypal_dpn(self, **post):
""" Paypal DPN """
_logger.info('Beginning Paypal DPN form_feedback with post data %s', pprint.pformat(post)) # debug
return_url = self._get_return_url(**post)
self.paypal_validate_data(**post)
return werkzeug.utils.redirect(return_url)
@http.route('/payment/paypal/cancel', type='http', auth="none")
def paypal_cancel(self, **post):
""" When the user cancels its Paypal payment: GET on this route """
cr, uid, context = request.cr, SUPERUSER_ID, request.context
_logger.info('Beginning Paypal cancel with post data %s', pprint.pformat(post)) # debug
return_url = self._get_return_url(**post)
return werkzeug.utils.redirect(return_url)
|
miing/mci_migo
|
refs/heads/master
|
identityprovider/backend/base.py
|
1
|
# Copyright 2010, 2012 Canonical Ltd. This software is licensed under
# the GNU Affero General Public License version 3 (see the file
# LICENSE).
__all__ = [
'DatabaseWrapper', 'DatabaseError', 'IntegrityError',
]
import re
from hashlib import sha1
from django.conf import settings
from django.core.cache import cache
from django.db.utils import DatabaseError, IntegrityError
from django.utils.translation import ugettext as _
from .old import base as old_base
class BaseCache(object):
"""This class provides a cached version of a single database table.
Queries are routed through memcached first, so only cache misses
really hit the database. Inserts and deletes are carried out
in memcached only, so that the database is never written to.
Subclasses need to implement update if needed.
"""
table = None
primary_key = None
def insert(self, match, params, cursor):
"""Insert the row only into memcached."""
cols = [x.strip('",') for x in match.group('cols').split()]
values = dict(zip(cols, params))
key = self.cache_key(values[self.primary_key])
cache.add(key, str([params]))
def select(self, match, params, cursor):
"""Check memcached first, then hit the DB if not found."""
pkey_cond = '"%s"."%s" = %%s' % (self.table, self.primary_key)
assert pkey_cond in match.group('cond')
key = self.cache_key(params[0])
cached_value = cache.get(key)
if cached_value is None:
cursor.execute(match.group(0), params)
cached_value = cursor.fetchmany()
else:
cached_value = eval(cached_value)
return cached_value
def delete(self, match, params, cursor):
"""Mark the row as deleted in memcached.
Note that future queries to this row will produce no results,
even if there's an entry in the DB for it.
"""
delete_pattern = r'"%s" IN \(%%s\)' % self.primary_key
assert re.match(delete_pattern, match.group('cond'))
for dbkey in params[0]:
key = self.cache_key(dbkey)
cache.set(key, '[]')
def cache_key(self, dbkey):
"""Returns a canonical memcached key for a row in a table."""
hash = sha1(dbkey).hexdigest()
return 'db-%s-%s' % (self.table, hash)
class OpenIDAssociationCache(BaseCache):
table = 'openidassociation'
primary_key = 'handle'
class DjangoSessionCache(BaseCache):
table = 'django_session'
primary_key = 'session_key'
def update(self, match, params, cursor):
"""django_session is always updated in the same way, so we can map
that in to a row in the database.
"""
pkey_cond = '"%s"."%s" = %%s' % (self.table, self.primary_key)
assert pkey_cond == match.group('cond').strip()
update_format = '"session_data" = %s, "expire_date" = %s'
assert update_format == match.group('cols')
key = self.cache_key(params[2])
new_value = [params[2], params[0], params[1]]
cache.set(key, str([new_value]))
class AuthUserCache(BaseCache):
table = 'auth_user'
primary_key = 'auth_user_pkey'
def select(self, match, params, cursor):
"""Skip memcached completely."""
cursor.execute(match.group(0), params)
cached_value = cursor.fetchmany()
return cached_value
def update(self, match, params, cursor):
"""Does nothing.
During readonly mode auth_user will only be updated to set
last_login, so we ignore all updates.
"""
pass
cached_tables = {
'django_session': DjangoSessionCache(),
'openidassociation': OpenIDAssociationCache(),
'auth_user': AuthUserCache(),
}
class CursorReadOnlyWrapper(object):
command_patterns = {
'delete': r'^DELETE FROM "(?P<table>.*)" WHERE (?P<cond>.*)$',
'insert': (r'^INSERT INTO "(?P<table>.*)" \((?P<cols>.*)\) '
r'VALUES \((?P<values>.*)\)$'),
'select': (r'^SELECT (?P<cols>.*) FROM "(?P<table>.*)" '
r'WHERE (?P<cond>.*)$'),
'update': (r'^UPDATE "(?P<table>.*)" SET (?P<cols>.*) '
r'WHERE (?P<cond>.*)$'),
}
def __init__(self, cursor):
self.cursor = cursor
self.cache = None
def execute_cached(self, command, sql, params):
"""Attempt to carry out a command against memcache.
Return True if the command is successfully carried out.
"""
pattern = self.command_patterns.get(command)
if pattern is None:
return False
match = re.match(pattern, sql)
if match is not None:
table = match.group('table')
if table in cached_tables:
self.cache = cached_tables[table]
method = getattr(self.cache, command)
self._values = method(match, params, self.cursor)
return True
return False
def execute(self, sql, params=()):
command = sql.split(' ', 1)[0].lower()
executed = self.execute_cached(command, sql, params)
if executed:
return
if command in ['select', 'savepoint']:
return self.cursor.execute(sql, params)
else:
msg = (_('Attempted to %(command)s while in '
'read-only mode: \'%(sql)s\' %% (%(params)s)') %
{'command': command, 'sql': sql, 'params': params})
raise DatabaseError(msg)
def fetchone(self):
if self.cache is not None:
if len(self._values) == 0:
return None
value = self._values[0]
self._values = self._values[1:]
return value
else:
return self.cursor.fetchone()
def fetchmany(self, chunk):
if self.cache is not None:
if len(self._values) == 0:
raise StopIteration()
values = self._values[:chunk]
self._values = self._values[chunk:]
return values
else:
return self.cursor.fetchmany(chunk)
def __getattr__(self, attr):
return getattr(self.cursor, attr)
class DatabaseWrapper(old_base.DatabaseWrapper):
def _cursor(self, *args):
cursor = super(DatabaseWrapper, self)._cursor(*args)
if settings.READ_ONLY_MODE:
cursor = CursorReadOnlyWrapper(cursor)
return cursor
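# Usage sketch (illustrative only, not part of this module): the memcached key
# for a cached row is derived from its primary-key value, e.g. for a
# django_session row
#
#   DjangoSessionCache().cache_key('abc123')
#   # -> 'db-django_session-<sha1 hex digest of "abc123">'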
|
halberom/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/rackspace/rax_files.py
|
50
|
#!/usr/bin/python
# (c) 2013, Paul Durivage <paul.durivage@rackspace.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: rax_files
short_description: Manipulate Rackspace Cloud Files Containers
description:
- Manipulate Rackspace Cloud Files Containers
version_added: "1.5"
options:
clear_meta:
description:
- Optionally clear existing metadata when applying metadata to existing containers.
Selecting this option is only appropriate when setting type=meta
choices:
- "yes"
- "no"
default: "no"
container:
description:
- The container to use for container or metadata operations.
required: true
meta:
description:
- A hash of items to set as metadata values on a container
private:
description:
- Used to set a container as private, removing it from the CDN. B(Warning!)
Private containers, if previously made public, can have live objects
available until the TTL on cached objects expires
public:
description:
- Used to set a container as public, available via the Cloud Files CDN
region:
description:
- Region to create an instance in
default: DFW
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
ttl:
description:
- In seconds, set a container-wide TTL for all objects cached on CDN edge nodes.
Setting a TTL is only appropriate for containers that are public
type:
description:
- Type of object to do work on, i.e. metadata object or a container object
choices:
- file
- meta
default: file
web_error:
description:
- Sets an object to be presented as the HTTP error page when accessed by the CDN URL
web_index:
description:
- Sets an object to be presented as the HTTP index page when accessed by the CDN URL
author: "Paul Durivage (@angstwad)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: "Test Cloud Files Containers"
hosts: local
gather_facts: no
tasks:
- name: "List all containers"
rax_files:
state: list
- name: "Create container called 'mycontainer'"
rax_files:
container: mycontainer
- name: "Create container 'mycontainer2' with metadata"
rax_files:
container: mycontainer2
meta:
key: value
file_for: someuser@example.com
- name: "Set a container's web index page"
rax_files:
container: mycontainer
web_index: index.html
- name: "Set a container's web error page"
rax_files:
container: mycontainer
web_error: error.html
- name: "Make container public"
rax_files:
container: mycontainer
public: yes
- name: "Make container public with a 24 hour TTL"
rax_files:
container: mycontainer
public: yes
ttl: 86400
- name: "Make container private"
rax_files:
container: mycontainer
private: yes
- name: "Test Cloud Files Containers Metadata Storage"
hosts: local
gather_facts: no
tasks:
- name: "Get mycontainer2 metadata"
rax_files:
container: mycontainer2
type: meta
- name: "Set mycontainer2 metadata"
rax_files:
container: mycontainer2
type: meta
meta:
uploaded_by: someuser@example.com
- name: "Remove mycontainer2 metadata"
rax_files:
container: "mycontainer2"
type: meta
state: absent
meta:
key: ""
file_for: ""
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError as e:
HAS_PYRAX = False
EXIT_DICT = dict(success=True)
META_PREFIX = 'x-container-meta-'
def _get_container(module, cf, container):
try:
return cf.get_container(container)
except pyrax.exc.NoSuchContainer as e:
module.fail_json(msg=e.message)
def _fetch_meta(module, container):
EXIT_DICT['meta'] = dict()
try:
for k, v in container.get_metadata().items():
split_key = k.split(META_PREFIX)[-1]
EXIT_DICT['meta'][split_key] = v
except Exception as e:
module.fail_json(msg=e.message)
def meta(cf, module, container_, state, meta_, clear_meta):
c = _get_container(module, cf, container_)
if meta_ and state == 'present':
try:
meta_set = c.set_metadata(meta_, clear=clear_meta)
except Exception as e:
module.fail_json(msg=e.message)
elif meta_ and state == 'absent':
remove_results = []
for k, v in meta_.items():
c.remove_metadata_key(k)
remove_results.append(k)
EXIT_DICT['deleted_meta_keys'] = remove_results
elif state == 'absent':
remove_results = []
for k, v in c.get_metadata().items():
c.remove_metadata_key(k)
remove_results.append(k)
EXIT_DICT['deleted_meta_keys'] = remove_results
_fetch_meta(module, c)
_locals = locals().keys()
EXIT_DICT['container'] = c.name
if 'meta_set' in _locals or 'remove_results' in _locals:
EXIT_DICT['changed'] = True
module.exit_json(**EXIT_DICT)
def container(cf, module, container_, state, meta_, clear_meta, ttl, public,
private, web_index, web_error):
if public and private:
module.fail_json(msg='container cannot be simultaneously '
'set to public and private')
if state == 'absent' and (meta_ or clear_meta or public or private or web_index or web_error):
module.fail_json(msg='state cannot be omitted when setting/removing '
'attributes on a container')
if state == 'list':
# We don't care if attributes are specified, let's list containers
EXIT_DICT['containers'] = cf.list_containers()
module.exit_json(**EXIT_DICT)
try:
c = cf.get_container(container_)
except pyrax.exc.NoSuchContainer as e:
# Make the container if state=present, otherwise bomb out
if state == 'present':
try:
c = cf.create_container(container_)
except Exception as e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['changed'] = True
EXIT_DICT['created'] = True
else:
module.fail_json(msg=e.message)
else:
# Successfully grabbed a container object
# Delete if state is absent
if state == 'absent':
try:
cont_deleted = c.delete()
except Exception as e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['deleted'] = True
if meta_:
try:
meta_set = c.set_metadata(meta_, clear=clear_meta)
except Exception as e:
module.fail_json(msg=e.message)
finally:
_fetch_meta(module, c)
if ttl:
try:
c.cdn_ttl = ttl
except Exception as e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['ttl'] = c.cdn_ttl
if public:
try:
cont_public = c.make_public()
except Exception as e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['container_urls'] = dict(url=c.cdn_uri,
ssl_url=c.cdn_ssl_uri,
streaming_url=c.cdn_streaming_uri,
ios_uri=c.cdn_ios_uri)
if private:
try:
cont_private = c.make_private()
except Exception as e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['set_private'] = True
if web_index:
try:
cont_web_index = c.set_web_index_page(web_index)
except Exception as e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['set_index'] = True
finally:
_fetch_meta(module, c)
if web_error:
try:
cont_err_index = c.set_web_error_page(web_error)
except Exception as e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['set_error'] = True
finally:
_fetch_meta(module, c)
EXIT_DICT['container'] = c.name
EXIT_DICT['objs_in_container'] = c.object_count
EXIT_DICT['total_bytes'] = c.total_bytes
_locals = locals().keys()
if ('cont_deleted' in _locals
or 'meta_set' in _locals
or 'cont_public' in _locals
or 'cont_private' in _locals
or 'cont_web_index' in _locals
or 'cont_err_index' in _locals):
EXIT_DICT['changed'] = True
module.exit_json(**EXIT_DICT)
def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
private, web_index, web_error):
""" Dispatch from here to work with metadata or file objects """
cf = pyrax.cloudfiles
if cf is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if typ == "container":
container(cf, module, container_, state, meta_, clear_meta, ttl,
public, private, web_index, web_error)
else:
meta(cf, module, container_, state, meta_, clear_meta)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
container=dict(),
state=dict(choices=['present', 'absent', 'list'],
default='present'),
meta=dict(type='dict', default=dict()),
clear_meta=dict(default=False, type='bool'),
type=dict(choices=['container', 'meta'], default='container'),
ttl=dict(type='int'),
public=dict(default=False, type='bool'),
private=dict(default=False, type='bool'),
web_index=dict(),
web_error=dict()
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
container_ = module.params.get('container')
state = module.params.get('state')
meta_ = module.params.get('meta')
clear_meta = module.params.get('clear_meta')
typ = module.params.get('type')
ttl = module.params.get('ttl')
public = module.params.get('public')
private = module.params.get('private')
web_index = module.params.get('web_index')
web_error = module.params.get('web_error')
if state in ['present', 'absent'] and not container_:
module.fail_json(msg='please specify a container name')
if clear_meta and not typ == 'meta':
module.fail_json(msg='clear_meta can only be used when setting '
'metadata')
setup_rax_module(module, pyrax)
cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
private, web_index, web_error)
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
if __name__ == '__main__':
main()
|
fgaudin/aemanager
|
refs/heads/master
|
faq/models.py
|
1
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Category(models.Model):
label = models.CharField(max_length=255, verbose_name=_('Label'))
order = models.IntegerField(default=0, verbose_name=_('Order'))
class Meta:
ordering = ['order']
def __unicode__(self):
return self.label
class QuestionAnswer(models.Model):
question = models.TextField(verbose_name=_('Question'))
answer = models.TextField(verbose_name=_('Answer'))
category = models.ForeignKey(Category, verbose_name=_('Category'), related_name='questions')
order = models.IntegerField(default=0, verbose_name=_('Order'))
class Meta:
ordering = ['category', 'order']
def __unicode__(self):
return self.question
|
zakuro9715/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/tests/modeltests/files/tests.py
|
38
|
import shutil
from django.core.cache import cache
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from models import Storage, temp_storage, temp_storage_location
class FileTests(TestCase):
def tearDown(self):
shutil.rmtree(temp_storage_location)
def test_files(self):
# Attempting to access a FileField from the class raises a descriptive
# error
self.assertRaises(AttributeError, lambda: Storage.normal)
# An object without a file has limited functionality.
obj1 = Storage()
self.assertEqual(obj1.normal.name, "")
self.assertRaises(ValueError, lambda: obj1.normal.size)
# Saving a file enables full functionality.
obj1.normal.save("django_test.txt", ContentFile("content"))
self.assertEqual(obj1.normal.name, "tests/django_test.txt")
self.assertEqual(obj1.normal.size, 7)
self.assertEqual(obj1.normal.read(), "content")
# File objects can be assigned to FileField attributes, but shouldn't
# get committed until the model it's attached to is saved.
obj1.normal = SimpleUploadedFile("assignment.txt", "content")
dirs, files = temp_storage.listdir("tests")
self.assertEqual(dirs, [])
self.assertEqual(sorted(files), ["default.txt", "django_test.txt"])
obj1.save()
dirs, files = temp_storage.listdir("tests")
self.assertEqual(
sorted(files), ["assignment.txt", "default.txt", "django_test.txt"]
)
# Files can be read in a little at a time, if necessary.
obj1.normal.open()
self.assertEqual(obj1.normal.read(3), "con")
self.assertEqual(obj1.normal.read(), "tent")
self.assertEqual(list(obj1.normal.chunks(chunk_size=2)), ["co", "nt", "en", "t"])
# Save another file with the same name.
obj2 = Storage()
obj2.normal.save("django_test.txt", ContentFile("more content"))
self.assertEqual(obj2.normal.name, "tests/django_test_1.txt")
self.assertEqual(obj2.normal.size, 12)
# Push the objects into the cache to make sure they pickle properly
cache.set("obj1", obj1)
cache.set("obj2", obj2)
self.assertEqual(cache.get("obj2").normal.name, "tests/django_test_1.txt")
# Deleting an object does not delete the file it uses.
obj2.delete()
obj2.normal.save("django_test.txt", ContentFile("more content"))
self.assertEqual(obj2.normal.name, "tests/django_test_2.txt")
# Multiple files with the same name get _N appended to them.
objs = [Storage() for i in range(3)]
for o in objs:
o.normal.save("multiple_files.txt", ContentFile("Same Content"))
self.assertEqual(
[o.normal.name for o in objs],
["tests/multiple_files.txt", "tests/multiple_files_1.txt", "tests/multiple_files_2.txt"]
)
for o in objs:
o.delete()
# Default values allow an object to access a single file.
obj3 = Storage.objects.create()
self.assertEqual(obj3.default.name, "tests/default.txt")
self.assertEqual(obj3.default.read(), "default content")
# But it shouldn't be deleted, even if there are no more objects using
# it.
obj3.delete()
obj3 = Storage()
self.assertEqual(obj3.default.read(), "default content")
# Verify the fix for #5655, making sure the directory is only
# determined once.
obj4 = Storage()
obj4.random.save("random_file", ContentFile("random content"))
self.assertTrue(obj4.random.name.endswith("/random_file"))
# Clean up the temporary files and dir.
obj1.normal.delete()
obj2.normal.delete()
obj3.default.delete()
obj4.random.delete()
|
bdh1011/wau
|
refs/heads/master
|
venv/lib/python2.7/site-packages/twisted/web/sux.py
|
11
|
# -*- test-case-name: twisted.web.test.test_xml -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
*S*mall, *U*ncomplicated *X*ML.
This is a very simple implementation of XML/HTML as a network
protocol. It is not at all clever. Its main features are that it
does not:
- support namespaces
- mung mnemonic entity references
- validate
- perform *any* external actions (such as fetching URLs or writing files)
under *any* circumstances
It does, however, have lots and lots of horrible hacks for supporting
broken HTML (as an option; they're not on by default).
"""
from twisted.internet.protocol import Protocol
from twisted.python.reflect import prefixedMethodNames
# Elements of the three-tuples in the state table.
BEGIN_HANDLER = 0
DO_HANDLER = 1
END_HANDLER = 2
identChars = '.-_:'
lenientIdentChars = identChars + ';+#/%~'
def nop(*args, **kw):
"Do nothing."
def unionlist(*args):
l = []
for x in args:
l.extend(x)
d = dict([(x, 1) for x in l])
return d.keys()
def zipfndict(*args, **kw):
default = kw.get('default', nop)
d = {}
for key in unionlist(*[fndict.keys() for fndict in args]):
d[key] = tuple([x.get(key, default) for x in args])
return d
def prefixedMethodClassDict(clazz, prefix):
return dict([(name, getattr(clazz, prefix + name)) for name in prefixedMethodNames(clazz, prefix)])
def prefixedMethodObjDict(obj, prefix):
return dict([(name, getattr(obj, prefix + name)) for name in prefixedMethodNames(obj.__class__, prefix)])
class ParseError(Exception):
def __init__(self, filename, line, col, message):
self.filename = filename
self.line = line
self.col = col
self.message = message
def __str__(self):
return "%s:%s:%s: %s" % (self.filename, self.line, self.col,
self.message)
class XMLParser(Protocol):
state = None
encodings = None
filename = "<xml />"
beExtremelyLenient = 0
_prepend = None
# _leadingBodyData will sometimes be set before switching to the
# 'bodydata' state, when we "accidentally" read a byte of bodydata
# in a different state.
_leadingBodyData = None
def connectionMade(self):
self.lineno = 1
self.colno = 0
self.encodings = []
def saveMark(self):
'''Get the line number and column of the last character parsed'''
# This gets replaced during dataReceived, restored afterwards
return (self.lineno, self.colno)
def _parseError(self, message):
raise ParseError(*((self.filename,)+self.saveMark()+(message,)))
def _buildStateTable(self):
'''Return a dictionary of begin, do, end state function tuples'''
# _buildStateTable leaves something to be desired but it does what it
# does.. probably slowly, so I'm doing some evil caching so it doesn't
# get called more than once per class.
stateTable = getattr(self.__class__, '__stateTable', None)
if stateTable is None:
stateTable = self.__class__.__stateTable = zipfndict(
*[prefixedMethodObjDict(self, prefix)
for prefix in ('begin_', 'do_', 'end_')])
return stateTable
def _decode(self, data):
if 'UTF-16' in self.encodings or 'UCS-2' in self.encodings:
assert not len(data) & 1, 'UTF-16 must come in pairs for now'
if self._prepend:
data = self._prepend + data
for encoding in self.encodings:
data = unicode(data, encoding)
return data
def maybeBodyData(self):
if self.endtag:
return 'bodydata'
# Get ready for fun! We're going to allow
# <script>if (foo < bar)</script> to work!
# We do this by making everything between <script> and
# </script> a Text
# BUT <script src="foo"> will be special-cased to do regular,
# lenient behavior, because those may not have </script>
# -radix
if (self.tagName == 'script' and 'src' not in self.tagAttributes):
# we do this ourselves rather than having begin_waitforendscript
# because that can get called multiple times and we don't want
# bodydata to get reset other than the first time.
self.begin_bodydata(None)
return 'waitforendscript'
return 'bodydata'
def dataReceived(self, data):
stateTable = self._buildStateTable()
if not self.state:
# all UTF-16 starts with this string
if data.startswith('\xff\xfe'):
self._prepend = '\xff\xfe'
self.encodings.append('UTF-16')
data = data[2:]
elif data.startswith('\xfe\xff'):
self._prepend = '\xfe\xff'
self.encodings.append('UTF-16')
data = data[2:]
self.state = 'begin'
if self.encodings:
data = self._decode(data)
# bring state, lineno, colno into local scope
lineno, colno = self.lineno, self.colno
curState = self.state
# replace saveMark with a nested scope function
_saveMark = self.saveMark
def saveMark():
return (lineno, colno)
self.saveMark = saveMark
# fetch functions from the stateTable
beginFn, doFn, endFn = stateTable[curState]
try:
for byte in data:
# do newline stuff
if byte == '\n':
lineno += 1
colno = 0
else:
colno += 1
newState = doFn(byte)
if newState is not None and newState != curState:
# this is the endFn from the previous state
endFn()
curState = newState
beginFn, doFn, endFn = stateTable[curState]
beginFn(byte)
finally:
self.saveMark = _saveMark
self.lineno, self.colno = lineno, colno
# state doesn't make sense if there's an exception..
self.state = curState
def connectionLost(self, reason):
"""
End the last state we were in.
"""
stateTable = self._buildStateTable()
stateTable[self.state][END_HANDLER]()
# state methods
def do_begin(self, byte):
if byte.isspace():
return
if byte != '<':
if self.beExtremelyLenient:
self._leadingBodyData = byte
return 'bodydata'
self._parseError("First char of document [%r] wasn't <" % (byte,))
return 'tagstart'
def begin_comment(self, byte):
self.commentbuf = ''
def do_comment(self, byte):
self.commentbuf += byte
if self.commentbuf.endswith('-->'):
self.gotComment(self.commentbuf[:-3])
return 'bodydata'
def begin_tagstart(self, byte):
self.tagName = '' # name of the tag
self.tagAttributes = {} # attributes of the tag
self.termtag = 0 # is the tag self-terminating
self.endtag = 0
def do_tagstart(self, byte):
if byte.isalnum() or byte in identChars:
self.tagName += byte
if self.tagName == '!--':
return 'comment'
elif byte.isspace():
if self.tagName:
if self.endtag:
# properly strict thing to do here is probably to only
# accept whitespace
return 'waitforgt'
return 'attrs'
else:
self._parseError("Whitespace before tag-name")
elif byte == '>':
if self.endtag:
self.gotTagEnd(self.tagName)
return 'bodydata'
else:
self.gotTagStart(self.tagName, {})
return (not self.beExtremelyLenient) and 'bodydata' or self.maybeBodyData()
elif byte == '/':
if self.tagName:
return 'afterslash'
else:
self.endtag = 1
elif byte in '!?':
if self.tagName:
if not self.beExtremelyLenient:
self._parseError("Invalid character in tag-name")
else:
self.tagName += byte
self.termtag = 1
elif byte == '[':
if self.tagName == '!':
return 'expectcdata'
else:
self._parseError("Invalid '[' in tag-name")
else:
if self.beExtremelyLenient:
self.bodydata = '<'
return 'unentity'
self._parseError('Invalid tag character: %r'% byte)
def begin_unentity(self, byte):
self.bodydata += byte
def do_unentity(self, byte):
self.bodydata += byte
return 'bodydata'
def end_unentity(self):
self.gotText(self.bodydata)
def begin_expectcdata(self, byte):
self.cdatabuf = byte
def do_expectcdata(self, byte):
self.cdatabuf += byte
cdb = self.cdatabuf
cd = '[CDATA['
if len(cd) > len(cdb):
if cd.startswith(cdb):
return
elif self.beExtremelyLenient:
## WHAT THE CRAP!? MSWord9 generates HTML that includes these
## bizarre <![if !foo]> <![endif]> chunks, so I've gotta ignore
## 'em as best I can. this should really be a separate parse
## state but I don't even have any idea what these _are_.
return 'waitforgt'
else:
self._parseError("Mal-formed CDATA header")
if cd == cdb:
self.cdatabuf = ''
return 'cdata'
self._parseError("Mal-formed CDATA header")
def do_cdata(self, byte):
self.cdatabuf += byte
if self.cdatabuf.endswith("]]>"):
self.cdatabuf = self.cdatabuf[:-3]
return 'bodydata'
def end_cdata(self):
self.gotCData(self.cdatabuf)
self.cdatabuf = ''
def do_attrs(self, byte):
if byte.isalnum() or byte in identChars:
# XXX FIXME really handle !DOCTYPE at some point
if self.tagName == '!DOCTYPE':
return 'doctype'
if self.tagName[0] in '!?':
return 'waitforgt'
return 'attrname'
elif byte.isspace():
return
elif byte == '>':
self.gotTagStart(self.tagName, self.tagAttributes)
return (not self.beExtremelyLenient) and 'bodydata' or self.maybeBodyData()
elif byte == '/':
return 'afterslash'
elif self.beExtremelyLenient:
# discard and move on? Only case I've seen of this so far was:
# <foo bar="baz"">
return
self._parseError("Unexpected character: %r" % byte)
def begin_doctype(self, byte):
self.doctype = byte
def do_doctype(self, byte):
if byte == '>':
return 'bodydata'
self.doctype += byte
def end_doctype(self):
self.gotDoctype(self.doctype)
self.doctype = None
def do_waitforgt(self, byte):
if byte == '>':
if self.endtag or not self.beExtremelyLenient:
return 'bodydata'
return self.maybeBodyData()
def begin_attrname(self, byte):
self.attrname = byte
self._attrname_termtag = 0
def do_attrname(self, byte):
if byte.isalnum() or byte in identChars:
self.attrname += byte
return
elif byte == '=':
return 'beforeattrval'
elif byte.isspace():
return 'beforeeq'
elif self.beExtremelyLenient:
if byte in '"\'':
return 'attrval'
if byte in lenientIdentChars or byte.isalnum():
self.attrname += byte
return
if byte == '/':
self._attrname_termtag = 1
return
if byte == '>':
self.attrval = 'True'
self.tagAttributes[self.attrname] = self.attrval
self.gotTagStart(self.tagName, self.tagAttributes)
if self._attrname_termtag:
self.gotTagEnd(self.tagName)
return 'bodydata'
return self.maybeBodyData()
# something is really broken. let's leave this attribute where it
# is and move on to the next thing
return
self._parseError("Invalid attribute name: %r %r" % (self.attrname, byte))
def do_beforeattrval(self, byte):
if byte in '"\'':
return 'attrval'
elif byte.isspace():
return
elif self.beExtremelyLenient:
if byte in lenientIdentChars or byte.isalnum():
return 'messyattr'
if byte == '>':
self.attrval = 'True'
self.tagAttributes[self.attrname] = self.attrval
self.gotTagStart(self.tagName, self.tagAttributes)
return self.maybeBodyData()
if byte == '\\':
# I saw this in actual HTML once:
# <font size=\"3\"><sup>SM</sup></font>
return
self._parseError("Invalid initial attribute value: %r; Attribute values must be quoted." % byte)
attrname = ''
attrval = ''
def begin_beforeeq(self,byte):
self._beforeeq_termtag = 0
def do_beforeeq(self, byte):
if byte == '=':
return 'beforeattrval'
elif byte.isspace():
return
elif self.beExtremelyLenient:
if byte.isalnum() or byte in identChars:
self.attrval = 'True'
self.tagAttributes[self.attrname] = self.attrval
return 'attrname'
elif byte == '>':
self.attrval = 'True'
self.tagAttributes[self.attrname] = self.attrval
self.gotTagStart(self.tagName, self.tagAttributes)
if self._beforeeq_termtag:
self.gotTagEnd(self.tagName)
return 'bodydata'
return self.maybeBodyData()
elif byte == '/':
self._beforeeq_termtag = 1
return
self._parseError("Invalid attribute")
def begin_attrval(self, byte):
self.quotetype = byte
self.attrval = ''
def do_attrval(self, byte):
if byte == self.quotetype:
return 'attrs'
self.attrval += byte
def end_attrval(self):
self.tagAttributes[self.attrname] = self.attrval
self.attrname = self.attrval = ''
def begin_messyattr(self, byte):
self.attrval = byte
def do_messyattr(self, byte):
if byte.isspace():
return 'attrs'
elif byte == '>':
endTag = 0
if self.attrval.endswith('/'):
endTag = 1
self.attrval = self.attrval[:-1]
self.tagAttributes[self.attrname] = self.attrval
self.gotTagStart(self.tagName, self.tagAttributes)
if endTag:
self.gotTagEnd(self.tagName)
return 'bodydata'
return self.maybeBodyData()
else:
self.attrval += byte
def end_messyattr(self):
if self.attrval:
self.tagAttributes[self.attrname] = self.attrval
def begin_afterslash(self, byte):
self._after_slash_closed = 0
def do_afterslash(self, byte):
# this state is only after a self-terminating slash, e.g. <foo/>
if self._after_slash_closed:
self._parseError("Mal-formed")#XXX When does this happen??
if byte != '>':
if self.beExtremelyLenient:
return
else:
self._parseError("No data allowed after '/'")
self._after_slash_closed = 1
self.gotTagStart(self.tagName, self.tagAttributes)
self.gotTagEnd(self.tagName)
# don't need maybeBodyData here because there better not be
# any javascript code after a <script/>... we'll see :(
return 'bodydata'
def begin_bodydata(self, byte):
if self._leadingBodyData:
self.bodydata = self._leadingBodyData
del self._leadingBodyData
else:
self.bodydata = ''
def do_bodydata(self, byte):
if byte == '<':
return 'tagstart'
if byte == '&':
return 'entityref'
self.bodydata += byte
def end_bodydata(self):
self.gotText(self.bodydata)
self.bodydata = ''
def do_waitforendscript(self, byte):
if byte == '<':
return 'waitscriptendtag'
self.bodydata += byte
def begin_waitscriptendtag(self, byte):
self.temptagdata = ''
self.tagName = ''
self.endtag = 0
def do_waitscriptendtag(self, byte):
# 1 enforce / as first byte read
# 2 enforce following bytes to be subset of "script" until
# tagName == "script"
# 2a when that happens, gotText(self.bodydata) and gotTagEnd(self.tagName)
# 3 spaces can happen anywhere, they're ignored
# e.g. < / script >
# 4 anything else causes all data I've read to be moved to the
# bodydata, and switch back to waitforendscript state
# If it turns out this _isn't_ a </script>, we need to
# remember all the data we've been through so we can append it
# to bodydata
self.temptagdata += byte
# 1
if byte == '/':
self.endtag = True
elif not self.endtag:
self.bodydata += "<" + self.temptagdata
return 'waitforendscript'
# 2
elif byte.isalnum() or byte in identChars:
self.tagName += byte
if not 'script'.startswith(self.tagName):
self.bodydata += "<" + self.temptagdata
return 'waitforendscript'
elif self.tagName == 'script':
self.gotText(self.bodydata)
self.gotTagEnd(self.tagName)
return 'waitforgt'
# 3
elif byte.isspace():
return 'waitscriptendtag'
# 4
else:
self.bodydata += "<" + self.temptagdata
return 'waitforendscript'
def begin_entityref(self, byte):
self.erefbuf = ''
self.erefextra = '' # extra bit for lenient mode
def do_entityref(self, byte):
if byte.isspace() or byte == "<":
if self.beExtremelyLenient:
# '&foo' probably was '&foo'
if self.erefbuf and self.erefbuf != "amp":
self.erefextra = self.erefbuf
self.erefbuf = "amp"
if byte == "<":
return "tagstart"
else:
self.erefextra += byte
return 'spacebodydata'
self._parseError("Bad entity reference")
elif byte != ';':
self.erefbuf += byte
else:
return 'bodydata'
def end_entityref(self):
self.gotEntityReference(self.erefbuf)
# hacky support for space after & in entityref in beExtremelyLenient
# state should only happen in that case
def begin_spacebodydata(self, byte):
self.bodydata = self.erefextra
self.erefextra = None
do_spacebodydata = do_bodydata
end_spacebodydata = end_bodydata
# Sorta SAX-ish API
def gotTagStart(self, name, attributes):
'''Encountered an opening tag.
Default behaviour is to print.'''
print 'begin', name, attributes
def gotText(self, data):
'''Encountered text
Default behaviour is to print.'''
print 'text:', repr(data)
def gotEntityReference(self, entityRef):
'''Encountered mnemonic entity reference
Default behaviour is to print.'''
print 'entityRef: &%s;' % entityRef
def gotComment(self, comment):
'''Encountered comment.
Default behaviour is to ignore.'''
pass
def gotCData(self, cdata):
'''Encountered CDATA
Default behaviour is to call the gotText method'''
self.gotText(cdata)
def gotDoctype(self, doctype):
"""Encountered DOCTYPE
This is really grotty: it basically just gives you everything between
'<!DOCTYPE' and '>' as an argument.
"""
print '!DOCTYPE', repr(doctype)
def gotTagEnd(self, name):
'''Encountered closing tag
Default behaviour is to print.'''
print 'end', name
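# --- Hedged usage sketch (not part of the original Twisted module). ---
# XMLParser is normally driven through the Protocol interface: create an
# instance, call connectionMade(), feed bytes via dataReceived(), and
# finish with connectionLost(). The subclass below simply collects start
# tags instead of printing them; all names here are illustrative only.
class _ExampleTagCollector(XMLParser):
    def connectionMade(self):
        XMLParser.connectionMade(self)
        self.collected = []
    def gotTagStart(self, name, attributes):
        # record (tag name, attribute dict) pairs instead of printing
        self.collected.append((name, dict(attributes)))
    def gotText(self, data):
        pass
    def gotTagEnd(self, name):
        pass

def _example_parse(markup):
    # Drive the parser by hand, the way a transport normally would.
    p = _ExampleTagCollector()
    p.connectionMade()
    p.dataReceived(markup)
    p.connectionLost(None)
    return p.collected
# e.g. _example_parse('<a href="x">hi</a>') -> [('a', {'href': 'x'})]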
|
lite3/adbtool
|
refs/heads/master
|
adbtool/apkinstall.py
|
1
|
import argparse
import os
import os.path
import sys
from cmd import call, getAdb
import adbdevice
import apkinfo
# BASE_DIR="F:/release"
BASE_DIR = ""
def getApks(path, filters):
apks = os.listdir(path)
apks = filter(lambda filename: filename.endswith(".apk"), apks)
if filters is not None:
def myfilterfun(filename):
for f in filters:
if f not in filename:
return False
return True
apks = filter(myfilterfun, apks)
apks = map(lambda filename: os.path.join(path, filename), apks)
return apks
def getNewst(apks):
if len(apks) == 0:
return None
apks = sorted(
apks, cmp=lambda fa, fb: int(os.path.getmtime(fb) - os.path.getmtime(fa))
)
return apks[0]
def filterApks(fileorpath, filters):
apk = fileorpath
if os.path.isdir(fileorpath):
apks = getApks(fileorpath, filters)
if len(apks) == 0:
print("can not found apk file in %s " % fileorpath)
exit(1)
apk = getNewst(apks)
return apk
def install(apks, serials, run):
adb = getAdb()
last = len(apks) - 1
for i in range(0, len(apks)):
apk = apks[i]
isrun = run and last == i
for serial in serials:
cmd = '%s -s %s install -r "%s"' % (adb, serial, apk)
_, isOk = call(cmd, True)
print(isOk)
if isOk and isrun:
activity = apkinfo.parse(apk)
cmd = '%s -s %s shell am start -S "%s"' % (adb, serial, activity)
call(cmd)
# -------------- main ----------------
if __name__ == "__main__":
parser = argparse.ArgumentParser(
usage="%(prog)s [options] [path]", description="install apk file."
)
parser.add_argument("-f", "--filter", nargs="*", help="filtered by file name")
parser.add_argument(
"-r", "--run", action="store_true", help="run app after install"
)
parser.add_argument("path", nargs="?")
adbdevice.addArgumentParser(parser)
args = parser.parse_args()
isOk, serials, devices = adbdevice.doArgumentParser(args)
if isOk:
exit(0)
path = args.path if args.path is not None else "."
path = os.path.abspath(os.path.join(BASE_DIR, path))
apks = filterApks(path, args.filter)
if serials is not None and apks is not None:
install([apks], serials, args.run)
|
iamjy/beaglebone-kernel
|
refs/heads/master
|
tools/perf/python/twatch.py
|
7370
|
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
|
medspx/QGIS
|
refs/heads/master
|
tests/src/python/test_versioncompare.py
|
74
|
# -*- coding: utf-8 -*-
'''
test_versioncompare.py
--------------------------------------
Date : September 2016
Copyright : (C) 2016 Alexander Bruy
email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
'''
import qgis # NOQA
from qgis.testing import unittest, start_app
from pyplugin_installer.version_compare import compareVersions
start_app()
class TestVersionCompare(unittest.TestCase):
def setUp(self):
"""Run before each test."""
pass
def tearDown(self):
"""Run after each test."""
pass
def testCompareVersions(self):
a = '1.0.0'
# a == b
b = '1.0.0'
self.assertEqual(compareVersions(a, b), 0)
# a > b
b = '0.1.0'
self.assertEqual(compareVersions(a, b), 1)
# b > a
b = '1.1.0'
self.assertEqual(compareVersions(a, b), 2)
# test that prefix stripped correctly
a = 'ver. 1.0.0'
b = 'ver. 0.1.0'
self.assertEqual(compareVersions(a, b), 1)
# test versions with build numbers
a = '1.0.0-1'
b = '1.0.0-2'
self.assertEqual(compareVersions(a, b), 2)
# test versions with suffixes
a = '1.0.0a'
b = '1.0.0b'
self.assertEqual(compareVersions(a, b), 2)
# test versions with suffixes in different cases
a = '1.0.0-201609011405-2690BD9'
b = '1.0.0-201609011405-2690bd9'
self.assertEqual(compareVersions(a, b), 0)
if __name__ == '__main__':
unittest.main()
|
pavlova-marina/QGIS
|
refs/heads/master
|
python/plugins/processing/algs/lidar/lastools/lascontrol.py
|
9
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
lascontrol.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Isenburg'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from LAStoolsUtils import LAStoolsUtils
from LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterSelection
class lascontrol(LAStoolsAlgorithm):
POLYGON = "POLYGON"
INTERIOR = "INTERIOR"
OPERATION = "OPERATION"
OPERATIONS = ["clip", "classify"]
CLASSIFY_AS = "CLASSIFY_AS"
def defineCharacteristics(self):
self.name = "lascontrol"
self.group = "LAStools"
self.addParametersVerboseGUI()
self.addParametersPointInputGUI()
self.addParameter(ParameterVector(lascontrol.POLYGON,
self.tr("Input polygon(s)"), ParameterVector.VECTOR_TYPE_POLYGON))
self.addParameter(ParameterBoolean(lascontrol.INTERIOR,
self.tr("interior"), False))
self.addParameter(ParameterSelection(lascontrol.OPERATION,
self.tr("what to do with isolated points"), lascontrol.OPERATIONS, 0))
self.addParameter(ParameterNumber(lascontrol.CLASSIFY_AS,
self.tr("classify as"), 0, None, 12))
self.addParametersPointOutputGUI()
self.addParametersAdditionalGUI()
def processAlgorithm(self, progress):
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lascontrol")]
self.addParametersVerboseCommands(commands)
self.addParametersPointInputCommands(commands)
poly = self.getParameterValue(lascontrol.POLYGON)
if poly is not None:
commands.append("-poly")
commands.append(poly)
if self.getParameterValue(lascontrol.INTERIOR):
commands.append("-interior")
operation = self.getParameterValue(lascontrol.OPERATION)
if operation != 0:
commands.append("-classify")
classify_as = self.getParameterValue(lascontrol.CLASSIFY_AS)
commands.append(str(classify_as))
self.addParametersPointOutputCommands(commands)
self.addParametersAdditionalCommands(commands)
LAStoolsUtils.runLAStools(commands, progress)
|
gahoo/PiBox
|
refs/heads/master
|
PiBox/PiHome/PiApp/models.py
|
4
|
#coding=utf-8
'''
# This module contains PiApp's models
# For any issues or improvements please contact jacob-chen@iotwrt.com
'''
from django.db import models
from django.contrib.auth.models import AbstractUser
from PiHome import settings
import os
UPLOAD_ROOT = 'pibox_upload'
UPLOAD_ROOT_HOUSR_PLAN='pibox_upload/house_plan/'
UPLOAD_ROOT_PIC='pibox_upload/pic_datapoint/'
class PiUser(AbstractUser):
new_field = models.CharField(max_length=100)
class PiSettings(models.Model):
ip = models.GenericIPAddressField(default="127.0.0.1")
port = models.IntegerField(default=3333)
enable_register = models.BooleanField(default=True)
"""
my house model
"""
class Device(models.Model):
name = models.CharField(max_length=30)
describe = models.TextField(default='')
location = models.CharField(max_length=100)
x = models.FloatField()
y = models.FloatField()
class Home(models.Model):
name = models.CharField(max_length=100)
img = models.ImageField(upload_to = UPLOAD_ROOT_HOUSR_PLAN)
SENSORCHOICE = (
('s', 'switch'),
('n', 'numeric'),
('p', 'picture'),
)
CONDITIONCHOICE = (
('lt', 'less than'),
('lte', 'less than or equal'),
('gt', 'greater than'),
('gte', 'greater than or equal'),
('ne', 'not equal'),
('e', 'equal'),
)
class Sensor(models.Model):
device = models.ForeignKey(Device, related_name="sensor") #books = author.books.all()
name = models.CharField(max_length=100)
describe = models.TextField(default='')
sensor_class = models.CharField(max_length=1, choices=SENSORCHOICE)
unit = models.CharField(max_length=20, blank=True)
callback_value = models.FloatField(default=0)
callback_condition = models.CharField(max_length=3, blank=True, choices=CONDITIONCHOICE)
callback_file = models.CharField(max_length=120, blank=True)
class SwitchDatapoint(models.Model):
sensor = models.ForeignKey(Sensor, unique=True)
status = models.BooleanField(default=False)
class NumDatapoint(models.Model):
sensor = models.ForeignKey(Sensor)
#yyyy-MM-dd HH:mm:ss
key = models.DateTimeField(unique=True)
value = models.FloatField()
class PicDatapoint(models.Model):
sensor = models.ForeignKey(Sensor)
#yyyy-MM-dd HH:mm:ss
key = models.DateTimeField(unique=True)
pic_file = models.ImageField(upload_to = UPLOAD_ROOT_PIC)
"""
others
"""
TYPECHOICE = (
('d', 'danger'),
('i', 'info'),
('w', 'warning'),
)
class Notification(models.Model):
"""消息通知类
"""
type = models.CharField(max_length=1, choices=TYPECHOICE)
## 0 for all
user_id = models.IntegerField(db_index=True, default=0)
title = models.TextField(default='')
content = models.TextField(default='')
has_readed = models.BooleanField(default=False)
|
hypnotika/namebench
|
refs/heads/master
|
tools/convert_servers_to_csv.py
|
174
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool to convert listing to CSV format."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import csv
import re
import sys
import check_nameserver_popularity
import GeoIP
sys.path.append('..')
#sys.path.append('/Users/tstromberg/namebench')
import third_party
from libnamebench import addr_util
from libnamebench import nameserver_list
from libnamebench import config
output = csv.writer(open('output.csv', 'w'))
#output.writerow(['IP', 'Name', 'Hostname', 'Country/Region/City', 'Coords', 'ASN', 'Label', 'Status', 'Refs'])
gi = GeoIP.open('/usr/local/share/GeoLiteCity.dat', GeoIP.GEOIP_MEMORY_CACHE)
asn_lookup = GeoIP.open('/usr/local/share/GeoIPASNum.dat', GeoIP.GEOIP_MEMORY_CACHE)
ns_hash = config.GetLocalNameServerList()
for ip in ns_hash:
try:
details = gi.record_by_addr(ip)
except SystemError:
pass
if not details:
details = {}
city = details.get('city', '')
if city:
city = city.decode('latin-1')
country = details.get('country_name', '')
if country:
country = country.decode('latin-1')
latitude = details.get('latitude', '')
longitude = details.get('longitude', '')
country_code = details.get('country_code', '')
region = details.get('region_name', '')
if region:
region = region.decode('latin-1')
hostname = ns_hash[ip].get('hostname', '')
geo = '/'.join([x for x in [country_code, region, city] if x and not x.isdigit()]).encode('utf-8')
coords = ','.join(map(str, [latitude,longitude]))
status = ns_hash[ip].get('notes', '')
refs = None
asn = asn_lookup.org_by_addr(ip)
labels = ' '.join(list(ns_hash[ip]['labels']))
urls = check_nameserver_popularity.GetUrls(ip)
use_keywords = set()
if ns_hash[ip]['name'] and 'UNKNOWN' not in ns_hash[ip]['name']:
for word in ns_hash[ip]['name'].split(' ')[0].split('/'):
use_keywords.add(word.lower())
use_keywords.add(re.sub('[\W_]', '', word.lower()))
if '-' in word:
use_keywords.add(word.lower().replace('-', ''))
if hostname and hostname != ip:
use_keywords.add(addr_util.GetDomainPartOfHostname(ip))
for bad_word in ('ns', 'dns'):
if bad_word in use_keywords:
use_keywords.remove(bad_word)
print use_keywords
context_urls = []
for url in urls:
for keyword in use_keywords:
if re.search(keyword, url, re.I):
context_urls.append(url)
break
if context_urls:
urls = context_urls
row = [ip, labels, ns_hash[ip]['name'], hostname, geo, coords, asn, status[0:30], ' '.join(urls[:2])]
print row
output.writerow(row)
|
ddv2005/intercom
|
refs/heads/master
|
3rdparty/pjproject/tests/pjsua/scripts-recvfrom/202_reg_good_ok_wildcard.py
|
59
|
# $Id: 202_reg_good_ok_wildcard.py 2392 2008-12-22 18:54:58Z bennylp $
import inc_sip as sip
import inc_sdp as sdp
pjsua = "--null-audio --id=sip:CLIENT --registrar sip:127.0.0.1:$PORT " + \
"--username user --realm \"*\" --password passwd --auto-update-nat=0"
req1 = sip.RecvfromTransaction("Initial registration", 401,
include=["REGISTER sip"],
exclude=["Authorization"],
resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"1234\""],
expect="SIP/2.0 401"
)
req2 = sip.RecvfromTransaction("Registration retry with auth", 200,
include=["REGISTER sip", "Authorization:",
"realm=\"python\"", "username=\"user\"",
"nonce=\"1234\"", "response="],
expect="registration success"
)
recvfrom_cfg = sip.RecvfromCfg("Successful registration with wildcard realm test",
pjsua, [req1, req2])
|
grischa/django-tastypie
|
refs/heads/master
|
tastypie/contrib/contenttypes/fields.py
|
16
|
from functools import partial
from tastypie import fields
from tastypie.resources import Resource
from tastypie.exceptions import ApiFieldError
from django.db import models
from django.core.exceptions import ObjectDoesNotExist
from .resources import GenericResource
class GenericForeignKeyField(fields.ToOneField):
"""
Provides access to GenericForeignKey objects from the django content_types
framework.
"""
def __init__(self, to, attribute, **kwargs):
if not isinstance(to, dict):
raise ValueError('to field must be a dictionary in GenericForeignKeyField')
if len(to) <= 0:
raise ValueError('to field must have some values')
for k, v in to.iteritems():
if not issubclass(k, models.Model) or not issubclass(v, Resource):
raise ValueError('to field must map django models to tastypie resources')
super(GenericForeignKeyField, self).__init__(to, attribute, **kwargs)
def get_related_resource(self, related_instance):
self._to_class = self.to.get(type(related_instance), None)
if self._to_class is None:
raise TypeError('no resource for model %s' % type(related_instance))
return super(GenericForeignKeyField, self).get_related_resource(related_instance)
@property
def to_class(self):
if self._to_class and not issubclass(GenericResource, self._to_class):
return self._to_class
return partial(GenericResource, resources=self.to.values())
def resource_from_uri(self, fk_resource, uri, request=None, related_obj=None, related_name=None):
try:
obj = fk_resource.get_via_uri(uri, request=request)
fk_resource = self.get_related_resource(obj)
return super(GenericForeignKeyField, self).resource_from_uri(fk_resource, uri, request, related_obj, related_name)
except ObjectDoesNotExist:
raise ApiFieldError("Could not find the provided object via resource URI '%s'." % uri)
def build_related_resource(self, *args, **kwargs):
self._to_class = None
return super(GenericForeignKeyField, self).build_related_resource(*args, **kwargs)
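# --- Hedged usage sketch (not part of tastypie itself). ---
# GenericForeignKeyField maps concrete model classes to the resources that
# serve them, so a resource exposing a GenericForeignKey can declare the
# field as in the commented example below. ``Note``, ``Quote`` and their
# resources are hypothetical names used purely for illustration.
#
#   class TaggedItemResource(ModelResource):
#       content_object = GenericForeignKeyField({
#           Note: NoteResource,
#           Quote: QuoteResource,
#       }, 'content_object')
#
#       class Meta:
#           queryset = TaggedItem.objects.all()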
|
tdomhan/thrax
|
refs/heads/master
|
scripts/berant_to_reference.py
|
3
|
#!/usr/bin/env python
import os, sys, codecs
def main():
# <eat::animal_1::animal_2> <kill::animal_1::animal_2>
# <be affected than::animal_2::animal_1> <be susceptible than::animal_2::animal_1>
for line in sys.stdin:
(source, target) = line.lstrip().rstrip().split("\t")
(s_phr, s1, s2) = source[1:-1].split("::")
(t_phr, t1, t2) = target[1:-1].split("::")
if (s1[-2:] == t1[-2:]):
t1 = "[1]"
t2 = "[2]"
else:
t1 = "[2]"
t2 = "[1]"
s1 = "[1]"
s2 = "[2]"
print s1 + " " + s_phr + " " + s2 + " ||| " + t1 + " " + t_phr + " " + t2
if __name__ == "__main__":
main()
|
indexofire/django-cms-content
|
refs/heads/master
|
cms_content/menu.py
|
2
|
# -*- coding: utf-8 -*-
from django.core.cache import cache
from django.utils.translation import ugettext_lazy as _
from cms.menu_bases import CMSAttachMenu
from menus.base import NavigationNode
from menus.menu_pool import menu_pool
from cms_content.settings import ROOT_URL
from cms_content.models import CMSMenuID, CMSSection, CMSCategory, CMSArticle
from cms_content.utils.cache import get_cache_key
class CMSContentMenu(CMSAttachMenu):
"""CMS Content Menu
Append cms_content menu into cms menu.
"""
name = _(u"CMS Content Menu")
def get_nodes(self, request):
nodes = []
sections = list(CMSSection.objects.all().select_related(depth=1))
categories = list(CMSCategory.objects.all().select_related(depth=1))
articles = list(CMSArticle.objects.all().select_related(depth=1))
for section in sections:
nodes.append(NavigationNode(
section.name,
section.url,
section.menu.menuid,
)
)
for category in categories:
nodes.append(NavigationNode(
category.name,
category.url,
category.menu.menuid,
category.menu.parent,
)
)
#for article in articles:
# nodes.append(NavigationNode(
# article.title,
# article.url,
# article.menu.menuid,
# article.menu.parent,
# )
# )
return nodes
menu_pool.register_menu(CMSContentMenu)
|
plotly/python-api
|
refs/heads/master
|
packages/python/plotly/plotly/graph_objs/table/_hoverlabel.py
|
2
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "table"
_path_str = "table.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
}
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for align .
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for bgcolor .
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
bordercolor .
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
# font
# ----
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.table.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
Returns
-------
plotly.graph_objs.table.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for namelength
.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
namelength .
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.table.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
namelength .
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.table.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.table.Hoverlabel`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
_v = align if align is not None else _v
if _v is not None:
self["align"] = _v
_v = arg.pop("alignsrc", None)
_v = alignsrc if alignsrc is not None else _v
if _v is not None:
self["alignsrc"] = _v
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bgcolorsrc", None)
_v = bgcolorsrc if bgcolorsrc is not None else _v
if _v is not None:
self["bgcolorsrc"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("bordercolorsrc", None)
_v = bordercolorsrc if bordercolorsrc is not None else _v
if _v is not None:
self["bordercolorsrc"] = _v
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("namelength", None)
_v = namelength if namelength is not None else _v
if _v is not None:
self["namelength"] = _v
_v = arg.pop("namelengthsrc", None)
_v = namelengthsrc if namelengthsrc is not None else _v
if _v is not None:
self["namelengthsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
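# --- Hedged usage sketch (not part of the generated module). ---
# Hoverlabel instances are normally reached through plotly.graph_objects
# (go.table.Hoverlabel), but the class defined above can also be
# constructed directly; the property values below are arbitrary examples.
if __name__ == "__main__":
    example = Hoverlabel(
        align="left",          # one of 'left', 'right', 'auto'
        bgcolor="white",
        bordercolor="black",
        namelength=-1,         # -1 shows the whole trace name
    )
    # Serialize back to the plain dict form that plotly.js consumes.
    print(example.to_plotly_json())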
|
AshishNamdev/linux
|
refs/heads/master
|
Documentation/sphinx/cdomain.py
|
146
|
# -*- coding: utf-8; mode: python -*-
# pylint: disable=W0141,C0113,C0103,C0325
u"""
cdomain
~~~~~~~
Replacement for the sphinx c-domain.
:copyright: Copyright (C) 2016 Markus Heiser
:license: GPL Version 2, June 1991 see Linux/COPYING for details.
List of customizations:
* Moved the *duplicate C object description* warnings for function
declarations in the nitpicky mode. See Sphinx documentation for
the config values for ``nitpick`` and ``nitpick_ignore``.
* Add option 'name' to the "c:function:" directive. With option 'name' the
ref-name of a function can be modified. E.g.::
.. c:function:: int ioctl( int fd, int request )
:name: VIDIOC_LOG_STATUS
The func-name (e.g. ioctl) remains in the output but the ref-name is changed
from 'ioctl' to 'VIDIOC_LOG_STATUS'. The function is referenced by::
* :c:func:`VIDIOC_LOG_STATUS` or
* :any:`VIDIOC_LOG_STATUS` (``:any:`` needs sphinx 1.3)
* Handle signatures of function-like macros well. Don't try to deduce
argument types of function-like macros.
"""
from docutils import nodes
from docutils.parsers.rst import directives
import sphinx
from sphinx import addnodes
from sphinx.domains.c import c_funcptr_sig_re, c_sig_re
from sphinx.domains.c import CObject as Base_CObject
from sphinx.domains.c import CDomain as Base_CDomain
__version__ = '1.0'
# Get Sphinx version
major, minor, patch = map(int, sphinx.__version__.split("."))
def setup(app):
app.override_domain(CDomain)
return dict(
version = __version__,
parallel_read_safe = True,
parallel_write_safe = True
)
class CObject(Base_CObject):
"""
Description of a C language object.
"""
option_spec = {
"name" : directives.unchanged
}
def handle_func_like_macro(self, sig, signode):
u"""Handles signatures of function-like macros.
If the objtype is 'function' and the signature ``sig`` is a
function-like macro, the name of the macro is returned. Otherwise
``False`` is returned. """
if not self.objtype == 'function':
return False
m = c_funcptr_sig_re.match(sig)
if m is None:
m = c_sig_re.match(sig)
if m is None:
raise ValueError('no match')
rettype, fullname, arglist, _const = m.groups()
arglist = arglist.strip()
if rettype or not arglist:
return False
arglist = arglist.replace('`', '').replace('\\ ', '') # remove markup
arglist = [a.strip() for a in arglist.split(",")]
# does the first argument have a type?
if len(arglist[0].split(" ")) > 1:
return False
# This is a function-like macro, its arguments are typeless!
signode += addnodes.desc_name(fullname, fullname)
paramlist = addnodes.desc_parameterlist()
signode += paramlist
for argname in arglist:
param = addnodes.desc_parameter('', '', noemph=True)
# separate by non-breaking space in the output
param += nodes.emphasis(argname, argname)
paramlist += param
return fullname
def handle_signature(self, sig, signode):
"""Transform a C signature into RST nodes."""
fullname = self.handle_func_like_macro(sig, signode)
if not fullname:
fullname = super(CObject, self).handle_signature(sig, signode)
if "name" in self.options:
if self.objtype == 'function':
fullname = self.options["name"]
else:
# FIXME: handle :name: value of other declaration types?
pass
return fullname
def add_target_and_index(self, name, sig, signode):
# for C API items we add a prefix since names are usually not qualified
# by a module name and so easily clash with e.g. section titles
targetname = 'c.' + name
if targetname not in self.state.document.ids:
signode['names'].append(targetname)
signode['ids'].append(targetname)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
inv = self.env.domaindata['c']['objects']
if (name in inv and self.env.config.nitpicky):
if self.objtype == 'function':
if ('c:func', name) not in self.env.config.nitpick_ignore:
self.state_machine.reporter.warning(
'duplicate C object description of %s, ' % name +
'other instance in ' + self.env.doc2path(inv[name][0]),
line=self.lineno)
inv[name] = (self.env.docname, self.objtype)
indextext = self.get_index_text(name)
if indextext:
if major == 1 and minor < 4:
# indexnode's tuple changed in 1.4
# https://github.com/sphinx-doc/sphinx/commit/e6a5a3a92e938fcd75866b4227db9e0524d58f7c
self.indexnode['entries'].append(
('single', indextext, targetname, ''))
else:
self.indexnode['entries'].append(
('single', indextext, targetname, '', None))
class CDomain(Base_CDomain):
"""C language domain."""
name = 'c'
label = 'C'
directives = {
'function': CObject,
'member': CObject,
'macro': CObject,
'type': CObject,
'var': CObject,
}
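# --- Added illustration (not part of the original kernel file): a hedged
# sketch of how a Sphinx project would typically enable this replacement
# domain. It assumes the directory containing cdomain.py is already on
# sys.path when conf.py is evaluated; the extension name below is inferred
# from the module name and is an assumption, not taken from the file.
#
#     # conf.py
#     extensions = ['cdomain']
#
# Once loaded, setup() above calls app.override_domain(CDomain), so existing
# ".. c:function::" directives keep working and additionally accept the
# ":name:" option documented in the module docstring.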
|
angelapper/odoo
|
refs/heads/9.0
|
openerp/report/render/rml2pdf/__init__.py
|
49
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from trml2pdf import parseString, parseNode
|
jpt4/urbit
|
refs/heads/master
|
outside/commonmark/test/spec_tests.py
|
23
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from difflib import unified_diff
import argparse
import re
import json
from cmark import CMark
from normalize import normalize_html
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run cmark tests.')
parser.add_argument('--program', dest='program', nargs='?', default=None,
help='program to test')
parser.add_argument('--spec', dest='spec', nargs='?', default='spec.txt',
help='path to spec')
parser.add_argument('--pattern', dest='pattern', nargs='?',
default=None, help='limit to sections matching regex pattern')
parser.add_argument('--library-dir', dest='library_dir', nargs='?',
default=None, help='directory containing dynamic library')
parser.add_argument('--no-normalize', dest='normalize',
action='store_const', const=False, default=True,
help='do not normalize HTML')
parser.add_argument('--dump-tests', dest='dump_tests',
action='store_const', const=True, default=False,
help='dump tests in JSON format')
parser.add_argument('--debug-normalization', dest='debug_normalization',
action='store_const', const=True,
default=False, help='filter stdin through normalizer for testing')
args = parser.parse_args(sys.argv[1:])
def print_test_header(headertext, example_number, start_line, end_line):
print "Example %d (lines %d-%d) %s" % (example_number,start_line,end_line,headertext)
def do_test(test, normalize):
[retcode, actual_html, err] = cmark.to_html(test['markdown'])
if retcode == 0:
expected_html = test['html']
if normalize:
passed = normalize_html(actual_html) == normalize_html(expected_html)
else:
passed = actual_html == expected_html
if passed:
return 'pass'
else:
print_test_header(test['section'], test['example'], test['start_line'], test['end_line'])
sys.stdout.write(test['markdown'])
expected_html_lines = expected_html.splitlines(True)
actual_html_lines = actual_html.splitlines(True)
for diffline in unified_diff(expected_html_lines, actual_html_lines,
"expected HTML", "actual HTML"):
sys.stdout.write(diffline)
sys.stdout.write('\n')
return 'fail'
else:
print_test_header(test['section'], test['example'], test['start_line'], test['end_line'])
print "program returned error code %d" % retcode
print(err)
return 'error'
def get_tests(specfile):
line_number = 0
start_line = 0
end_line = 0
example_number = 0
markdown_lines = []
html_lines = []
state = 0 # 0 regular text, 1 markdown example, 2 html output
headertext = ''
tests = []
header_re = re.compile('#+ ')
with open(specfile, 'r') as specf:
for line in specf:
line_number = line_number + 1
if state == 0 and re.match(header_re, line):
headertext = header_re.sub('', line).strip()
if line.strip() == ".":
state = (state + 1) % 3
if state == 0:
example_number = example_number + 1
end_line = line_number
tests.append({
"markdown":''.join(markdown_lines).replace('→',"\t"),
"html":''.join(html_lines),
"example": example_number,
"start_line": start_line,
"end_line": end_line,
"section": headertext})
start_line = 0
markdown_lines = []
html_lines = []
elif state == 1:
if start_line == 0:
start_line = line_number - 1
markdown_lines.append(line)
elif state == 2:
html_lines.append(line)
return tests
def do_tests(cmark, tests, pattern, normalize):
passed = 0
errored = 0
failed = 0
skipped = 0
if pattern:
pattern_re = re.compile(pattern, re.IGNORECASE)
else:
pattern_re = re.compile('.')
for test in tests:
if re.search(pattern_re, test['section']):
result = do_test(test, normalize)
if result == 'pass':
passed += 1
elif result == 'fail':
failed += 1
else:
errored += 1
else:
skipped += 1
print "%d passed, %d failed, %d errored, %d skipped" % (passed, failed, errored, skipped)
return (failed == 0 and errored == 0)
if __name__ == "__main__":
if args.debug_normalization:
print normalize_html(sys.stdin.read())
exit(0)
tests = get_tests(args.spec)
if args.dump_tests:
print json.dumps(tests, ensure_ascii=False, indent=2)
exit(0)
else:
cmark = CMark(prog=args.program, library_dir=args.library_dir)
if do_tests(cmark, tests, args.pattern, args.normalize):
exit(0)
else:
exit(1)
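# --- Added illustration (not part of the original file): a hedged sketch of
# the spec.txt layout that get_tests() above parses. Lines containing a single
# '.' act as delimiters: the first opens the markdown input, the second
# switches to the expected HTML, and the third closes the example and records
# it under the most recent "#"-style section heading, e.g.:
#
#     ## Emphasis
#     .
#     *hello*
#     .
#     <p><em>hello</em></p>
#     .
#
# Tabs in the markdown side are written as '→' in the spec and converted back
# to real tab characters by get_tests().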
|
rynomster/django
|
refs/heads/master
|
tests/test_discovery_sample/tests_sample.py
|
98
|
import doctest
from unittest import TestCase
from django.test import SimpleTestCase, TestCase as DjangoTestCase, tag
from . import doctests
class TestVanillaUnittest(TestCase):
def test_sample(self):
self.assertEqual(1, 1)
class TestDjangoTestCase(DjangoTestCase):
def test_sample(self):
self.assertEqual(1, 1)
class TestZimpleTestCase(SimpleTestCase):
# Z is used to trick this test case to appear after Vanilla in default suite
def test_sample(self):
self.assertEqual(1, 1)
class EmptyTestCase(TestCase):
pass
@tag('slow')
class TaggedTestCase(TestCase):
@tag('fast')
def test_single_tag(self):
self.assertEqual(1, 1)
@tag('fast', 'core')
def test_multiple_tags(self):
self.assertEqual(1, 1)
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(doctests))
return tests
|
lihui7115/ChromiumGStreamerBackend
|
refs/heads/master
|
tools/json_comment_eater/json_comment_eater.py
|
42
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Utility to remove comments from JSON files so that they can be parsed by
json.loads.
'''
import sys
def _Rcount(string, chars):
'''Returns the number of consecutive characters from |chars| that occur at the
end of |string|.
'''
return len(string) - len(string.rstrip(chars))
def _FindNextToken(string, tokens, start):
'''Finds the next token in |tokens| that occurs in |string| from |start|.
Returns a tuple (index, token key).
'''
min_index, min_key = (-1, None)
for k in tokens:
index = string.find(k, start)
if index != -1 and (min_index == -1 or index < min_index):
min_index, min_key = (index, k)
return (min_index, min_key)
def _ReadString(input, start, output):
output.append('"')
start_range, end_range = (start, input.find('"', start))
# \" escapes the ", \\" doesn't, \\\" does, etc.
while (end_range != -1 and
_Rcount(input[start_range:end_range], '\\') % 2 == 1):
start_range, end_range = (end_range, input.find('"', end_range + 1))
if end_range == -1:
return start_range + 1
output.append(input[start:end_range + 1])
return end_range + 1
def _ReadComment(input, start, output):
eol_tokens = ('\n', '\r')
eol_token_index, eol_token = _FindNextToken(input, eol_tokens, start)
if eol_token is None:
return len(input)
output.append(eol_token)
return eol_token_index + len(eol_token)
def _ReadMultilineComment(input, start, output):
end_tokens = ('*/',)
end_token_index, end_token = _FindNextToken(input, end_tokens, start)
if end_token is None:
raise Exception("Multiline comment end token (*/) not found")
return end_token_index + len(end_token)
def Nom(input):
token_actions = {
'"': _ReadString,
'//': _ReadComment,
'/*': _ReadMultilineComment,
}
output = []
pos = 0
while pos < len(input):
token_index, token = _FindNextToken(input, token_actions.keys(), pos)
if token is None:
output.append(input[pos:])
break
output.append(input[pos:token_index])
pos = token_actions[token](input, token_index + len(token), output)
return ''.join(output)
if __name__ == '__main__':
sys.stdout.write(Nom(sys.stdin.read()))
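# --- Added illustration (not part of the original Chromium file): a hedged
# usage sketch. The helper name below is hypothetical; it only exercises Nom()
# as defined above, which strips '//' and '/* */' comments that sit outside
# string literals so the remainder parses with json.loads.
def _demo_nom():
  import json
  commented = '{"a": 1, /* block comment */ "b": "//kept"}  // trailing comment'
  # The '//' inside the string literal survives; both real comments are removed.
  return json.loads(Nom(commented))  # -> {'a': 1, 'b': '//kept'}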
|
mkheirkhah/ns-3.23
|
refs/heads/development
|
src/visualizer/visualizer/hud.py
|
189
|
import goocanvas
import core
import math
import pango
import gtk
class Axes(object):
def __init__(self, viz):
self.viz = viz
self.color = 0x8080C0FF
self.hlines = goocanvas.Path(parent=viz.canvas.get_root_item(), stroke_color_rgba=self.color)
self.hlines.lower(None)
self.vlines = goocanvas.Path(parent=viz.canvas.get_root_item(), stroke_color_rgba=self.color)
self.vlines.lower(None)
self.labels = []
hadj = self.viz.get_hadjustment()
vadj = self.viz.get_vadjustment()
def update(adj):
if self.visible:
self.update_view()
hadj.connect("value-changed", update)
vadj.connect("value-changed", update)
hadj.connect("changed", update)
vadj.connect("changed", update)
self.visible = True
self.update_view()
def set_visible(self, visible):
self.visible = visible
if self.visible:
self.hlines.props.visibility = goocanvas.ITEM_VISIBLE
self.vlines.props.visibility = goocanvas.ITEM_VISIBLE
else:
self.hlines.props.visibility = goocanvas.ITEM_HIDDEN
self.vlines.props.visibility = goocanvas.ITEM_HIDDEN
for label in self.labels:
label.props.visibility = goocanvas.ITEM_HIDDEN
def _compute_divisions(self, xi, xf):
assert xf > xi
dx = xf - xi
size = dx
ndiv = 5
text_width = dx/ndiv/2
def rint(x):
return math.floor(x+0.5)
dx_over_ndiv = dx / ndiv
for n in range(5): # iterate 5 times to find optimum division size
#/* div: length of each division */
tbe = math.log10(dx_over_ndiv)#; /* looking for approx. 'ndiv' divisions in a length 'dx' */
div = pow(10, rint(tbe))#; /* div: power of 10 closest to dx/ndiv */
if math.fabs(div/2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv): #/* test if div/2 is closer to dx/ndiv */
div /= 2
elif math.fabs(div*2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv):
div *= 2 # /* test if div*2 is closer to dx/ndiv */
x0 = div*math.ceil(xi / div) - div
if n > 1:
ndiv = rint(size / text_width)
return x0, div
def update_view(self):
if self.viz.zoom is None:
return
unused_labels = self.labels
self.labels = []
for label in unused_labels:
label.set_property("visibility", goocanvas.ITEM_HIDDEN)
def get_label():
try:
label = unused_labels.pop(0)
except IndexError:
label = goocanvas.Text(parent=self.viz.canvas.get_root_item(), stroke_color_rgba=self.color)
else:
label.set_property("visibility", goocanvas.ITEM_VISIBLE)
label.lower(None)
self.labels.append(label)
return label
hadj = self.viz.get_hadjustment()
vadj = self.viz.get_vadjustment()
zoom = self.viz.zoom.value
offset = 10/zoom
x1, y1 = self.viz.canvas.convert_from_pixels(hadj.value, vadj.value)
x2, y2 = self.viz.canvas.convert_from_pixels(hadj.value + hadj.page_size, vadj.value + vadj.page_size)
line_width = 5.0/self.viz.zoom.value
# draw the horizontal axis
self.hlines.set_property("line-width", line_width)
yc = y2 - line_width/2
sim_x1 = x1/core.PIXELS_PER_METER
sim_x2 = x2/core.PIXELS_PER_METER
x0, xdiv = self._compute_divisions(sim_x1, sim_x2)
path = ["M %r %r L %r %r" % (x1, yc, x2, yc)]
x = x0
while x < sim_x2:
path.append("M %r %r L %r %r" % (core.PIXELS_PER_METER*x, yc - offset, core.PIXELS_PER_METER*x, yc))
label = get_label()
label.set_properties(font=("Sans Serif %f" % int(12/zoom)),
text=("%G" % x),
fill_color_rgba=self.color,
alignment=pango.ALIGN_CENTER,
anchor=gtk.ANCHOR_S,
x=core.PIXELS_PER_METER*x,
y=(yc - offset))
x += xdiv
del x
self.hlines.set_property("data", " ".join(path))
# draw the vertical axis
self.vlines.set_property("line-width", line_width)
xc = x1 + line_width/2
sim_y1 = y1/core.PIXELS_PER_METER
sim_y2 = y2/core.PIXELS_PER_METER
y0, ydiv = self._compute_divisions(sim_y1, sim_y2)
path = ["M %r %r L %r %r" % (xc, y1, xc, y2)]
y = y0
while y < sim_y2:
path.append("M %r %r L %r %r" % (xc, core.PIXELS_PER_METER*y, xc + offset, core.PIXELS_PER_METER*y))
label = get_label()
label.set_properties(font=("Sans Serif %f" % int(12/zoom)),
text=("%G" % y),
fill_color_rgba=self.color,
alignment=pango.ALIGN_LEFT,
anchor=gtk.ANCHOR_W,
x=xc + offset,
y=core.PIXELS_PER_METER*y)
y += ydiv
self.vlines.set_property("data", " ".join(path))
self.labels.extend(unused_labels)
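# --- Added illustration (not part of the original ns-3 file): a hedged,
# standalone restatement of the tick-spacing heuristic used by
# Axes._compute_divisions above. The function name is hypothetical; it keeps
# only the "power of ten nearest to range/ndiv, then halve or double if that
# lands closer" step and skips the label-width refinement loop.
def _demo_tick_spacing(xi, xf, ndiv=5):
    dx_over_ndiv = (xf - xi) / float(ndiv)
    div = pow(10, math.floor(math.log10(dx_over_ndiv) + 0.5))
    if math.fabs(div/2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv):
        div /= 2
    elif math.fabs(div*2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv):
        div *= 2
    return div  # e.g. _demo_tick_spacing(0, 37) -> 5.0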
|
mrquim/repository.mrquim
|
refs/heads/master
|
repo/plugin.video.castaway/resources/lib/resolvers/livestreamer.py
|
4
|
from resources.lib.modules import control,client
from resources.lib.modules.log_utils import log
import xbmc,os,re,urllib,base64
def resolve(url):
initial = url
libPath = os.path.join(control.addonPath, 'resources/lib/modules')
serverPath = os.path.join(libPath, 'livestreamerXBMCLocalProxy.py')
try:
import requests
requests.get('http://127.0.0.1:19000/version')
proxyIsRunning = True
except:
proxyIsRunning = False
if not proxyIsRunning:
xbmc.executebuiltin('RunScript(' + serverPath + ')')
url = re.findall('[\"\']([^\"\']+)',url)[0]
try:
headers = re.findall('-http-headers=([^\s]+)',url)[0]
except:
headers = urllib.urlencode({'User-agent':client.agent()})
url += '|' + headers
try:
cookies = re.findall('-http-cookie=([^\s]+)',initial)[0]
url += '|' + cookies
    except:
        cookies = None
    url = base64.b64encode(url)
    url = 'http://127.0.0.1:19000/livestreamer/' + url
    if cookies:
        # preserve the original behaviour of appending the raw cookie string,
        # but only when one was actually found (avoids a NameError otherwise)
        url += '|' + cookies
    return url
|
takeshineshiro/django-cms
|
refs/heads/develop
|
cms/utils/i18n.py
|
55
|
# -*- coding: utf-8 -*-
from contextlib import contextmanager
from django.core.urlresolvers import get_resolver, LocaleRegexURLResolver
from django.conf import settings
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from cms.exceptions import LanguageError
from cms.utils.conf import get_cms_setting, get_site_id
@contextmanager
def force_language(new_lang):
old_lang = get_current_language()
if old_lang != new_lang:
translation.activate(new_lang)
yield
translation.activate(old_lang)
def get_languages(site_id=None):
site_id = get_site_id(site_id)
result = get_cms_setting('LANGUAGES').get(site_id)
if not result:
result = []
defaults = get_cms_setting('LANGUAGES').get('default', {})
for code, name in settings.LANGUAGES:
lang = {'code': code, 'name': _(name)}
lang.update(defaults)
result.append(lang)
get_cms_setting('LANGUAGES')[site_id] = result
return result
def get_language_code(language_code):
"""
Returns language code while making sure it's in LANGUAGES
"""
if not language_code:
return None
languages = get_language_list()
if language_code in languages: # direct hit
return language_code
for lang in languages:
if language_code.split('-')[0] == lang: # base language hit
return lang
if lang.split('-')[0] == language_code: # base language hit
return lang
return language_code
def get_current_language():
"""
Returns the currently active language
It's a replacement for Django's translation.get_language() to make sure the LANGUAGE_CODE will be found in LANGUAGES.
Overcomes this issue: https://code.djangoproject.com/ticket/9340
"""
language_code = translation.get_language()
return get_language_code(language_code)
def get_language_list(site_id=None):
"""
:return: returns a list of iso2codes for this site
"""
return ([lang['code'] for lang in get_languages(site_id)] if settings.USE_I18N
else [settings.LANGUAGE_CODE])
def get_language_tuple(site_id=None):
"""
    :return: returns a list of tuples like the old CMS_LANGUAGES or the LANGUAGES for this site
"""
return [(lang['code'], lang['name']) for lang in get_languages(site_id)]
def get_language_dict(site_id=None):
"""
    :return: returns a dict of cms languages
"""
return dict(get_language_tuple(site_id))
def get_public_languages(site_id=None):
"""
:return: list of iso2codes of public languages for this site
"""
return [lang['code'] for lang in get_language_objects(site_id)
if lang.get('public', True)]
def get_language_object(language_code, site_id=None):
"""
:param language_code: RFC5646 language code
:return: the language object filled up by defaults
"""
for language in get_languages(site_id):
if language['code'] == get_language_code(language_code):
return language
raise LanguageError('Language not found: %s' % language_code)
def get_language_objects(site_id=None):
"""
returns list of all language objects filled up by default values
"""
return list(get_languages(site_id))
def get_default_language(language_code=None, site_id=None):
"""
Returns default language depending on settings.LANGUAGE_CODE merged with
best match from get_cms_setting('LANGUAGES')
Returns: language_code
"""
if not language_code:
language_code = get_language_code(settings.LANGUAGE_CODE)
languages = get_language_list(site_id)
# first try if there is an exact language
if language_code in languages:
return language_code
# otherwise split the language code if possible, so iso3
language_code = language_code.split("-")[0]
if not language_code in languages:
return settings.LANGUAGE_CODE
return language_code
def get_fallback_languages(language, site_id=None):
"""
returns a list of fallback languages for the given language
"""
try:
language = get_language_object(language, site_id)
except LanguageError:
language = get_languages(site_id)[0]
return language.get('fallbacks', [])
def get_redirect_on_fallback(language, site_id=None):
"""
returns if you should redirect on language fallback
:param language:
:param site_id:
:return: Boolean
"""
language = get_language_object(language, site_id)
return language.get('redirect_on_fallback', True)
def hide_untranslated(language, site_id=None):
"""
Should untranslated pages in this language be hidden?
:param language:
:param site_id:
:return: A Boolean
"""
obj = get_language_object(language, site_id)
return obj.get('hide_untranslated', True)
def is_language_prefix_patterns_used():
"""
Returns `True` if the `LocaleRegexURLResolver` is used
at root level of the urlpatterns, else it returns `False`.
"""
return any(isinstance(url_pattern, LocaleRegexURLResolver)
for url_pattern in get_resolver(None).url_patterns)
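# --- Added illustration (not part of the original django-cms file): a hedged
# usage sketch of force_language(). It assumes a configured Django settings
# module with USE_I18N enabled and a 'de' translation catalog available; the
# helper name is hypothetical.
def _demo_force_language():
    # The previously active language is restored when the block exits.
    with force_language('de'):
        return translation.ugettext('Welcome')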
|
byt3bl33d3r/sslstrip2
|
refs/heads/master
|
sslstrip/CookieCleaner.py
|
107
|
# Copyright (c) 2004-2011 Moxie Marlinspike
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
import logging
import string
class CookieCleaner:
'''This class cleans cookies we haven't seen before. The basic idea is to
kill sessions, which isn't entirely straight-forward. Since we want this to
be generalized, there's no way for us to know exactly what cookie we're trying
to kill, which also means we don't know what domain or path it has been set for.
The rule with cookies is that specific overrides general. So cookies that are
set for mail.foo.com override cookies with the same name that are set for .foo.com,
just as cookies that are set for foo.com/mail override cookies with the same name
that are set for foo.com/
The best we can do is guess, so we just try to cover our bases by expiring cookies
in a few different ways. The most obvious thing to do is look for individual cookies
and nail the ones we haven't seen coming from the server, but the problem is that cookies are often
set by Javascript instead of a Set-Cookie header, and if we block those the site
    will think cookies are disabled in the browser. So we do the expirations and whitelisting
based on client,server tuples. The first time a client hits a server, we kill whatever
cookies we see then. After that, we just let them through. Not perfect, but pretty effective.
'''
_instance = None
def getInstance():
if CookieCleaner._instance == None:
CookieCleaner._instance = CookieCleaner()
return CookieCleaner._instance
getInstance = staticmethod(getInstance)
def __init__(self):
self.cleanedCookies = set();
self.enabled = False
def setEnabled(self, enabled):
self.enabled = enabled
def isClean(self, method, client, host, headers):
if method == "POST": return True
if not self.enabled: return True
if not self.hasCookies(headers): return True
return (client, self.getDomainFor(host)) in self.cleanedCookies
def getExpireHeaders(self, method, client, host, headers, path):
domain = self.getDomainFor(host)
self.cleanedCookies.add((client, domain))
expireHeaders = []
for cookie in headers['cookie'].split(";"):
cookie = cookie.split("=")[0].strip()
expireHeadersForCookie = self.getExpireCookieStringFor(cookie, host, domain, path)
expireHeaders.extend(expireHeadersForCookie)
return expireHeaders
def hasCookies(self, headers):
return 'cookie' in headers
def getDomainFor(self, host):
hostParts = host.split(".")
return "." + hostParts[-2] + "." + hostParts[-1]
def getExpireCookieStringFor(self, cookie, host, domain, path):
pathList = path.split("/")
expireStrings = list()
expireStrings.append(cookie + "=" + "EXPIRED;Path=/;Domain=" + domain +
";Expires=Mon, 01-Jan-1990 00:00:00 GMT\r\n")
expireStrings.append(cookie + "=" + "EXPIRED;Path=/;Domain=" + host +
";Expires=Mon, 01-Jan-1990 00:00:00 GMT\r\n")
if len(pathList) > 2:
expireStrings.append(cookie + "=" + "EXPIRED;Path=/" + pathList[1] + ";Domain=" +
domain + ";Expires=Mon, 01-Jan-1990 00:00:00 GMT\r\n")
expireStrings.append(cookie + "=" + "EXPIRED;Path=/" + pathList[1] + ";Domain=" +
host + ";Expires=Mon, 01-Jan-1990 00:00:00 GMT\r\n")
return expireStrings
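# --- Added illustration (not part of the original sslstrip file): a hedged
# usage sketch. Header names are assumed to be lower-cased ('cookie'), as the
# methods above expect; the helper name and the sample values are hypothetical.
def _demo_cookie_cleaner():
    cleaner = CookieCleaner.getInstance()
    cleaner.setEnabled(True)
    headers = {'cookie': 'sessionid=abc123; csrftoken=xyz'}
    if not cleaner.isClean("GET", "10.0.0.5", "mail.example.com", headers):
        # First GET from this client/domain pair: build the Set-Cookie
        # expiration strings and remember the pair as cleaned.
        return cleaner.getExpireHeaders("GET", "10.0.0.5", "mail.example.com",
                                        headers, "/inbox/messages")
    return []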
|
woobe/h2o
|
refs/heads/master
|
py/testdir_rpy2/test_GLM_both.py
|
2
|
import unittest, time, sys
sys.path.extend(['.','..','py'])
import copy
print "Needs numpy, rpy2, and R installed. Run on 192.168.171-175"
# FIX! maybe should update to build_cloud_with_hosts to run on 171-175?
import h2o, h2o_cmd, h2o_glm, h2o_util, h2o_import as h2i
import numpy as np
from rpy2 import robjects as ro
# y is h2o style. start at 0
def glm_R_and_compare(self, csvPathname, family, formula, y, h2oResults=None):
# df = ro.DataFrame.from_csvfile(csvPathname, col_names=col_names, header=False)
df = ro.DataFrame.from_csvfile(csvPathname, header=False)
cn = ro.r.colnames(df)
print df
fit = ro.r.glm(formula=ro.r(formula), data=df, family=ro.r(family + '(link="logit")'))
gsummary = ro.r.summary(fit)
# print ro.r.summary(fit)
coef = ro.r.coef(fit)
# FIX! where do the GLM warnings come from
warningsR = []
interceptR = coef[0]
# NEW: why did I have to chop off the end of the R list?
cListR = coef[1:-1]
if h2oResults is not None: # create delta list
(warningsH2O, cListH2O, interceptH2O) = h2oResults
interceptDelta = abs(abs(interceptH2O) - abs(interceptR))
cDelta = [abs(abs(a) - abs(b)) for a,b in zip(cListH2O, cListR)]
else:
(warningsH2O, cListH2O, interceptH2O) = (None, None, None)
interceptDelta = None
cDelta = [None for a in cListR]
def printit(self,a,b,c,d):
pctDiff = abs(d/c)*100
print "%-20s %-20.5e %8s %5.2f%% %10s %-20.5e" % \
("R " + a + " " + b + ":", c, "pct. diff:", pctDiff, "abs diff:", d)
# self.assertLess(pctDiff,1,"Expect <1% difference between H2O and R coefficient/intercept")
print
printit(self, "intercept", "", interceptR, interceptDelta)
print "compare lengths cListH2O, cListR, cDelta:", len(cListH2O), len(cListR), len(cDelta)
print "clistH2O:", cListH2O
print "clistR:", cListR
print "cn:", cn
print "cDelta:", cDelta
for i,cValue in enumerate(cListR):
printit(self , "coefficient", cn[i], cValue, cDelta[i])
### print "\nDumping some raw R results (info already printed above)"
### print "coef:", ro.r.coef(fit)
# what each index gives
gsummaryIndexDesc = [
'call',
'terms',
'family',
'deviance',
'aic',
'contrasts',
'df.residual',
'null.deviance',
'df.null',
'iter',
'deviance.resid',
'coefficients',
'aliased',
'dispersion',
'df',
'cov.unscaled',
'cov.scaled',
]
whatIwant = [
'family',
'deviance',
'aic',
'df.residual',
'null.deviance',
'df.null',
'iter',
]
for i,v in enumerate(gsummary):
if i >= len(gsummaryIndexDesc):
print 'gsummary entry unexpected'
else:
d = gsummaryIndexDesc[i]
if d in whatIwant:
print "%s %s %15s %s" % ("R gsummary", i, d + ":\t", gsummary[i][0])
# to get possiblities from gsummary above, do this
# print "summary", ro.r.summary(gsummary)
# print "residuals", ro.r.residuals(fit)
# print "predict", ro.r.predict(fit)
# print "fitted", ro.r.fitted(fit)
# HMM! what about if H2O has dropped coefficients but R doesn't ...oh well!
# not sure how to get from the R vector to python list..try this
# theoretically we shouldn't have to worry about it? . But we strip the intercept out too.
return (warningsR, cListR, interceptR)
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.build_cloud(1,base_port=54400)
global SYNDATASETS_DIR
SYNDATASETS_DIR = h2o.make_syn_dir()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM_both(self):
h2o.beta_features = True
if (1==1):
csvFilenameList = [
('logreg', 'benign.csv', 'binomial', 3, 10),
# col is zero based
# FIX! what's wrong here? index error
## ('uis.dat', 'binomial', 8, 5, False),
## ('pros.dat', 'binomial', 1, 10, False),
## ('chdage.dat', 'binomial', 2, 5, True),
## ('icu.dat', 'binomial', 1, 10, False),
# how to ignore 6? '1,2,3,4,5', False),
## ('clslowbwt.dat', 'binomial', 7, 10, False),
# ('cgd.dat', 'gaussian', 12, 5, False),
# ('meexp.dat', 'gaussian', 3, 10, None),
]
else:
csvFilenameList = [
# leave out ID and birth weight
('logreg', 'benign.csv', 'gaussian', 3, 10),
(None, 'icu.dat', 'binomial', 1, 10),
# need to exclude col 0 (ID) and col 10 (bwt)
# but -x doesn't work..so do 2:9...range doesn't work? FIX!
(None, 'nhanes3.dat', 'binomial', 15, 10),
(None, 'lowbwt.dat', 'binomial', 1, 10),
(None, 'lowbwtm11.dat', 'binomial', 1, 10),
(None, 'meexp.dat', 'gaussian', 3, 10),
# FIX! does this one hang in R?
(None, 'nhanes3.dat', 'binomial', 15, 10),
(None, 'pbc.dat', 'gaussian', 1, 10),
(None, 'pharynx.dat', 'gaussian', 12, 10),
(None, 'uis.dat', 'binomial', 8, 10),
]
trial = 0
for (offset, csvFilename, family, y, timeoutSecs) in csvFilenameList:
# FIX! do something about this file munging
if offset:
csvPathname1 = offset + "/" + csvFilename
else:
csvPathname1 = 'logreg/umass_statdata/' + csvFilename
fullPathname = h2i.find_folder_and_filename('smalldata', csvPathname1, returnFullPath=True)
csvPathname2 = SYNDATASETS_DIR + '/' + csvFilename + '_2.csv'
h2o_util.file_clean_for_R(fullPathname, csvPathname2)
# we can inspect this to get the number of cols in the dataset (trust H2O here)
parseResult = h2i.import_parse(path=csvPathname2, schema='put', hex_key=csvFilename, timeoutSecs=10)
# we could specify key2 above but this is fine
destination_key = parseResult['destination_key']
inspect = h2o_cmd.runInspect(None, destination_key)
if h2o.beta_features:
num_cols = inspect['numCols']
num_rows = inspect['numRows']
else:
num_cols = inspect['num_cols']
num_rows = inspect['num_rows']
print "num_cols", num_cols, "num_rows", num_rows
## print h2o.dump_json(inspect)
# create formula and the x for H2O GLM
formula = "V" + str(y+1) + " ~ "
x = None
col_names = ""
for c in range(0,num_cols):
if csvFilename=='clslowbwt.dat' and c==6:
print "Not including col 6 for this dataset from x"
if csvFilename=='benign.csv' and (c==0 or c==1):
print "Not including col 0,1 for this dataset from x"
else:
# don't add the output col to the RHS of formula
if x is None:
col_names += "V" + str(c+1)
else:
col_names += ",V" + str(c+1)
if c!=y:
if x is None:
x = str(c)
formula += "V" + str(c+1)
else:
x += "," + str(c)
formula += "+V" + str(c+1)
print 'formula:', formula
print 'col_names:', col_names
print 'x:', x
if h2o.beta_features:
kwargs = {
'n_folds': 0,
'response': y,
# what about x?
'family': family,
'alpha': 0,
'lambda': 0,
'beta_epsilon': 1.0E-4,
'max_iter': 50 }
else:
kwargs = {
'n_folds': 0,
'y': y,
'x': x,
'family': family,
'alpha': 0,
'lambda': 1e-4,
'beta_epsilon': 1.0E-4,
'max_iter': 50 }
if csvFilename=='benign.csv':
kwargs['ignored_cols'] = '0,1'
if csvFilename=='clslowbwt.dat':
kwargs['ignored_cols'] = '6'
start = time.time()
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "glm end (w/check) on ", csvPathname2, 'took', time.time()-start, 'seconds'
h2oResults = h2o_glm.simpleCheckGLM(self, glm, None, prettyPrint=True, **kwargs)
# now do it thru R and compare
(warningsR, cListR, interceptR) = glm_R_and_compare(self, csvPathname2, family, formula, y, h2oResults=h2oResults)
trial += 1
print "\nTrial #", trial
if __name__ == '__main__':
h2o.unit_main()
|