| repo_name | ref | path | copies | content |
|---|---|---|---|---|
repotvsupertuga/tvsupertuga.repository
|
refs/heads/master
|
instal/script.module.resolveurl/lib/resolveurl/plugins/holavid.py
|
2
|
"""
OVERALL CREDIT TO:
t0mm0, Eldorado, VOINAGE, BSTRDMKR, tknorris, smokdpi, TheHighway
resolveurl XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from lib import helpers
from resolveurl import common
from resolveurl.resolver import ResolveUrl, ResolverError
class HolaVidResolver(ResolveUrl):
name = "holavid"
domains = ['holavid.com']
pattern = r'(?://|\.)(holavid\.com)/(?:embed-)?([0-9a-zA-Z]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
headers = {'User-Agent': common.FF_USER_AGENT}
html = self.net.http_GET(web_url, headers=headers).content
if html:
_srcs = re.search(r'sources\s*:\s*\[(.+?)\]', html)
if _srcs:
srcs = helpers.scrape_sources(_srcs.group(1), patterns=['''["'](?P<url>http[^"']+)'''], result_blacklist=['.m3u8'])
if srcs:
headers.update({'Referer': web_url})
return helpers.pick_source(srcs) + helpers.append_headers(headers)
raise ResolverError('Unable to locate link')
def get_url(self, host, media_id):
return self._default_get_url(host, media_id, template='https://{host}/embed-{media_id}.html')
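# Editor's note: a minimal usage sketch, not part of the original plugin. It assumes
# the resolveurl framework is importable (normally only inside Kodi) and uses a
# made-up media id purely for illustration.
if __name__ == '__main__':
    resolver = HolaVidResolver()
    print(resolver.get_url('holavid.com', 'abc123'))
    # -> https://holavid.com/embed-abc123.html
    # get_media_url() would then return the direct stream URL with the request
    # headers appended after a '|' separator, as Kodi's player expects,
    # e.g. 'https://.../video.mp4|User-Agent=...&Referer=...'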
|
nelango/ViralityAnalysis
|
refs/heads/master
|
model/lib/nltk/corpus/reader/rte.py
|
10
|
# Natural Language Toolkit: RTE Corpus Reader
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Ewan Klein <ewan@inf.ed.ac.uk>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Corpus reader for the Recognizing Textual Entailment (RTE) Challenge Corpora.
The files were taken from the RTE1, RTE2 and RTE3 datasets and were regularized.
Filenames are of the form rte*_dev.xml and rte*_test.xml. The latter are the
gold standard annotated files.
Each entailment corpus is a list of 'text'/'hypothesis' pairs. The following
example is taken from RTE3::
<pair id="1" entailment="YES" task="IE" length="short" >
<t>The sale was made to pay Yukos' US$ 27.5 billion tax bill,
Yuganskneftegaz was originally sold for US$ 9.4 billion to a little known
company Baikalfinansgroup which was later bought by the Russian
state-owned oil company Rosneft .</t>
<h>Baikalfinansgroup was sold to Rosneft.</h>
</pair>
In order to provide globally unique IDs for each pair, a new attribute
``challenge`` has been added to the root element ``entailment-corpus`` of each
file, taking values 1, 2 or 3. The GID is formatted 'm-n', where 'm' is the
challenge number and 'n' is the pair ID.
"""
from __future__ import unicode_literals
from nltk import compat
from nltk.corpus.reader.util import *
from nltk.corpus.reader.api import *
from nltk.corpus.reader.xmldocs import *
def norm(value_string):
"""
Normalize the string value in an RTE pair's ``value`` or ``entailment``
attribute as an integer (1, 0).
:param value_string: the label used to classify a text/hypothesis pair
:type value_string: str
:rtype: int
"""
valdict = {"TRUE": 1,
"FALSE": 0,
"YES": 1,
"NO": 0}
return valdict[value_string.upper()]
@compat.python_2_unicode_compatible
class RTEPair(object):
"""
Container for RTE text-hypothesis pairs.
The entailment relation is signalled by the ``value`` attribute in RTE1, and by
``entailment`` in RTE2 and RTE3. These both get mapped on to the ``value``
attribute of this class.
"""
def __init__(self, pair, challenge=None, id=None, text=None, hyp=None,
value=None, task=None, length=None):
"""
:param challenge: version of the RTE challenge (i.e., RTE1, RTE2 or RTE3)
:param id: identifier for the pair
:param text: the text component of the pair
:param hyp: the hypothesis component of the pair
:param value: classification label for the pair
:param task: attribute for the particular NLP task that the data was drawn from
:param length: attribute for the length of the text of the pair
"""
self.challenge = challenge
self.id = pair.attrib["id"]
self.gid = "%s-%s" % (self.challenge, self.id)
self.text = pair[0].text
self.hyp = pair[1].text
if "value" in pair.attrib:
self.value = norm(pair.attrib["value"])
elif "entailment" in pair.attrib:
self.value = norm(pair.attrib["entailment"])
else:
self.value = value
if "task" in pair.attrib:
self.task = pair.attrib["task"]
else:
self.task = task
if "length" in pair.attrib:
self.length = pair.attrib["length"]
else:
self.length = length
def __repr__(self):
if self.challenge:
return '<RTEPair: gid=%s-%s>' % (self.challenge, self.id)
else:
return '<RTEPair: id=%s>' % self.id
class RTECorpusReader(XMLCorpusReader):
"""
Corpus reader for corpora in RTE challenges.
This is just a wrapper around the XMLCorpusReader. See module docstring above for the expected
structure of input documents.
"""
def _read_etree(self, doc):
"""
Map the XML input into an RTEPair.
This uses the ``getiterator()`` method from the ElementTree package to
find all the ``<pair>`` elements.
:param doc: a parsed XML document
:rtype: list(RTEPair)
"""
try:
challenge = doc.attrib['challenge']
except KeyError:
challenge = None
return [RTEPair(pair, challenge=challenge)
for pair in doc.getiterator("pair")]
def pairs(self, fileids):
"""
Build a list of RTEPairs from an RTE corpus.
:param fileids: a list of RTE corpus fileids
:type fileids: list
:rtype: list(RTEPair)
"""
if isinstance(fileids, compat.string_types): fileids = [fileids]
return concat([self._read_etree(self.xml(fileid)) for fileid in fileids])
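# Editor's note: a short usage sketch, not part of the original module. It assumes
# the RTE corpus has been fetched with nltk.download('rte'); NLTK then exposes this
# reader as nltk.corpus.rte.
if __name__ == '__main__':
    from nltk.corpus import rte
    pairs = rte.pairs(['rte1_dev.xml', 'rte2_dev.xml', 'rte3_dev.xml'])
    sample = pairs[0]
    # gid combines the challenge number and the pair id, e.g. '1-1'; value is 1 or 0
    print(sample.gid)
    print(sample.value)
    print(sample.text)
    print(sample.hyp)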
|
jordanemedlock/psychtruths
|
refs/heads/master
|
temboo/Library/Foursquare/Venues/SearchVenues.py
|
5
|
# -*- coding: utf-8 -*-
###############################################################################
#
# SearchVenues
# Obtain a list of venues near the current location.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class SearchVenues(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the SearchVenues Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(SearchVenues, self).__init__(temboo_session, '/Library/Foursquare/Venues/SearchVenues')
def new_input_set(self):
return SearchVenuesInputSet()
def _make_result_set(self, result, path):
return SearchVenuesResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return SearchVenuesChoreographyExecution(session, exec_id, path)
class SearchVenuesInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the SearchVenues
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccuracyOfCoordinates(self, value):
"""
Set the value of the AccuracyOfCoordinates input for this Choreo. ((optional, integer) Accuracy of latitude and longitude, in meters. Currently, this parameter does not affect search results.)
"""
super(SearchVenuesInputSet, self)._set_input('AccuracyOfCoordinates', value)
def set_AltitudeAccuracy(self, value):
"""
Set the value of the AltitudeAccuracy input for this Choreo. ((optional, integer) Accuracy of the user's altitude, in meters. Currently, this parameter does not affect search results.)
"""
super(SearchVenuesInputSet, self)._set_input('AltitudeAccuracy', value)
def set_Altitude(self, value):
"""
Set the value of the Altitude input for this Choreo. ((optional, integer) Altitude of the user's location, in meters. Currently, this parameter does not affect search results.)
"""
super(SearchVenuesInputSet, self)._set_input('Altitude', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((conditional, string) Your Foursquare client ID, obtained after registering at Foursquare. Required unless using the OauthToken input.)
"""
super(SearchVenuesInputSet, self)._set_input('ClientID', value)
def set_ClientSecret(self, value):
"""
Set the value of the ClientSecret input for this Choreo. ((conditional, string) Your Foursquare client secret, obtained after registering at Foursquare. Required unless using the OauthToken input.)
"""
super(SearchVenuesInputSet, self)._set_input('ClientSecret', value)
def set_Intent(self, value):
"""
Set the value of the Intent input for this Choreo. ((optional, string) Indicates your intent when performing the search. Enter: checkin (default), match (requires Query and Latitude/Longitude to be provided).)
"""
super(SearchVenuesInputSet, self)._set_input('Intent', value)
def set_Latitude(self, value):
"""
Set the value of the Latitude input for this Choreo. ((required, decimal) The latitude point of the user's location.)
"""
super(SearchVenuesInputSet, self)._set_input('Latitude', value)
def set_Limit(self, value):
"""
Set the value of the Limit input for this Choreo. ((optional, integer) Number of results to return, up to 50.)
"""
super(SearchVenuesInputSet, self)._set_input('Limit', value)
def set_Longitude(self, value):
"""
Set the value of the Longitude input for this Choreo. ((required, decimal) The longitude point of the user's location.)
"""
super(SearchVenuesInputSet, self)._set_input('Longitude', value)
def set_OauthToken(self, value):
"""
Set the value of the OauthToken input for this Choreo. ((conditional, string) The Foursquare API Oauth token string. Required unless specifying the ClientID and ClientSecret.)
"""
super(SearchVenuesInputSet, self)._set_input('OauthToken', value)
def set_Query(self, value):
"""
Set the value of the Query input for this Choreo. ((optional, string) Your search string.)
"""
super(SearchVenuesInputSet, self)._set_input('Query', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Can be set to xml or json. Defaults to json.)
"""
super(SearchVenuesInputSet, self)._set_input('ResponseFormat', value)
class SearchVenuesResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the SearchVenues Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Foursquare. Corresponds to the ResponseFormat input. Defaults to JSON.)
"""
return self._output.get('Response', None)
class SearchVenuesChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return SearchVenuesResultSet(response, path)
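# Editor's note: a usage sketch, not part of the generated SDK file, showing the usual
# Temboo calling pattern. The account name, application key, token, and coordinates
# below are placeholders.
if __name__ == '__main__':
    from temboo.core.session import TembooSession
    session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
    choreo = SearchVenues(session)
    inputs = choreo.new_input_set()
    inputs.set_OauthToken('FOURSQUARE_OAUTH_TOKEN')
    inputs.set_Latitude('40.74')
    inputs.set_Longitude('-73.99')
    inputs.set_Query('coffee')
    results = choreo.execute_with_results(inputs)
    # The Response output mirrors the ResponseFormat input (JSON by default)
    print(results.get_Response())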
|
nathanielvarona/airflow
|
refs/heads/master
|
airflow/providers/microsoft/azure/example_dags/example_azure_cosmosdb.py
|
10
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This is only an example DAG to highlight usage of AzureCosmosDocumentSensor to detect
whether a document exists.
You can trigger this manually with `airflow dags trigger example_azure_cosmosdb_sensor`.
*Note: Make sure that connection `azure_cosmos_default` is properly set before running
this example.*
"""
from airflow import DAG
from airflow.providers.microsoft.azure.operators.azure_cosmos import AzureCosmosInsertDocumentOperator
from airflow.providers.microsoft.azure.sensors.azure_cosmos import AzureCosmosDocumentSensor
from airflow.utils import dates
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'email': ['airflow@example.com'],
'email_on_failure': False,
'email_on_retry': False,
}
with DAG(
dag_id='example_azure_cosmosdb_sensor',
default_args=default_args,
start_date=dates.days_ago(2),
doc_md=__doc__,
tags=['example'],
) as dag:
t1 = AzureCosmosDocumentSensor(
task_id='check_cosmos_file',
database_name='airflow_example_db',
collection_name='airflow_example_coll',
document_id='airflow_checkid',
azure_cosmos_conn_id='azure_cosmos_default',
)
t2 = AzureCosmosInsertDocumentOperator(
task_id='insert_cosmos_file',
database_name='airflow_example_db',
collection_name='new-collection',
document={"id": "someuniqueid", "param1": "value1", "param2": "value2"},
azure_cosmos_conn_id='azure_cosmos_default',
)
t1 >> t2
|
leoc/home-assistant
|
refs/heads/dev
|
homeassistant/components/browser.py
|
55
|
"""
Provides functionality to launch a web browser on the host machine.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/browser/
"""
import voluptuous as vol
DOMAIN = "browser"
SERVICE_BROWSE_URL = "browse_url"
ATTR_URL = 'url'
ATTR_URL_DEFAULT = 'https://www.google.com'
SERVICE_BROWSE_URL_SCHEMA = vol.Schema({
# pylint: disable=no-value-for-parameter
vol.Required(ATTR_URL, default=ATTR_URL_DEFAULT): vol.Url(),
})
def setup(hass, config):
"""Listen for browse_url events."""
import webbrowser
hass.services.register(DOMAIN, SERVICE_BROWSE_URL,
lambda service:
webbrowser.open(service.data[ATTR_URL]),
schema=SERVICE_BROWSE_URL_SCHEMA)
return True
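# Editor's note: a hedged usage sketch, not part of the original component. With the
# component set up, any code holding a `hass` reference can invoke the service:
#
#     hass.services.call(DOMAIN, SERVICE_BROWSE_URL, {ATTR_URL: 'https://example.com'})
#
# Calling it without a 'url' key is also valid; the schema then substitutes
# ATTR_URL_DEFAULT before the handler runs.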
|
scifiswapnil/Project-LoCatr
|
refs/heads/master
|
lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/sjisprober.py
|
1776
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(SJISSMModel)
self._mDistributionAnalyzer = SJISDistributionAnalysis()
self._mContextAnalyzer = SJISContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return self._mContextAnalyzer.get_charset_name()
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
- charLen], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
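# Editor's note: an illustrative sketch, not part of the original module. In practice
# this prober is driven by chardet's UniversalDetector, which feeds the same bytes to
# every registered prober and reports the most confident answer; the Japanese sample
# below is only an example, and short inputs may be detected with low confidence.
if __name__ == '__main__':
    import chardet
    sample = u'\u65e5\u672c\u8a9e\u306e\u6587\u5b57\u30b3\u30fc\u30c9\u691c\u51fa'.encode('shift_jis')
    print(chardet.detect(sample))
    # -> a dict like {'encoding': 'SHIFT_JIS', 'confidence': 0.99} when SJIS wins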
|
richstoner/incf_engine
|
refs/heads/master
|
virtuoso/tovirtuoso.py
|
1
|
__author__ = 'satra'
import hashlib
import os
import rdflib
import requests
def hash_infile(afile, crypto=hashlib.sha512, chunk_len=8192):
""" Computes hash of a file using 'crypto' module"""
hex = None
if os.path.isfile(afile):
crypto_obj = crypto()
fp = open(afile, 'rb')
while True:
data = fp.read(chunk_len)
if not data:
break
crypto_obj.update(data)
fp.close()
hex = crypto_obj.hexdigest()
return hex
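# Editor's note: a brief usage sketch, not part of the original script. hash_infile
# reads the file in fixed-size chunks, so it also handles large images; the path below
# follows the script's own naming pattern and is illustrative only.
#
#     sha = hash_infile('/adhd200/0010001_anat_1.nii.gz')                    # SHA-512 hex digest
#     md5 = hash_infile('/adhd200/0010001_anat_1.nii.gz', crypto=hashlib.md5)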
query1 = """
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX fs: <http://freesurfer.net/fswiki/terms/>
PREFIX nidm: <http://nidm.nidash.org/terms/>
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX niiri: <http://nidm.nidash.org/iri/>
PREFIX obo: <http://purl.obolibrary.org/obo/>
PREFIX nif: <http://neurolex.org/wiki/>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX crypto: <http://www.w3.org/2000/10/swap/crypto#>
select ?id { ?c1 fs:subject_id ?id }
"""
query2 = """
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX fs: <http://freesurfer.net/fswiki/terms/>
PREFIX nidm: <http://nidm.nidash.org/terms/>
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX niiri: <http://nidm.nidash.org/iri/>
PREFIX obo: <http://purl.obolibrary.org/obo/>
PREFIX nif: <http://neurolex.org/wiki/>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX crypto: <http://www.w3.org/2000/10/swap/crypto#>
construct {
?c a prov:Agent;
nidm:id "%s";
nidm:Age ?age ;
nidm:Verbal_IQ ?viq ;
nidm:DX ?dx .
?e1 a prov:Entity;
prov:wasAttributedTo ?c;
a nif:nlx_inv_20090243;
crypto_info
prov:location "s3://adhd200/data/%s_anat_1.nii.gz" .
}
where
{?c fs:subject_id "%s" .
?c prov:hadMember ?e1 .
?e1 prov:label ?label .
FILTER(regex(?label, "001.mgz"))
SERVICE <http://computor.mit.edu:8890/sparql> {
?c2 nidm:ID "%s" .
?c2 nidm:Age ?age .
OPTIONAL { ?c2 nidm:Verbal_IQ ?viq } .
OPTIONAL { ?c2 nidm:DX ?dx} .
}
}
"""
endpoint1 = 'http://metadata.incf.org:8890/sparql'
endpoint2 = 'http://192.168.100.30:8890/sparql'
g = rdflib.ConjunctiveGraph('SPARQLStore')
g.open(endpoint1)
results = g.query(query1)
count = 0
for row in results:
count += 1
sid = str(row[0])
if len(sid) < 7 or not sid.startswith('001000'):
continue
query = query2 % (sid, sid, sid, sid)
filename = '/adhd200/%s_anat_1.nii.gz' % sid
if os.path.exists(filename):
sha = hash_infile(filename)
crypto_info = """
crypto:sha "%s";
prov:location "http://192.168.100.20/file/%s_anat_1.nii.gz";
"""
query = query.replace('crypto_info', crypto_info % (sha, sid))
else:
query = query.replace('crypto_info', '')
#print query
sidgraph = g.query(query)
print sidgraph.serialize(format='turtle').replace('nidm.nidash.org/iri',
'iri.nidash.org')
# session defaults
session = requests.Session()
session.headers = {'Accept':'text/html'} # HTML from SELECT queries
query = """
INSERT IN GRAPH <http://sfndemo.nidm.org>
{
%s
}
""" % sidgraph.serialize(format='nt').replace('nidm.nidash.org/iri',
'iri.nidash.org')
#print query
data = {'query': query}
result = session.post(endpoint2, data=data)
#print result
t1_query = """
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX nif: <http://neurolex.org/wiki/>
PREFIX crypto: <http://www.w3.org/2000/10/swap/crypto#>
select ?t1path ?sha where
{?e a prov:Entity;
a nif:nlx_inv_20090243;
crypto:sha ?sha;
prov:location ?t1path .
FILTER(regex(?t1path, "http*"))
}
"""
for row in sidgraph.graph.query(t1_query):
print row
|
openstack/rally
|
refs/heads/master
|
tests/unit/task/test_engine.py
|
1
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the Test engine."""
import collections
import itertools
import threading
from unittest import mock
from rally.common import objects
from rally import consts
from rally import exceptions
from rally.task import context
from rally.task import engine
from rally.task import scenario
from rally.task import task_cfg
from tests.unit import test
class MyException(exceptions.RallyException):
msg_fmt = "MyException"
class TaskEngineTestCase(test.TestCase):
@staticmethod
def _make_workload(name, args=None, description=None, contexts=None,
sla=None, runner=None, hooks=None, position=0):
return {"uuid": "foo",
"name": name,
"position": position,
"description": description,
"args": args,
"contexts": contexts or {},
"runner_type": runner[0] if runner else "serial",
"runner": runner[1] if runner else {},
"sla": sla or {},
"hooks": hooks or []}
def test_init(self):
config = mock.MagicMock()
task = mock.MagicMock()
eng = engine.TaskEngine(config, task, mock.Mock())
self.assertEqual(eng.config, config)
self.assertEqual(eng.task, task)
@mock.patch("jsonschema.validate")
def test_validate(self, mock_validate):
config = mock.MagicMock()
eng = engine.TaskEngine(config, mock.MagicMock(),
mock.Mock())
mock_validate = mock.MagicMock()
eng._validate_config_syntax = mock_validate.syntax
eng._validate_config_platforms = mock_validate.platforms
eng._validate_config_semantic = mock_validate.semantic
eng.validate()
mock_validate.syntax.assert_called_once_with(config)
mock_validate.platforms.assert_called_once_with(config)
mock_validate.semantic.assert_called_once_with(config)
def test_validate__wrong_syntax(self):
task = mock.MagicMock()
eng = engine.TaskEngine(mock.MagicMock(), task, mock.Mock())
e = exceptions.InvalidTaskConfig(name="foo", pos=0, config="",
reason="foo")
eng._validate_config_syntax = mock.MagicMock(side_effect=e)
eng._validate_config_platforms = mock.Mock()
actual_e = self.assertRaises(exceptions.InvalidTaskException,
eng.validate)
self.assertEqual(e, actual_e)
self.assertTrue(task.set_failed.called)
# the next validation step should not be processed
self.assertFalse(eng._validate_config_platforms.called)
def test_validate__wrong_semantic(self):
task = mock.MagicMock()
eng = engine.TaskEngine(mock.MagicMock(), task, mock.Mock())
e = exceptions.InvalidTaskConfig(name="foo", pos=0, config="",
reason="foo")
eng._validate_config_syntax = mock.MagicMock()
eng._validate_config_platforms = mock.MagicMock()
eng._validate_config_semantic = mock.MagicMock(side_effect=e)
actual_e = self.assertRaises(exceptions.InvalidTaskException,
eng.validate)
self.assertEqual(e, actual_e)
self.assertTrue(task.set_failed.called)
# all validation steps are called, which means that the last one failed
self.assertTrue(eng._validate_config_syntax)
self.assertTrue(eng._validate_config_platforms)
self.assertTrue(eng._validate_config_semantic)
@mock.patch("rally.task.engine.scenario.Scenario.get")
@mock.patch("rally.task.sla.SLA.validate")
@mock.patch("rally.task.hook.HookTrigger.validate")
@mock.patch("rally.task.hook.HookAction.validate")
@mock.patch("rally.task.engine.runner.ScenarioRunner.validate")
@mock.patch("rally.task.engine.context.Context.validate")
def test__validate_workload(
self, mock_context_validate,
mock_scenario_runner_validate,
mock_hook_action_validate,
mock_hook_trigger_validate,
mock_sla_validate,
mock_scenario_get):
mock_context_validate.return_value = []
mock_sla_validate.return_value = []
mock_hook_action_validate.return_value = []
mock_hook_trigger_validate.return_value = []
default_context = {"foo": "foo_conf"}
scenario_cls = mock_scenario_get.return_value
scenario_cls.get_platform.return_value = "default"
scenario_cls.get_default_context.return_value = default_context
scenario_name = "Foo.bar"
runner_type = "MegaRunner"
hook_conf = {"action": ("c", "c_args"),
"trigger": ("d", "d_args")}
workload = {"name": scenario_name,
"runner_type": runner_type,
"runner": {},
"contexts": {"a": "a_conf"},
"hooks": [hook_conf],
"sla": {"foo_sla": "sla_conf"},
"position": 2}
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock(),
mock.Mock())
eng._validate_workload(workload)
mock_scenario_runner_validate.assert_called_once_with(
name=runner_type, context=None, config=None,
plugin_cfg={}, vtype=None)
self.assertEqual([mock.call(name="a",
context=None,
config=None,
plugin_cfg="a_conf",
vtype=None),
mock.call(name="foo",
context=None,
config=None,
plugin_cfg="foo_conf",
allow_hidden=True,
vtype=None)],
mock_context_validate.call_args_list)
mock_sla_validate.assert_called_once_with(
config=None, context=None,
name="foo_sla", plugin_cfg="sla_conf", vtype=None)
mock_hook_action_validate.assert_called_once_with(
config=None, context=None, name="c", plugin_cfg="c_args",
vtype=None)
mock_hook_trigger_validate.assert_called_once_with(
config=None, context=None, name="d", plugin_cfg="d_args",
vtype=None)
@mock.patch("rally.task.engine.json.dumps")
@mock.patch("rally.task.engine.scenario.Scenario.get")
@mock.patch("rally.task.engine.runner.ScenarioRunner.validate")
def test___validate_workload__wrong_runner(
self, mock_scenario_runner_validate,
mock_scenario_get, mock_dumps):
mock_dumps.return_value = "<JSON>"
mock_scenario_runner_validate.return_value = [
"There is no such runner"]
scenario_cls = mock_scenario_get.return_value
scenario_cls.get_default_context.return_value = {}
workload = self._make_workload(name="sca", runner=("b", {}))
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock(),
mock.Mock())
e = self.assertRaises(exceptions.InvalidTaskConfig,
eng._validate_workload, workload)
self.assertEqual("Input task is invalid!\n\nSubtask sca[0] has wrong "
"configuration\nSubtask configuration:\n"
"<JSON>\n\nReason(s):\n"
" There is no such runner", e.format_message())
@mock.patch("rally.task.engine.json.dumps")
@mock.patch("rally.task.engine.scenario.Scenario.get")
@mock.patch("rally.task.engine.context.Context.validate")
def test__validate_config_syntax__wrong_context(
self, mock_context_validate, mock_scenario_get, mock_dumps):
mock_dumps.return_value = "<JSON>"
mock_context_validate.return_value = ["context_error"]
scenario_cls = mock_scenario_get.return_value
scenario_cls.get_default_context.return_value = {}
mock_task_instance = mock.MagicMock()
mock_task_instance.subtasks = [{"workloads": [
self._make_workload(name="sca"),
self._make_workload(name="sca", position=1,
contexts={"a": "a_conf"})
]}]
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock(),
mock.Mock())
e = self.assertRaises(exceptions.InvalidTaskConfig,
eng._validate_config_syntax, mock_task_instance)
self.assertEqual("Input task is invalid!\n\nSubtask sca[1] has wrong "
"configuration\nSubtask configuration:\n<JSON>\n\n"
"Reason(s):\n context_error", e.format_message())
@mock.patch("rally.task.engine.json.dumps")
@mock.patch("rally.task.engine.scenario.Scenario.get")
@mock.patch("rally.task.sla.SLA.validate")
def test__validate_config_syntax__wrong_sla(
self, mock_sla_validate, mock_scenario_get, mock_dumps):
mock_dumps.return_value = "<JSON>"
mock_sla_validate.return_value = ["sla_error"]
scenario_cls = mock_scenario_get.return_value
scenario_cls.get_default_context.return_value = {}
mock_task_instance = mock.MagicMock()
mock_task_instance.subtasks = [{"workloads": [
self._make_workload(name="sca"),
self._make_workload(name="sca", position=1,
sla={"foo_sla": "sla_conf"})
]}]
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock(),
mock.Mock())
e = self.assertRaises(exceptions.InvalidTaskConfig,
eng._validate_config_syntax, mock_task_instance)
self.assertEqual("Input task is invalid!\n\n"
"Subtask sca[1] has wrong configuration\n"
"Subtask configuration:\n<JSON>\n\n"
"Reason(s):\n sla_error", e.format_message())
@mock.patch("rally.task.engine.json.dumps")
@mock.patch("rally.task.engine.scenario.Scenario.get")
@mock.patch("rally.task.hook.HookAction.validate")
@mock.patch("rally.task.hook.HookTrigger.validate")
def test__validate_config_syntax__wrong_hook(
self, mock_hook_trigger_validate,
mock_hook_action_validate,
mock_scenario_get, mock_dumps):
mock_dumps.return_value = "<JSON>"
mock_hook_trigger_validate.return_value = []
mock_hook_action_validate.return_value = ["hook_error"]
scenario_cls = mock_scenario_get.return_value
scenario_cls.get_default_context.return_value = {}
mock_task_instance = mock.MagicMock()
hook_conf = {"action": ("c", "c_args"),
"trigger": ("d", "d_args")}
mock_task_instance.subtasks = [{"workloads": [
self._make_workload(name="sca"),
self._make_workload(name="sca", position=1,
hooks=[hook_conf])
]}]
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock(),
mock.Mock())
e = self.assertRaises(exceptions.InvalidTaskConfig,
eng._validate_config_syntax, mock_task_instance)
self.assertEqual("Input task is invalid!\n\n"
"Subtask sca[1] has wrong configuration\n"
"Subtask configuration:\n<JSON>\n\n"
"Reason(s):\n hook_error", e.format_message())
@mock.patch("rally.task.engine.json.dumps")
@mock.patch("rally.task.engine.scenario.Scenario.get")
@mock.patch("rally.task.hook.HookTrigger.validate")
@mock.patch("rally.task.hook.HookAction.validate")
def test__validate_config_syntax__wrong_trigger(
self, mock_hook_action_validate,
mock_hook_trigger_validate,
mock_scenario_get, mock_dumps):
mock_dumps.return_value = "<JSON>"
mock_hook_trigger_validate.return_value = ["trigger_error"]
mock_hook_action_validate.return_value = []
scenario_cls = mock_scenario_get.return_value
scenario_cls.get_default_context.return_value = {}
mock_task_instance = mock.MagicMock()
hook_conf = {"action": ("c", "c_args"),
"trigger": ("d", "d_args")}
mock_task_instance.subtasks = [{"workloads": [
self._make_workload(name="sca"),
self._make_workload(name="sca", position=1,
hooks=[hook_conf])
]}]
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock(),
mock.Mock())
e = self.assertRaises(exceptions.InvalidTaskConfig,
eng._validate_config_syntax, mock_task_instance)
self.assertEqual("Input task is invalid!\n\n"
"Subtask sca[1] has wrong configuration\n"
"Subtask configuration:\n<JSON>\n\n"
"Reason(s):\n trigger_error", e.format_message())
@mock.patch("rally.task.engine.context.ContextManager.cleanup")
@mock.patch("rally.task.engine.context.ContextManager.setup")
def test__validate_config_semantic(self, mock_context_manager_setup,
mock_context_manager_cleanup):
env = mock.MagicMock(uuid="env_uuid")
env.check_health.return_value = {
"foo": {"available": True, "message": ""},
"bar": {"available": True, "message": ""}
}
@scenario.configure("SomeScen.scenario")
class SomeScen(scenario.Scenario):
def run(self):
pass
mock_task_instance = mock.MagicMock()
wconf1 = self._make_workload(name="SomeScen.scenario")
wconf2 = self._make_workload(name="SomeScen.scenario",
position=1)
subtask1 = {"workloads": [wconf1, wconf2]}
wconf3 = self._make_workload(name="SomeScen.scenario",
position=2)
subtask2 = {"workloads": [wconf3]}
mock_task_instance.subtasks = [subtask1, subtask2]
fake_task = mock.MagicMock()
eng = engine.TaskEngine(mock_task_instance, fake_task, env)
eng._validate_config_semantic(mock_task_instance)
env.check_health.return_value = {
"foo": {"available": True, "message": ""},
"bar": {"available": False, "message": "", "traceback": "AAAA"}
}
self.assertRaises(exceptions.ValidationError,
eng._validate_config_semantic,
mock_task_instance)
@mock.patch("rally.task.engine.TaskEngine._validate_workload")
def test__validate_config_platforms(self, mock__validate_workload):
foo_cred = {"admin": "admin", "users": ["user1"]}
env = mock.MagicMock(data={
"platforms": {
"foo": {
"platform_name": "foo", "platform_data": foo_cred
}
}
})
workload1 = "workload1"
workload2 = "workload2"
subtasks = [{"workloads": [workload1]},
{"workloads": [workload2]}]
config = mock.Mock(subtasks=subtasks)
eng = engine.TaskEngine({}, mock.MagicMock(), env)
eng._validate_config_platforms(config)
self.assertEqual(
[mock.call(w, vtype="platform",
vcontext={"platforms": {"foo": foo_cred},
"task": eng.task})
for w in (workload1, workload2)],
mock__validate_workload.call_args_list)
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.ResultConsumer")
@mock.patch("rally.task.engine.context.ContextManager.cleanup")
@mock.patch("rally.task.engine.context.ContextManager.setup")
@mock.patch("rally.task.engine.scenario.Scenario")
@mock.patch("rally.task.engine.runner.ScenarioRunner")
def test_run__update_status(
self, mock_scenario_runner, mock_scenario,
mock_context_manager_setup, mock_context_manager_cleanup,
mock_result_consumer, mock_task_get_status):
task = mock.MagicMock()
mock_task_get_status.return_value = consts.TaskStatus.ABORTING
eng = engine.TaskEngine(mock.MagicMock(), task, mock.Mock())
eng.run()
task.update_status.assert_has_calls([
mock.call(consts.TaskStatus.RUNNING),
mock.call(consts.TaskStatus.FINISHED)
])
@mock.patch("rally.task.engine.objects.task.Task.get_status")
@mock.patch("rally.task.engine.LOG")
@mock.patch("rally.task.engine.ResultConsumer")
@mock.patch("rally.task.engine.context.Context")
@mock.patch("rally.task.engine.scenario.Scenario")
@mock.patch("rally.task.engine.runner.ScenarioRunner")
@mock.patch("rally.task.engine.context.ContextManager.cleanup")
@mock.patch("rally.task.engine.context.ContextManager.setup")
def test_run_exception_is_logged(
self, mock_context_manager_setup, mock_context_manager_cleanup,
mock_scenario_runner, mock_scenario, mock_context,
mock_result_consumer, mock_log, mock_task_get_status):
scenario_cls = mock_scenario.get.return_value
scenario_cls.get_default_context.return_value = {}
context_cls = mock_context.get.return_value
context_cls.get_fullname.return_value = "context_a"
mock_context_manager_setup.side_effect = Exception
mock_result_consumer.is_task_in_aborting_status.return_value = False
mock_task_instance = mock.MagicMock()
mock_task_instance.subtasks = [{
"title": "foo",
"description": "Do not launch it!!",
"contexts": {},
"workloads": [
self._make_workload(name="a.task", description="foo",
contexts={"context_a": {"a": 1}}),
self._make_workload(name="a.task", description="foo",
contexts={"context_a": {"b": 2}},
position=2)]}]
eng = engine.TaskEngine(mock_task_instance, mock.MagicMock(),
mock.MagicMock())
eng.run()
self.assertEqual(2, mock_log.exception.call_count)
@mock.patch("rally.task.engine.ResultConsumer")
@mock.patch("rally.task.engine.context.ContextManager.cleanup")
@mock.patch("rally.task.engine.context.ContextManager.setup")
@mock.patch("rally.task.engine.scenario.Scenario")
@mock.patch("rally.task.engine.runner.ScenarioRunner")
def test_run__task_soft_aborted(
self, mock_scenario_runner, mock_scenario,
mock_context_manager_setup, mock_context_manager_cleanup,
mock_result_consumer):
scenario_cls = mock_scenario.get.return_value
scenario_cls.get_platform.return_value = "openstack"
scenario_cls.get_info.return_value = {"title": ""}
task = mock.MagicMock()
mock_result_consumer.is_task_in_aborting_status.side_effect = [False,
False,
True]
config = task_cfg.TaskConfig({
"a.task": [{"runner": {"type": "a", "b": 1},
"description": "foo"}],
"b.task": [{"runner": {"type": "a", "b": 1},
"description": "bar"}],
"c.task": [{"runner": {"type": "a", "b": 1},
"description": "xxx"}]
})
fake_runner_cls = mock.MagicMock()
fake_runner = mock.MagicMock()
fake_runner_cls.return_value = fake_runner
mock_scenario_runner.get.return_value = fake_runner_cls
eng = engine.TaskEngine(config, task, mock.MagicMock())
eng.run()
self.assertEqual(2, fake_runner.run.call_count)
self.assertEqual(mock.call(consts.TaskStatus.ABORTED),
task.update_status.mock_calls[-1])
subtask_obj = task.add_subtask.return_value
subtask_obj.update_status.assert_has_calls((
mock.call(consts.SubtaskStatus.FINISHED),
mock.call(consts.SubtaskStatus.FINISHED),
mock.call(consts.SubtaskStatus.ABORTED),
))
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.ResultConsumer")
@mock.patch("rally.task.engine.context.ContextManager.cleanup")
@mock.patch("rally.task.engine.context.ContextManager.setup")
@mock.patch("rally.task.engine.scenario.Scenario")
@mock.patch("rally.task.engine.runner.ScenarioRunner")
def test_run__task_aborted(
self, mock_scenario_runner, mock_scenario,
mock_context_manager_setup, mock_context_manager_cleanup,
mock_result_consumer, mock_task_get_status):
task = mock.MagicMock(spec=objects.Task)
config = task_cfg.TaskConfig({
"a.task": [{"runner": {"type": "a", "b": 1}}],
"b.task": [{"runner": {"type": "a", "b": 1}}],
"c.task": [{"runner": {"type": "a", "b": 1}}]
})
fake_runner_cls = mock.MagicMock()
fake_runner = mock.MagicMock()
fake_runner_cls.return_value = fake_runner
mock_task_get_status.return_value = consts.TaskStatus.SOFT_ABORTING
mock_scenario_runner.get.return_value = fake_runner_cls
eng = engine.TaskEngine(config, task, mock.Mock())
eng.run()
self.assertEqual(mock.call(consts.TaskStatus.ABORTED),
task.update_status.mock_calls[-1])
subtask_obj = task.add_subtask.return_value
subtask_obj.update_status.assert_called_once_with(
consts.SubtaskStatus.ABORTED)
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.ResultConsumer")
@mock.patch("rally.task.engine.context.ContextManager.cleanup")
@mock.patch("rally.task.engine.context.ContextManager.setup")
@mock.patch("rally.task.engine.scenario.Scenario")
@mock.patch("rally.task.engine.runner.ScenarioRunner")
def test_run__subtask_crashed(
self, mock_scenario_runner, mock_scenario,
mock_context_manager_setup, mock_context_manager_cleanup,
mock_result_consumer, mock_task_get_status):
task = mock.MagicMock(spec=objects.Task)
subtask_obj = task.add_subtask.return_value
subtask_obj.add_workload.side_effect = MyException()
mock_result_consumer.is_task_in_aborting_status.return_value = False
config = task_cfg.TaskConfig({
"a.task": [{"runner": {"type": "a", "b": 1}}],
"b.task": [{"runner": {"type": "a", "b": 1}}],
"c.task": [{"runner": {"type": "a", "b": 1}}]
})
fake_runner_cls = mock.MagicMock()
fake_runner = mock.MagicMock()
fake_runner_cls.return_value = fake_runner
mock_scenario_runner.get.return_value = fake_runner_cls
eng = engine.TaskEngine(config, task, mock.Mock())
self.assertRaises(MyException, eng.run)
task.update_status.assert_has_calls((
mock.call(consts.TaskStatus.RUNNING),
mock.call(consts.TaskStatus.CRASHED),
))
subtask_obj.update_status.assert_called_once_with(
consts.SubtaskStatus.CRASHED)
def test__prepare_context(self):
@context.configure("test1", 1, platform="testing")
class TestContext1(context.Context):
pass
self.addCleanup(TestContext1.unregister)
@context.configure("test2", 2, platform="testing")
class TestContext2(context.Context):
pass
self.addCleanup(TestContext2.unregister)
@scenario.configure("test_ctx.test", platform="testing",
context={"test1@testing": {"a": 1}})
class TestScenario(scenario.Scenario):
pass
self.addCleanup(TestScenario.unregister)
task = mock.MagicMock()
name = "test_ctx.test"
context_config = {"test1": 1, "test2": 2}
env = mock.MagicMock()
eng = engine.TaskEngine({}, task, env)
result = eng._prepare_context(context_config, name, "foo_uuid")
expected_result = {
"task": task,
"owner_id": "foo_uuid",
"scenario_name": name,
"config": {"test1@testing": 1, "test2@testing": 2},
"env": env.data
}
self.assertEqual(expected_result, result)
class ResultConsumerTestCase(test.TestCase):
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.ResultConsumer.wait_and_abort")
@mock.patch("rally.task.sla.SLAChecker")
def test_consume_results(
self, mock_sla_checker, mock_result_consumer_wait_and_abort,
mock_task_get_status):
mock_sla_instance = mock.MagicMock()
mock_sla_checker.return_value = mock_sla_instance
mock_task_get_status.return_value = consts.TaskStatus.RUNNING
workload_cfg = {"fake": 2, "hooks": []}
task = mock.MagicMock()
subtask = mock.Mock(spec=objects.Subtask)
workload = mock.Mock(spec=objects.Workload)
runner = mock.MagicMock()
results = [
[{"duration": 1, "timestamp": 3}],
[{"duration": 2, "timestamp": 2}]
]
runner.result_queue = collections.deque(results)
runner.event_queue = collections.deque()
ctx_manager = mock.MagicMock()
with engine.ResultConsumer(workload_cfg, task=task, subtask=subtask,
workload=workload, runner=runner,
abort_on_sla_failure=False,
ctx_manager=ctx_manager) as consumer_obj:
pass
mock_sla_instance.add_iteration.assert_has_calls([
mock.call({"duration": 1, "timestamp": 3}),
mock.call({"duration": 2, "timestamp": 2})])
self.assertEqual([{"duration": 2, "timestamp": 2},
{"duration": 1, "timestamp": 3}],
consumer_obj.results)
@mock.patch("rally.task.hook.HookExecutor")
@mock.patch("rally.task.engine.LOG")
@mock.patch("rally.task.engine.time.time")
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.ResultConsumer.wait_and_abort")
@mock.patch("rally.task.sla.SLAChecker")
def test_consume_results_no_iteration(
self, mock_sla_checker, mock_result_consumer_wait_and_abort,
mock_task_get_status, mock_time, mock_log, mock_hook_executor):
mock_time.side_effect = [0, 1]
mock_sla_instance = mock.MagicMock()
mock_sla_results = mock.MagicMock()
mock_sla_checker.return_value = mock_sla_instance
mock_sla_instance.results.return_value = mock_sla_results
mock_task_get_status.return_value = consts.TaskStatus.RUNNING
workload_cfg = {"fake": 2, "hooks": []}
task = mock.MagicMock()
subtask = mock.Mock(spec=objects.Subtask)
workload = mock.Mock(spec=objects.Workload)
runner = mock.MagicMock()
results = []
runner.result_queue = collections.deque(results)
runner.event_queue = collections.deque()
ctx_manager = mock.MagicMock()
with engine.ResultConsumer(workload_cfg, task=task, subtask=subtask,
workload=workload, runner=runner,
abort_on_sla_failure=False,
ctx_manager=ctx_manager):
pass
self.assertFalse(workload.add_workload_data.called)
workload.set_results.assert_called_once_with(
full_duration=1, sla_results=mock_sla_results, load_duration=0,
start_time=None,
contexts_results=ctx_manager.contexts_results())
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.ResultConsumer.wait_and_abort")
@mock.patch("rally.task.sla.SLAChecker")
def test_consume_results_sla_failure_abort(
self, mock_sla_checker, mock_result_consumer_wait_and_abort,
mock_task_get_status):
workload_cfg = {"fake": 2, "hooks": []}
task = mock.MagicMock()
subtask = mock.Mock(spec=objects.Subtask)
workload = mock.Mock(spec=objects.Workload)
runner = mock.MagicMock()
runner.result_queue = collections.deque(
[[{"duration": 1, "timestamp": 1},
{"duration": 2, "timestamp": 2}]] * 4)
iteration_count = len(list(
itertools.chain(*runner.result_queue)
))
mock_sla_instance = mock.MagicMock()
mock_sla_checker.return_value = mock_sla_instance
mock_sla_instance.add_iteration.side_effect = [
i < 3 for i in range(iteration_count)
]
ctx_manager = mock.MagicMock()
with engine.ResultConsumer(workload_cfg, task=task, subtask=subtask,
workload=workload, runner=runner,
abort_on_sla_failure=True,
ctx_manager=ctx_manager):
pass
self.assertTrue(runner.abort.called)
task.update_status.assert_called_once_with(
consts.TaskStatus.SOFT_ABORTING)
@mock.patch("rally.task.hook.HookExecutor")
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.threading.Thread")
@mock.patch("rally.task.engine.threading.Event")
@mock.patch("rally.task.sla.SLAChecker")
def test_consume_results_abort_manually(self, mock_sla_checker,
mock_event, mock_thread,
mock_task_get_status,
mock_hook_executor):
runner = mock.MagicMock(result_queue=False)
is_done = mock.MagicMock()
is_done.isSet.side_effect = (False, True)
task = mock.MagicMock()
mock_task_get_status.return_value = consts.TaskStatus.ABORTED
subtask = mock.Mock(spec=objects.Subtask)
workload = mock.Mock(spec=objects.Workload)
workload_cfg = {"fake": 2, "hooks": []}
mock_hook_executor_instance = mock_hook_executor.return_value
ctx_manager = mock.MagicMock()
with engine.ResultConsumer(workload_cfg, task=task, subtask=subtask,
workload=workload, runner=runner,
abort_on_sla_failure=True,
ctx_manager=ctx_manager):
pass
mock_sla_checker.assert_called_once_with(workload_cfg)
mock_hook_executor.assert_called_once_with(workload_cfg, task)
self.assertFalse(mock_hook_executor_instance.on_iteration.called)
mocked_set_aborted = mock_sla_checker.return_value.set_aborted_manually
mocked_set_aborted.assert_called_once_with()
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.sla.SLAChecker")
def test_consume_results_sla_failure_continue(self, mock_sla_checker,
mock_task_get_status):
mock_sla_instance = mock.MagicMock()
mock_sla_checker.return_value = mock_sla_instance
mock_task_get_status.return_value = consts.TaskStatus.CRASHED
mock_sla_instance.add_iteration.side_effect = [True, True, False,
False]
workload_cfg = {"fake": 2, "hooks": []}
task = mock.MagicMock()
subtask = mock.Mock(spec=objects.Subtask)
workload = mock.Mock(spec=objects.Workload)
runner = mock.MagicMock()
runner.result_queue = collections.deque(
[[{"duration": 1, "timestamp": 4}]] * 4)
runner.event_queue = collections.deque()
ctx_manager = mock.MagicMock()
with engine.ResultConsumer(workload_cfg, task=task, subtask=subtask,
workload=workload, runner=runner,
ctx_manager=ctx_manager,
abort_on_sla_failure=False):
pass
self.assertEqual(0, runner.abort.call_count)
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.threading.Thread")
@mock.patch("rally.task.engine.threading.Event")
@mock.patch("rally.task.sla.SLAChecker")
def test_consume_results_with_unexpected_failure(self, mock_sla_checker,
mock_event, mock_thread,
mock_task_get_status):
mock_sla_instance = mock.MagicMock()
mock_sla_checker.return_value = mock_sla_instance
workload_cfg = {"fake": 2, "hooks": []}
task = mock.MagicMock()
subtask = mock.Mock(spec=objects.Subtask)
workload = mock.Mock(spec=objects.Workload)
runner = mock.MagicMock()
runner.result_queue = collections.deque([1])
runner.event_queue = collections.deque()
ctx_manager = mock.MagicMock()
exc = MyException()
try:
with engine.ResultConsumer(workload_cfg, task=task,
subtask=subtask, workload=workload,
runner=runner, ctx_manager=ctx_manager,
abort_on_sla_failure=False):
raise exc
except MyException:
pass
else:
self.fail("ResultConsumer should re-raise the exception.")
mock_sla_instance.set_unexpected_failure.assert_has_calls(
[mock.call(exc)])
@mock.patch("rally.task.engine.CONF")
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.ResultConsumer.wait_and_abort")
@mock.patch("rally.task.sla.SLAChecker")
def test_consume_results_chunked(
self, mock_sla_checker, mock_result_consumer_wait_and_abort,
mock_task_get_status, mock_conf):
mock_conf.raw_result_chunk_size = 2
mock_sla_instance = mock.MagicMock()
mock_sla_checker.return_value = mock_sla_instance
mock_task_get_status.return_value = consts.TaskStatus.RUNNING
workload_cfg = {"fake": 2, "hooks": []}
task = mock.MagicMock(spec=objects.Task)
subtask = mock.Mock(spec=objects.Subtask)
workload = mock.Mock(spec=objects.Workload)
runner = mock.MagicMock()
results = [
[{"duration": 1, "timestamp": 3},
{"duration": 2, "timestamp": 2},
{"duration": 3, "timestamp": 3}],
[{"duration": 4, "timestamp": 2},
{"duration": 5, "timestamp": 3}],
[{"duration": 6, "timestamp": 2}],
[{"duration": 7, "timestamp": 1}],
]
runner.result_queue = collections.deque(results)
runner.event_queue = collections.deque()
ctx_manager = mock.MagicMock()
with engine.ResultConsumer(workload_cfg, task=task, subtask=subtask,
workload=workload, runner=runner,
abort_on_sla_failure=False,
ctx_manager=ctx_manager) as consumer_obj:
pass
mock_sla_instance.add_iteration.assert_has_calls([
mock.call({"duration": 1, "timestamp": 3}),
mock.call({"duration": 2, "timestamp": 2}),
mock.call({"duration": 3, "timestamp": 3}),
mock.call({"duration": 4, "timestamp": 2}),
mock.call({"duration": 5, "timestamp": 3}),
mock.call({"duration": 6, "timestamp": 2}),
mock.call({"duration": 7, "timestamp": 1})])
self.assertEqual([{"duration": 7, "timestamp": 1}],
consumer_obj.results)
workload.add_workload_data.assert_has_calls([
mock.call(0, {"raw": [{"duration": 2, "timestamp": 2},
{"duration": 1, "timestamp": 3}]}),
mock.call(1, {"raw": [{"duration": 4, "timestamp": 2},
{"duration": 3, "timestamp": 3}]}),
mock.call(2, {"raw": [{"duration": 6, "timestamp": 2},
{"duration": 5, "timestamp": 3}]}),
mock.call(3, {"raw": [{"duration": 7, "timestamp": 1}]})])
@mock.patch("rally.task.engine.LOG")
@mock.patch("rally.task.hook.HookExecutor")
@mock.patch("rally.task.engine.time.time")
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.ResultConsumer.wait_and_abort")
@mock.patch("rally.task.sla.SLAChecker")
def test_consume_events(
self, mock_sla_checker, mock_result_consumer_wait_and_abort,
mock_task_get_status, mock_time, mock_hook_executor, mock_log):
mock_time.side_effect = [0, 1]
mock_sla_instance = mock_sla_checker.return_value
mock_sla_results = mock_sla_instance.results.return_value
mock_hook_executor_instance = mock_hook_executor.return_value
mock_hook_results = mock_hook_executor_instance.results.return_value
mock_task_get_status.return_value = consts.TaskStatus.RUNNING
workload_cfg = {"fake": 2, "hooks": [{"config": True}]}
task = mock.MagicMock()
subtask = mock.Mock(spec=objects.Subtask)
workload = mock.Mock(spec=objects.Workload)
runner = mock.MagicMock()
events = [
{"type": "iteration", "value": 1},
{"type": "iteration", "value": 2},
{"type": "iteration", "value": 3}
]
runner.result_queue = collections.deque()
runner.event_queue = collections.deque(events)
ctx_manager = mock.MagicMock()
consumer_obj = engine.ResultConsumer(
workload_cfg, task=task, subtask=subtask, workload=workload,
runner=runner, abort_on_sla_failure=False, ctx_manager=ctx_manager)
stop_event = threading.Event()
def set_stop_event(event_type, value):
if not runner.event_queue:
stop_event.set()
mock_hook_executor_instance.on_event.side_effect = set_stop_event
with consumer_obj:
stop_event.wait(1)
mock_hook_executor_instance.on_event.assert_has_calls([
mock.call(event_type="iteration", value=1),
mock.call(event_type="iteration", value=2),
mock.call(event_type="iteration", value=3)
])
self.assertFalse(workload.add_workload_data.called)
workload.set_results.assert_called_once_with(
full_duration=1,
load_duration=0,
sla_results=mock_sla_results,
hooks_results=mock_hook_results,
start_time=None,
contexts_results=ctx_manager.contexts_results())
@mock.patch("rally.task.engine.threading.Thread")
@mock.patch("rally.task.engine.threading.Event")
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.TaskEngine._prepare_context")
@mock.patch("rally.task.engine.time.sleep")
def test_wait_and_abort_on_abort(
self, mock_sleep, mock_task_engine__prepare_context,
mock_task_get_status, mock_event, mock_thread):
runner = mock.MagicMock()
workload_cfg = mock.MagicMock()
task = mock.MagicMock()
subtask = mock.Mock(spec=objects.Subtask)
workload = mock.Mock(spec=objects.Workload)
mock_task_get_status.side_effect = (consts.TaskStatus.RUNNING,
consts.TaskStatus.RUNNING,
consts.TaskStatus.ABORTING)
mock_is_done = mock.MagicMock()
mock_event.return_value = mock_is_done
mock_is_done.isSet.return_value = False
ctx_manager = mock.MagicMock()
res = engine.ResultConsumer(workload_cfg, task=task, subtask=subtask,
workload=workload, runner=runner,
abort_on_sla_failure=True,
ctx_manager=ctx_manager)
res.wait_and_abort()
runner.abort.assert_called_with()
# task.get_status is checked on every loop iteration while is_done is not set
self.assertEqual(3, mock_task_get_status.call_count)
@mock.patch("rally.task.engine.threading.Thread")
@mock.patch("rally.task.engine.threading.Event")
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.TaskEngine._prepare_context")
@mock.patch("rally.task.engine.time.sleep")
def test_wait_and_abort_on_no_abort(
self, mock_sleep, mock_task_engine__prepare_context,
mock_task_get_status, mock_event, mock_thread):
runner = mock.MagicMock()
workload_cfg = mock.MagicMock()
task = mock.MagicMock()
subtask = mock.Mock(spec=objects.Subtask)
workload = mock.Mock(spec=objects.Workload)
mock_task_get_status.return_value = consts.TaskStatus.RUNNING
mock_is_done = mock.MagicMock()
mock_event.return_value = mock_is_done
ctx_manager = mock.MagicMock()
mock_is_done.isSet.side_effect = [False, False, False, False, True]
res = engine.ResultConsumer(workload_cfg, task=task, subtask=subtask,
workload=workload, runner=runner,
abort_on_sla_failure=True,
ctx_manager=ctx_manager)
res.wait_and_abort()
# check that the method does not abort the runner if the task is not aborted
self.assertFalse(runner.abort.called)
# task.get_status is checked on every loop iteration while is_done is not set
self.assertEqual(4, mock_task_get_status.call_count)
|
gangadhar-kadam/nassimapp
|
refs/heads/master
|
patches/october_2013/p02_set_communication_status.py
|
30
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
def execute():
webnotes.reload_doc("core", "doctype", "communication")
webnotes.conn.sql("""update tabCommunication
set sent_or_received= if(ifnull(recipients, '')='', "Received", "Sent")""")
|
bguillot/OpenUpgrade
|
refs/heads/master
|
addons/account_test/__init__.py
|
441
|
import account_test
import report
|
bhargav/scikit-learn
|
refs/heads/master
|
sklearn/linear_model/tests/test_omp.py
|
272
|
# Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
|
bibxpert/bibxpert
|
refs/heads/master
|
operations/search/arxiv.py
|
1
|
#!/usr/bin/env python
#
# Copyright 2015 Rafael Ferreira da Silva
# http://www.rafaelsilva.com/tools
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "Rafael Ferreira da Silva"
import logging
import time
import urllib
from entries import entry
from tools import utils
from xml.etree import ElementTree
log = logging.getLogger(__name__)
def process(entries):
"""
    Query the arXiv database to update the bibliography entries.
:param entries: list of bibtex entries
:return:
"""
    log.info("Searching arXiv for matching entries")
count = 0
for e in entries:
if e.online_processed:
log.debug("Entry '%s' already processed." % e.cite_key)
continue
if e.url and "arxiv" in e.url:
# use id
id = e.url[e.url.rfind('/') + 1:]
url = "http://export.arxiv.org/api/query?id_list=%s" % id
else:
title = utils.clean_field(e.title)
title = title.replace("-", " ")
title = title.replace(" ", "+")
title = urllib.quote(title)
url = "http://export.arxiv.org/api/query?search_query=ti:%%22%s%%22&start=0&max_results=1" % title
data = urllib.urlopen(url).read()
root = ElementTree.fromstring(data)
results = int(root.findall('{http://a9.com/-/spec/opensearch/1.1/}totalResults')[0].text)
if results == 0:
# no result found
log.debug("No result found for entry '%s' on arXiv." % e.title)
continue
e_entry = root.findall('{http://www.w3.org/2005/Atom}entry')[0]
title = e_entry.findall('{http://www.w3.org/2005/Atom}title')[0].text
title = title.replace("\n", "")
url = e_entry.findall('{http://www.w3.org/2005/Atom}id')[0].text
year = e_entry.findall('{http://www.w3.org/2005/Atom}published')[0].text
year = year[:4]
authors_list = ""
for author in e_entry.findall('{http://www.w3.org/2005/Atom}author'):
if len(authors_list) > 0:
authors_list += " and "
authors_list += author[0].text
e.merge(entry.Entry(
title=title,
authors=authors_list,
url=url,
year=year
))
e.online_processed = True
log.debug("[arXiv] Updated entry '%s'." % e.title)
count += 1
time.sleep(0.5)
if count > 0:
log.info("Updated %s entries according to arXiv." % count)
return entries
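# A hypothetical, minimal demonstration (not part of bibxpert): it only builds
# and prints the same title-search URL that process() sends to the arXiv API.
# The paper title below is an illustrative assumption.
if __name__ == "__main__":
    _demo_title = urllib.quote("An example paper title".replace("-", " ").replace(" ", "+"))
    print("http://export.arxiv.org/api/query?search_query=ti:%%22%s%%22&start=0&max_results=1" % _demo_title)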
|
humangeo/rawes
|
refs/heads/master
|
tests/integration/connection_pool_integration_tests.py
|
1
|
import unittest
from mock import patch, MagicMock
from rawes.elastic import Elastic
from requests.models import Response
from rawes.http_connection import HttpConnection
class TestConnectionPooling(unittest.TestCase):
"""Connection pooling was added on top of Rawes, it wasn't designed from
    the beginning. We need some tests to ensure our expectations of the
connection pooling are met.
"""
def testBasicRoundRobin(self):
""" Set up a client with three different hosts to connect to, make
        multiple calls and check that each call goes to a different host in a
Round Robin fashion
"""
hosts = ['http://someserver1:9200', 'http://someserver2:9200',
'http://someserver3:9200']
es = Elastic(hosts, connection_pool_kwargs={'dead_timeout': 10})
with patch('rawes.http_connection.requests.Session.request',
MagicMock(return_value=None)) as request:
request.return_value = Response()
called = []
for _ in xrange(len(hosts)):
es.get()
# Save a list of called hosts (and remove trailing /)
called.append(request.call_args[0][1][:-1])
# Check against original hosts list
self.assertSetEqual(set(hosts), set(called),
                            'All hosts in connection pool should be used')
called_again = []
for _ in xrange(len(hosts)):
es.get()
# Call the same hosts again (don't forget about the trailing /)
called_again.append(request.call_args[0][1][:-1])
# Check they were called in the same order as before
self.assertListEqual(called, called_again,
'Round robin order wasn\'t preserved')
|
Rapptz/discord.py
|
refs/heads/master
|
discord/player.py
|
1
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import threading
import traceback
import subprocess
import audioop
import asyncio
import logging
import shlex
import time
import json
import sys
import re
import io
from typing import Any, Callable, Generic, IO, Optional, TYPE_CHECKING, Tuple, Type, TypeVar, Union
from .errors import ClientException
from .opus import Encoder as OpusEncoder
from .oggparse import OggStream
from .utils import MISSING
if TYPE_CHECKING:
from .voice_client import VoiceClient
AT = TypeVar('AT', bound='AudioSource')
FT = TypeVar('FT', bound='FFmpegOpusAudio')
log: logging.Logger = logging.getLogger(__name__)
__all__ = (
'AudioSource',
'PCMAudio',
'FFmpegAudio',
'FFmpegPCMAudio',
'FFmpegOpusAudio',
'PCMVolumeTransformer',
)
CREATE_NO_WINDOW: int
if sys.platform != 'win32':
CREATE_NO_WINDOW = 0
else:
CREATE_NO_WINDOW = 0x08000000
class AudioSource:
"""Represents an audio stream.
The audio stream can be Opus encoded or not, however if the audio stream
is not Opus encoded then the audio format must be 16-bit 48KHz stereo PCM.
.. warning::
The audio source reads are done in a separate thread.
"""
def read(self) -> bytes:
"""Reads 20ms worth of audio.
Subclasses must implement this.
        If the audio is complete, then return an empty
        :term:`py:bytes-like object` to signal the end of the stream.
If :meth:`~AudioSource.is_opus` method returns ``True``, then it must return
20ms worth of Opus encoded audio. Otherwise, it must be 20ms
worth of 16-bit 48KHz stereo PCM, which is about 3,840 bytes
per frame (20ms worth of audio).
Returns
--------
:class:`bytes`
A bytes like object that represents the PCM or Opus data.
"""
raise NotImplementedError
def is_opus(self) -> bool:
"""Checks if the audio source is already encoded in Opus."""
return False
def cleanup(self) -> None:
"""Called when clean-up is needed to be done.
Useful for clearing buffer data or processes after
it is done playing audio.
"""
pass
def __del__(self) -> None:
self.cleanup()
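# A minimal illustrative sketch (not part of discord.py): a custom AudioSource
# that yields 20ms frames of silence as 16-bit 48KHz stereo PCM and then ends.
# The class name and frame count are assumptions made for this example only.
class _SilenceSource(AudioSource):
    """Illustrative only: plays ``frames`` frames (20ms each) of silence."""
    def __init__(self, frames: int = 250) -> None:  # ~5 seconds of audio
        self._remaining = frames
    def read(self) -> bytes:
        if self._remaining <= 0:
            return b''  # empty bytes signals the end of the stream
        self._remaining -= 1
        return b'\x00' * OpusEncoder.FRAME_SIZE  # 3,840 bytes of zeroed PCM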
class PCMAudio(AudioSource):
"""Represents raw 16-bit 48KHz stereo PCM audio source.
Attributes
-----------
stream: :term:`py:file object`
A file-like object that reads byte data representing raw PCM.
"""
def __init__(self, stream: io.BufferedIOBase) -> None:
self.stream: io.BufferedIOBase = stream
def read(self) -> bytes:
ret = self.stream.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b''
return ret
class FFmpegAudio(AudioSource):
"""Represents an FFmpeg (or AVConv) based AudioSource.
    User-created AudioSources that use FFmpeg differently from how :class:`FFmpegPCMAudio` and
    :class:`FFmpegOpusAudio` work should subclass this.
.. versionadded:: 1.3
"""
def __init__(self, source: str, *, executable: str = 'ffmpeg', args: Any, **subprocess_kwargs: Any):
args = [executable, *args]
kwargs = {'stdout': subprocess.PIPE}
kwargs.update(subprocess_kwargs)
self._process: subprocess.Popen = self._spawn_process(args, **kwargs)
self._stdout: IO[bytes] = self._process.stdout # type: ignore
def _spawn_process(self, args: Any, **subprocess_kwargs: Any) -> subprocess.Popen:
process = None
try:
process = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, **subprocess_kwargs)
except FileNotFoundError:
executable = args.partition(' ')[0] if isinstance(args, str) else args[0]
raise ClientException(executable + ' was not found.') from None
except subprocess.SubprocessError as exc:
raise ClientException(f'Popen failed: {exc.__class__.__name__}: {exc}') from exc
else:
return process
def cleanup(self) -> None:
proc = self._process
if proc is MISSING:
return
log.info('Preparing to terminate ffmpeg process %s.', proc.pid)
try:
proc.kill()
except Exception:
log.exception("Ignoring error attempting to kill ffmpeg process %s", proc.pid)
if proc.poll() is None:
log.info('ffmpeg process %s has not terminated. Waiting to terminate...', proc.pid)
proc.communicate()
log.info('ffmpeg process %s should have terminated with a return code of %s.', proc.pid, proc.returncode)
else:
log.info('ffmpeg process %s successfully terminated with return code of %s.', proc.pid, proc.returncode)
self._process = self._stdout = MISSING
class FFmpegPCMAudio(FFmpegAudio):
"""An audio source from FFmpeg (or AVConv).
This launches a sub-process to a specific input file given.
.. warning::
You must have the ffmpeg or avconv executable in your path environment
variable in order for this to work.
Parameters
------------
source: Union[:class:`str`, :class:`io.BufferedIOBase`]
The input that ffmpeg will take and convert to PCM bytes.
If ``pipe`` is ``True`` then this is a file-like object that is
passed to the stdin of ffmpeg.
executable: :class:`str`
The executable name (and path) to use. Defaults to ``ffmpeg``.
pipe: :class:`bool`
If ``True``, denotes that ``source`` parameter will be passed
to the stdin of ffmpeg. Defaults to ``False``.
stderr: Optional[:term:`py:file object`]
A file-like object to pass to the Popen constructor.
Could also be an instance of ``subprocess.PIPE``.
before_options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
Raises
--------
ClientException
The subprocess failed to be created.
"""
def __init__(
self,
source: str,
*,
executable: str = 'ffmpeg',
pipe: bool = False,
stderr: Optional[IO[str]] = None,
before_options: Optional[str] = None,
options: Optional[str] = None
) -> None:
args = []
subprocess_kwargs = {'stdin': source if pipe else subprocess.DEVNULL, 'stderr': stderr}
if isinstance(before_options, str):
args.extend(shlex.split(before_options))
args.append('-i')
args.append('-' if pipe else source)
args.extend(('-f', 's16le', '-ar', '48000', '-ac', '2', '-loglevel', 'warning'))
if isinstance(options, str):
args.extend(shlex.split(options))
args.append('pipe:1')
super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
def read(self) -> bytes:
ret = self._stdout.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b''
return ret
def is_opus(self) -> bool:
return False
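# A hedged usage sketch (not part of discord.py): the file name and option
# string are illustrative assumptions, and constructing the source spawns an
# ffmpeg subprocess, so the lines are left commented out:
#
#   source = FFmpegPCMAudio('my_song.mp3', options='-vn')
#   voice_client.play(source)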
class FFmpegOpusAudio(FFmpegAudio):
"""An audio source from FFmpeg (or AVConv).
This launches a sub-process to a specific input file given. However, rather than
producing PCM packets like :class:`FFmpegPCMAudio` does that need to be encoded to
Opus, this class produces Opus packets, skipping the encoding step done by the library.
Alternatively, instead of instantiating this class directly, you can use
:meth:`FFmpegOpusAudio.from_probe` to probe for bitrate and codec information. This
can be used to opportunistically skip pointless re-encoding of existing Opus audio data
for a boost in performance at the cost of a short initial delay to gather the information.
The same can be achieved by passing ``copy`` to the ``codec`` parameter, but only if you
know that the input source is Opus encoded beforehand.
.. versionadded:: 1.3
.. warning::
You must have the ffmpeg or avconv executable in your path environment
variable in order for this to work.
Parameters
------------
source: Union[:class:`str`, :class:`io.BufferedIOBase`]
The input that ffmpeg will take and convert to Opus bytes.
If ``pipe`` is ``True`` then this is a file-like object that is
passed to the stdin of ffmpeg.
bitrate: :class:`int`
The bitrate in kbps to encode the output to. Defaults to ``128``.
codec: Optional[:class:`str`]
The codec to use to encode the audio data. Normally this would be
just ``libopus``, but is used by :meth:`FFmpegOpusAudio.from_probe` to
opportunistically skip pointlessly re-encoding Opus audio data by passing
``copy`` as the codec value. Any values other than ``copy``, ``opus``, or
``libopus`` will be considered ``libopus``. Defaults to ``libopus``.
.. warning::
Do not provide this parameter unless you are certain that the audio input is
already Opus encoded. For typical use :meth:`FFmpegOpusAudio.from_probe`
should be used to determine the proper value for this parameter.
executable: :class:`str`
The executable name (and path) to use. Defaults to ``ffmpeg``.
pipe: :class:`bool`
If ``True``, denotes that ``source`` parameter will be passed
to the stdin of ffmpeg. Defaults to ``False``.
stderr: Optional[:term:`py:file object`]
A file-like object to pass to the Popen constructor.
Could also be an instance of ``subprocess.PIPE``.
before_options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
Raises
--------
ClientException
The subprocess failed to be created.
"""
def __init__(
self,
source: str,
*,
bitrate: int = 128,
codec: Optional[str] = None,
executable: str = 'ffmpeg',
pipe=False,
stderr=None,
before_options=None,
options=None,
) -> None:
args = []
subprocess_kwargs = {'stdin': source if pipe else subprocess.DEVNULL, 'stderr': stderr}
if isinstance(before_options, str):
args.extend(shlex.split(before_options))
args.append('-i')
args.append('-' if pipe else source)
codec = 'copy' if codec in ('opus', 'libopus') else 'libopus'
args.extend(('-map_metadata', '-1',
'-f', 'opus',
'-c:a', codec,
'-ar', '48000',
'-ac', '2',
'-b:a', f'{bitrate}k',
'-loglevel', 'warning'))
if isinstance(options, str):
args.extend(shlex.split(options))
args.append('pipe:1')
super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
self._packet_iter = OggStream(self._stdout).iter_packets()
@classmethod
async def from_probe(
cls: Type[FT],
source: str,
*,
method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
**kwargs: Any,
) -> FT:
"""|coro|
A factory method that creates a :class:`FFmpegOpusAudio` after probing
the input source for audio codec and bitrate information.
Examples
----------
Use this function to create an :class:`FFmpegOpusAudio` instance instead of the constructor: ::
source = await discord.FFmpegOpusAudio.from_probe("song.webm")
voice_client.play(source)
If you are on Windows and don't have ffprobe installed, use the ``fallback`` method
to probe using ffmpeg instead: ::
source = await discord.FFmpegOpusAudio.from_probe("song.webm", method='fallback')
voice_client.play(source)
Using a custom method of determining codec and bitrate: ::
def custom_probe(source, executable):
# some analysis code here
return codec, bitrate
source = await discord.FFmpegOpusAudio.from_probe("song.webm", method=custom_probe)
voice_client.play(source)
Parameters
------------
source
Identical to the ``source`` parameter for the constructor.
method: Optional[Union[:class:`str`, Callable[:class:`str`, :class:`str`]]]
The probing method used to determine bitrate and codec information. As a string, valid
values are ``native`` to use ffprobe (or avprobe) and ``fallback`` to use ffmpeg
(or avconv). As a callable, it must take two string arguments, ``source`` and
``executable``. Both parameters are the same values passed to this factory function.
``executable`` will default to ``ffmpeg`` if not provided as a keyword argument.
kwargs
The remaining parameters to be passed to the :class:`FFmpegOpusAudio` constructor,
excluding ``bitrate`` and ``codec``.
Raises
--------
AttributeError
Invalid probe method, must be ``'native'`` or ``'fallback'``.
TypeError
Invalid value for ``probe`` parameter, must be :class:`str` or a callable.
Returns
--------
:class:`FFmpegOpusAudio`
An instance of this class.
"""
executable = kwargs.get('executable')
codec, bitrate = await cls.probe(source, method=method, executable=executable)
return cls(source, bitrate=bitrate, codec=codec, **kwargs) # type: ignore
@classmethod
async def probe(
cls,
source: str,
*,
method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
executable: Optional[str] = None,
) -> Tuple[Optional[str], Optional[int]]:
"""|coro|
Probes the input source for bitrate and codec information.
Parameters
------------
source
Identical to the ``source`` parameter for :class:`FFmpegOpusAudio`.
method
Identical to the ``method`` parameter for :meth:`FFmpegOpusAudio.from_probe`.
executable: :class:`str`
Identical to the ``executable`` parameter for :class:`FFmpegOpusAudio`.
Raises
--------
AttributeError
Invalid probe method, must be ``'native'`` or ``'fallback'``.
TypeError
Invalid value for ``probe`` parameter, must be :class:`str` or a callable.
Returns
---------
Optional[Tuple[Optional[:class:`str`], Optional[:class:`int`]]]
A 2-tuple with the codec and bitrate of the input source.
"""
method = method or 'native'
executable = executable or 'ffmpeg'
probefunc = fallback = None
if isinstance(method, str):
probefunc = getattr(cls, '_probe_codec_' + method, None)
if probefunc is None:
raise AttributeError(f"Invalid probe method {method!r}")
if probefunc is cls._probe_codec_native:
fallback = cls._probe_codec_fallback
elif callable(method):
probefunc = method
fallback = cls._probe_codec_fallback
else:
raise TypeError("Expected str or callable for parameter 'probe', " \
f"not '{method.__class__.__name__}'")
codec = bitrate = None
loop = asyncio.get_event_loop()
try:
codec, bitrate = await loop.run_in_executor(None, lambda: probefunc(source, executable)) # type: ignore
except Exception:
if not fallback:
log.exception("Probe '%s' using '%s' failed", method, executable)
return # type: ignore
log.exception("Probe '%s' using '%s' failed, trying fallback", method, executable)
try:
codec, bitrate = await loop.run_in_executor(None, lambda: fallback(source, executable)) # type: ignore
except Exception:
log.exception("Fallback probe using '%s' failed", executable)
else:
log.info("Fallback probe found codec=%s, bitrate=%s", codec, bitrate)
else:
log.info("Probe found codec=%s, bitrate=%s", codec, bitrate)
finally:
return codec, bitrate
@staticmethod
def _probe_codec_native(source, executable: str = 'ffmpeg') -> Tuple[Optional[str], Optional[int]]:
exe = executable[:2] + 'probe' if executable in ('ffmpeg', 'avconv') else executable
args = [exe, '-v', 'quiet', '-print_format', 'json', '-show_streams', '-select_streams', 'a:0', source]
output = subprocess.check_output(args, timeout=20)
codec = bitrate = None
if output:
data = json.loads(output)
streamdata = data['streams'][0]
codec = streamdata.get('codec_name')
bitrate = int(streamdata.get('bit_rate', 0))
bitrate = max(round(bitrate/1000), 512)
return codec, bitrate
@staticmethod
def _probe_codec_fallback(source, executable: str = 'ffmpeg') -> Tuple[Optional[str], Optional[int]]:
args = [executable, '-hide_banner', '-i', source]
proc = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = proc.communicate(timeout=20)
output = out.decode('utf8')
codec = bitrate = None
codec_match = re.search(r"Stream #0.*?Audio: (\w+)", output)
if codec_match:
codec = codec_match.group(1)
br_match = re.search(r"(\d+) [kK]b/s", output)
if br_match:
bitrate = max(int(br_match.group(1)), 512)
return codec, bitrate
def read(self) -> bytes:
return next(self._packet_iter, b'')
def is_opus(self) -> bool:
return True
class PCMVolumeTransformer(AudioSource, Generic[AT]):
"""Transforms a previous :class:`AudioSource` to have volume controls.
This does not work on audio sources that have :meth:`AudioSource.is_opus`
set to ``True``.
Parameters
------------
original: :class:`AudioSource`
The original AudioSource to transform.
volume: :class:`float`
The initial volume to set it to.
See :attr:`volume` for more info.
Raises
-------
TypeError
Not an audio source.
ClientException
The audio source is opus encoded.
"""
def __init__(self, original: AT, volume: float = 1.0):
if not isinstance(original, AudioSource):
raise TypeError(f'expected AudioSource not {original.__class__.__name__}.')
if original.is_opus():
raise ClientException('AudioSource must not be Opus encoded.')
self.original: AT = original
self.volume = volume
@property
def volume(self) -> float:
"""Retrieves or sets the volume as a floating point percentage (e.g. ``1.0`` for 100%)."""
return self._volume
@volume.setter
def volume(self, value: float) -> None:
self._volume = max(value, 0.0)
def cleanup(self) -> None:
self.original.cleanup()
def read(self) -> bytes:
ret = self.original.read()
return audioop.mul(ret, 2, min(self._volume, 2.0))
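# A hedged usage sketch (not part of discord.py): wrap a PCM source to control
# its volume while it plays; 'my_song.mp3' and voice_client are assumptions:
#
#   source = PCMVolumeTransformer(FFmpegPCMAudio('my_song.mp3'), volume=0.5)
#   voice_client.play(source)
#   source.volume = 1.0  # can be adjusted on the fly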
class AudioPlayer(threading.Thread):
DELAY: float = OpusEncoder.FRAME_LENGTH / 1000.0
def __init__(self, source: AudioSource, client: VoiceClient, *, after=None):
threading.Thread.__init__(self)
self.daemon: bool = True
self.source: AudioSource = source
self.client: VoiceClient = client
self.after: Optional[Callable[[Optional[Exception]], Any]] = after
self._end: threading.Event = threading.Event()
self._resumed: threading.Event = threading.Event()
self._resumed.set() # we are not paused
self._current_error: Optional[Exception] = None
self._connected: threading.Event = client._connected
self._lock: threading.Lock = threading.Lock()
if after is not None and not callable(after):
raise TypeError('Expected a callable for the "after" parameter.')
def _do_run(self) -> None:
self.loops = 0
self._start = time.perf_counter()
# getattr lookup speed ups
play_audio = self.client.send_audio_packet
self._speak(True)
while not self._end.is_set():
# are we paused?
if not self._resumed.is_set():
# wait until we aren't
self._resumed.wait()
continue
# are we disconnected from voice?
if not self._connected.is_set():
# wait until we are connected
self._connected.wait()
# reset our internal data
self.loops = 0
self._start = time.perf_counter()
self.loops += 1
data = self.source.read()
if not data:
self.stop()
break
play_audio(data, encode=not self.source.is_opus())
next_time = self._start + self.DELAY * self.loops
delay = max(0, self.DELAY + (next_time - time.perf_counter()))
time.sleep(delay)
def run(self) -> None:
try:
self._do_run()
except Exception as exc:
self._current_error = exc
self.stop()
finally:
self.source.cleanup()
self._call_after()
def _call_after(self) -> None:
error = self._current_error
if self.after is not None:
try:
self.after(error)
except Exception as exc:
log.exception('Calling the after function failed.')
exc.__context__ = error
traceback.print_exception(type(exc), exc, exc.__traceback__)
elif error:
msg = f'Exception in voice thread {self.name}'
log.exception(msg, exc_info=error)
print(msg, file=sys.stderr)
traceback.print_exception(type(error), error, error.__traceback__)
def stop(self) -> None:
self._end.set()
self._resumed.set()
self._speak(False)
def pause(self, *, update_speaking: bool = True) -> None:
self._resumed.clear()
if update_speaking:
self._speak(False)
def resume(self, *, update_speaking: bool = True) -> None:
self.loops = 0
self._start = time.perf_counter()
self._resumed.set()
if update_speaking:
self._speak(True)
def is_playing(self) -> bool:
return self._resumed.is_set() and not self._end.is_set()
def is_paused(self) -> bool:
return not self._end.is_set() and not self._resumed.is_set()
def _set_source(self, source: AudioSource) -> None:
with self._lock:
self.pause(update_speaking=False)
self.source = source
self.resume(update_speaking=False)
def _speak(self, speaking: bool) -> None:
try:
asyncio.run_coroutine_threadsafe(self.client.ws.speak(speaking), self.client.loop)
except Exception as e:
log.info("Speaking call in player failed: %s", e)
|
davide-romanini/ComicStreamer
|
refs/heads/master
|
libs/rumps/__init__.py
|
6
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# rumps: Ridiculously Uncomplicated Mac OS X Python Statusbar apps.
# Copyright: (c) 2013, Jared Suttles. All rights reserved.
# License: BSD, see LICENSE for details.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
"""
rumps: Ridiculously Uncomplicated Mac OS X Python Statusbar apps.
Classes:
App(name[, title[, icon[, menu]]]) --> App object representing your application
Window(message[, title[, default_text[, ok[, cancel[, dimensions]]]]]) --> Window object controlling a pop-up window
for consuming user input
Timer(callback, interval) --> Timer object that will call the function every interval seconds
MenuItem(title[, callback[, key[, icon[, dimensions]]]]) --> MenuItem object representing an item of a menu and any
associated submenu
Decorators:
@notifications --> Decorator for function dealing with incoming notifications
@clicked(*args) --> Decorator for function responding to click event on a MenuItem
@timer(interval) --> Decorator for function to be called every interval seconds
Functions:
timers() --> Returns a set of Timer objects
application_support(name) --> Returns the path to the application support folder for the given application name
notification(title[, subtitle[, message[, data[, sound]]]]) --> Sends a Mac OS X 10.8 notification
alert(title[, message[, ok[, cancel]]]) --> Opens an alert window
debug_mode(choice) --> Runs the application in debug mode with verbose output if True
"""
__title__ = 'rumps'
__version__ = '0.1.4'
__author__ = 'Jared Suttles'
__license__ = 'Modified BSD'
__copyright__ = 'Copyright 2013 Jared Suttles'
from .rumps import (separator, debug_mode, alert, notification, application_support, timers, timer, clicked,
notifications, MenuItem, Timer, Window, App)
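# A hedged usage sketch (not part of rumps itself); the app name, menu title,
# and notification text are illustrative assumptions. Running it requires
# macOS with PyObjC, so it is shown commented out:
#
#   import rumps
#
#   class AwesomeApp(rumps.App):
#       @rumps.clicked("Say hi")
#       def say_hi(self, _):
#           rumps.notification("Awesome App", "Greeting", "Hi there!")
#
#   if __name__ == "__main__":
#       AwesomeApp("Awesome App").run()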
|
HyperBaton/ansible
|
refs/heads/devel
|
lib/ansible/executor/module_common.py
|
14
|
# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import base64
import datetime
import json
import os
import shlex
import zipfile
import re
import pkgutil
from io import BytesIO
from ansible.release import __version__, __author__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.interpreter_discovery import InterpreterDiscoveryRequiredError
from ansible.executor.powershell import module_manifest as ps_manifest
from ansible.module_utils._text import to_bytes, to_text, to_native
from ansible.plugins.loader import module_utils_loader
# Must import strategy and use write_locks from there
# If we import write_locks directly then we end up binding a
# variable to the object and then it never gets updated.
from ansible.executor import action_write_locks
from ansible.utils.display import Display
try:
import importlib.util
import importlib.machinery
imp = None
except ImportError:
import imp
# HACK: keep Python 2.6 controller tests happy in CI until they're properly split
try:
from importlib import import_module
except ImportError:
import_module = __import__
# if we're on a Python that doesn't have FNFError, redefine it as IOError (since that's what we'll see)
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
display = Display()
REPLACER = b"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_VERSION = b"\"<<ANSIBLE_VERSION>>\""
REPLACER_COMPLEX = b"\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
REPLACER_WINDOWS = b"# POWERSHELL_COMMON"
REPLACER_JSONARGS = b"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
REPLACER_SELINUX = b"<<SELINUX_SPECIAL_FILESYSTEMS>>"
# We could end up writing out parameters with unicode characters so we need to
# specify an encoding for the python source file
ENCODING_STRING = u'# -*- coding: utf-8 -*-'
b_ENCODING_STRING = b'# -*- coding: utf-8 -*-'
# module_common is relative to module_utils, so fix the path
_MODULE_UTILS_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
# ******************************************************************************
ANSIBALLZ_TEMPLATE = u'''%(shebang)s
%(coding)s
_ANSIBALLZ_WRAPPER = True # For test-module.py script to tell this is a ANSIBALLZ_WRAPPER
# This code is part of Ansible, but is an independent component.
# The code in this particular templatable string, and this templatable string
# only, is BSD licensed. Modules which end up using this snippet, which is
# dynamically combined together by Ansible still belong to the author of the
# module, and they may assign their own license to the complete work.
#
# Copyright (c), James Cammarata, 2016
# Copyright (c), Toshio Kuratomi, 2016
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def _ansiballz_main():
%(rlimit)s
import os
import os.path
import sys
import __main__
# For some distros and python versions we pick up this script in the temporary
# directory. This leads to problems when the ansible module masks a python
# library that another import needs. We have not figured out what about the
# specific distros and python versions causes this to behave differently.
#
# Tested distros:
# Fedora23 with python3.4 Works
# Ubuntu15.10 with python2.7 Works
# Ubuntu15.10 with python3.4 Fails without this
# Ubuntu16.04.1 with python3.5 Fails without this
# To test on another platform:
# * use the copy module (since this shadows the stdlib copy module)
# * Turn off pipelining
# * Make sure that the destination file does not exist
# * ansible ubuntu16-test -m copy -a 'src=/etc/motd dest=/var/tmp/m'
# This will traceback in shutil. Looking at the complete traceback will show
# that shutil is importing copy which finds the ansible module instead of the
# stdlib module
scriptdir = None
try:
scriptdir = os.path.dirname(os.path.realpath(__main__.__file__))
except (AttributeError, OSError):
# Some platforms don't set __file__ when reading from stdin
# OSX raises OSError if using abspath() in a directory we don't have
# permission to read (realpath calls abspath)
pass
if scriptdir is not None:
sys.path = [p for p in sys.path if p != scriptdir]
import base64
import runpy
import shutil
import tempfile
import zipfile
if sys.version_info < (3,):
PY3 = False
else:
PY3 = True
ZIPDATA = """%(zipdata)s"""
# Note: temp_path isn't needed once we switch to zipimport
def invoke_module(modlib_path, temp_path, json_params):
# When installed via setuptools (including python setup.py install),
# ansible may be installed with an easy-install.pth file. That file
# may load the system-wide install of ansible rather than the one in
# the module. sitecustomize is the only way to override that setting.
z = zipfile.ZipFile(modlib_path, mode='a')
# py3: modlib_path will be text, py2: it's bytes. Need bytes at the end
sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% modlib_path
sitecustomize = sitecustomize.encode('utf-8')
# Use a ZipInfo to work around zipfile limitation on hosts with
# clocks set to a pre-1980 year (for instance, Raspberry Pi)
zinfo = zipfile.ZipInfo()
zinfo.filename = 'sitecustomize.py'
zinfo.date_time = ( %(year)i, %(month)i, %(day)i, %(hour)i, %(minute)i, %(second)i)
z.writestr(zinfo, sitecustomize)
z.close()
# Put the zipped up module_utils we got from the controller first in the python path so that we
# can monkeypatch the right basic
sys.path.insert(0, modlib_path)
# Monkeypatch the parameters into basic
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = json_params
%(coverage)s
# Run the module! By importing it as '__main__', it thinks it is executing as a script
runpy.run_module(mod_name='%(module_fqn)s', init_globals=None, run_name='__main__', alter_sys=True)
# Ansible modules must exit themselves
print('{"msg": "New-style module did not handle its own exit", "failed": true}')
sys.exit(1)
def debug(command, zipped_mod, json_params):
# The code here normally doesn't run. It's only used for debugging on the
# remote machine.
#
# The subcommands in this function make it easier to debug ansiballz
# modules. Here's the basic steps:
#
# Run ansible with the environment variable: ANSIBLE_KEEP_REMOTE_FILES=1 and -vvv
# to save the module file remotely::
# $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible host1 -m ping -a 'data=october' -vvv
#
# Part of the verbose output will tell you where on the remote machine the
# module was written to::
# [...]
# <host1> SSH: EXEC ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o
# PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o
# ControlPath=/home/badger/.ansible/cp/ansible-ssh-%%h-%%p-%%r -tt rhel7 '/bin/sh -c '"'"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
# LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping'"'"''
# [...]
#
# Login to the remote machine and run the module file via from the previous
# step with the explode subcommand to extract the module payload into
# source files::
# $ ssh host1
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping explode
# Module expanded into:
# /home/badger/.ansible/tmp/ansible-tmp-1461173408.08-279692652635227/ansible
#
# You can now edit the source files to instrument the code or experiment with
# different parameter values. When you're ready to run the code you've modified
# (instead of the code from the actual zipped module), use the execute subcommand like this::
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping execute
# Okay to use __file__ here because we're running from a kept file
basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir')
args_path = os.path.join(basedir, 'args')
if command == 'excommunicate':
print('The excommunicate debug command is deprecated and will be removed in 2.11. Use execute instead.')
command = 'execute'
if command == 'explode':
# transform the ZIPDATA into an exploded directory of code and then
# print the path to the code. This is an easy way for people to look
# at the code on the remote machine for debugging it in that
# environment
z = zipfile.ZipFile(zipped_mod)
for filename in z.namelist():
if filename.startswith('/'):
raise Exception('Something wrong with this module zip file: should not contain absolute paths')
dest_filename = os.path.join(basedir, filename)
if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename):
os.makedirs(dest_filename)
else:
directory = os.path.dirname(dest_filename)
if not os.path.exists(directory):
os.makedirs(directory)
f = open(dest_filename, 'wb')
f.write(z.read(filename))
f.close()
# write the args file
f = open(args_path, 'wb')
f.write(json_params)
f.close()
print('Module expanded into:')
print('%%s' %% basedir)
exitcode = 0
elif command == 'execute':
# Execute the exploded code instead of executing the module from the
# embedded ZIPDATA. This allows people to easily run their modified
# code on the remote machine to see how changes will affect it.
# Set pythonpath to the debug dir
sys.path.insert(0, basedir)
# read in the args file which the user may have modified
with open(args_path, 'rb') as f:
json_params = f.read()
# Monkeypatch the parameters into basic
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = json_params
# Run the module! By importing it as '__main__', it thinks it is executing as a script
runpy.run_module(mod_name='%(module_fqn)s', init_globals=None, run_name='__main__', alter_sys=True)
# Ansible modules must exit themselves
print('{"msg": "New-style module did not handle its own exit", "failed": true}')
sys.exit(1)
else:
print('WARNING: Unknown debug command. Doing nothing.')
exitcode = 0
return exitcode
#
# See comments in the debug() method for information on debugging
#
ANSIBALLZ_PARAMS = %(params)s
if PY3:
ANSIBALLZ_PARAMS = ANSIBALLZ_PARAMS.encode('utf-8')
try:
# There's a race condition with the controller removing the
# remote_tmpdir and this module executing under async. So we cannot
# store this in remote_tmpdir (use system tempdir instead)
# Only need to use [ansible_module]_payload_ in the temp_path until we move to zipimport
# (this helps ansible-test produce coverage stats)
temp_path = tempfile.mkdtemp(prefix='ansible_%(ansible_module)s_payload_')
zipped_mod = os.path.join(temp_path, 'ansible_%(ansible_module)s_payload.zip')
with open(zipped_mod, 'wb') as modlib:
modlib.write(base64.b64decode(ZIPDATA))
if len(sys.argv) == 2:
exitcode = debug(sys.argv[1], zipped_mod, ANSIBALLZ_PARAMS)
else:
# Note: temp_path isn't needed once we switch to zipimport
invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)
finally:
try:
shutil.rmtree(temp_path)
except (NameError, OSError):
# tempdir creation probably failed
pass
sys.exit(exitcode)
if __name__ == '__main__':
_ansiballz_main()
'''
ANSIBALLZ_COVERAGE_TEMPLATE = '''
# Access to the working directory is required by coverage.
# Some platforms, such as macOS, may not allow querying the working directory when using become to drop privileges.
try:
os.getcwd()
except OSError:
os.chdir('/')
os.environ['COVERAGE_FILE'] = '%(coverage_output)s'
import atexit
try:
import coverage
except ImportError:
print('{"msg": "Could not import `coverage` module.", "failed": true}')
sys.exit(1)
cov = coverage.Coverage(config_file='%(coverage_config)s')
def atexit_coverage():
cov.stop()
cov.save()
atexit.register(atexit_coverage)
cov.start()
'''
ANSIBALLZ_COVERAGE_CHECK_TEMPLATE = '''
try:
if PY3:
import importlib.util
if importlib.util.find_spec('coverage') is None:
raise ImportError
else:
import imp
imp.find_module('coverage')
except ImportError:
print('{"msg": "Could not find `coverage` module.", "failed": true}')
sys.exit(1)
'''
ANSIBALLZ_RLIMIT_TEMPLATE = '''
import resource
existing_soft, existing_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
# adjust soft limit subject to existing hard limit
requested_soft = min(existing_hard, %(rlimit_nofile)d)
if requested_soft != existing_soft:
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (requested_soft, existing_hard))
except ValueError:
# some platforms (eg macOS) lie about their hard limit
pass
'''
def _strip_comments(source):
# Strip comments and blank lines from the wrapper
buf = []
for line in source.splitlines():
l = line.strip()
if not l or l.startswith(u'#'):
continue
buf.append(line)
return u'\n'.join(buf)
if C.DEFAULT_KEEP_REMOTE_FILES:
# Keep comments when KEEP_REMOTE_FILES is set. That way users will see
# the comments with some nice usage instructions
ACTIVE_ANSIBALLZ_TEMPLATE = ANSIBALLZ_TEMPLATE
else:
# ANSIBALLZ_TEMPLATE stripped of comments for smaller over the wire size
ACTIVE_ANSIBALLZ_TEMPLATE = _strip_comments(ANSIBALLZ_TEMPLATE)
# dirname(dirname(dirname(site-packages/ansible/executor/module_common.py) == site-packages
# Do this instead of getting site-packages from distutils.sysconfig so we work when we
# haven't been installed
site_packages = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
CORE_LIBRARY_PATH_RE = re.compile(r'%s/(?P<path>ansible/modules/.*)\.py$' % site_packages)
COLLECTION_PATH_RE = re.compile(r'/(?P<path>ansible_collections/[^/]+/[^/]+/plugins/modules/.*)\.py$')
# Detect new-style Python modules by looking for required imports:
# import ansible_collections.[my_ns.my_col.plugins.module_utils.my_module_util]
# from ansible_collections.[my_ns.my_col.plugins.module_utils import my_module_util]
# import ansible.module_utils[.basic]
# from ansible.module_utils[ import basic]
# from ansible.module_utils[.basic import AnsibleModule]
# from ..module_utils[ import basic]
# from ..module_utils[.basic import AnsibleModule]
NEW_STYLE_PYTHON_MODULE_RE = re.compile(
# Relative imports
br'(?:from +\.{2,} *module_utils.* +import |'
# Collection absolute imports:
br'from +ansible_collections\.[^.]+\.[^.]+\.plugins\.module_utils.* +import |'
br'import +ansible_collections\.[^.]+\.[^.]+\.plugins\.module_utils.*|'
# Core absolute imports
br'from +ansible\.module_utils.* +import |'
br'import +ansible\.module_utils\.)'
)
class ModuleDepFinder(ast.NodeVisitor):
def __init__(self, module_fqn, *args, **kwargs):
"""
Walk the ast tree for the python module.
:arg module_fqn: The fully qualified name to reach this module in dotted notation.
example: ansible.module_utils.basic
Save submodule[.submoduleN][.identifier] into self.submodules
when they are from ansible.module_utils or ansible_collections packages
self.submodules will end up with tuples like:
- ('ansible', 'module_utils', 'basic',)
- ('ansible', 'module_utils', 'urls', 'fetch_url')
- ('ansible', 'module_utils', 'database', 'postgres')
- ('ansible', 'module_utils', 'database', 'postgres', 'quote')
- ('ansible', 'module_utils', 'database', 'postgres', 'quote')
- ('ansible_collections', 'my_ns', 'my_col', 'plugins', 'module_utils', 'foo')
It's up to calling code to determine whether the final element of the
tuple are module names or something else (function, class, or variable names)
.. seealso:: :python3:class:`ast.NodeVisitor`
"""
super(ModuleDepFinder, self).__init__(*args, **kwargs)
self.submodules = set()
self.module_fqn = module_fqn
def visit_Import(self, node):
"""
Handle import ansible.module_utils.MODLIB[.MODLIBn] [as asname]
We save these as interesting submodules when the imported library is in ansible.module_utils
or ansible.collections
"""
for alias in node.names:
if (alias.name.startswith('ansible.module_utils.') or
alias.name.startswith('ansible_collections.')):
py_mod = tuple(alias.name.split('.'))
self.submodules.add(py_mod)
self.generic_visit(node)
def visit_ImportFrom(self, node):
"""
Handle from ansible.module_utils.MODLIB import [.MODLIBn] [as asname]
Also has to handle relative imports
We save these as interesting submodules when the imported library is in ansible.module_utils
or ansible.collections
"""
# FIXME: These should all get skipped:
# from ansible.executor import module_common
# from ...executor import module_common
# from ... import executor (Currently it gives a non-helpful error)
if node.level > 0:
if self.module_fqn:
parts = tuple(self.module_fqn.split('.'))
if node.module:
# relative import: from .module import x
node_module = '.'.join(parts[:-node.level] + (node.module,))
else:
# relative import: from . import x
node_module = '.'.join(parts[:-node.level])
else:
# fall back to an absolute import
node_module = node.module
else:
# absolute import: from module import x
node_module = node.module
# Specialcase: six is a special case because of its
# import logic
py_mod = None
if node.names[0].name == '_six':
self.submodules.add(('_six',))
elif node_module.startswith('ansible.module_utils'):
# from ansible.module_utils.MODULE1[.MODULEn] import IDENTIFIER [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [,IDENTIFIER] [as asname]
# from ansible.module_utils import MODULE1 [,MODULEn] [as asname]
py_mod = tuple(node_module.split('.'))
elif node_module.startswith('ansible_collections.'):
if node_module.endswith('plugins.module_utils') or '.plugins.module_utils.' in node_module:
# from ansible_collections.ns.coll.plugins.module_utils import MODULE [as aname] [,MODULE2] [as aname]
# from ansible_collections.ns.coll.plugins.module_utils.MODULE import IDENTIFIER [as aname]
# FIXME: Unhandled cornercase (needs to be ignored):
# from ansible_collections.ns.coll.plugins.[!module_utils].[FOO].plugins.module_utils import IDENTIFIER
py_mod = tuple(node_module.split('.'))
else:
# Not from module_utils so ignore. for instance:
# from ansible_collections.ns.coll.plugins.lookup import IDENTIFIER
pass
if py_mod:
for alias in node.names:
self.submodules.add(py_mod + (alias.name,))
self.generic_visit(node)
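# A hypothetical, minimal demonstration (not part of Ansible): run the finder
# over a one-line module source and print the module_utils imports it records.
# The module FQN used here is an illustrative assumption.
if __name__ == '__main__':
    _demo_tree = ast.parse(b'from ansible.module_utils.basic import AnsibleModule\n')
    _demo_finder = ModuleDepFinder('ansible.modules.demo')
    _demo_finder.visit(_demo_tree)
    print(_demo_finder.submodules)  # {('ansible', 'module_utils', 'basic', 'AnsibleModule')}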
def _slurp(path):
if not os.path.exists(path):
raise AnsibleError("imported module support code does not exist at %s" % os.path.abspath(path))
with open(path, 'rb') as fd:
data = fd.read()
return data
def _get_shebang(interpreter, task_vars, templar, args=tuple()):
"""
Note not stellar API:
Returns None instead of always returning a shebang line. Doing it this
way allows the caller to decide to use the shebang it read from the
file rather than trust that we reformatted what they already have
correctly.
"""
interpreter_name = os.path.basename(interpreter).strip()
# FUTURE: add logical equivalence for python3 in the case of py3-only modules
# check for first-class interpreter config
interpreter_config_key = "INTERPRETER_%s" % interpreter_name.upper()
if C.config.get_configuration_definitions().get(interpreter_config_key):
# a config def exists for this interpreter type; consult config for the value
interpreter_out = C.config.get_config_value(interpreter_config_key, variables=task_vars)
discovered_interpreter_config = u'discovered_interpreter_%s' % interpreter_name
interpreter_out = templar.template(interpreter_out.strip())
facts_from_task_vars = task_vars.get('ansible_facts', {})
# handle interpreter discovery if requested
if interpreter_out in ['auto', 'auto_legacy', 'auto_silent', 'auto_legacy_silent']:
if discovered_interpreter_config not in facts_from_task_vars:
# interpreter discovery is desired, but has not been run for this host
raise InterpreterDiscoveryRequiredError("interpreter discovery needed",
interpreter_name=interpreter_name,
discovery_mode=interpreter_out)
else:
interpreter_out = facts_from_task_vars[discovered_interpreter_config]
else:
# a config def does not exist for this interpreter type; consult vars for a possible direct override
interpreter_config = u'ansible_%s_interpreter' % interpreter_name
if interpreter_config not in task_vars:
return None, interpreter
interpreter_out = templar.template(task_vars[interpreter_config].strip())
shebang = u'#!' + interpreter_out
if args:
shebang = shebang + u' ' + u' '.join(args)
return shebang, interpreter_out
class ModuleInfo:
def __init__(self, name, paths):
self.py_src = False
self.pkg_dir = False
path = None
if imp is None:
self._info = info = importlib.machinery.PathFinder.find_spec(name, paths)
if info is not None:
self.py_src = os.path.splitext(info.origin)[1] in importlib.machinery.SOURCE_SUFFIXES
self.pkg_dir = info.origin.endswith('/__init__.py')
path = info.origin
else:
raise ImportError("No module named '%s'" % name)
else:
self._info = info = imp.find_module(name, paths)
self.py_src = info[2][2] == imp.PY_SOURCE
self.pkg_dir = info[2][2] == imp.PKG_DIRECTORY
if self.pkg_dir:
path = os.path.join(info[1], '__init__.py')
else:
path = info[1]
self.path = path
def get_source(self):
if imp and self.py_src:
try:
return self._info[0].read()
finally:
self._info[0].close()
return _slurp(self.path)
def __repr__(self):
return 'ModuleInfo: py_src=%s, pkg_dir=%s, path=%s' % (self.py_src, self.pkg_dir, self.path)
class CollectionModuleInfo(ModuleInfo):
def __init__(self, name, paths):
self._mod_name = name
self.py_src = True
# FIXME: Implement pkg_dir so that we can place __init__.py files
self.pkg_dir = False
for path in paths:
self._package_name = '.'.join(path.split('/'))
try:
self.get_source()
except FileNotFoundError:
pass
else:
self.path = os.path.join(path, self._mod_name) + '.py'
break
else:
# FIXME (nitz): implement package fallback code
raise ImportError('unable to load collection-hosted module_util'
' {0}.{1}'.format(to_native(self._package_name),
to_native(name)))
def get_source(self):
# FIXME (nitz): need this in py2 for some reason TBD, but we shouldn't (get_data delegates
# to wrong loader without it)
pkg = import_module(self._package_name)
data = pkgutil.get_data(to_native(self._package_name), to_native(self._mod_name + '.py'))
return data
def recursive_finder(name, module_fqn, data, py_module_names, py_module_cache, zf):
"""
Using ModuleDepFinder, make sure we have all of the module_utils files that
the module and its module_utils files needs.
:arg name: Name of the python module we're examining
:arg module_fqn: Fully qualified name of the python module we're scanning
:arg py_module_names: set of the fully qualified module names represented as a tuple of their
FQN with __init__ appended if the module is also a python package). Presence of a FQN in
this set means that we've already examined it for module_util deps.
:arg py_module_cache: map python module names (represented as a tuple of their FQN with __init__
appended if the module is also a python package) to a tuple of the code in the module and
the pathname the module would have inside of a Python toplevel (like site-packages)
:arg zf: An open :python:class:`zipfile.ZipFile` object that holds the Ansible module payload
which we're assembling
"""
# Parse the module and find the imports of ansible.module_utils
try:
tree = ast.parse(data)
except (SyntaxError, IndentationError) as e:
raise AnsibleError("Unable to import %s due to %s" % (name, e.msg))
finder = ModuleDepFinder(module_fqn)
finder.visit(tree)
#
# Determine which of the imports we've found are modules (vs class, function,
# or variable names) for packages
#
module_utils_paths = [p for p in module_utils_loader._get_paths(subdirs=False) if os.path.isdir(p)]
# FIXME: Do we still need this? It feels like module_utils_loader should include
# _MODULE_UTILS_PATH
module_utils_paths.append(_MODULE_UTILS_PATH)
normalized_modules = set()
# Loop through the imports that we've found to normalize them
# Exclude paths that match with paths we've already processed
# (Have to exclude them a second time once the paths are processed)
for py_module_name in finder.submodules.difference(py_module_names):
module_info = None
if py_module_name[0:3] == ('ansible', 'module_utils', 'six'):
# Special case the python six library because it messes with the
# import process in an incompatible way
module_info = ModuleInfo('six', module_utils_paths)
py_module_name = ('ansible', 'module_utils', 'six')
idx = 0
elif py_module_name[0:3] == ('ansible', 'module_utils', '_six'):
# Special case the python six library because it messes with the
# import process in an incompatible way
module_info = ModuleInfo('_six', [os.path.join(p, 'six') for p in module_utils_paths])
py_module_name = ('ansible', 'module_utils', 'six', '_six')
idx = 0
elif py_module_name[0] == 'ansible_collections':
# FIXME (nitz): replicate module name resolution like below for granular imports
for idx in (1, 2):
if len(py_module_name) < idx:
break
try:
# this is a collection-hosted MU; look it up with pkgutil.get_data()
module_info = CollectionModuleInfo(py_module_name[-idx],
[os.path.join(*py_module_name[:-idx])])
break
except ImportError:
continue
elif py_module_name[0:2] == ('ansible', 'module_utils'):
# Need to remove ansible.module_utils because PluginLoader may find different paths
# for us to look in
relative_module_utils_dir = py_module_name[2:]
# Check whether either the last or the second to last identifier is
# a module name
for idx in (1, 2):
if len(relative_module_utils_dir) < idx:
break
try:
module_info = ModuleInfo(py_module_name[-idx],
[os.path.join(p, *relative_module_utils_dir[:-idx]) for p in module_utils_paths])
break
except ImportError:
continue
else:
# If we get here, it's because of a bug in ModuleDepFinder. If we get a reproducer we
# should then fix ModuleDepFinder
display.warning('ModuleDepFinder improperly found a non-module_utils import %s'
% [py_module_name])
continue
# Could not find the module. Construct a helpful error message.
if module_info is None:
msg = ['Could not find imported module support code for %s. Looked for' % (name,)]
if idx == 2:
msg.append('either %s.py or %s.py' % (py_module_name[-1], py_module_name[-2]))
else:
msg.append(py_module_name[-1])
raise AnsibleError(' '.join(msg))
if isinstance(module_info, CollectionModuleInfo):
if idx == 2:
# We've determined that the last portion was an identifier and
# thus, not part of the module name
py_module_name = py_module_name[:-1]
# HACK: maybe surface collection dirs in here and use existing find_module code?
normalized_name = py_module_name
normalized_data = module_info.get_source()
normalized_path = os.path.join(*py_module_name)
py_module_cache[normalized_name] = (normalized_data, normalized_path)
normalized_modules.add(normalized_name)
# HACK: walk back up the package hierarchy to pick up package inits; this won't do the right thing
# for actual packages yet...
accumulated_pkg_name = []
for pkg in py_module_name[:-1]:
accumulated_pkg_name.append(pkg) # we're accumulating this across iterations
normalized_name = tuple(accumulated_pkg_name[:] + ['__init__']) # extra machinations to get a hashable type (list is not)
if normalized_name not in py_module_cache:
normalized_path = os.path.join(*accumulated_pkg_name)
# HACK: possibly preserve some of the actual package file contents; problematic for extend_paths and others though?
normalized_data = ''
py_module_cache[normalized_name] = (normalized_data, normalized_path)
normalized_modules.add(normalized_name)
else:
# Found a byte compiled file rather than source. We cannot send byte
# compiled over the wire as the python version might be different.
# imp.find_module seems to prefer to return source packages so we just
# error out if imp.find_module returns byte compiled files (This is
# fragile as it depends on undocumented imp.find_module behaviour)
if not module_info.pkg_dir and not module_info.py_src:
msg = ['Could not find python source for imported module support code for %s. Looked for' % name]
if idx == 2:
msg.append('either %s.py or %s.py' % (py_module_name[-1], py_module_name[-2]))
else:
msg.append(py_module_name[-1])
raise AnsibleError(' '.join(msg))
if idx == 2:
# We've determined that the last portion was an identifier and
# thus, not part of the module name
py_module_name = py_module_name[:-1]
# If not already processed then we've got work to do
# If not in the cache, then read the file into the cache
# We already have a file handle for the module open so it makes
# sense to read it now
if py_module_name not in py_module_cache:
if module_info.pkg_dir:
# Read the __init__.py instead of the module file as this is
# a python package
normalized_name = py_module_name + ('__init__',)
if normalized_name not in py_module_names:
normalized_data = module_info.get_source()
py_module_cache[normalized_name] = (normalized_data, module_info.path)
normalized_modules.add(normalized_name)
else:
normalized_name = py_module_name
if normalized_name not in py_module_names:
normalized_data = module_info.get_source()
py_module_cache[normalized_name] = (normalized_data, module_info.path)
normalized_modules.add(normalized_name)
#
# Make sure that all the packages that this module is a part of
# are also added
#
for i in range(1, len(py_module_name)):
py_pkg_name = py_module_name[:-i] + ('__init__',)
if py_pkg_name not in py_module_names:
# Need to remove ansible.module_utils because PluginLoader may find
# different paths for us to look in
relative_module_utils = py_pkg_name[2:]
pkg_dir_info = ModuleInfo(relative_module_utils[-1],
[os.path.join(p, *relative_module_utils[:-1]) for p in module_utils_paths])
normalized_modules.add(py_pkg_name)
py_module_cache[py_pkg_name] = (pkg_dir_info.get_source(), pkg_dir_info.path)
# FIXME: Currently the AnsiBallZ wrapper monkeypatches module args into a global
# variable in basic.py. If a module doesn't import basic.py, then the AnsiBallZ wrapper will
# traceback when it tries to monkeypatch. So, for now, we have to unconditionally include
# basic.py.
#
# In the future we need to change the wrapper to monkeypatch the args into a global variable in
# their own, separate python module. That way we won't require basic.py. Modules which don't
# want basic.py can import that instead. AnsibleModule will need to change to import the vars
# from the separate python module and mirror the args into its global variable for backwards
# compatibility.
if ('ansible', 'module_utils', 'basic',) not in py_module_names:
pkg_dir_info = ModuleInfo('basic', module_utils_paths)
normalized_modules.add(('ansible', 'module_utils', 'basic',))
py_module_cache[('ansible', 'module_utils', 'basic',)] = (pkg_dir_info.get_source(), pkg_dir_info.path)
# End of AnsiballZ hack
#
# iterate through all of the ansible.module_utils* imports that we haven't
# already checked for new imports
#
# set of modules that we haven't added to the zipfile
unprocessed_py_module_names = normalized_modules.difference(py_module_names)
for py_module_name in unprocessed_py_module_names:
py_module_path = os.path.join(*py_module_name)
py_module_file_name = '%s.py' % py_module_path
zf.writestr(py_module_file_name, py_module_cache[py_module_name][0])
display.vvvvv("Using module_utils file %s" % py_module_cache[py_module_name][1])
# Add the names of the files we're scheduling to examine in the loop to
# py_module_names so that we don't re-examine them in the next pass
# through recursive_finder()
py_module_names.update(unprocessed_py_module_names)
for py_module_file in unprocessed_py_module_names:
next_fqn = '.'.join(py_module_file)
recursive_finder(py_module_file[-1], next_fqn, py_module_cache[py_module_file][0],
py_module_names, py_module_cache, zf)
# Save memory; the file won't have to be read again for this ansible module.
del py_module_cache[py_module_file]
def _is_binary(b_module_data):
textchars = bytearray(set([7, 8, 9, 10, 12, 13, 27]) | set(range(0x20, 0x100)) - set([0x7f]))
start = b_module_data[:1024]
return bool(start.translate(None, textchars))
def _get_ansible_module_fqn(module_path):
"""
Get the fully qualified name for an ansible module based on its pathname
remote_module_fqn is the fully qualified name, e.g. ansible.modules.system.ping
or ansible_collections.Namespace.Collection_name.plugins.modules.ping
.. warning:: This function is for ansible modules only. It won't work for other things
(non-module plugins, etc)
"""
remote_module_fqn = None
# Is this a core module?
match = CORE_LIBRARY_PATH_RE.search(module_path)
if not match:
# Is this a module in a collection?
match = COLLECTION_PATH_RE.search(module_path)
# We can tell the FQN for core modules and collection modules
if match:
path = match.group('path')
if '.' in path:
# FQNs must be valid as python identifiers. This sanity check has failed.
# we could check other things as well
raise ValueError('Module name (or path) was not a valid python identifier')
remote_module_fqn = '.'.join(path.split('/'))
else:
# Currently we do not handle modules in roles so we can end up here for that reason
raise ValueError("Unable to determine module's fully qualified name")
return remote_module_fqn
def _add_module_to_zip(zf, remote_module_fqn, b_module_data):
"""Add a module from ansible or from an ansible collection into the module zip"""
module_path_parts = remote_module_fqn.split('.')
# Write the module
module_path = '/'.join(module_path_parts) + '.py'
zf.writestr(module_path, b_module_data)
# Write the __init__.py's necessary to get there
if module_path_parts[0] == 'ansible':
# The ansible namespace is setup as part of the module_utils setup...
start = 2
existing_paths = frozenset()
else:
# ... but ansible_collections and other toplevels are not
start = 1
existing_paths = frozenset(zf.namelist())
for idx in range(start, len(module_path_parts)):
package_path = '/'.join(module_path_parts[:idx]) + '/__init__.py'
# If a collections module uses module_utils from a collection then most packages will have already been added by recursive_finder.
if package_path in existing_paths:
continue
# Note: We don't want to include more than one ansible module in a payload at this time
# so no need to fill the __init__.py with namespace code
zf.writestr(package_path, b'')
def _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression, async_timeout, become,
become_method, become_user, become_password, become_flags, environment):
"""
Given the source of the module, convert it to a Jinja2 template to insert
module code and return whether it's a new or old style module.
"""
module_substyle = module_style = 'old'
# module_style is something important to calling code (ActionBase). It
# determines how arguments are formatted (json vs k=v) and whether
# a separate arguments file needs to be sent over the wire.
# module_substyle is extra information that's useful internally. It tells
# us what we have to look to substitute in the module files and whether
# we're using module replacer or ansiballz to format the module itself.
if _is_binary(b_module_data):
module_substyle = module_style = 'binary'
elif REPLACER in b_module_data:
# Do REPLACER before from ansible.module_utils because we need to make sure
# we substitute "from ansible.module_utils.basic import *" for REPLACER
module_style = 'new'
module_substyle = 'python'
b_module_data = b_module_data.replace(REPLACER, b'from ansible.module_utils.basic import *')
elif NEW_STYLE_PYTHON_MODULE_RE.search(b_module_data):
module_style = 'new'
module_substyle = 'python'
elif REPLACER_WINDOWS in b_module_data:
module_style = 'new'
module_substyle = 'powershell'
b_module_data = b_module_data.replace(REPLACER_WINDOWS, b'#Requires -Module Ansible.ModuleUtils.Legacy')
elif re.search(b'#Requires -Module', b_module_data, re.IGNORECASE) \
or re.search(b'#Requires -Version', b_module_data, re.IGNORECASE)\
or re.search(b'#AnsibleRequires -OSVersion', b_module_data, re.IGNORECASE) \
or re.search(b'#AnsibleRequires -Powershell', b_module_data, re.IGNORECASE) \
or re.search(b'#AnsibleRequires -CSharpUtil', b_module_data, re.IGNORECASE):
module_style = 'new'
module_substyle = 'powershell'
elif REPLACER_JSONARGS in b_module_data:
module_style = 'new'
module_substyle = 'jsonargs'
elif b'WANT_JSON' in b_module_data:
module_substyle = module_style = 'non_native_want_json'
shebang = None
# Neither old-style, non_native_want_json nor binary modules should be modified
# except for the shebang line (Done by modify_module)
if module_style in ('old', 'non_native_want_json', 'binary'):
return b_module_data, module_style, shebang
output = BytesIO()
py_module_names = set()
if module_substyle == 'python':
params = dict(ANSIBLE_MODULE_ARGS=module_args,)
try:
python_repred_params = repr(json.dumps(params))
except TypeError as e:
raise AnsibleError("Unable to pass options to module, they must be JSON serializable: %s" % to_native(e))
try:
compression_method = getattr(zipfile, module_compression)
except AttributeError:
display.warning(u'Bad module compression string specified: %s. Using ZIP_STORED (no compression)' % module_compression)
compression_method = zipfile.ZIP_STORED
try:
remote_module_fqn = _get_ansible_module_fqn(module_path)
except ValueError:
# Modules in roles currently are not found by the fqn heuristic so we
# fall back to this. This means that relative imports inside a module from
# a role may fail. Absolute imports should be used for future-proofing.
# People should start writing collections instead of modules in roles so we
# may never fix this
display.debug('ANSIBALLZ: Could not determine module FQN')
remote_module_fqn = 'ansible.modules.%s' % module_name
lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ansiballz_cache')
cached_module_filename = os.path.join(lookup_path, "%s-%s" % (module_name, module_compression))
zipdata = None
# Optimization -- don't lock if the module has already been cached
if os.path.exists(cached_module_filename):
display.debug('ANSIBALLZ: using cached module: %s' % cached_module_filename)
with open(cached_module_filename, 'rb') as module_data:
zipdata = module_data.read()
else:
if module_name in action_write_locks.action_write_locks:
display.debug('ANSIBALLZ: Using lock for %s' % module_name)
lock = action_write_locks.action_write_locks[module_name]
else:
# If the action plugin directly invokes the module (instead of
# going through a strategy) then we don't have a cross-process
# Lock specifically for this module. Use the "unexpected
# module" lock instead
display.debug('ANSIBALLZ: Using generic lock for %s' % module_name)
lock = action_write_locks.action_write_locks[None]
display.debug('ANSIBALLZ: Acquiring lock')
with lock:
display.debug('ANSIBALLZ: Lock acquired: %s' % id(lock))
# Check that no other process has created this while we were
# waiting for the lock
if not os.path.exists(cached_module_filename):
display.debug('ANSIBALLZ: Creating module')
# Create the module zip data
zipoutput = BytesIO()
zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method)
# py_module_cache maps python module names to a tuple of the code in the module
# and the pathname to the module. See the recursive_finder() documentation for
# more info.
# Here we pre-load it with modules which we create without bothering to
# read from actual files (In some cases, these need to differ from what ansible
# ships because they're namespace packages in the module)
py_module_cache = {
('ansible', '__init__',): (
b'from pkgutil import extend_path\n'
b'__path__=extend_path(__path__,__name__)\n'
b'__version__="' + to_bytes(__version__) +
b'"\n__author__="' + to_bytes(__author__) + b'"\n',
'ansible/__init__.py'),
('ansible', 'module_utils', '__init__',): (
b'from pkgutil import extend_path\n'
b'__path__=extend_path(__path__,__name__)\n',
'ansible/module_utils/__init__.py')}
for (py_module_name, (file_data, filename)) in py_module_cache.items():
zf.writestr(filename, file_data)
# py_module_names keeps track of which modules we've already scanned for
# module_util dependencies
py_module_names.add(py_module_name)
# Returning the ast tree is a temporary hack. We need to know if the module has
# a main() function or not as we are deprecating new-style modules without
# main(). Because parsing the ast is expensive, return it from recursive_finder
# instead of reparsing. Once the deprecation is over and we remove that code,
# also remove returning of the ast tree.
recursive_finder(module_name, remote_module_fqn, b_module_data, py_module_names,
py_module_cache, zf)
display.debug('ANSIBALLZ: Writing module into payload')
_add_module_to_zip(zf, remote_module_fqn, b_module_data)
zf.close()
zipdata = base64.b64encode(zipoutput.getvalue())
# Write the assembled module to a temp file (write to temp
# so that no one looking for the file reads a partially
# written file)
if not os.path.exists(lookup_path):
# Note -- if we have a global function to setup, that would
# be a better place to run this
os.makedirs(lookup_path)
display.debug('ANSIBALLZ: Writing module')
with open(cached_module_filename + '-part', 'wb') as f:
f.write(zipdata)
# Rename the file into its final position in the cache so
# future users of this module can read it off the
# filesystem instead of constructing from scratch.
display.debug('ANSIBALLZ: Renaming module')
os.rename(cached_module_filename + '-part', cached_module_filename)
display.debug('ANSIBALLZ: Done creating module')
if zipdata is None:
display.debug('ANSIBALLZ: Reading module after lock')
# Another process wrote the file while we were waiting for
# the write lock. Go ahead and read the data from disk
# instead of re-creating it.
try:
with open(cached_module_filename, 'rb') as f:
zipdata = f.read()
except IOError:
raise AnsibleError('A different worker process failed to create module file. '
'Look at traceback for that process for debugging information.')
zipdata = to_text(zipdata, errors='surrogate_or_strict')
shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars, templar)
if shebang is None:
shebang = u'#!/usr/bin/python'
# FUTURE: the module cache entry should be invalidated if we got this value from a host-dependent source
rlimit_nofile = C.config.get_config_value('PYTHON_MODULE_RLIMIT_NOFILE', variables=task_vars)
if not isinstance(rlimit_nofile, int):
rlimit_nofile = int(templar.template(rlimit_nofile))
if rlimit_nofile:
rlimit = ANSIBALLZ_RLIMIT_TEMPLATE % dict(
rlimit_nofile=rlimit_nofile,
)
else:
rlimit = ''
coverage_config = os.environ.get('_ANSIBLE_COVERAGE_CONFIG')
if coverage_config:
coverage_output = os.environ['_ANSIBLE_COVERAGE_OUTPUT']
if coverage_output:
# Enable code coverage analysis of the module.
# This feature is for internal testing and may change without notice.
coverage = ANSIBALLZ_COVERAGE_TEMPLATE % dict(
coverage_config=coverage_config,
coverage_output=coverage_output,
)
else:
# Verify coverage is available without importing it.
# This will detect when a module would fail with coverage enabled with minimal overhead.
coverage = ANSIBALLZ_COVERAGE_CHECK_TEMPLATE
else:
coverage = ''
now = datetime.datetime.utcnow()
output.write(to_bytes(ACTIVE_ANSIBALLZ_TEMPLATE % dict(
zipdata=zipdata,
ansible_module=module_name,
module_fqn=remote_module_fqn,
params=python_repred_params,
shebang=shebang,
coding=ENCODING_STRING,
year=now.year,
month=now.month,
day=now.day,
hour=now.hour,
minute=now.minute,
second=now.second,
coverage=coverage,
rlimit=rlimit,
)))
b_module_data = output.getvalue()
elif module_substyle == 'powershell':
# Powershell/winrm don't actually make use of shebang so we can
# safely set this here. If we let the fallback code handle this
# it can fail in the presence of the UTF8 BOM commonly added by
# Windows text editors
shebang = u'#!powershell'
# create the common exec wrapper payload and set that as the module_data
# bytes
b_module_data = ps_manifest._create_powershell_wrapper(
b_module_data, module_path, module_args, environment,
async_timeout, become, become_method, become_user, become_password,
become_flags, module_substyle, task_vars
)
elif module_substyle == 'jsonargs':
module_args_json = to_bytes(json.dumps(module_args))
# these strings could be included in a third-party module but
# officially they were included in the 'basic' snippet for new-style
# python modules (which has been replaced with something else in
# ansiballz). If we remove them from the jsonargs-style module replacer
# then we can remove them everywhere.
python_repred_args = to_bytes(repr(module_args_json))
b_module_data = b_module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__)))
b_module_data = b_module_data.replace(REPLACER_COMPLEX, python_repred_args)
b_module_data = b_module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS)))
# The main event -- substitute the JSON args string into the module
b_module_data = b_module_data.replace(REPLACER_JSONARGS, module_args_json)
facility = b'syslog.' + to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='surrogate_or_strict')
b_module_data = b_module_data.replace(b'syslog.LOG_USER', facility)
return (b_module_data, module_style, shebang)
def modify_module(module_name, module_path, module_args, templar, task_vars=None, module_compression='ZIP_STORED', async_timeout=0, become=False,
become_method=None, become_user=None, become_password=None, become_flags=None, environment=None):
"""
Used to insert chunks of code into modules before transfer rather than
doing regular python imports. This allows for more efficient transfer in
a non-bootstrapping scenario by not moving extra files over the wire and
also takes care of embedding arguments in the transferred modules.
This version is done in such a way that local imports can still be
used in the module code, so IDEs don't have to be aware of what is going on.
Example:
from ansible.module_utils.basic import *
... will result in the insertion of basic.py into the module
from the module_utils/ directory in the source tree.
For powershell, this code effectively no-ops, as the exec wrapper requires access to a number of
properties not available here.
"""
task_vars = {} if task_vars is None else task_vars
environment = {} if environment is None else environment
with open(module_path, 'rb') as f:
# read in the module source
b_module_data = f.read()
(b_module_data, module_style, shebang) = _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression,
async_timeout=async_timeout, become=become, become_method=become_method,
become_user=become_user, become_password=become_password, become_flags=become_flags,
environment=environment)
if module_style == 'binary':
return (b_module_data, module_style, to_text(shebang, nonstring='passthru'))
elif shebang is None:
b_lines = b_module_data.split(b"\n", 1)
if b_lines[0].startswith(b"#!"):
b_shebang = b_lines[0].strip()
# shlex.split on python-2.6 needs bytes. On python-3.x it needs text
args = shlex.split(to_native(b_shebang[2:], errors='surrogate_or_strict'))
# _get_shebang() takes text strings
args = [to_text(a, errors='surrogate_or_strict') for a in args]
interpreter = args[0]
b_new_shebang = to_bytes(_get_shebang(interpreter, task_vars, templar, args[1:])[0],
errors='surrogate_or_strict', nonstring='passthru')
if b_new_shebang:
b_lines[0] = b_shebang = b_new_shebang
if os.path.basename(interpreter).startswith(u'python'):
b_lines.insert(1, b_ENCODING_STRING)
shebang = to_text(b_shebang, nonstring='passthru', errors='surrogate_or_strict')
else:
# No shebang, assume a binary module?
pass
b_module_data = b"\n".join(b_lines)
return (b_module_data, module_style, shebang)
def get_action_args_with_defaults(action, args, defaults, templar):
tmp_args = {}
module_defaults = {}
# Merge latest defaults into dict, since they are a list of dicts
if isinstance(defaults, list):
for default in defaults:
module_defaults.update(default)
# if I actually have defaults, template and merge
if module_defaults:
module_defaults = templar.template(module_defaults)
# deal with configured group defaults first
if action in C.config.module_defaults_groups:
for group in C.config.module_defaults_groups.get(action, []):
tmp_args.update((module_defaults.get('group/{0}'.format(group)) or {}).copy())
# handle specific action defaults
if action in module_defaults:
tmp_args.update(module_defaults[action].copy())
# direct args override all
tmp_args.update(args)
return tmp_args
|
ychen820/microblog
|
refs/heads/master
|
y/google-cloud-sdk/lib/googlecloudsdk/sql/tools/__init__.py
|
2
|
# Copyright 2013 Google Inc. All Rights Reserved.
"""The super-group for the sql CLI.
The fact that this is a directory with
an __init__.py in it makes it a command group. The methods written below will
all be called by calliope (though they are all optional).
"""
import argparse
import os
import re
from googlecloudapis.sqladmin import v1beta1 as sql_v1beta1
from googlecloudapis.sqladmin import v1beta3 as sql_v1beta3
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import config
from googlecloudsdk.core import properties
from googlecloudsdk.core import resolvers
from googlecloudsdk.core import resources as cloud_resources
from googlecloudsdk.core.credentials import store as c_store
from googlecloudsdk.sql import util as util
_ACTIVE_VERSIONS = [
'v1beta3',
'v1beta1',
]
@base.ReleaseTracks(base.ReleaseTrack.GA)
class SQL(base.Group):
"""Manage Cloud SQL databases."""
@staticmethod
def Args(parser):
parser.add_argument(
'--api-version', choices=_ACTIVE_VERSIONS, default='v1beta3',
help=argparse.SUPPRESS)
@exceptions.RaiseToolExceptionInsteadOf(c_store.Error)
def Filter(self, context, args):
"""Context() is a filter function that can update the context.
Args:
context: The current context.
args: The argparse namespace that was specified on the CLI or API.
Returns:
The updated context.
"""
cloud_resources.SetParamDefault(
api='sql', collection=None, param='project',
resolver=resolvers.FromProperty(properties.VALUES.core.project))
url = '/'.join([properties.VALUES.core.api_host.Get(), 'sql'])
http = self.Http()
context['sql_client-v1beta3'] = sql_v1beta3.SqladminV1beta3(
get_credentials=False, url='/'.join([url, 'v1beta3']), http=http)
context['sql_messages-v1beta3'] = sql_v1beta3
context['registry-v1beta3'] = cloud_resources.REGISTRY.CloneAndSwitchAPIs(
context['sql_client-v1beta3'])
context['sql_client-v1beta1'] = sql_v1beta1.SqladminV1beta1(
get_credentials=False, url='/'.join([url, 'v1beta1']), http=http)
context['sql_messages-v1beta1'] = sql_v1beta1
context['registry-v1beta1'] = cloud_resources.REGISTRY.CloneAndSwitchAPIs(
context['sql_client-v1beta1'])
context['sql_client'] = context['sql_client-'+args.api_version]
context['sql_messages'] = context['sql_messages-'+args.api_version]
context['registry'] = context['registry-'+args.api_version]
return context
|
Radium-Devices/Radium_taoshan
|
refs/heads/cm-12.1
|
tools/perf/scripts/python/sctop.py
|
11180
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
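# Example invocations (illustrative only):
#   perf script -s sctop.py          # all processes, refresh every 3 seconds
#   perf script -s sctop.py 1        # all processes, refresh every second
#   perf script -s sctop.py sshd 5   # only syscalls made by 'sshd', every 5 seconds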
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
getredash/redash
|
refs/heads/master
|
redash/handlers/embed.py
|
3
|
from flask import request
from .authentication import current_org
from flask_login import current_user, login_required
from redash import models
from redash.handlers import routes
from redash.handlers.base import get_object_or_404, org_scoped_rule, record_event
from redash.handlers.static import render_index
from redash.security import csp_allows_embeding
@routes.route(
org_scoped_rule("/embed/query/<query_id>/visualization/<visualization_id>"),
methods=["GET"],
)
@login_required
@csp_allows_embeding
def embed(query_id, visualization_id, org_slug=None):
record_event(
current_org,
current_user._get_current_object(),
{
"action": "view",
"object_id": visualization_id,
"object_type": "visualization",
"query_id": query_id,
"embed": True,
"referer": request.headers.get("Referer"),
},
)
return render_index()
@routes.route(org_scoped_rule("/public/dashboards/<token>"), methods=["GET"])
@login_required
@csp_allows_embeding
def public_dashboard(token, org_slug=None):
if current_user.is_api_user():
dashboard = current_user.object
else:
api_key = get_object_or_404(models.ApiKey.get_by_api_key, token)
dashboard = api_key.object
record_event(
current_org,
current_user,
{
"action": "view",
"object_id": dashboard.id,
"object_type": "dashboard",
"public": True,
"headless": "embed" in request.args,
"referer": request.headers.get("Referer"),
},
)
return render_index()
|
Ant-OS/android_packages_apps_OTAUpdates
|
refs/heads/master
|
jni/boost_1_57_0/tools/build/test/load_dir.py
|
64
|
#!/usr/bin/python
"""
Traverses a directory and outputs the code that would create the same directory
structure during testing. Assumes that the instance of Tester is called 't'.
"""
import sys
import os
import stat
import string
def usage():
print "usage: load_dir.py directory"
def remove_first_component(path):
result = [path]
while 1:
s = os.path.split(result[0])
if not s[0]:
break
result[:1] = list(s)
return apply(os.path.join, result[1:])
def create_file(arg, dirname, fnames):
for n in fnames:
path = os.path.join(dirname, n)
if not os.path.isdir(path):
print "t.write(\"%s\", \"\"\"" % (remove_first_component(path),),
f = open(path, "r")
for l in f:
print l,
print '\n""")\n'
header = """#!/usr/bin/python
# Copyright (C) FILL SOMETHING HERE 2005.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
t = BoostBuild.Tester()
"""
footer = """
t.run_build_system()
t.expect_addition("bin/$toolset/debug/FILL_SOME_HERE.exe")
t.cleanup()
"""
def main():
if len(sys.argv) != 2:
usage()
else:
path = sys.argv[1]
if not os.access(path, os.F_OK):
print "Path '%s' does not exist" % (path,)
sys.exit(1)
if not os.path.isdir(path):
print "Path '%s' is not a directory" % (path,)
print header
os.path.walk(path, create_file, None)
print footer
if __name__ == '__main__':
main()
|
asedunov/intellij-community
|
refs/heads/master
|
python/testData/completion/moduleDotPy/a.py
|
83
|
from shazam import *
xy<caret>
|
ironman771/xbmc
|
refs/heads/master
|
lib/gtest/scripts/pump.py
|
2471
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.2.0 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
various number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
TOKEN_TABLE = [
(re.compile(r'\$var\s+'), '$var'),
(re.compile(r'\$elif\s+'), '$elif'),
(re.compile(r'\$else\s+'), '$else'),
(re.compile(r'\$for\s+'), '$for'),
(re.compile(r'\$if\s+'), '$if'),
(re.compile(r'\$range\s+'), '$range'),
(re.compile(r'\$[_A-Za-z]\w*'), '$id'),
(re.compile(r'\$\(\$\)'), '$($)'),
(re.compile(r'\$'), '$'),
(re.compile(r'\[\[\n?'), '[['),
(re.compile(r'\]\]\n?'), ']]'),
]
class Cursor:
"""Represents a position (line and column) in a text file."""
def __init__(self, line=-1, column=-1):
self.line = line
self.column = column
def __eq__(self, rhs):
return self.line == rhs.line and self.column == rhs.column
def __ne__(self, rhs):
return not self == rhs
def __lt__(self, rhs):
return self.line < rhs.line or (
self.line == rhs.line and self.column < rhs.column)
def __le__(self, rhs):
return self < rhs or self == rhs
def __gt__(self, rhs):
return rhs < self
def __ge__(self, rhs):
return rhs <= self
def __str__(self):
if self == Eof():
return 'EOF'
else:
return '%s(%s)' % (self.line + 1, self.column)
def __add__(self, offset):
return Cursor(self.line, self.column + offset)
def __sub__(self, offset):
return Cursor(self.line, self.column - offset)
def Clone(self):
"""Returns a copy of self."""
return Cursor(self.line, self.column)
# Special cursor to indicate the end-of-file.
def Eof():
"""Returns the special cursor to denote the end-of-file."""
return Cursor(-1, -1)
class Token:
"""Represents a token in a Pump source file."""
def __init__(self, start=None, end=None, value=None, token_type=None):
if start is None:
self.start = Eof()
else:
self.start = start
if end is None:
self.end = Eof()
else:
self.end = end
self.value = value
self.token_type = token_type
def __str__(self):
return 'Token @%s: \'%s\' type=%s' % (
self.start, self.value, self.token_type)
def Clone(self):
"""Returns a copy of self."""
return Token(self.start.Clone(), self.end.Clone(), self.value,
self.token_type)
def StartsWith(lines, pos, string):
"""Returns True iff the given position in lines starts with 'string'."""
return lines[pos.line][pos.column:].startswith(string)
def FindFirstInLine(line, token_table):
best_match_start = -1
for (regex, token_type) in token_table:
m = regex.search(line)
if m:
# We found regex in lines
if best_match_start < 0 or m.start() < best_match_start:
best_match_start = m.start()
best_match_length = m.end() - m.start()
best_match_token_type = token_type
if best_match_start < 0:
return None
return (best_match_start, best_match_length, best_match_token_type)
def FindFirst(lines, token_table, cursor):
"""Finds the first occurrence of any string in strings in lines."""
start = cursor.Clone()
cur_line_number = cursor.line
for line in lines[start.line:]:
if cur_line_number == start.line:
line = line[start.column:]
m = FindFirstInLine(line, token_table)
if m:
# We found a regex in line.
(start_column, length, token_type) = m
if cur_line_number == start.line:
start_column += start.column
found_start = Cursor(cur_line_number, start_column)
found_end = found_start + length
return MakeToken(lines, found_start, found_end, token_type)
cur_line_number += 1
# We failed to find str in lines
return None
def SubString(lines, start, end):
"""Returns a substring in lines."""
if end == Eof():
end = Cursor(len(lines) - 1, len(lines[-1]))
if start >= end:
return ''
if start.line == end.line:
return lines[start.line][start.column:end.column]
result_lines = ([lines[start.line][start.column:]] +
lines[start.line + 1:end.line] +
[lines[end.line][:end.column]])
return ''.join(result_lines)
def StripMetaComments(str):
"""Strip meta comments from each line in the given string."""
# First, completely remove lines containing nothing but a meta
# comment, including the trailing \n.
str = re.sub(r'^\s*\$\$.*\n', '', str)
# Then, remove meta comments from contentful lines.
return re.sub(r'\s*\$\$.*', '', str)
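# For instance (assumed input), StripMetaComments('$$ header comment\nx = 1 $$ note\n')
# returns 'x = 1\n': the leading meta-comment line disappears entirely and the
# trailing $$ comment is stripped from the contentful line.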
def MakeToken(lines, start, end, token_type):
"""Creates a new instance of Token."""
return Token(start, end, SubString(lines, start, end), token_type)
def ParseToken(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = regex.search(line)
if m and not m.start():
return MakeToken(lines, pos, pos + m.end(), token_type)
else:
print 'ERROR: %s expected at %s.' % (token_type, pos)
sys.exit(1)
ID_REGEX = re.compile(r'[_A-Za-z]\w*')
EQ_REGEX = re.compile(r'=')
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')
def Skip(lines, pos, regex):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m and not m.start():
return pos + m.end()
else:
return pos
def SkipUntil(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m:
return pos + m.start()
else:
print ('ERROR: %s expected on line %s after column %s.' %
(token_type, pos.line + 1, pos.column))
sys.exit(1)
def ParseExpTokenInParens(lines, pos):
def ParseInParens(pos):
pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
pos = Skip(lines, pos, r'\(')
pos = Parse(pos)
pos = Skip(lines, pos, r'\)')
return pos
def Parse(pos):
pos = SkipUntil(lines, pos, r'\(|\)', ')')
if SubString(lines, pos, pos + 1) == '(':
pos = Parse(pos + 1)
pos = Skip(lines, pos, r'\)')
return Parse(pos)
else:
return pos
start = pos.Clone()
pos = ParseInParens(pos)
return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
if token.value.endswith('\n'):
return Token(token.start, token.end, token.value[:-1], token.token_type)
else:
return token
def TokenizeLines(lines, pos):
while True:
found = FindFirst(lines, TOKEN_TABLE, pos)
if not found:
yield MakeToken(lines, pos, Eof(), 'code')
return
if found.start == pos:
prev_token = None
prev_token_rstripped = None
else:
prev_token = MakeToken(lines, pos, found.start, 'code')
prev_token_rstripped = RStripNewLineFromToken(prev_token)
if found.token_type == '$var':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
yield eq_token
pos = Skip(lines, eq_token.end, r'\s*')
if SubString(lines, pos, pos + 2) != '[[':
exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
yield exp_token
pos = Cursor(exp_token.end.line + 1, 0)
elif found.token_type == '$for':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
elif found.token_type == '$range':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
yield MakeToken(lines, pos, dots_pos, 'exp')
yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
pos = dots_pos + 2
new_pos = Cursor(pos.line + 1, 0)
yield MakeToken(lines, pos, new_pos, 'exp')
pos = new_pos
elif found.token_type == '$':
if prev_token:
yield prev_token
yield found
exp_token = ParseExpTokenInParens(lines, found.end)
yield exp_token
pos = exp_token.end
elif (found.token_type == ']]' or found.token_type == '$if' or
found.token_type == '$elif' or found.token_type == '$else'):
if prev_token_rstripped:
yield prev_token_rstripped
yield found
pos = found.end
else:
if prev_token:
yield prev_token
yield found
pos = found.end
def Tokenize(s):
"""A generator that yields the tokens in the given string."""
if s != '':
lines = s.splitlines(True)
for token in TokenizeLines(lines, Cursor(0, 0)):
yield token
class CodeNode:
def __init__(self, atomic_code_list=None):
self.atomic_code = atomic_code_list
class VarNode:
def __init__(self, identifier=None, atomic_code=None):
self.identifier = identifier
self.atomic_code = atomic_code
class RangeNode:
def __init__(self, identifier=None, exp1=None, exp2=None):
self.identifier = identifier
self.exp1 = exp1
self.exp2 = exp2
class ForNode:
def __init__(self, identifier=None, sep=None, code=None):
self.identifier = identifier
self.sep = sep
self.code = code
class ElseNode:
def __init__(self, else_branch=None):
self.else_branch = else_branch
class IfNode:
def __init__(self, exp=None, then_branch=None, else_branch=None):
self.exp = exp
self.then_branch = then_branch
self.else_branch = else_branch
class RawCodeNode:
def __init__(self, token=None):
self.raw_code = token
class LiteralDollarNode:
def __init__(self, token):
self.token = token
class ExpNode:
def __init__(self, token, python_exp):
self.token = token
self.python_exp = python_exp
def PopFront(a_list):
head = a_list[0]
a_list[:1] = []
return head
def PushFront(a_list, elem):
a_list[:0] = [elem]
def PopToken(a_list, token_type=None):
token = PopFront(a_list)
if token_type is not None and token.token_type != token_type:
print 'ERROR: %s expected at %s' % (token_type, token.start)
print 'ERROR: %s found instead' % (token,)
sys.exit(1)
return token
def PeekToken(a_list):
if not a_list:
return None
return a_list[0]
def ParseExpNode(token):
python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
return ExpNode(token, python_exp)
def ParseElseNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
next = PeekToken(tokens)
if not next:
return None
if next.token_type == '$else':
Pop('$else')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
elif next.token_type == '$elif':
Pop('$elif')
exp = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
inner_else_node = ParseElseNode(tokens)
return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
elif not next.value.strip():
Pop('code')
return ParseElseNode(tokens)
else:
return None
def ParseAtomicCodeNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
head = PopFront(tokens)
t = head.token_type
if t == 'code':
return RawCodeNode(head)
elif t == '$var':
id_token = Pop('id')
Pop('=')
next = PeekToken(tokens)
if next.token_type == 'exp':
exp_token = Pop()
return VarNode(id_token, ParseExpNode(exp_token))
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return VarNode(id_token, code_node)
elif t == '$for':
id_token = Pop('id')
next_token = PeekToken(tokens)
if next_token.token_type == 'code':
sep_token = next_token
Pop('code')
else:
sep_token = None
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return ForNode(id_token, sep_token, code_node)
elif t == '$if':
exp_token = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
else_node = ParseElseNode(tokens)
return IfNode(ParseExpNode(exp_token), code_node, else_node)
elif t == '$range':
id_token = Pop('id')
exp1_token = Pop('exp')
Pop('..')
exp2_token = Pop('exp')
return RangeNode(id_token, ParseExpNode(exp1_token),
ParseExpNode(exp2_token))
elif t == '$id':
return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
elif t == '$($)':
return LiteralDollarNode(head)
elif t == '$':
exp_token = Pop('exp')
return ParseExpNode(exp_token)
elif t == '[[':
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
else:
PushFront(tokens, head)
return None
def ParseCodeNode(tokens):
atomic_code_list = []
while True:
if not tokens:
break
atomic_code_node = ParseAtomicCodeNode(tokens)
if atomic_code_node:
atomic_code_list.append(atomic_code_node)
else:
break
return CodeNode(atomic_code_list)
def ParseToAST(pump_src_text):
"""Convert the given Pump source text into an AST."""
tokens = list(Tokenize(pump_src_text))
code_node = ParseCodeNode(tokens)
return code_node
class Env:
def __init__(self):
self.variables = []
self.ranges = []
def Clone(self):
clone = Env()
clone.variables = self.variables[:]
clone.ranges = self.ranges[:]
return clone
def PushVariable(self, var, value):
# If value looks like an int, store it as an int.
try:
int_value = int(value)
if ('%s' % int_value) == value:
value = int_value
except Exception:
pass
self.variables[:0] = [(var, value)]
def PopVariable(self):
self.variables[:1] = []
def PushRange(self, var, lower, upper):
self.ranges[:0] = [(var, lower, upper)]
def PopRange(self):
self.ranges[:1] = []
def GetValue(self, identifier):
for (var, value) in self.variables:
if identifier == var:
return value
print 'ERROR: meta variable %s is undefined.' % (identifier,)
sys.exit(1)
def EvalExp(self, exp):
try:
result = eval(exp.python_exp)
except Exception, e:
print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
print ('ERROR: failed to evaluate meta expression %s at %s' %
(exp.python_exp, exp.token.start))
sys.exit(1)
return result
def GetRange(self, identifier):
for (var, lower, upper) in self.ranges:
if identifier == var:
return (lower, upper)
print 'ERROR: range %s is undefined.' % (identifier,)
sys.exit(1)
class Output:
def __init__(self):
self.string = ''
def GetLastLine(self):
index = self.string.rfind('\n')
if index < 0:
return ''
return self.string[index + 1:]
def Append(self, s):
self.string += s
def RunAtomicCode(env, node, output):
if isinstance(node, VarNode):
identifier = node.identifier.value.strip()
result = Output()
RunAtomicCode(env.Clone(), node.atomic_code, result)
value = result.string
env.PushVariable(identifier, value)
elif isinstance(node, RangeNode):
identifier = node.identifier.value.strip()
lower = int(env.EvalExp(node.exp1))
upper = int(env.EvalExp(node.exp2))
env.PushRange(identifier, lower, upper)
elif isinstance(node, ForNode):
identifier = node.identifier.value.strip()
if node.sep is None:
sep = ''
else:
sep = node.sep.value
(lower, upper) = env.GetRange(identifier)
for i in range(lower, upper + 1):
new_env = env.Clone()
new_env.PushVariable(identifier, i)
RunCode(new_env, node.code, output)
if i != upper:
output.Append(sep)
elif isinstance(node, RawCodeNode):
output.Append(node.raw_code.value)
elif isinstance(node, IfNode):
cond = env.EvalExp(node.exp)
if cond:
RunCode(env.Clone(), node.then_branch, output)
elif node.else_branch is not None:
RunCode(env.Clone(), node.else_branch, output)
elif isinstance(node, ExpNode):
value = env.EvalExp(node)
output.Append('%s' % (value,))
elif isinstance(node, LiteralDollarNode):
output.Append('$')
elif isinstance(node, CodeNode):
RunCode(env.Clone(), node, output)
else:
print 'BAD'
print node
sys.exit(1)
def RunCode(env, code_node, output):
for atomic_code in code_node.atomic_code:
RunAtomicCode(env, atomic_code, output)
def IsSingleLineComment(cur_line):
return '//' in cur_line
def IsInPreprocessorDirective(prev_lines, cur_line):
if cur_line.lstrip().startswith('#'):
return True
return prev_lines and prev_lines[-1].endswith('\\')
def WrapComment(line, output):
loc = line.find('//')
before_comment = line[:loc].rstrip()
if before_comment == '':
indent = loc
else:
output.append(before_comment)
indent = len(before_comment) - len(before_comment.lstrip())
prefix = indent*' ' + '// '
max_len = 80 - len(prefix)
comment = line[loc + 2:].strip()
segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
cur_line = ''
for seg in segs:
if len((cur_line + seg).rstrip()) < max_len:
cur_line += seg
else:
if cur_line.strip() != '':
output.append(prefix + cur_line.rstrip())
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapCode(line, line_concat, output):
indent = len(line) - len(line.lstrip())
prefix = indent*' ' # Prefix of the current line
max_len = 80 - indent - len(line_concat) # Maximum length of the current line
new_prefix = prefix + 4*' ' # Prefix of a continuation line
new_max_len = max_len - 4 # Maximum length of a continuation line
# Prefers to wrap a line after a ',' or ';'.
segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
cur_line = '' # The current line without leading spaces.
for seg in segs:
# If the line is still too long, wrap at a space.
while cur_line == '' and len(seg.strip()) > max_len:
seg = seg.lstrip()
split_at = seg.rfind(' ', 0, max_len)
output.append(prefix + seg[:split_at].strip() + line_concat)
seg = seg[split_at + 1:]
prefix = new_prefix
max_len = new_max_len
if len((cur_line + seg).rstrip()) < max_len:
cur_line = (cur_line + seg).lstrip()
else:
output.append(prefix + cur_line.rstrip() + line_concat)
prefix = new_prefix
max_len = new_max_len
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapPreprocessorDirective(line, output):
WrapCode(line, ' \\', output)
def WrapPlainCode(line, output):
WrapCode(line, '', output)
def IsMultiLineIWYUPragma(line):
return re.search(r'/\* IWYU pragma: ', line)
def IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
return (re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line) or
re.match(r'^#include\s', line) or
# Don't break IWYU pragmas, either; that causes iwyu.py problems.
re.search(r'// IWYU pragma: ', line))
def WrapLongLine(line, output):
line = line.rstrip()
if len(line) <= 80:
output.append(line)
elif IsSingleLineComment(line):
if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
# The style guide made an exception to allow long header guard lines,
# includes and IWYU pragmas.
output.append(line)
else:
WrapComment(line, output)
elif IsInPreprocessorDirective(output, line):
if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
# The style guide made an exception to allow long header guard lines,
# includes and IWYU pragmas.
output.append(line)
else:
WrapPreprocessorDirective(line, output)
elif IsMultiLineIWYUPragma(line):
output.append(line)
else:
WrapPlainCode(line, output)
def BeautifyCode(string):
lines = string.splitlines()
output = []
for line in lines:
WrapLongLine(line, output)
output2 = [line.rstrip() for line in output]
return '\n'.join(output2) + '\n'
def ConvertFromPumpSource(src_text):
"""Return the text generated from the given Pump source text."""
ast = ParseToAST(StripMetaComments(src_text))
output = Output()
RunCode(Env(), ast, output)
return BeautifyCode(output.string)
def main(argv):
if len(argv) == 1:
print __doc__
sys.exit(1)
file_path = argv[-1]
output_str = ConvertFromPumpSource(file(file_path, 'r').read())
if file_path.endswith('.pump'):
output_file_path = file_path[:-5]
else:
output_file_path = '-'
if output_file_path == '-':
print output_str,
else:
output_file = file(output_file_path, 'w')
output_file.write('// This file was GENERATED by command:\n')
output_file.write('// %s %s\n' %
(os.path.basename(__file__), os.path.basename(file_path)))
output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
output_file.write(output_str)
output_file.close()
if __name__ == '__main__':
main(sys.argv)
|
knowsis/django
|
refs/heads/nonrel-1.6
|
django/contrib/messages/__init__.py
|
311
|
from __future__ import absolute_import
from django.contrib.messages.api import *
from django.contrib.messages.constants import *
|
SlashDK/OpenCV-simplestuff
|
refs/heads/master
|
vendors/google.py
|
1
|
import base64
import json
import requests
def _convert_image_to_base64(image_filename):
with open(image_filename, 'rb') as image_file:
encoded_string = base64.b64encode(image_file.read()).decode()
return encoded_string
def call_vision_api(image_filename, api_keys):
api_key = api_keys['google']
post_url = "https://vision.googleapis.com/v1/images:annotate?key=" + api_key
base64_image = _convert_image_to_base64(image_filename)
post_payload = {
"requests": [
{
"image": {
"content" : base64_image
},
"features": [
{
"type": "LABEL_DETECTION",
"maxResults": 10
},
{
"type": "FACE_DETECTION",
"maxResults": 10
},
{
"type": "LANDMARK_DETECTION",
"maxResults": 10
},
{
"type": "LOGO_DETECTION",
"maxResults": 10
},
{
"type": "SAFE_SEARCH_DETECTION",
"maxResults": 10
},
]
}
]
}
result = requests.post(post_url, json=post_payload)
result.raise_for_status()
return result.text
# See this function in microsoft.py for docs.
def get_standardized_result(api_result):
output = {
'tags' : [],
}
api_result = api_result['responses'][0]
if 'labelAnnotations' in api_result:
for tag in api_result['labelAnnotations']:
output['tags'].append((tag['description'], tag['score']))
else:
output['tags'].append(('none found', None))
if 'logoAnnotations' in api_result:
output['logo_tags'] = []
for annotation in api_result['logoAnnotations']:
output['logo_tags'].append((annotation['description'], annotation['score']))
return output
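# Illustrative shape of the standardized result (the labels and scores are assumed):
#   {'tags': [('dog', 0.98), ('mammal', 0.93)],
#    'logo_tags': [('ACME', 0.71)]}
# 'logo_tags' is only present when the Vision response contains logoAnnotations.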
|
izonder/intellij-community
|
refs/heads/master
|
python/testData/MockSdk3.4/Lib/collections/abc.py
|
274
|
from _collections_abc import *
from _collections_abc import __all__
|
YJango/tensorflow
|
refs/heads/master
|
Py_version/FNNs_Demo/demoLV2.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/6/15 13:17
# @Author : zzy824
# @File : demoLV2.py
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
tf.set_random_seed(55)
np.random.seed(55)
""" add some branched based on demoLV1 """
class FNN(object):
"""Build a general FeedForward neural network
Parameters
----------
learning_rate: float
drop_keep: float
Layers: list
The number of layers
N_hidden: list
The number of nodes in layers
D_input: int
Input dimension
D_label: int
Label dimension
Task_type: string
'regression' or 'classification'
L2_lambda: float
First_Author : YJango; 2016/11/25
Second_Author: zzy824;2018/6/15
"""
def __init__(self, learning_rate, drop_keep, Layers, N_hidden,
D_input, D_label, Task_type='regression', L2_lambda=0.0):
# attributes shared across the whole network
self.learning_rate = learning_rate
self.drop_keep = drop_keep
self.Layers = Layers
self.N_hidden = N_hidden
self.D_input = D_input
self.D_label = D_label
# loss function is controlled by Task_type
self.Task_type = Task_type
# L2 regularization strength
self.L2_lambda = L2_lambda
# store L2 regularization for each layer
self.l2_penalty = tf.constant(0.0)
# hid_layers for storing output of all hidden layers
self.hid_layers = []
# W for storing weights of all layers
self.W = []
# b for storing biases of all layers
self.b = []
# total_l2 for storing L2 of all layers
self.total_l2 = []
        # these parameters will be defined in the "build" method
self.train_step = None
self.output = None
self.loss = None
self.accuracy = None
self.total_loss = None
# for generating figures of tensorflow
with tf.name_scope('Input'):
self.inputs = tf.placeholder(tf.float32, [None, D_input], name="inputs")
with tf.name_scope('Label'):
self.labels = tf.placeholder(tf.float32, [None, D_label], name='labels')
with tf.name_scope('keep_rate'):
self.drop_keep_rate = tf.placeholder(tf.float32, name='dropout_keep')
# generate when initialize
self.build('F')
@staticmethod
def weight_init(shape):
"""Initialize weight of neural network and initialization could be changed here
Args:
shape: list [in_dim, out_dim]
Returns:
            a Variable initialized by random_uniform
"""
initial = tf.random_uniform(shape, minval=-np.sqrt(5) * np.sqrt(1.0 / shape[0]),
maxval=np.sqrt(5) * np.sqrt(1.0 / shape[0]))
return tf.Variable(initial)
@staticmethod
def bias_init(shape):
"""Initialize weight of neural network and initialization could be changed here
Args:
shape: list [in_dim, out_dim]
Returns:
            a Variable initialized by a constant
"""
# can change initialization here
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
@staticmethod
def variable_summaries(var, name):
"""For recording data in training process
Args:
var: numbers for calculating
name: names for name_scope
"""
        # record summary statistics: mean, stddev, min, max and a histogram
with tf.name_scope(name + '_summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean_' + name, mean)
with tf.name_scope(name + '_stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
            # record how the value changes after each training step
tf.summary.scalar('_stddev_' + name, stddev)
tf.summary.scalar('_max_' + name, tf.reduce_max(var))
tf.summary.scalar('_min_' + name, tf.reduce_min(var))
tf.summary.histogram(name=name, values=var)
def layer(self, in_tensor, in_dim, out_dim, layer_name, act=tf.nn.relu):
""" a Fuction for establishing each neural layer
Args:
:param in_tensor:
:param in_dim:
:param out_dim:
:param layer_name:
:param act:
:return:
"""
with tf.name_scope(layer_name):
with tf.name_scope(layer_name+'_weights'):
# initialize weight with weight_init()
weights = self.weight_init([in_dim, out_dim])
                # self.W must be declared before this function is used
self.W.append(weights)
# count weight
self.variable_summaries(weights, layer_name + '_weights')
            with tf.name_scope(layer_name + '_biases'):
biases = self.bias_init([out_dim])
                # self.b must be declared before this function is used
self.b.append(biases)
self.variable_summaries(biases, layer_name + '_biases')
with tf.name_scope(layer_name + '_Wx_plus_b'):
# calculate Wx+b
pre_activate = tf.matmul(in_tensor, weights) + biases
# count histogram
tf.summary.histogram(layer_name + '_pre_activations', pre_activate)
# calculate a(Wx+b)
activations = act(pre_activate, name='activation')
tf.summary.histogram(layer_name + '_activations', activations)
# return with output of this layer and L2_loss of weight
return activations, tf.nn.l2_loss(weights)
def drop_layer(self, in_tensor):
""" dropout layer of nerual network
:param in_tensor:
:return:
"""
# tf.scalar_summary('dropout_keep', self.drop_keep_rate)
dropped = tf.nn.dropout(in_tensor, self.drop_keep_rate)
return dropped
def build(self, prefix):
# build network
# incoming represent the position of current tensor
incoming = self.inputs
        # if there are hidden layers
if self.Layers != 0:
layer_nodes = [self.D_input] + self.N_hidden
else:
layer_nodes = [self.D_input]
# build hidden layers
for l in range(self.Layers):
# build layers through self.layers and refresh the position of incoming
incoming, l2_loss = self.layer(incoming, layer_nodes[l], layer_nodes[l + 1], prefix + '_hid_' + str(l + 1),
act=tf.nn.relu)
# count l2
self.total_l2.append(l2_loss)
            # print what was added to the neural network
print('Add dense layer: relu with drop_keep:%s' % self.drop_keep)
print(' %sD --> %sD' % (layer_nodes[l], layer_nodes[l + 1]))
# store outputs of hidden layer
self.hid_layers.append(incoming)
# add dropout layer
incoming = self.drop_layer(incoming)
# build output layer as activation functions usually change with specific tasks:
# if the task is regression then we will use tf.identity rather than activation function
if self.Task_type == 'regression':
out_act = tf.identity
else:
# if the task is classification then we will use softmax to fitting probability
out_act = tf.nn.softmax
self.output, l2_loss = self.layer(incoming, layer_nodes[-1], self.D_label, layer_name='output', act=out_act)
print('Add output layer: linear')
print(' %sD --> %sD' % (layer_nodes[-1], self.D_label))
        # aggregate the per-layer L2 penalties
with tf.name_scope('total_l2'):
for l2 in self.total_l2:
self.l2_penalty += l2
tf.summary.scalar('l2_penalty', self.l2_penalty)
        # loss terms for the different task types:
        # if the task type is regression, the loss measures the squared difference
        # between predictions and actual values
if self.Task_type == 'regression':
with tf.name_scope('SSE'):
self.loss = tf.reduce_mean((self.output - self.labels) ** 2)
tf.summary.scalar('loss', self.loss)
else:
            # if the task type is classification, the loss function is cross entropy
entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.output, labels=self.labels)
with tf.name_scope('cross_entropy'):
self.loss = tf.reduce_mean(entropy)
                tf.summary.scalar('loss', self.loss)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(self.output, 1), tf.argmax(self.labels, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
                tf.summary.scalar('accuracy', self.accuracy)
# aggregate all losses
with tf.name_scope('total_loss'):
self.total_loss = self.loss + self.l2_penalty * self.L2_lambda
tf.summary.scalar('total_loss', self.total_loss)
# operation of training
with tf.name_scope('train'):
self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.total_loss)
# shuffle function
@staticmethod
def shufflelists(lists):
ri = np.random.permutation(len(lists[1]))
out = []
for l in lists:
out.append(l[ri])
return out
# prepare some data for training "XOR"
inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
outputs = [0, 1, 1, 0]
X = np.array(inputs).reshape((4, 1, 2)).astype('int16')
Y = np.array(outputs).reshape((4, 1, 1)).astype('int16')
# generate instance of neural network
ff = FNN(learning_rate=1e-3,
drop_keep=1.0,
Layers=1,
N_hidden=[2],
D_input=2,
D_label=1,
Task_type='regression',
L2_lambda=1e-2)
# loading
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter('log' + '/train', sess.graph)
# print weights before training
W0 = sess.run(ff.W[0])
W1 = sess.run(ff.W[1])
print('W_0:\n%s' % sess.run(ff.W[0]))
print('W_1:\n%s' % sess.run(ff.W[1]))
plt.scatter([1, 1, 5], [1, 3, 2], color=['red', 'red', 'blue'], s=200, alpha=0.4, marker='o')
plt.scatter([3, 3], [1, 3], color=['green', 'green'], s=200, alpha=0.4, marker='o')
plt.plot([1, 3], [1, 1], color='orange', linewidth=abs(W0[0, 0]))
plt.annotate('%0.2f' % W0[0, 0], xy=(2, 1.0))
plt.plot([1, 3], [3, 1], color='blue', linewidth=abs(W0[1, 0]))
plt.annotate('%0.2f' % W0[1, 0], xy=(1.5, 1.5))
plt.plot([1, 3], [1, 3], color='blue', linewidth=abs(W0[0, 1]))
plt.annotate('%0.2f' % W0[0, 1], xy=(1.5, 2.5))
plt.plot([1, 3], [3, 3], color='orange', linewidth=abs(W0[1, 1]))
plt.annotate('%0.2f' % W0[1, 1], xy=(2, 3))
plt.plot([3, 5], [1, 2], color='blue', linewidth=abs(W1[0]))
plt.annotate('%0.2f' % W1[0], xy=(4, 1.5))
plt.plot([3, 5], [3, 2], color='blue', linewidth=abs(W1[1]))
plt.annotate('%0.2f' % W1[1], xy=(4, 2.5))
# output before training
pY = sess.run(ff.output, feed_dict={ff.inputs: X.reshape((4, 2)), ff.drop_keep_rate: 1.0})
print(pY)
plt.scatter([0, 1, 2, 3], pY, color=['red', 'green', 'blue', 'black'], s=25, alpha=0.4, marker='o')
# hidden layer's output before training
pY = sess.run(ff.hid_layers[0], feed_dict={ff.inputs:X.reshape((4,2)),ff.drop_keep_rate:1.0})
print(pY)
plt.scatter(pY[:, 0], pY[:, 1], color=['red', 'green', 'blue', 'black'], s=25, alpha=0.4, marker='o')  # plot the two hidden-unit activations
# training section and record
k = 0.0
for i in range(10000):
k += 1
summary, _ = sess.run([merged, ff.train_step], feed_dict={ff.inputs: X.reshape((4,2)),ff.labels: Y.reshape((4, 1)), ff.drop_keep_rate: 1.0})
train_writer.add_summary(summary, k)
# weights after training
W0 = sess.run(ff.W[0])
W1 = sess.run(ff.W[1])
print('W_0:\n%s' % sess.run(ff.W[0]))
print('W_1:\n%s' % sess.run(ff.W[1]))
plt.scatter([1, 1, 5],[1, 3, 2], color=['red', 'red', 'blue'], s=200, alpha=0.4, marker='o')
plt.scatter([3, 3], [1, 3], color=['green', 'green'], s=200, alpha=0.4, marker='o')
plt.plot([1, 3], [1, 1], color='orange', linewidth=abs(W0[0, 0]))
plt.annotate('%0.2f' % W0[0, 0], xy=(2, 1.0))
plt.plot([1, 3], [3, 1], color='blue', linewidth=abs(W0[1, 0]))
plt.annotate('%0.2f' % W0[1, 0], xy=(1.5, 1.5))
plt.plot([1, 3], [1, 3], color='blue', linewidth=abs(W0[0, 1]))
plt.annotate('%0.2f' % W0[0, 1], xy=(1.5, 2.5))
plt.plot([1, 3], [3, 3], color='orange', linewidth=abs(W0[1, 1]))
plt.annotate('%0.2f' % W0[1, 1], xy=(2, 3))
plt.plot([3, 5], [1, 2], color='blue', linewidth=abs(W1[0]))
plt.annotate('%0.2f' % W1[0], xy=(4, 1.5))
plt.plot([3, 5], [3, 2],color='blue', linewidth=abs(W1[1]))
plt.annotate('%0.2f' % W1[1], xy=(4, 2.5))
# output after training
pY = sess.run(ff.output, feed_dict={ff.inputs: X.reshape((4, 2)), ff.drop_keep_rate: 1.0})
print(pY)
plt.scatter([0, 1, 2, 3], pY, color=['red', 'green', 'blue', 'black'], s=25, alpha=0.4, marker='o')
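# A minimal follow-up, assuming TensorBoard is installed: the summaries written
# by train_writer land under ./log/train and can be inspected with
#   tensorboard --logdir=log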
|
Bismarrck/tensorflow
|
refs/heads/master
|
tensorflow/contrib/layers/python/layers/initializers.py
|
23
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Weight initializers for use with layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import random_ops
__all__ = ['xavier_initializer', 'xavier_initializer_conv2d',
'variance_scaling_initializer']
def xavier_initializer(uniform=True, seed=None, dtype=dtypes.float32):
"""Returns an initializer performing "Xavier" initialization for weights.
This function implements the weight initialization from:
Xavier Glorot and Yoshua Bengio (2010):
[Understanding the difficulty of training deep feedforward neural
networks. International conference on artificial intelligence and
statistics.](
http://www.jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)
This initializer is designed to keep the scale of the gradients roughly the
same in all layers. In uniform distribution this ends up being the range:
`x = sqrt(6. / (in + out)); [-x, x]` and for normal distribution a standard
deviation of `sqrt(2. / (in + out))` is used.
Args:
uniform: Whether to use uniform or normal distributed random initialization.
seed: A Python integer. Used to create random seeds. See
`tf.set_random_seed` for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
An initializer for a weight matrix.
"""
return variance_scaling_initializer(factor=1.0, mode='FAN_AVG',
uniform=uniform, seed=seed, dtype=dtype)
xavier_initializer_conv2d = xavier_initializer
def variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False,
seed=None, dtype=dtypes.float32):
"""Returns an initializer that generates tensors without scaling variance.
When initializing a deep network, it is in principle advantageous to keep
the scale of the input variance constant, so it does not explode or diminish
by reaching the final layer. This initializer use the following formula:
```python
if mode='FAN_IN': # Count only number of input connections.
n = fan_in
elif mode='FAN_OUT': # Count only number of output connections.
n = fan_out
elif mode='FAN_AVG': # Average number of inputs and output connections.
n = (fan_in + fan_out)/2.0
truncated_normal(shape, 0.0, stddev=sqrt(factor / n))
```
* To get [Delving Deep into Rectifiers](
http://arxiv.org/pdf/1502.01852v1.pdf) (also know as the "MSRA
initialization"), use (Default):<br/>
`factor=2.0 mode='FAN_IN' uniform=False`
* To get [Convolutional Architecture for Fast Feature Embedding](
http://arxiv.org/abs/1408.5093), use:<br/>
`factor=1.0 mode='FAN_IN' uniform=True`
* To get [Understanding the difficulty of training deep feedforward neural
networks](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf),
use:<br/>
`factor=1.0 mode='FAN_AVG' uniform=True.`
* To get `xavier_initializer` use either:<br/>
`factor=1.0 mode='FAN_AVG' uniform=True`, or<br/>
`factor=1.0 mode='FAN_AVG' uniform=False`.
Args:
factor: Float. A multiplicative factor.
mode: String. 'FAN_IN', 'FAN_OUT', 'FAN_AVG'.
uniform: Whether to use uniform or normal distributed random initialization.
seed: A Python integer. Used to create random seeds. See
`tf.set_random_seed` for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
An initializer that generates tensors with unit variance.
Raises:
ValueError: if `dtype` is not a floating point type.
TypeError: if `mode` is not in ['FAN_IN', 'FAN_OUT', 'FAN_AVG'].
"""
if not dtype.is_floating:
raise TypeError('Cannot create initializer for non-floating point type.')
if mode not in ['FAN_IN', 'FAN_OUT', 'FAN_AVG']:
raise TypeError('Unknown mode %s [FAN_IN, FAN_OUT, FAN_AVG]', mode)
# pylint: disable=unused-argument
def _initializer(shape, dtype=dtype, partition_info=None):
"""Initializer function."""
if not dtype.is_floating:
raise TypeError('Cannot create initializer for non-floating point type.')
# Estimating fan_in and fan_out is not possible to do perfectly, but we try.
# This is the right thing for matrix multiply and convolutions.
if shape:
fan_in = float(shape[-2]) if len(shape) > 1 else float(shape[-1])
fan_out = float(shape[-1])
else:
fan_in = 1.0
fan_out = 1.0
for dim in shape[:-2]:
fan_in *= float(dim)
fan_out *= float(dim)
if mode == 'FAN_IN':
# Count only number of input connections.
n = fan_in
elif mode == 'FAN_OUT':
# Count only number of output connections.
n = fan_out
elif mode == 'FAN_AVG':
# Average number of inputs and output connections.
n = (fan_in + fan_out) / 2.0
if uniform:
# To get stddev = math.sqrt(factor / n) need to adjust for uniform.
limit = math.sqrt(3.0 * factor / n)
return random_ops.random_uniform(shape, -limit, limit,
dtype, seed=seed)
else:
# To get stddev = math.sqrt(factor / n) need to adjust for truncated.
trunc_stddev = math.sqrt(1.3 * factor / n)
return random_ops.truncated_normal(shape, 0.0, trunc_stddev, dtype,
seed=seed)
# pylint: enable=unused-argument
return _initializer
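# A minimal worked example of the fan arithmetic documented above, assuming a
# 2-D weight matrix of shape [256, 128] and the defaults factor=2.0, mode='FAN_IN'.
if __name__ == '__main__':
  example_shape = [256, 128]
  fan_in = float(example_shape[-2])
  fan_out = float(example_shape[-1])
  n = fan_in                                                   # mode='FAN_IN'
  print('truncated_normal stddev: %.4f' % math.sqrt(1.3 * 2.0 / n))
  print('uniform limit:           %.4f' % math.sqrt(3.0 * 2.0 / n))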
|
iffy/AutobahnPython
|
refs/heads/master
|
doc/conf.py
|
10
|
# -*- coding: utf-8 -*-
import os
import sys
import sphinx_bootstrap_theme
# only needed for Autobahn|Python
sys.path.insert(0, os.path.abspath('./_extensions'))
sys.path.insert(0, os.path.abspath('..'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.ifconfig',
'sphinx.ext.todo',
'sphinxcontrib.spelling',
'txsphinx' # only needed for Autobahn|Python
]
spelling_lang = 'en_US'
spelling_show_suggestions = False
spelling_word_list_filename = 'spelling_wordlist.txt'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'AutobahnPython'
copyright = u'Tavendo GmbH'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
init = {}
with open(os.path.join(base_dir, "autobahn", "__init__.py")) as f:
exec(f.read(), init)
version = release = init["__version__"]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'work']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
## Sphinx-Bootstrap Theme
##
## http://sphinx-bootstrap-theme.readthedocs.org/en/latest/README.html
##
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': " ",
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "Site",
# A list of tuples containing pages or urls to link to.
# Valid tuples should be in the following forms:
# (name, page) # a link to a page
# (name, "/aa/bb", 1) # a link to an arbitrary relative url
# (name, "http://example.com", True) # arbitrary absolute url
# Note the "1" or "True" value above as the third argument to indicate
# an arbitrary url.
'navbar_links': [
#("Examples", "examples"),
#("Link", "http://example.com", True),
],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': True,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': True,
# Tab name for the current pages TOC. (Default: "Page")
#'navbar_pagenav_name': "Page",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 1,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
#'navbar_class': "navbar navbar-inverse",
'navbar_class': "navbar",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "nav",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing with "" (default) or the name of a valid theme
# such as "amelia" or "cosmo".
'bootswatch_theme': "",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
## additional variables which become accessible in RST (e.g. .. ifconfig:: not no_network)
##
def setup(app):
app.add_config_value('no_network', False, True)
no_network = None
## additional variables which become accessible in the template engine's
## context for all pages
##
html_context = {
#'widgeturl': 'https://demo.crossbar.io/clandeckwidget'
#'widgeturl': 'http://127.0.0.1:8090/widget'
'widgeturl': None,
'no_network': False,
#'cstatic': 'http://127.0.0.1:8888',
'cstatic': '//tavendo-common-static.s3-eu-west-1.amazonaws.com',
}
# (Optional) Logo. Should be small enough to fit the navbar (ideally 24x24).
# Path should be relative to the ``_static`` files directory.
html_logo = None
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'side-primary.html'
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'AutobahnPython'
# http://sphinx-doc.org/ext/intersphinx.html
intersphinx_mapping = {
'py2': ('http://docs.python.org/2', None),
'py3': ('http://docs.python.org/3', None),
'six': ('https://pythonhosted.org/six/', None),
}
rst_epilog = """
.. |ab| replace:: Autobahn
.. |Ab| replace:: **Autobahn**
.. |abL| replace:: Autobahn|Python
.. |AbL| replace:: **Autobahn**\|Python
.. _Autobahn: http://autobahn.ws
.. _AutobahnJS: http://autobahn.ws/js
.. _AutobahnPython: **Autobahn**\|Python
.. _WebSocket: http://tools.ietf.org/html/rfc6455
.. _RFC6455: http://tools.ietf.org/html/rfc6455
.. _WAMP: http://wamp.ws/
.. _Twisted: http://twistedmatrix.com/
.. _asyncio: http://docs.python.org/3.4/library/asyncio.html
.. _CPython: http://python.org/
.. _PyPy: http://pypy.org/
.. _Jython: http://jython.org/
.. _WAMP: http://wamp.ws/
.. _WAMPv1: http://wamp.ws/spec/wamp1/
.. _WAMPv2: https://github.com/tavendo/WAMP/blob/master/spec/README.md
.. _AutobahnTestsuite: http://autobahn.ws/testsuite
.. _trollius: https://pypi.python.org/pypi/trollius/
.. _tulip: https://pypi.python.org/pypi/asyncio/
"""
rst_prolog = """
"""
# http://stackoverflow.com/questions/5599254/how-to-use-sphinxs-autodoc-to-document-a-classs-init-self-method
autoclass_content = 'both'
autodoc_member_order = 'bysource'
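# A minimal usage note, assuming Sphinx and the listed extensions are installed:
# from the doc/ directory this configuration is typically consumed via
#   sphinx-build -b html . _build/html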
|
natoinet/seoscraper
|
refs/heads/master
|
seocheck.py
|
1
|
import argparse
from datetime import datetime
from envparse import env
import psycopg2
import scrapy
from scrapy.utils.project import get_project_settings
from scrapy.utils.log import configure_logging
from scrapy.crawler import CrawlerProcess, CrawlerRunner
from core.base import delete_table, db_to_csv
env.read_envfile()
SCRAPY_SETTINGS_MODULE = env.str('SCRAPY_SETTINGS_MODULE')
parser = argparse.ArgumentParser(description='Check a domain and extracts the url list and pagemap.')
parser.add_argument('resultpath', type=str, help='Result path')
parser.add_argument('-d', '--domains', type=str, help='domain to check')
parser.add_argument('-s', '--sitemaps', type=str, help='sitemap URL(s) to check', default=None) #Optional argument
parser.add_argument('-u', '--urls', type=str, help='Source file with an input list of urls if any', default=None) #Optional argument
parser.add_argument('-f', '--follow', help='follow links while crawling', action="store_true") #Optional argument
parser.add_argument('-r', '--resources', help='crawls all files', action="store_true") #Optional argument
parser.add_argument('-p', '--pagemap', help='if pagemap is required', action="store_true") #Optional argument
args = parser.parse_args()
print(args.domains, args.sitemaps, args.resultpath, args.urls, args.follow, args.pagemap, args.resources)
delete_table("urljson")
delete_table("pagemap")
delete_table("redirections")
# Crawl
configure_logging({'LOG_LEVEL' : 'DEBUG'})
process = CrawlerProcess(settings=get_project_settings())
process.crawl('minime_html', domains=args.domains, urls=args.urls,sitemaps=args.sitemaps,
follow=args.follow, resources=args.resources, links=args.pagemap)
process.start()
# Export => Combine results from pagemap & urllist
query = """select url, doc->'canonical' as canonical, status, doc->'robots' as robots, content_type, content_size, doc->'referer' as referer, doc->'title' as title, doc->'desc' as desc, doc->'h1' as h1, doc->'redirect_urls' as redirect_urls, doc->'redirect_status' as redir_status, doc->'redirections' as nb_redir from urljson"""
db_to_csv(query, args.resultpath + 'seocheck-' + str(datetime.now()) + '.csv')
query = """select pagemap.url, pagemap.link, pagemap.link_final, urljson.status from pagemap, urljson where pagemap.link=urljson.url or pagemap.link_final=urljson.url"""
db_to_csv(query, args.resultpath + 'seocheck-pagemap-' + str(datetime.now()) + '.csv')
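# A minimal invocation sketch, assuming the Scrapy project and database are
# configured; the domain and result path below are hypothetical:
#   python seocheck.py ./results/ -d example.com -s https://example.com/sitemap.xml -p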
|
mitchrule/Miscellaneous
|
refs/heads/master
|
Django_Project/django/Lib/site-packages/django/contrib/gis/geos/prototypes/predicates.py
|
103
|
"""
This module houses the GEOS ctypes prototype functions for the
unary and binary predicate operations on geometries.
"""
from ctypes import c_char, c_char_p, c_double
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import check_predicate
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
# ## Binary & unary predicate functions ##
def binary_predicate(func, *args):
"For GEOS binary predicate functions."
argtypes = [GEOM_PTR, GEOM_PTR]
if args:
argtypes += args
func.argtypes = argtypes
func.restype = c_char
func.errcheck = check_predicate
return func
def unary_predicate(func):
"For GEOS unary predicate functions."
func.argtypes = [GEOM_PTR]
func.restype = c_char
func.errcheck = check_predicate
return func
# ## Unary Predicates ##
geos_hasz = unary_predicate(GEOSFunc('GEOSHasZ'))
geos_isempty = unary_predicate(GEOSFunc('GEOSisEmpty'))
geos_isring = unary_predicate(GEOSFunc('GEOSisRing'))
geos_issimple = unary_predicate(GEOSFunc('GEOSisSimple'))
geos_isvalid = unary_predicate(GEOSFunc('GEOSisValid'))
# ## Binary Predicates ##
geos_contains = binary_predicate(GEOSFunc('GEOSContains'))
geos_crosses = binary_predicate(GEOSFunc('GEOSCrosses'))
geos_disjoint = binary_predicate(GEOSFunc('GEOSDisjoint'))
geos_equals = binary_predicate(GEOSFunc('GEOSEquals'))
geos_equalsexact = binary_predicate(GEOSFunc('GEOSEqualsExact'), c_double)
geos_intersects = binary_predicate(GEOSFunc('GEOSIntersects'))
geos_overlaps = binary_predicate(GEOSFunc('GEOSOverlaps'))
geos_relatepattern = binary_predicate(GEOSFunc('GEOSRelatePattern'), c_char_p)
geos_touches = binary_predicate(GEOSFunc('GEOSTouches'))
geos_within = binary_predicate(GEOSFunc('GEOSWithin'))
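# A minimal usage sketch, assuming the GEOS C library is installed: these
# prototypes are normally reached through GEOSGeometry rather than called
# directly with raw GEOM_PTR values.
if __name__ == '__main__':
    from django.contrib.gis.geos import GEOSGeometry
    poly = GEOSGeometry('POLYGON((0 0, 0 10, 10 10, 10 0, 0 0))')
    point = GEOSGeometry('POINT(5 5)')
    print(poly.contains(point))  # routed through the GEOSContains prototype
    print(point.within(poly))    # routed through the GEOSWithin prototype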
|
Arcanemagus/SickRage
|
refs/heads/master
|
lib/hachoir_parser/misc/mstask.py
|
74
|
"""
ms task/job file parser
Author: Jeff Bryner
Creation date: 2010-11
References:
http://msdn.microsoft.com/en-us/library/cc248286%28v=PROT.13%29.aspx
http://msdn.microsoft.com/en-us/library/cc248287%28v=PROT.13%29.aspx
http://technet.microsoft.com/en-us/library/bb490996.aspx
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, RootSeekableFieldSet,
CString, String, PascalString16,
UInt32, UInt16, UInt8,
Bit, Bits, PaddingBits,
TimestampWin64, DateTimeMSDOS32,
NullBytes, PaddingBytes, RawBits, RawBytes, Enum)
from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
from hachoir_core.text_handler import textHandler, hexadecimal
from hachoir_parser.common.win32 import PascalStringWin16, GUID
from hachoir_parser.common.msdos import MSDOSFileAttr16, MSDOSFileAttr32
from hachoir_core.text_handler import filesizeHandler
class TaskTrigger(FieldSet):
TRIGGER_TYPE = {
0x00000000: "ONCE",
0x00000001: "DAILY",
0x00000002: "WEEKLY",
0x00000003: "MONTHLYDATE",
0x00000004: "MONTHLYDOW",
0x00000005: "EVENT_ON_IDLE",
0x00000006: "EVENT_AT_SYSTEMSTART",
0x00000007: "EVENT_AT_LOGON"
}
def __init__(self, *args, **kwargs):
FieldSet.__init__(self, *args, **kwargs)
self._size = self["TriggerSize"].value * 8
def createFields(self):
yield UInt16(self, "TriggerSize")
yield UInt16(self, "Reserved[]")
yield UInt16(self, "BeginYear")
yield UInt16(self, "BeginMonth")
yield UInt16(self, "BeginDay")
yield UInt16(self, "EndYear")
yield UInt16(self, "EndMonth")
yield UInt16(self, "EndDay")
yield UInt16(self, "StartHour")
yield UInt16(self, "StartMinute")
yield UInt32(self, "MinutesDuration")
yield UInt32(self, "MinutesInterval","Time period between repeated trigger firings.")
yield Bit(self, "HasEndDate","Can task stop at some point in time?")
yield Bit(self, "KillAtDurationEnd","Can task be stopped at the end of the repetition period?")
yield Bit(self, "TriggerDisabled","Is this trigger disabled?")
yield RawBits(self, "Unused[]", 29)
yield Enum(UInt32(self, "TriggerType"),self.TRIGGER_TYPE)
yield UInt16(self, "TriggerSpecific0")
yield UInt16(self, "TriggerSpecific1")
yield UInt16(self, "TriggerSpecific2")
yield UInt16(self, "Padding")
yield UInt16(self, "Reserved[]")
yield UInt16(self, "Reserved[]")
class MSTaskFile(Parser, RootSeekableFieldSet):
PARSER_TAGS = {
"id": "mstask",
"category": "misc", # "archive", "audio", "container", ...
"file_ext": ("job",), # TODO: Example ("bmp",) to parse the file "image.bmp"
"min_size": 100, # TODO: Minimum file size (x bits, or x*8 in bytes)
"description": ".job 'at' file parser from ms windows", # TODO: Example: "A bitmap picture",
}
endian = LITTLE_ENDIAN
PRODUCT_VERSION = {
0x0400: "Windows NT 4.0",
0x0500: "Windows 2000",
0x0501: "Windows XP",
0x0600: "Windows Vista",
0x0601: "Windows 7"
}
TASK_STATUS = {
0x00041300: "Task Ready",
0x00041301: "Task running",
0x00041302: "Task disabled",
0x00041303: "Task has not run",
0x00041304: "Task has no more runs",
0x00041305: "Task not scheduled",
0x00041306: "Task terminated",
0x00041307: "Task has no valid triggers",
0x00041308: "Task contains only event triggers that do not have set run times",
0x00041309: "Task trigger not found",
0x0004130A: "One or more of the properties that are required to run this task have not been set.",
0x0004130B: "There is no running instance of the task",
0x0004130C: "Task Schedule Remoting Protocol service is not installed",
0x0004130D: "Task object cannot be opened",
0x0004130E: "Task object is invalid",
0x0004130F: "No Account information could be found in Task Scheduler Remoting Protocol security database for the task indicated."
}
def validate(self):
# The MAGIC for a task file is the windows version that created it
# http://msdn.microsoft.com/en-us/library/2d1fbbab-fe6c-4ae5-bdf5-41dc526b2439%28v=PROT.13%29#id11
if self['WindowsVersion'].value not in self.PRODUCT_VERSION:
return "Invalid Product Version Field"
return True
def createFields(self):
yield Enum(UInt16(self, "WindowsVersion"), self.PRODUCT_VERSION)
yield UInt16(self, "FileVersion")
yield GUID(self, "JobUUID")
yield UInt16(self, "AppNameOffset", "App Name Length Offset")
yield UInt16(self, "TriggerOffset", "Contains the offset in bytes within the .JOB file where the task triggers are located.")
yield UInt16(self, "ErrorRetryCount", "Contains the number of execute attempts that are attempted for the task if the task fails to start.")
yield UInt16(self, "ErrorRetryInterval", "Contains the interval, in minutes, between successive retries")
yield UInt16(self, "IdleDeadline", "Contains a maximum time in minutes to wait for the machine to become idle for Idle Wait minutes.")
yield UInt16(self, "IdleWait", "Contains a value in minutes. The machine remains idle for this many minutes before it runs the task")
yield UInt32(self, "Priority")
yield UInt32(self, "MaxRunTime", "Maximum run time in milliseconds")
yield UInt32(self, "ExitCode", "This contains the exit code of the executed task upon the completion of that task.")
yield Enum(UInt32(self, "Status"), self.TASK_STATUS)
yield Bit(self, "Interactive", "Can Task interact with user?")
yield Bit(self, "DeleteWhenDone", "Remove the task file when done?")
yield Bit(self, "Disabled", "Is Task disabled?")
yield Bit(self, "StartOnlyIfIdle", "Task begins only if computer is not in use at the scheduled time")
yield Bit(self, "KillOnIdleEnd", "Kill task if user input is detected, terminating idle state?")
yield Bit(self, "DontStartIfOnBatteries")
yield Bit(self, "KillIfGoingOnBatteries")
yield Bit(self, "RunOnlyIfDocked")
yield Bit(self, "HiddenTask")
yield Bit(self, "RunIfConnectedToInternet")
yield Bit(self, "RestartOnIdleResume")
yield Bit(self, "SystemRequired", "Can task cause system to resume or awaken if system is sleeping?")
yield Bit(self, "OnlyIfUserLoggedOn")
yield Bit(self, "ApplicationNameExists", "Does task have an application name defined?")
yield Bit(self, "Unused[]")
yield Bit(self, "Unused[]")
yield RawBytes(self, "flags", 2)
yield UInt16(self, "LastRunYear")
yield UInt16(self, "LastRunMonth")
yield UInt16(self, "LastRunWeekday", "Sunday=0,Saturday=6")
yield UInt16(self, "LastRunDay")
yield UInt16(self, "LastRunHour")
yield UInt16(self, "LastRunMinute")
yield UInt16(self, "LastRunSecond")
yield UInt16(self, "LastRunMillisecond")
yield UInt16(self, "RunningInstanceCount")
yield PascalStringWin16(self, "AppNameLength", strip='\0')
yield PascalStringWin16(self, "Parameters", strip='\0')
yield PascalStringWin16(self, "WorkingDirectory", strip='\0')
yield PascalStringWin16(self, "Author", strip='\0')
yield PascalStringWin16(self, "Comment", strip='\0')
yield UInt16(self, "UserDataSize")
#todo: read optional userdata
yield UInt16(self, "ReservedDataSize")
if self["ReservedDataSize"].value==8:
yield Enum(UInt32(self, "StartError", "contains the HRESULT error from the most recent attempt to start the task"), self.TASK_STATUS)
yield UInt32(self, "TaskFlags")
elif self["ReservedDataSize"].value:
yield RawBytes(self, "Reserved", self["ReservedDataSize"].value)
yield UInt16(self, "TriggerCount", "size of the array of triggers")
for i in xrange(self["TriggerCount"].value):
yield TaskTrigger(self, "Trigger[]")
|
szeged/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/third_party/h2/examples/twisted/head_request.py
|
25
|
# -*- coding: utf-8 -*-
"""
head_request.py
~~~~~~~~~~~~~~~
A short example that demonstrates a client that makes HEAD requests to certain
websites.
This example is intended as a reproduction of nghttp2 issue 396, for the
purposes of compatibility testing.
"""
from __future__ import print_function
from twisted.internet import reactor
from twisted.internet.endpoints import connectProtocol, SSL4ClientEndpoint
from twisted.internet.protocol import Protocol
from twisted.internet.ssl import optionsForClientTLS
from hyperframe.frame import SettingsFrame
from h2.connection import H2Connection
from h2.events import (
ResponseReceived, DataReceived, StreamEnded,
StreamReset, SettingsAcknowledged,
)
AUTHORITY = u'http2bin.org'
PATH = '/'
SIZE = 4096
class H2Protocol(Protocol):
def __init__(self):
self.conn = H2Connection()
self.known_proto = None
self.request_made = False
def connectionMade(self):
self.conn.initiate_connection()
# This reproduces the error in #396, by changing the header table size.
self.conn.update_settings({SettingsFrame.HEADER_TABLE_SIZE: SIZE})
self.transport.write(self.conn.data_to_send())
def dataReceived(self, data):
if not self.known_proto:
self.known_proto = self.transport.negotiatedProtocol
assert self.known_proto == b'h2'
events = self.conn.receive_data(data)
for event in events:
if isinstance(event, ResponseReceived):
self.handleResponse(event.headers, event.stream_id)
elif isinstance(event, DataReceived):
self.handleData(event.data, event.stream_id)
elif isinstance(event, StreamEnded):
self.endStream(event.stream_id)
elif isinstance(event, SettingsAcknowledged):
self.settingsAcked(event)
elif isinstance(event, StreamReset):
reactor.stop()
raise RuntimeError("Stream reset: %d" % event.error_code)
else:
print(event)
data = self.conn.data_to_send()
if data:
self.transport.write(data)
def settingsAcked(self, event):
# Having received the remote settings change, lets send our request.
if not self.request_made:
self.sendRequest()
def handleResponse(self, response_headers, stream_id):
for name, value in response_headers:
print("%s: %s" % (name.decode('utf-8'), value.decode('utf-8')))
print("")
def handleData(self, data, stream_id):
print(data, end='')
def endStream(self, stream_id):
self.conn.close_connection()
self.transport.write(self.conn.data_to_send())
self.transport.loseConnection()
reactor.stop()
def sendRequest(self):
request_headers = [
(':method', 'HEAD'),
(':authority', AUTHORITY),
(':scheme', 'https'),
(':path', PATH),
('user-agent', 'hyper-h2/1.0.0'),
]
self.conn.send_headers(1, request_headers, end_stream=True)
self.request_made = True
options = optionsForClientTLS(
hostname=AUTHORITY,
acceptableProtocols=[b'h2'],
)
connectProtocol(
SSL4ClientEndpoint(reactor, AUTHORITY, 443, options),
H2Protocol()
)
reactor.run()
|
thebruce87/Photobooth
|
refs/heads/master
|
src/button.py
|
2
|
import RPi.GPIO as GPIO
isInit = False
def init(mode = GPIO.BCM):
GPIO.setmode(mode)
global isInit
isInit = True
class Button:
def __init__(self, gpio, direction = GPIO.IN):
self.gpio = gpio
self.direction = direction
GPIO.setup(self.gpio, self.direction)
def isPressed(self):
return not GPIO.input(self.gpio)
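# A minimal polling sketch, assuming BCM numbering; pin 17 is a hypothetical choice.
if __name__ == '__main__':
    import time
    init()               # defaults to GPIO.BCM
    button = Button(17)  # hypothetical GPIO pin
    while True:
        if button.isPressed():
            print("button pressed")
        time.sleep(0.1)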
|
davidyezsetz/kuma
|
refs/heads/master
|
vendor/packages/nose/functional_tests/support/idp/tests.py
|
10
|
import unittest
def test_a():
pass
def test_b():
raise TypeError("I am typeless")
def test_c():
assert False, "I am contrary"
def test_gen():
def tryit(i):
pass
for i in range(0, 4):
yield tryit, i
class TestCase(unittest.TestCase):
def test_a(self):
pass
def test_b(self):
pass
class TestCls:
def test_a(self):
pass
def test_gen(self):
def tryit(i):
pass
for i in range(0, 4):
yield tryit, i
def test_z(self):
pass
|
stefanv/aandete
|
refs/heads/master
|
app/lib/waitress/buffers.py
|
6
|
##############################################################################
#
# Copyright (c) 2001-2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Buffers
"""
from io import BytesIO
# copy_bytes controls the size of temp. strings for shuffling data around.
COPY_BYTES = 1 << 18 # 256K
# The maximum number of bytes to buffer in a simple string.
STRBUF_LIMIT = 8192
class FileBasedBuffer(object):
remain = 0
def __init__(self, file, from_buffer=None):
self.file = file
if from_buffer is not None:
from_file = from_buffer.getfile()
read_pos = from_file.tell()
from_file.seek(0)
while True:
data = from_file.read(COPY_BYTES)
if not data:
break
file.write(data)
self.remain = int(file.tell() - read_pos)
from_file.seek(read_pos)
file.seek(read_pos)
def __len__(self):
return self.remain
def __nonzero__(self):
return True
__bool__ = __nonzero__ # py3
def append(self, s):
file = self.file
read_pos = file.tell()
file.seek(0, 2)
file.write(s)
file.seek(read_pos)
self.remain = self.remain + len(s)
def get(self, numbytes=-1, skip=False):
file = self.file
if not skip:
read_pos = file.tell()
if numbytes < 0:
# Read all
res = file.read()
else:
res = file.read(numbytes)
if skip:
self.remain -= len(res)
else:
file.seek(read_pos)
return res
def skip(self, numbytes, allow_prune=0):
if self.remain < numbytes:
raise ValueError("Can't skip %d bytes in buffer of %d bytes" % (
numbytes, self.remain)
)
self.file.seek(numbytes, 1)
self.remain = self.remain - numbytes
def newfile(self):
raise NotImplementedError()
def prune(self):
file = self.file
if self.remain == 0:
read_pos = file.tell()
file.seek(0, 2)
sz = file.tell()
file.seek(read_pos)
if sz == 0:
# Nothing to prune.
return
nf = self.newfile()
while True:
data = file.read(COPY_BYTES)
if not data:
break
nf.write(data)
self.file = nf
def getfile(self):
return self.file
def close(self):
if hasattr(self.file, 'close'):
self.file.close()
self.remain = 0
class TempfileBasedBuffer(FileBasedBuffer):
def __init__(self, from_buffer=None):
FileBasedBuffer.__init__(self, self.newfile(), from_buffer)
def newfile(self):
from tempfile import TemporaryFile
return TemporaryFile('w+b')
class BytesIOBasedBuffer(FileBasedBuffer):
def __init__(self, from_buffer=None):
if from_buffer is not None:
FileBasedBuffer.__init__(self, BytesIO(), from_buffer)
else:
# Shortcut. :-)
self.file = BytesIO()
def newfile(self):
return BytesIO()
class ReadOnlyFileBasedBuffer(FileBasedBuffer):
# used as wsgi.file_wrapper
def __init__(self, file, block_size=32768):
self.file = file
self.block_size = block_size # for __iter__
def prepare(self, size=None):
if hasattr(self.file, 'seek') and hasattr(self.file, 'tell'):
start_pos = self.file.tell()
self.file.seek(0, 2)
end_pos = self.file.tell()
self.file.seek(start_pos)
fsize = end_pos - start_pos
if size is None:
self.remain = fsize
else:
self.remain = min(fsize, size)
return self.remain
def get(self, numbytes=-1, skip=False):
# never read more than self.remain (it can be user-specified)
if numbytes == -1 or numbytes > self.remain:
numbytes = self.remain
file = self.file
if not skip:
read_pos = file.tell()
res = file.read(numbytes)
if skip:
self.remain -= len(res)
else:
file.seek(read_pos)
return res
def __iter__(self): # called by task if self.filelike has no seek/tell
return self
def next(self):
val = self.file.read(self.block_size)
if not val:
raise StopIteration
return val
__next__ = next # py3
def append(self, s):
raise NotImplementedError
class OverflowableBuffer(object):
"""
This buffer implementation has four stages:
- No data
- Bytes-based buffer
- BytesIO-based buffer
- Temporary file storage
The first two stages are fastest for simple transfers.
"""
overflowed = False
buf = None
strbuf = b'' # Bytes-based buffer.
def __init__(self, overflow):
# overflow is the maximum to be stored in a StringIO buffer.
self.overflow = overflow
def __len__(self):
buf = self.buf
if buf is not None:
# use buf.__len__ rather than len(buf) FBO of not getting
# OverflowError on Python 2
return buf.__len__()
else:
return self.strbuf.__len__()
def __nonzero__(self):
# use self.__len__ rather than len(self) FBO of not getting
# OverflowError on Python 2
return self.__len__() > 0
__bool__ = __nonzero__ # py3
def _create_buffer(self):
strbuf = self.strbuf
if len(strbuf) >= self.overflow:
self._set_large_buffer()
else:
self._set_small_buffer()
buf = self.buf
if strbuf:
buf.append(self.strbuf)
self.strbuf = b''
return buf
def _set_small_buffer(self):
self.buf = BytesIOBasedBuffer(self.buf)
self.overflowed = False
def _set_large_buffer(self):
self.buf = TempfileBasedBuffer(self.buf)
self.overflowed = True
def append(self, s):
buf = self.buf
if buf is None:
strbuf = self.strbuf
if len(strbuf) + len(s) < STRBUF_LIMIT:
self.strbuf = strbuf + s
return
buf = self._create_buffer()
buf.append(s)
# use buf.__len__ rather than len(buf) FBO of not getting
# OverflowError on Python 2
sz = buf.__len__()
if not self.overflowed:
if sz >= self.overflow:
self._set_large_buffer()
def get(self, numbytes=-1, skip=False):
buf = self.buf
if buf is None:
strbuf = self.strbuf
if not skip:
return strbuf
buf = self._create_buffer()
return buf.get(numbytes, skip)
def skip(self, numbytes, allow_prune=False):
buf = self.buf
if buf is None:
if allow_prune and numbytes == len(self.strbuf):
# We could slice instead of converting to
# a buffer, but that would eat up memory in
# large transfers.
self.strbuf = b''
return
buf = self._create_buffer()
buf.skip(numbytes, allow_prune)
def prune(self):
"""
A potentially expensive operation that removes all data
already retrieved from the buffer.
"""
buf = self.buf
if buf is None:
self.strbuf = b''
return
buf.prune()
if self.overflowed:
# use buf.__len__ rather than len(buf) FBO of not getting
# OverflowError on Python 2
sz = buf.__len__()
if sz < self.overflow:
# Revert to a faster buffer.
self._set_small_buffer()
def getfile(self):
buf = self.buf
if buf is None:
buf = self._create_buffer()
return buf.getfile()
def close(self):
buf = self.buf
if buf is not None:
buf.close()
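# A minimal sketch of the staged behaviour described in the OverflowableBuffer
# docstring, assuming the module-level STRBUF_LIMIT of 8192 bytes:
if __name__ == '__main__':
    ob = OverflowableBuffer(overflow=16384)
    ob.append(b'x' * 100)            # small: stays in the plain bytes buffer
    assert not ob.overflowed
    ob.append(b'y' * STRBUF_LIMIT)   # past STRBUF_LIMIT: BytesIO-based buffer
    assert not ob.overflowed
    ob.append(b'z' * 32768)          # past `overflow`: temporary file storage
    assert ob.overflowed
    print('buffered %d bytes in %r' % (len(ob), ob.getfile()))
    ob.close()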
|
run2/citytour
|
refs/heads/master
|
4symantec/Scripts/activate_this.py
|
1076
|
"""By using execfile(this_file, dict(__file__=this_file)) you will
activate this virtualenv environment.
This can be used when you must use an existing Python interpreter, not
the virtualenv bin/python
"""
try:
__file__
except NameError:
raise AssertionError(
"You must run this like execfile('path/to/activate_this.py', dict(__file__='path/to/activate_this.py'))")
import sys
import os
old_os_path = os.environ.get('PATH', '')
os.environ['PATH'] = os.path.dirname(os.path.abspath(__file__)) + os.pathsep + old_os_path
base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if sys.platform == 'win32':
site_packages = os.path.join(base, 'Lib', 'site-packages')
else:
site_packages = os.path.join(base, 'lib', 'python%s' % sys.version[:3], 'site-packages')
prev_sys_path = list(sys.path)
import site
site.addsitedir(site_packages)
sys.real_prefix = sys.prefix
sys.prefix = base
# Move the added items to the front of the path:
new_sys_path = []
for item in list(sys.path):
if item not in prev_sys_path:
new_sys_path.append(item)
sys.path.remove(item)
sys.path[:0] = new_sys_path
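# A minimal usage note for Python 3, where execfile() is unavailable; the path
# below is a hypothetical example:
#
#   activate_this = '/path/to/venv/bin/activate_this.py'
#   with open(activate_this) as f:
#       exec(f.read(), {'__file__': activate_this})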
|
P1d0f/encryptGen
|
refs/heads/master
|
encryption-generator.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# encryption-generator.py
#
# Copyright 2016 Netuser <zorgonteam@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
# encryption-generator.py Version 2.0
# site http://zorgonteam.wordpress.com
import os
import sys
import time
import base64
import urllib
import hashlib
import subprocess
from datetime import date
from datetime import datetime
from Crypto.Cipher import DES
from Crypto import Random
date=date.today()
now=datetime.now()
if os.name in ['nt','win32']:
os.system('cls')
else:
os.system('clear')
print "[*] Author Netuser [*]"
print "[*] encryption generator [*]"
print "[*] date :",date," [*]"
print
print "[*] Encrypt With Strong Crypto is Coming soon"
back = 'back'
#while back == 'back':
while True:
try:
menu=raw_input('\n[*] encrypt or decrypt $ ')
menu_item="update"
if menu_item == menu:
print "[*] Updating Databases Information .... "
url=urllib.urlretrieve("https://raw.githubusercontent.com/P1d0f/encryptGen/master/encryption-generator.py","encryption-generator.py")
print "[*] Update Succesfully"
sys.exit()
menu_item="help"
if menu == menu_item:
print """
you just type encrypt or decrypt
example :
encrypt = encrypt or decrypt $ encrypt (enter)
decrypt = encrypt or decrypt $ decrypt (enter)
"""
menu_item="encrypt"
if menu == menu_item:
print
print "----> md5"
print "----> sha1"
print "----> sha224"
print "----> sha256"
print "----> sha384"
print "----> sha512"
print "----> base16"
print "----> base32"
print "----> base64"
print "----> cryptoDES"
print
raw=raw_input('[*] type and choice one $ ')
menu_item="exit"
if raw == menu_item:
print "[*] thanks for shopping"
sys.exit()
menu_item="cryptoDES"
if menu_item == raw:
telo=raw_input('[*] your text $ ')
iv=Random.get_random_bytes(8)
des1=DES.new('01234567', DES.MODE_CFB, iv)
des2=DES.new('01234567', DES.MODE_CFB, iv)
text=telo
cipher_text=des2.encrypt(text)
nama_file=open('text.encrypt','w')
nama_file.writelines(cipher_text)
nama_file.close()
time.sleep(2)
for i in(5,4,3,2,1):
print "[*] encrypted at",now
print "\n[*] saved into text.encrypt"
menu_item="base16"
if menu_item == raw:
telo=raw_input('[*] text $ ')
base16=base64.b16encode('%s' % (telo))
for i in(5,4,3,2,1):
print "[*] encoded at",now
print "\n[*] result :",base16
menu_item="sha224"
if menu_item == raw:
telo=raw_input('[*] text $ ')
sha224=hashlib.sha224('%s' % (telo)).hexdigest()
for i in(5,4,3,2,1):
print "[*] encrypted at",now
print "\n[*] result :",sha224
menu_item="sha384"
if menu_item == raw:
telo=raw_input('[*] text $ ')
sha384=hashlib.sha384('%s' % (telo)).hexdigest()
for i in(5,4,3,2,1):
print "[*] encrypted at",now
print "\n[*] result :",sha384
menu_item="sha512"
if menu_item == raw:
telo=raw_input('[*] text $ ')
sha512=hashlib.sha512('%s' % (telo)).hexdigest()
for i in(5,4,3,2,1):
print "[*] encrypted at",now
print "\n[*] result :",sha512
menu_item="base64"
if menu_item == raw:
telo=raw_input('[*] text $ ')
                b64_result=base64.b64encode('%s' % (telo))  # renamed so the base64 module is not shadowed
for i in(5,4,3,2,1):
print "[*] encoded at",now
print "\n[*] result :",base64
menu_item="md5"
if menu_item == raw:
telo=raw_input('[*] text $ ')
md5=hashlib.md5('%s' % (telo)).hexdigest()
for i in(1,2,3,4,5):
print "[*] encrypted at",now
print "\n[*] result :",md5
menu_item="sha256"
if menu_item == raw:
telo=raw_input('[*] text $ ')
sha256=hashlib.sha256('%s' % (telo)).hexdigest()
print
for i in(1,2,3,4,5):
print "[*] encrypted at",now
print "\n[*] result :",sha256
menu_item="sha1"
if menu_item == raw:
telo=raw_input('[*] text $ ')
sha1=hashlib.sha1('%s' % (telo)).hexdigest()
print
for i in(1,2,3,4,5):
print "[*] encrypted at",now
print "\n[*] result :",sha1
menu_item="base32"
if menu_item == raw:
ff=raw_input('[*] text or file $ ')
menu_fuck="text"
if menu_fuck == ff:
telo=raw_input('text $ ')
base32=base64.b32encode('%s' % (telo))
print
for i in(1,2,3,4,5):
print "[*] encoded at",now
print "\n[*] result :",base32
menu_ss="file"
if menu_ss == ff:
try:
print "[*] WARNING : if you encrypt this file your file original will be remove !"
fileno=raw_input('\n[*] file to encrypt $ ')
baca=open('%s' % (fileno), 'r')
ss=baca.read()
decrypt=base64.b32encode(ss)
simpan=open('text.enc','w')
simpan.writelines(decrypt)
simpan.close()
time.sleep(2)
for i in(5,4,3,2,1):
print "[*] encoded at",now
print "\n[*] saved to text.enc"
os.remove(fileno)
except IOError:
print "\n[*] no file found",fileno
sys.exit()
menu_telo="decrypt"
if menu_telo == menu:
print
print "----> base16"
print "----> base32"
print "----> base64"
print "----> cryptoDES"
print
oke=raw_input('[*] type and choice one $ ')
menu_telo="cryptoDES"
if menu_telo == oke:
try:
telo=raw_input('[*] file.encrypt : ')
iv=Random.get_random_bytes(8)
des1=DES.new('01234567', DES.MODE_CFB, iv)
des2=DES.new('01234567', DES.MODE_CFB, iv)
nama_file=open('%s' % (telo),'r')
ss=nama_file.read()
decs=des2.decrypt(ss)
save1=open('text.decrypt','w')
save1.writelines(decs)
save1.close()
time.sleep(2)
for i in(5,4,3,2,1):
print "[*] decrypted at",now
print "\n[*] saved file text.decrypt"
except IOError:
print "\n[*] Not found file encrypt",telo
menu_telo="base16"
if oke == menu_telo:
raw1=raw_input('[*] text base16 $ ')
dec16=base64.b16decode('%s' % (raw1))
for i in(5,4,3,2,1):
print "[*] decoded at",now
print "\n[*] result :",dec16
menu_telo="base32"
if oke == menu_telo:
ss=raw_input('[*] text or file $ ')
menu_gg="text"
if menu_gg == ss:
raw2=raw_input('[*] text base32 $ ')
print
dec32=base64.b32decode('%s' % (raw2))
for i in(5,4,3,2,1):
print "[*] decoded at",now
print "\n[*] result :",dec32
menu_hh="file"
if menu_hh == ss:
try:
fileno=raw_input('[*] file text.enc $ ')
print
fuck=open('%s' % (fileno), 'r')
anjir=fuck.read()
dec43=base64.b32decode(anjir)
telo=open('text.dec','w')
telo.writelines(dec43)
telo.close()
time.sleep(2)
for i in(5,4,3,2,1):
print "[*] decoded at",now
print "\n[*] save file text.dec"
os.remove(fileno)
except:
print "[*] Not found file enc "
menu_telo="base64" #this is Bug Sorry
if oke == menu_telo:#
raw3=raw_input('[*] text base64 $ ')#
dec64=base64.b64decode('%s' % (raw3))#
for i in (5,4,3,2,1):#
print "[*] decoded at",now#
print "\n[*] result :",dec64#
menu_telo="exit"
if menu_telo == oke:
print "[*] thanks for shopping"
sys.exit()
menu_item="exit"
if menu == menu_item:
print "[*] thanks for shopping"
sys.exit()
except KeyboardInterrupt:
print "\n[*] ctrl+c active "
sys.exit()
##### Finished #################################### Finished ##################
###############################################################################
#the Bug is cannot decrypt crypto encryption but i will try to repair and make#
#progam is the best ever #you can wait this progam to be version 2.0 #
|
matuu/pyafipws
|
refs/heads/master
|
wsfev1.py
|
4
|
#!/usr/bin/python
# -*- coding: latin-1 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"""Módulo para obtener CAE/CAEA, código de autorización electrónico webservice
WSFEv1 de AFIP (Factura Electrónica Nacional - Proyecto Version 1 - 2.5)
Según RG 2485/08, RG 2757/2010, RG 2904/2010 y RG2926/10 (CAE anticipado),
RG 3067/2011 (RS - Monotributo), RG 3571/2013 (Responsables inscriptos IVA),
RG 3668/2014 (Factura A IVA F.8001), RG 3749/2015 (R.I. y exentos)
Más info: http://www.sistemasagiles.com.ar/trac/wiki/ProyectoWSFEv1
"""
__author__ = "Mariano Reingart <reingart@gmail.com>"
__copyright__ = "Copyright (C) 2010-2015 Mariano Reingart"
__license__ = "GPL 3.0"
__version__ = "1.16d"
import datetime
import decimal
import os
import sys
from utils import verifica, inicializar_y_capturar_excepciones, BaseWS, get_install_dir
HOMO = False # solo homologación
TYPELIB = False # usar librería de tipos (TLB)
LANZAR_EXCEPCIONES = False # valor por defecto: True
#WSDL = "https://www.sistemasagiles.com.ar/simulador/wsfev1/call/soap?WSDL=None"
WSDL = "https://wswhomo.afip.gov.ar/wsfev1/service.asmx?WSDL"
#WSDL = "file:///home/reingart/tmp/service.asmx.xml"
class WSFEv1(BaseWS):
"Interfaz para el WebService de Factura Electrónica Version 1 - 2.5"
_public_methods_ = ['CrearFactura', 'AgregarIva', 'CAESolicitar',
'AgregarTributo', 'AgregarCmpAsoc', 'AgregarOpcional',
'CompUltimoAutorizado', 'CompConsultar',
'CAEASolicitar', 'CAEAConsultar', 'CAEARegInformativo',
'CAEASinMovimientoInformar',
'ParamGetTiposCbte',
'ParamGetTiposConcepto',
'ParamGetTiposDoc',
'ParamGetTiposIva',
'ParamGetTiposMonedas',
'ParamGetTiposOpcional',
'ParamGetTiposTributos',
'ParamGetCotizacion',
'ParamGetPtosVenta',
'AnalizarXml', 'ObtenerTagXml', 'LoadTestXML',
'SetParametros', 'SetTicketAcceso', 'GetParametro',
'EstablecerCampoFactura',
'Dummy', 'Conectar', 'DebugLog', 'SetTicketAcceso']
_public_attrs_ = ['Token', 'Sign', 'Cuit',
'AppServerStatus', 'DbServerStatus', 'AuthServerStatus',
'XmlRequest', 'XmlResponse', 'Version', 'Excepcion', 'LanzarExcepciones',
'Resultado', 'Obs', 'Observaciones', 'Traceback', 'InstallDir',
'CAE','Vencimiento', 'Eventos', 'Errores', 'ErrCode', 'ErrMsg',
'Reprocesar', 'Reproceso', 'EmisionTipo', 'CAEA',
'CbteNro', 'CbtDesde', 'CbtHasta', 'FechaCbte',
'ImpTotal', 'ImpNeto', 'ImptoLiq',
'ImpIVA', 'ImpOpEx', 'ImpTrib',]
_reg_progid_ = "WSFEv1"
_reg_clsid_ = "{CA0E604D-E3D7-493A-8880-F6CDD604185E}"
if TYPELIB:
_typelib_guid_ = '{B1D7283C-3EC2-463E-89B4-11F5228E2A15}'
_typelib_version_ = 1, 13
_com_interfaces_ = ['IWSFEv1']
##_reg_class_spec_ = "wsfev1.WSFEv1"
# Variables globales para BaseWS:
HOMO = HOMO
WSDL = WSDL
Version = "%s %s" % (__version__, HOMO and 'Homologación' or '')
Reprocesar = True # recuperar automaticamente CAE emitidos
LanzarExcepciones = LANZAR_EXCEPCIONES
factura = None
def inicializar(self):
BaseWS.inicializar(self)
self.AppServerStatus = self.DbServerStatus = self.AuthServerStatus = None
self.Resultado = self.Motivo = self.Reproceso = ''
self.LastID = self.LastCMP = self.CAE = self.CAEA = self.Vencimiento = ''
self.CbteNro = self.CbtDesde = self.CbtHasta = self.PuntoVenta = None
self.ImpTotal = self.ImpIVA = self.ImpOpEx = self.ImpNeto = self.ImptoLiq = self.ImpTrib = None
self.EmisionTipo = self.Periodo = self.Orden = ""
self.FechaCbte = self.FchVigDesde = self.FchVigHasta = self.FchTopeInf = self.FchProceso = ""
def __analizar_errores(self, ret):
"Comprueba y extrae errores si existen en la respuesta XML"
if 'Errors' in ret:
errores = ret['Errors']
for error in errores:
self.Errores.append("%s: %s" % (
error['Err']['Code'],
error['Err']['Msg'],
))
self.ErrCode = ' '.join([str(error['Err']['Code']) for error in errores])
self.ErrMsg = '\n'.join(self.Errores)
if 'Events' in ret:
events = ret['Events']
self.Eventos = ['%s: %s' % (evt['code'], evt['msg']) for evt in events]
@inicializar_y_capturar_excepciones
def Dummy(self):
"Obtener el estado de los servidores de la AFIP"
result = self.client.FEDummy()['FEDummyResult']
self.AppServerStatus = result['AppServer']
self.DbServerStatus = result['DbServer']
self.AuthServerStatus = result['AuthServer']
return True
def CrearFactura(self, concepto=1, tipo_doc=80, nro_doc="", tipo_cbte=1, punto_vta=0,
cbt_desde=0, cbt_hasta=0, imp_total=0.00, imp_tot_conc=0.00, imp_neto=0.00,
imp_iva=0.00, imp_trib=0.00, imp_op_ex=0.00, fecha_cbte="", fecha_venc_pago=None,
fecha_serv_desde=None, fecha_serv_hasta=None, #--
moneda_id="PES", moneda_ctz="1.0000", caea=None, **kwargs
):
"Creo un objeto factura (interna)"
# Creo una factura electronica de exportación
fact = {'tipo_doc': tipo_doc, 'nro_doc': nro_doc,
'tipo_cbte': tipo_cbte, 'punto_vta': punto_vta,
'cbt_desde': cbt_desde, 'cbt_hasta': cbt_hasta,
'imp_total': imp_total, 'imp_tot_conc': imp_tot_conc,
'imp_neto': imp_neto, 'imp_iva': imp_iva,
'imp_trib': imp_trib, 'imp_op_ex': imp_op_ex,
'fecha_cbte': fecha_cbte,
'fecha_venc_pago': fecha_venc_pago,
'moneda_id': moneda_id, 'moneda_ctz': moneda_ctz,
'concepto': concepto,
'cbtes_asoc': [],
'tributos': [],
'iva': [],
'opcionales': [],
}
if fecha_serv_desde: fact['fecha_serv_desde'] = fecha_serv_desde
if fecha_serv_hasta: fact['fecha_serv_hasta'] = fecha_serv_hasta
if caea: fact['caea'] = caea
self.factura = fact
return True
def EstablecerCampoFactura(self, campo, valor):
if campo in self.factura or campo in ('fecha_serv_desde', 'fecha_serv_hasta', 'caea', 'fch_venc_cae'):
self.factura[campo] = valor
return True
else:
return False
def AgregarCmpAsoc(self, tipo=1, pto_vta=0, nro=0, **kwarg):
"Agrego un comprobante asociado a una factura (interna)"
cmp_asoc = {'tipo': tipo, 'pto_vta': pto_vta, 'nro': nro}
self.factura['cbtes_asoc'].append(cmp_asoc)
return True
def AgregarTributo(self, tributo_id=0, desc="", base_imp=0.00, alic=0, importe=0.00, **kwarg):
"Agrego un tributo a una factura (interna)"
tributo = { 'tributo_id': tributo_id, 'desc': desc, 'base_imp': base_imp,
'alic': alic, 'importe': importe}
self.factura['tributos'].append(tributo)
return True
def AgregarIva(self, iva_id=0, base_imp=0.0, importe=0.0, **kwarg):
"Agrego un tributo a una factura (interna)"
iva = { 'iva_id': iva_id, 'base_imp': base_imp, 'importe': importe }
self.factura['iva'].append(iva)
return True
def AgregarOpcional(self, opcional_id=0, valor="", **kwarg):
"Agrego un dato opcional a una factura (interna)"
op = { 'opcional_id': opcional_id, 'valor': valor }
self.factura['opcionales'].append(op)
return True
@inicializar_y_capturar_excepciones
def CAESolicitar(self):
f = self.factura
ret = self.client.FECAESolicitar(
Auth={'Token': self.Token, 'Sign': self.Sign, 'Cuit': self.Cuit},
FeCAEReq={
'FeCabReq': {'CantReg': 1,
'PtoVta': f['punto_vta'],
'CbteTipo': f['tipo_cbte']},
'FeDetReq': [{'FECAEDetRequest': {
'Concepto': f['concepto'],
'DocTipo': f['tipo_doc'],
'DocNro': f['nro_doc'],
'CbteDesde': f['cbt_desde'],
'CbteHasta': f['cbt_hasta'],
'CbteFch': f['fecha_cbte'],
'ImpTotal': f['imp_total'],
'ImpTotConc': f['imp_tot_conc'],
'ImpNeto': f['imp_neto'],
'ImpOpEx': f['imp_op_ex'],
'ImpTrib': f['imp_trib'],
'ImpIVA': f['imp_iva'],
# Fechas solo se informan si Concepto in (2,3)
'FchServDesde': f.get('fecha_serv_desde'),
'FchServHasta': f.get('fecha_serv_hasta'),
'FchVtoPago': f.get('fecha_venc_pago'),
'MonId': f['moneda_id'],
'MonCotiz': f['moneda_ctz'],
'CbtesAsoc': f['cbtes_asoc'] and [
{'CbteAsoc': {
'Tipo': cbte_asoc['tipo'],
'PtoVta': cbte_asoc['pto_vta'],
'Nro': cbte_asoc['nro']}}
for cbte_asoc in f['cbtes_asoc']] or None,
'Tributos': f['tributos'] and [
{'Tributo': {
'Id': tributo['tributo_id'],
'Desc': tributo['desc'],
'BaseImp': tributo['base_imp'],
'Alic': tributo['alic'],
'Importe': tributo['importe'],
}}
for tributo in f['tributos']] or None,
'Iva': f['iva'] and [
{'AlicIva': {
'Id': iva['iva_id'],
'BaseImp': iva['base_imp'],
'Importe': iva['importe'],
}}
for iva in f['iva']] or None,
'Opcionales': [
{'Opcional': {
'Id': opcional['opcional_id'],
'Valor': opcional['valor'],
}} for opcional in f['opcionales']] or None,
}
}]
})
result = ret['FECAESolicitarResult']
if 'FeCabResp' in result:
fecabresp = result['FeCabResp']
fedetresp = result['FeDetResp'][0]['FECAEDetResponse']
# Reprocesar en caso de error (recuperar CAE emitido anteriormente)
if self.Reprocesar and ('Errors' in result or 'Observaciones' in fedetresp):
for error in result.get('Errors',[])+fedetresp.get('Observaciones',[]):
err_code = str(error.get('Err', error.get('Obs'))['Code'])
if fedetresp['Resultado']=='R' and err_code=='10016':
# guardo los mensajes xml originales
xml_request = self.client.xml_request
xml_response = self.client.xml_response
cae = self.CompConsultar(f['tipo_cbte'], f['punto_vta'], f['cbt_desde'], reproceso=True)
if cae and self.EmisionTipo=='CAE':
self.Reproceso = 'S'
return cae
self.Reproceso = 'N'
# reestablesco los mensajes xml originales
self.client.xml_request = xml_request
self.client.xml_response = xml_response
self.Resultado = fecabresp['Resultado']
# Obs:
for obs in fedetresp.get('Observaciones', []):
self.Observaciones.append("%(Code)s: %(Msg)s" % (obs['Obs']))
self.Obs = '\n'.join(self.Observaciones)
self.CAE = fedetresp['CAE'] and str(fedetresp['CAE']) or ""
self.EmisionTipo = 'CAE'
self.Vencimiento = fedetresp['CAEFchVto']
self.FechaCbte = fedetresp.get('CbteFch', "") #.strftime("%Y/%m/%d")
self.CbteNro = fedetresp.get('CbteHasta', 0) # 1L
self.PuntoVenta = fecabresp.get('PtoVta', 0) # 4000
self.CbtDesde =fedetresp.get('CbteDesde', 0)
self.CbtHasta = fedetresp.get('CbteHasta', 0)
self.__analizar_errores(result)
return self.CAE
@inicializar_y_capturar_excepciones
def CompTotXRequest(self):
ret = self.client.FECompTotXRequest (
Auth={'Token': self.Token, 'Sign': self.Sign, 'Cuit': self.Cuit},
)
result = ret['FECompTotXRequestResult']
return result['RegXReq']
@inicializar_y_capturar_excepciones
def CompUltimoAutorizado(self, tipo_cbte, punto_vta):
ret = self.client.FECompUltimoAutorizado(
Auth={'Token': self.Token, 'Sign': self.Sign, 'Cuit': self.Cuit},
PtoVta=punto_vta,
CbteTipo=tipo_cbte,
)
result = ret['FECompUltimoAutorizadoResult']
self.CbteNro = result['CbteNro']
self.__analizar_errores(result)
return self.CbteNro is not None and str(self.CbteNro) or ''
@inicializar_y_capturar_excepciones
def CompConsultar(self, tipo_cbte, punto_vta, cbte_nro, reproceso=False):
difs = [] # si hay reproceso, verifico las diferencias con AFIP
ret = self.client.FECompConsultar(
Auth={'Token': self.Token, 'Sign': self.Sign, 'Cuit': self.Cuit},
FeCompConsReq={
'CbteTipo': tipo_cbte,
'CbteNro': cbte_nro,
'PtoVta': punto_vta,
})
result = ret['FECompConsultarResult']
if 'ResultGet' in result:
resultget = result['ResultGet']
if reproceso:
# verifico los campos registrados coincidan con los enviados:
f = self.factura
verificaciones = {
'Concepto': f['concepto'],
'DocTipo': f['tipo_doc'],
'DocNro': f['nro_doc'],
'CbteDesde': f['cbt_desde'],
'CbteHasta': f['cbt_hasta'],
'CbteFch': f['fecha_cbte'],
'ImpTotal': f['imp_total'] and float(f['imp_total']) or 0.0,
'ImpTotConc': f['imp_tot_conc'] and float(f['imp_tot_conc']) or 0.0,
'ImpNeto': f['imp_neto'] and float(f['imp_neto']) or 0.0,
'ImpOpEx': f['imp_op_ex'] and float(f['imp_op_ex']) or 0.0,
'ImpTrib': f['imp_trib'] and float(f['imp_trib']) or 0.0,
'ImpIVA': f['imp_iva'] and float(f['imp_iva']) or 0.0,
'FchServDesde': f.get('fecha_serv_desde'),
'FchServHasta': f.get('fecha_serv_hasta'),
'FchVtoPago': f.get('fecha_venc_pago'),
'MonId': f['moneda_id'],
'MonCotiz': float(f['moneda_ctz']),
'CbtesAsoc': [
{'CbteAsoc': {
'Tipo': cbte_asoc['tipo'],
'PtoVta': cbte_asoc['pto_vta'],
'Nro': cbte_asoc['nro']}}
for cbte_asoc in f['cbtes_asoc']],
'Tributos': [
{'Tributo': {
'Id': tributo['tributo_id'],
'Desc': tributo['desc'],
'BaseImp': float(tributo['base_imp']),
'Alic': float(tributo['alic']),
'Importe': float(tributo['importe']),
}}
for tributo in f['tributos']],
'Iva': [
{'AlicIva': {
'Id': iva['iva_id'],
'BaseImp': float(iva['base_imp']),
'Importe': float(iva['importe']),
}}
for iva in f['iva']],
}
verifica(verificaciones, resultget.copy(), difs)
if difs:
print "Diferencias:", difs
self.log("Diferencias: %s" % difs)
self.FechaCbte = resultget['CbteFch'] #.strftime("%Y/%m/%d")
self.CbteNro = resultget['CbteHasta'] # 1L
self.PuntoVenta = resultget['PtoVta'] # 4000
self.Vencimiento = resultget['FchVto'] #.strftime("%Y/%m/%d")
self.ImpTotal = str(resultget['ImpTotal'])
cod_aut = resultget['CodAutorizacion'] and str(resultget['CodAutorizacion']) or ''# 60423794871430L
self.Resultado = resultget['Resultado']
self.CbtDesde =resultget['CbteDesde']
self.CbtHasta = resultget['CbteHasta']
self.ImpTotal = resultget['ImpTotal']
self.ImpNeto = resultget['ImpNeto']
self.ImptoLiq = self.ImpIVA = resultget['ImpIVA']
self.ImpOpEx = resultget['ImpOpEx']
self.ImpTrib = resultget['ImpTrib']
self.EmisionTipo = resultget['EmisionTipo']
if self.EmisionTipo=='CAE':
self.CAE = cod_aut
elif self.EmisionTipo=='CAEA':
self.CAEA = cod_aut
self.__analizar_errores(result)
if not difs:
return self.CAE or self.CAEA
else:
return ''
@inicializar_y_capturar_excepciones
def CAESolicitarLote(self):
f = self.factura
ret = self.client.FECAESolicitar(
Auth={'Token': self.Token, 'Sign': self.Sign, 'Cuit': self.Cuit},
FeCAEReq={
'FeCabReq': {'CantReg': 250,
'PtoVta': f['punto_vta'],
'CbteTipo': f['tipo_cbte']},
'FeDetReq': [{'FECAEDetRequest': {
'Concepto': f['concepto'],
'DocTipo': f['tipo_doc'],
'DocNro': f['nro_doc'],
'CbteDesde': f['cbt_desde']+k,
'CbteHasta': f['cbt_hasta']+k,
'CbteFch': f['fecha_cbte'],
'ImpTotal': f['imp_total'],
'ImpTotConc': f['imp_tot_conc'],
'ImpNeto': f['imp_neto'],
'ImpOpEx': f['imp_op_ex'],
'ImpTrib': f['imp_trib'],
'ImpIVA': f['imp_iva'],
# Fechas solo se informan si Concepto in (2,3)
'FchServDesde': f.get('fecha_serv_desde'),
'FchServHasta': f.get('fecha_serv_hasta'),
'FchVtoPago': f.get('fecha_venc_pago'),
'MonId': f['moneda_id'],
'MonCotiz': f['moneda_ctz'],
'CbtesAsoc': [
{'CbteAsoc': {
'Tipo': cbte_asoc['tipo'],
'PtoVta': cbte_asoc['pto_vta'],
'Nro': cbte_asoc['nro']}}
for cbte_asoc in f['cbtes_asoc']],
'Tributos': [
{'Tributo': {
'Id': tributo['tributo_id'],
'Desc': tributo['desc'],
'BaseImp': tributo['base_imp'],
'Alic': tributo['alic'],
'Importe': tributo['importe'],
}}
for tributo in f['tributos']],
'Iva': [
{'AlicIva': {
'Id': iva['iva_id'],
'BaseImp': iva['base_imp'],
'Importe': iva['importe'],
}}
for iva in f['iva']],
'Opcionales': [
{'Opcional': {
'Id': opcional['opcional_id'],
'Valor': opcional['valor'],
}} for opcional in f['opcionales']] or None,
}
} for k in range (250)]
})
result = ret['FECAESolicitarResult']
if 'FeCabResp' in result:
fecabresp = result['FeCabResp']
fedetresp = result['FeDetResp'][0]['FECAEDetResponse']
# Reprocesar en caso de error (recuperar CAE emitido anteriormente)
if self.Reprocesar and 'Errors' in result:
for error in result['Errors']:
err_code = str(error['Err']['Code'])
if fedetresp['Resultado']=='R' and err_code=='10016':
cae = self.CompConsultar(f['tipo_cbte'], f['punto_vta'], f['cbt_desde'], reproceso=True)
if cae and self.EmisionTipo=='CAEA':
self.Reproceso = 'S'
return cae
self.Reproceso = 'N'
self.Resultado = fecabresp['Resultado']
# Obs:
for obs in fedetresp.get('Observaciones', []):
self.Observaciones.append("%(Code)s: %(Msg)s" % (obs['Obs']))
self.Obs = '\n'.join(self.Observaciones)
self.CAE = fedetresp['CAE'] and str(fedetresp['CAE']) or ""
self.EmisionTipo = 'CAE'
self.Vencimiento = fedetresp['CAEFchVto']
self.FechaCbte = fedetresp['CbteFch'] #.strftime("%Y/%m/%d")
self.CbteNro = fedetresp['CbteHasta'] # 1L
self.PuntoVenta = fecabresp['PtoVta'] # 4000
self.CbtDesde =fedetresp['CbteDesde']
self.CbtHasta = fedetresp['CbteHasta']
self.__analizar_errores(result)
return self.CAE
@inicializar_y_capturar_excepciones
def CAEASolicitar(self, periodo, orden):
ret = self.client.FECAEASolicitar(
Auth={'Token': self.Token, 'Sign': self.Sign, 'Cuit': self.Cuit},
Periodo=periodo,
Orden=orden,
)
result = ret['FECAEASolicitarResult']
self.__analizar_errores(result)
if 'ResultGet' in result:
result = result['ResultGet']
if 'CAEA' in result:
self.CAEA = result['CAEA']
self.Periodo = result['Periodo']
self.Orden = result['Orden']
self.FchVigDesde = result['FchVigDesde']
self.FchVigHasta = result['FchVigHasta']
self.FchTopeInf = result['FchTopeInf']
self.FchProceso = result['FchProceso']
return self.CAEA and str(self.CAEA) or ''
@inicializar_y_capturar_excepciones
def CAEAConsultar(self, periodo, orden):
"Método de consulta de CAEA"
ret = self.client.FECAEAConsultar(
Auth={'Token': self.Token, 'Sign': self.Sign, 'Cuit': self.Cuit},
Periodo=periodo,
Orden=orden,
)
result = ret['FECAEAConsultarResult']
self.__analizar_errores(result)
if 'ResultGet' in result:
result = result['ResultGet']
if 'CAEA' in result:
self.CAEA = result['CAEA']
self.Periodo = result['Periodo']
self.Orden = result['Orden']
self.FchVigDesde = result['FchVigDesde']
self.FchVigHasta = result['FchVigHasta']
self.FchTopeInf = result['FchTopeInf']
self.FchProceso = result['FchProceso']
return self.CAEA and str(self.CAEA) or ''
@inicializar_y_capturar_excepciones
def CAEARegInformativo(self):
"Método para informar comprobantes emitidos con CAEA"
f = self.factura
ret = self.client.FECAEARegInformativo(
Auth={'Token': self.Token, 'Sign': self.Sign, 'Cuit': self.Cuit},
FeCAEARegInfReq={
'FeCabReq': {'CantReg': 1,
'PtoVta': f['punto_vta'],
'CbteTipo': f['tipo_cbte']},
'FeDetReq': [{'FECAEADetRequest': {
'Concepto': f['concepto'],
'DocTipo': f['tipo_doc'],
'DocNro': f['nro_doc'],
'CbteDesde': f['cbt_desde'],
'CbteHasta': f['cbt_hasta'],
'CbteFch': f['fecha_cbte'],
'ImpTotal': f['imp_total'],
'ImpTotConc': f['imp_tot_conc'],
'ImpNeto': f['imp_neto'],
'ImpOpEx': f['imp_op_ex'],
'ImpTrib': f['imp_trib'],
'ImpIVA': f['imp_iva'],
# Fechas solo se informan si Concepto in (2,3)
'FchServDesde': f.get('fecha_serv_desde'),
'FchServHasta': f.get('fecha_serv_hasta'),
'FchVtoPago': f.get('fecha_venc_pago'),
'MonId': f['moneda_id'],
'MonCotiz': f['moneda_ctz'],
'CbtesAsoc': [
{'CbteAsoc': {
'Tipo': cbte_asoc['tipo'],
'PtoVta': cbte_asoc['pto_vta'],
'Nro': cbte_asoc['nro']}}
for cbte_asoc in f['cbtes_asoc']]
if f['cbtes_asoc'] else None,
'Tributos': [
{'Tributo': {
'Id': tributo['tributo_id'],
'Desc': tributo['desc'],
'BaseImp': tributo['base_imp'],
'Alic': tributo['alic'],
'Importe': tributo['importe'],
}}
for tributo in f['tributos']]
if f['tributos'] else None,
'Iva': [
{'AlicIva': {
'Id': iva['iva_id'],
'BaseImp': iva['base_imp'],
'Importe': iva['importe'],
}}
for iva in f['iva']]
if f['iva'] else None,
'CAEA': f['caea'],
}
}]
})
result = ret['FECAEARegInformativoResult']
if 'FeCabResp' in result:
fecabresp = result['FeCabResp']
fedetresp = result['FeDetResp'][0]['FECAEADetResponse']
# Reprocesar en caso de error (recuperar CAE emitido anteriormente)
if self.Reprocesar and 'Errors' in result:
for error in result['Errors']:
err_code = str(error['Err']['Code'])
if fedetresp['Resultado']=='R' and err_code=='703':
caea = self.CompConsultar(f['tipo_cbte'], f['punto_vta'], f['cbt_desde'], reproceso=True)
if caea and self.EmisionTipo=='CAE':
self.Reproceso = 'S'
return caea
self.Reproceso = 'N'
self.Resultado = fecabresp['Resultado']
# Obs:
for obs in fedetresp.get('Observaciones', []):
self.Observaciones.append("%(Code)s: %(Msg)s" % (obs['Obs']))
self.Obs = '\n'.join(self.Observaciones)
self.CAEA = fedetresp['CAEA'] and str(fedetresp['CAEA']) or ""
self.EmisionTipo = 'CAEA'
self.FechaCbte = fedetresp['CbteFch'] #.strftime("%Y/%m/%d")
self.CbteNro = fedetresp['CbteHasta'] # 1L
self.PuntoVenta = fecabresp['PtoVta'] # 4000
self.CbtDesde =fedetresp['CbteDesde']
self.CbtHasta = fedetresp['CbteHasta']
self.__analizar_errores(result)
return self.CAEA
@inicializar_y_capturar_excepciones
def CAEASinMovimientoInformar(self, punto_vta, caea):
"Método para informar CAEA sin movimiento"
ret = self.client.FECAEASinMovimientoInformar(
Auth={'Token': self.Token, 'Sign': self.Sign, 'Cuit': self.Cuit},
PtoVta=punto_vta,
CAEA=caea,
)
result = ret['FECAEASinMovimientoInformarResult']
self.__analizar_errores(result)
if 'CAEA' in result:
self.CAEA = result['CAEA']
if 'FchProceso' in result:
self.FchProceso = result['FchProceso']
if 'Resultado' in result:
self.Resultado = result['Resultado']
self.PuntoVenta = result['PtoVta'] # 4000
return self.Resultado or ''
@inicializar_y_capturar_excepciones
def ParamGetTiposCbte(self, sep="|"):
"Recuperador de valores referenciales de códigos de Tipos de Comprobantes"
ret = self.client.FEParamGetTiposCbte(
Auth={'Token': self.Token, 'Sign': self.Sign, 'Cuit': self.Cuit},
)
res = ret['FEParamGetTiposCbteResult']
return [(u"%(Id)s\t%(Desc)s\t%(FchDesde)s\t%(FchHasta)s" % p['CbteTipo']).replace("\t", sep)
for p in res['ResultGet']]
@inicializar_y_capturar_excepciones
def ParamGetTiposConcepto(self, sep="|"):
"Recuperador de valores referenciales de códigos de Tipos de Conceptos"
ret = self.client.FEParamGetTiposConcepto(
Auth={'Token': self.Token, 'Sign': self.Sign, 'Cuit': self.Cuit},
)
res = ret['FEParamGetTiposConceptoResult']
return [(u"%(Id)s\t%(Desc)s\t%(FchDesde)s\t%(FchHasta)s" % p['ConceptoTipo']).replace("\t", sep)
for p in res['ResultGet']]
@inicializar_y_capturar_excepciones
def ParamGetTiposDoc(self, sep="|"):
"Recuperador de valores referenciales de códigos de Tipos de Documentos"
ret = self.client.FEParamGetTiposDoc(
Auth={'Token': self.Token, 'Sign': self.Sign, 'Cuit': self.Cuit},
)
res = ret['FEParamGetTiposDocResult']
return [(u"%(Id)s\t%(Desc)s\t%(FchDesde)s\t%(FchHasta)s" % p['DocTipo']).replace("\t", sep)
for p in res['ResultGet']]
@inicializar_y_capturar_excepciones
def ParamGetTiposIva(self, sep="|"):
"Recuperador de valores referenciales de códigos de Tipos de Alícuotas"
ret = self.client.FEParamGetTiposIva(
Auth={'Token': self.Token, 'Sign': self.Sign, 'Cuit': self.Cuit},
)
res = ret['FEParamGetTiposIvaResult']
return [(u"%(Id)s\t%(Desc)s\t%(FchDesde)s\t%(FchHasta)s" % p['IvaTipo']).replace("\t", sep)
for p in res['ResultGet']]
@inicializar_y_capturar_excepciones
def ParamGetTiposMonedas(self, sep="|"):
"Recuperador de valores referenciales de códigos de Monedas"
ret = self.client.FEParamGetTiposMonedas(
Auth={'Token': self.Token, 'Sign': self.Sign, 'Cuit': self.Cuit},
)
res = ret['FEParamGetTiposMonedasResult']
return [(u"%(Id)s\t%(Desc)s\t%(FchDesde)s\t%(FchHasta)s" % p['Moneda']).replace("\t", sep)
for p in res['ResultGet']]
@inicializar_y_capturar_excepciones
def ParamGetTiposOpcional(self, sep="|"):
"Recuperador de valores referenciales de códigos de Tipos de datos opcionales"
ret = self.client.FEParamGetTiposOpcional(
Auth={'Token': self.Token, 'Sign': self.Sign, 'Cuit': self.Cuit},
)
res = ret['FEParamGetTiposOpcionalResult']
return [(u"%(Id)s\t%(Desc)s\t%(FchDesde)s\t%(FchHasta)s" % p['OpcionalTipo']).replace("\t", sep)
for p in res.get('ResultGet', [])]
@inicializar_y_capturar_excepciones
def ParamGetTiposTributos(self, sep="|"):
"Recuperador de valores referenciales de códigos de Tipos de Tributos"
"Este método permite consultar los tipos de tributos habilitados en este WS"
ret = self.client.FEParamGetTiposTributos(
Auth={'Token': self.Token, 'Sign': self.Sign, 'Cuit': self.Cuit},
)
res = ret['FEParamGetTiposTributosResult']
return [(u"%(Id)s\t%(Desc)s\t%(FchDesde)s\t%(FchHasta)s" % p['TributoTipo']).replace("\t", sep)
for p in res['ResultGet']]
@inicializar_y_capturar_excepciones
def ParamGetCotizacion(self, moneda_id):
"Recuperador de cotización de moneda"
ret = self.client.FEParamGetCotizacion(
Auth={'Token': self.Token, 'Sign': self.Sign, 'Cuit': self.Cuit},
MonId=moneda_id,
)
self.__analizar_errores(ret)
res = ret['FEParamGetCotizacionResult']['ResultGet']
return str(res.get('MonCotiz',""))
@inicializar_y_capturar_excepciones
def ParamGetPtosVenta(self, sep="|"):
"Recuperador de valores referenciales Puntos de Venta registrados"
ret = self.client.FEParamGetPtosVenta(
Auth={'Token': self.Token, 'Sign': self.Sign, 'Cuit': self.Cuit},
)
res = ret.get('FEParamGetPtosVentaResult', {})
return [(u"%(Nro)s\tEmisionTipo:%(EmisionTipo)s\tBloqueado:%(Bloqueado)s\tFchBaja:%(FchBaja)s" % p['PtoVenta']).replace("\t", sep)
for p in res.get('ResultGet', [])]
def p_assert_eq(a,b):
print a, a==b and '==' or '!=', b
def main():
"Función principal de pruebas (obtener CAE)"
import os, time
DEBUG = '--debug' in sys.argv
if DEBUG:
from pysimplesoap.client import __version__ as soapver
print "pysimplesoap.__version__ = ", soapver
wsfev1 = WSFEv1()
wsfev1.LanzarExcepciones = True
cache = None
wsdl = "https://wswhomo.afip.gov.ar/wsfev1/service.asmx?WSDL"
proxy = ""
wrapper = "" #"pycurl"
cacert = None #geotrust.crt"
ok = wsfev1.Conectar(cache, wsdl, proxy, wrapper, cacert)
if not ok:
raise RuntimeError(wsfev1.Excepcion)
if DEBUG:
print "LOG: ", wsfev1.DebugLog()
if "--dummy" in sys.argv:
print wsfev1.client.help("FEDummy")
wsfev1.Dummy()
print "AppServerStatus", wsfev1.AppServerStatus
print "DbServerStatus", wsfev1.DbServerStatus
print "AuthServerStatus", wsfev1.AuthServerStatus
sys.exit(0)
# obteniendo el TA para pruebas
from wsaa import WSAA
ta = WSAA().Autenticar("wsfe", "reingart.crt", "reingart.key", debug=True)
wsfev1.SetTicketAcceso(ta)
wsfev1.Cuit = "20267565393"
if "--prueba" in sys.argv:
print wsfev1.client.help("FECAESolicitar").encode("latin1")
tipo_cbte = 2 if '--usados' not in sys.argv else 49
punto_vta = 4001
cbte_nro = long(wsfev1.CompUltimoAutorizado(tipo_cbte, punto_vta) or 0)
fecha = datetime.datetime.now().strftime("%Y%m%d")
concepto = 2 if '--usados' not in sys.argv else 1
tipo_doc = 80 if '--usados' not in sys.argv else 30
nro_doc = "30500010912" # CUIT BNA
cbt_desde = cbte_nro + 1; cbt_hasta = cbte_nro + 1
imp_total = "122.00"; imp_tot_conc = "0.00"; imp_neto = "100.00"
imp_iva = "21.00"; imp_trib = "1.00"; imp_op_ex = "0.00"
fecha_cbte = fecha
# Fechas del período del servicio facturado y vencimiento de pago:
if concepto > 1:
fecha_venc_pago = fecha
fecha_serv_desde = fecha; fecha_serv_hasta = fecha
else:
fecha_venc_pago = fecha_serv_desde = fecha_serv_hasta = None
moneda_id = 'PES'; moneda_ctz = '1.000'
wsfev1.CrearFactura(concepto, tipo_doc, nro_doc, tipo_cbte, punto_vta,
cbt_desde, cbt_hasta, imp_total, imp_tot_conc, imp_neto,
imp_iva, imp_trib, imp_op_ex, fecha_cbte, fecha_venc_pago,
fecha_serv_desde, fecha_serv_hasta, #--
moneda_id, moneda_ctz)
# comprobantes asociados (notas de crédito / débito)
if tipo_cbte in (1, 2, 3, 6, 7, 8, 11, 12, 13):
tipo = 3
pto_vta = 2
nro = 1234
wsfev1.AgregarCmpAsoc(tipo, pto_vta, nro)
# otros tributos:
id = 99
desc = 'Impuesto Municipal Matanza'
base_imp = 100
alic = 1
importe = 1
wsfev1.AgregarTributo(id, desc, base_imp, alic, importe)
# subtotales por alicuota de IVA:
id = 5 # 21%
base_imp = 100
importe = 21
wsfev1.AgregarIva(id, base_imp, importe)
# datos opcionales para proyectos promovidos:
if '--proyectos' in sys.argv:
wsfev1.AgregarOpcional(2, "1234") # identificador del proyecto
# datos opcionales para RG Bienes Usados 3411 (del vendedor):
if '--usados' in sys.argv:
wsfev1.AgregarOpcional(91, "Juan Perez") # Nombre y Apellido
wsfev1.AgregarOpcional(92, "200") # Nacionalidad
wsfev1.AgregarOpcional(93, "Balcarce 50") # Domicilio
# datos opcionales para RG 3668 Impuesto al Valor Agregado - Art.12:
if '--rg3668' in sys.argv:
wsfev1.AgregarOpcional(5, "02") # IVA Excepciones
wsfev1.AgregarOpcional(61, "80") # Firmante Doc Tipo
wsfev1.AgregarOpcional(62, "20267565393") # Firmante Doc Nro
wsfev1.AgregarOpcional(7, "01") # Carácter del Firmante
import time
t0 = time.time()
wsfev1.CAESolicitar()
t1 = time.time()
print "Resultado", wsfev1.Resultado
print "Reproceso", wsfev1.Reproceso
print "CAE", wsfev1.CAE
print "Vencimiento", wsfev1.Vencimiento
if DEBUG:
print "t0", t0
print "t1", t1
print "lapso", t1-t0
open("xmlrequest.xml","wb").write(wsfev1.XmlRequest)
open("xmlresponse.xml","wb").write(wsfev1.XmlResponse)
wsfev1.AnalizarXml("XmlResponse")
p_assert_eq(wsfev1.ObtenerTagXml('CAE'), str(wsfev1.CAE))
p_assert_eq(wsfev1.ObtenerTagXml('Concepto'), '2')
p_assert_eq(wsfev1.ObtenerTagXml('Obs',0,'Code'), "10063")
print wsfev1.ObtenerTagXml('Obs',0,'Msg')
if "--reprocesar" in sys.argv:
print "reprocesando...."
wsfev1.Reproceso = True
wsfev1.CAESolicitar()
if "--get" in sys.argv:
tipo_cbte = 2
punto_vta = 4001
cbte_nro = wsfev1.CompUltimoAutorizado(tipo_cbte, punto_vta)
wsfev1.CompConsultar(tipo_cbte, punto_vta, cbte_nro)
print "FechaCbte = ", wsfev1.FechaCbte
print "CbteNro = ", wsfev1.CbteNro
print "PuntoVenta = ", wsfev1.PuntoVenta
print "ImpTotal =", wsfev1.ImpTotal
print "CAE = ", wsfev1.CAE
print "Vencimiento = ", wsfev1.Vencimiento
print "EmisionTipo = ", wsfev1.EmisionTipo
wsfev1.AnalizarXml("XmlResponse")
p_assert_eq(wsfev1.ObtenerTagXml('CodAutorizacion'), str(wsfev1.CAE))
p_assert_eq(wsfev1.ObtenerTagXml('CbteFch'), wsfev1.FechaCbte)
p_assert_eq(wsfev1.ObtenerTagXml('MonId'), "PES")
p_assert_eq(wsfev1.ObtenerTagXml('MonCotiz'), "1")
p_assert_eq(wsfev1.ObtenerTagXml('DocTipo'), "80")
p_assert_eq(wsfev1.ObtenerTagXml('DocNro'), "30500010912")
if "--parametros" in sys.argv:
import codecs, locale, traceback
if sys.stdout.encoding is None:
sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout,"replace");
sys.stderr = codecs.getwriter(locale.getpreferredencoding())(sys.stderr,"replace");
print u'\n'.join(wsfev1.ParamGetTiposDoc())
print "=== Tipos de Comprobante ==="
print u'\n'.join(wsfev1.ParamGetTiposCbte())
print "=== Tipos de Concepto ==="
print u'\n'.join(wsfev1.ParamGetTiposConcepto())
print "=== Tipos de Documento ==="
print u'\n'.join(wsfev1.ParamGetTiposDoc())
print "=== Alicuotas de IVA ==="
print u'\n'.join(wsfev1.ParamGetTiposIva())
print "=== Monedas ==="
print u'\n'.join(wsfev1.ParamGetTiposMonedas())
print "=== Tipos de datos opcionales ==="
print u'\n'.join(wsfev1.ParamGetTiposOpcional())
print "=== Tipos de Tributo ==="
print u'\n'.join(wsfev1.ParamGetTiposTributos())
print "=== Puntos de Venta ==="
print u'\n'.join(wsfev1.ParamGetPtosVenta())
if "--cotizacion" in sys.argv:
print wsfev1.ParamGetCotizacion('DOL')
if "--comptox" in sys.argv:
print wsfev1.CompTotXRequest()
if "--ptosventa" in sys.argv:
print wsfev1.ParamGetPtosVenta()
if "--solicitar-caea" in sys.argv:
periodo = sys.argv[sys.argv.index("--solicitar-caea")+1]
orden = sys.argv[sys.argv.index("--solicitar-caea")+2]
if DEBUG:
print "Solicitando CAEA para periodo %s orden %s" % (periodo, orden)
caea = wsfev1.CAEASolicitar(periodo, orden)
print "CAEA:", caea
if wsfev1.Errores:
print "Errores:"
for error in wsfev1.Errores:
print error
if DEBUG:
print "periodo:", wsfev1.Periodo
print "orden:", wsfev1.Orden
print "fch_vig_desde:", wsfev1.FchVigDesde
print "fch_vig_hasta:", wsfev1.FchVigHasta
print "fch_tope_inf:", wsfev1.FchTopeInf
print "fch_proceso:", wsfev1.FchProceso
if not caea:
print 'Consultando CAEA'
caea = wsfev1.CAEAConsultar(periodo, orden)
print "CAEA:", caea
if wsfev1.Errores:
print "Errores:"
for error in wsfev1.Errores:
print error
if "--sinmovimiento-caea" in sys.argv:
punto_vta = sys.argv[sys.argv.index("--sinmovimiento-caea")+1]
caea = sys.argv[sys.argv.index("--sinmovimiento-caea")+2]
if DEBUG:
print "Informando Punto Venta %s CAEA %s SIN MOVIMIENTO" % (punto_vta, caea)
resultado = wsfev1.CAEASinMovimientoInformar(punto_vta, caea)
print "Resultado:", resultado
print "fch_proceso:", wsfev1.FchProceso
if wsfev1.Errores:
print "Errores:"
for error in wsfev1.Errores:
print error
# busco el directorio de instalación (global para que no cambie si usan otra dll)
INSTALL_DIR = WSFEv1.InstallDir = get_install_dir()
if __name__ == '__main__':
if "--register" in sys.argv or "--unregister" in sys.argv:
import pythoncom
if TYPELIB:
if '--register' in sys.argv:
tlb = os.path.abspath(os.path.join(INSTALL_DIR, "typelib", "wsfev1.tlb"))
print "Registering %s" % (tlb,)
tli=pythoncom.LoadTypeLib(tlb)
pythoncom.RegisterTypeLib(tli, tlb)
elif '--unregister' in sys.argv:
k = WSFEv1
pythoncom.UnRegisterTypeLib(k._typelib_guid_,
k._typelib_version_[0],
k._typelib_version_[1],
0,
pythoncom.SYS_WIN32)
print "Unregistered typelib"
import win32com.server.register
#print "_reg_class_spec_", WSFEv1._reg_class_spec_
win32com.server.register.UseCommandLine(WSFEv1)
elif "/Automate" in sys.argv:
# MS seems to like /automate to run the class factories.
import win32com.server.localserver
#win32com.server.localserver.main()
# start the server.
win32com.server.localserver.serve([WSFEv1._reg_clsid_])
else:
main()
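# Typical command-line smoke tests (these flags are the ones handled in main() above):
#   python wsfev1.py --dummy            # query AFIP server status only
#   python wsfev1.py --prueba --debug   # emit a test invoice against homologation
#   python wsfev1.py --parametros       # dump reference tables (comprobantes, IVA, monedas, ...)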
|
benjaminmgross/asset_class
|
refs/heads/master
|
docs/conf.py
|
1
|
# -*- coding: utf-8 -*-
#
# Asset Class documentation build configuration file, created by
# sphinx-quickstart on Mon May 19 02:02:37 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
s = os.path.abspath('../')
sys.path.append(s)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Asset Class'
copyright = u'2014, Benjamin M. Gross'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'AssetClassdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'AssetClass.tex', u'Asset Class Documentation',
u'Benjamin M. Gross', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'assetclass', u'Asset Class Documentation',
[u'Benjamin M. Gross'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'AssetClass', u'Asset Class Documentation',
u'Benjamin M. Gross', 'AssetClass', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
mikewiebe-ansible/ansible
|
refs/heads/devel
|
lib/ansible/plugins/action/pause.py
|
69
|
# Copyright 2012, Tim Bielawa <tbielawa@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import signal
import sys
import termios
import time
import tty
from os import isatty
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import PY3
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
display = Display()
try:
import curses
# Nest the try except since curses.error is not available if curses did not import
try:
curses.setupterm()
HAS_CURSES = True
except curses.error:
HAS_CURSES = False
except ImportError:
HAS_CURSES = False
if HAS_CURSES:
MOVE_TO_BOL = curses.tigetstr('cr')
CLEAR_TO_EOL = curses.tigetstr('el')
else:
MOVE_TO_BOL = b'\r'
CLEAR_TO_EOL = b'\x1b[K'
class AnsibleTimeoutExceeded(Exception):
pass
def timeout_handler(signum, frame):
raise AnsibleTimeoutExceeded
def clear_line(stdout):
stdout.write(b'\x1b[%s' % MOVE_TO_BOL)
stdout.write(b'\x1b[%s' % CLEAR_TO_EOL)
class ActionModule(ActionBase):
''' pauses execution for a length or time, or until input is received '''
BYPASS_HOST_LOOP = True
_VALID_ARGS = frozenset(('echo', 'minutes', 'prompt', 'seconds'))
def run(self, tmp=None, task_vars=None):
''' run the pause action module '''
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
duration_unit = 'minutes'
prompt = None
seconds = None
echo = True
echo_prompt = ''
result.update(dict(
changed=False,
rc=0,
stderr='',
stdout='',
start=None,
stop=None,
delta=None,
echo=echo
))
# Should keystrokes be echoed to stdout?
if 'echo' in self._task.args:
try:
echo = boolean(self._task.args['echo'])
except TypeError as e:
result['failed'] = True
result['msg'] = to_native(e)
return result
# Add a note saying the output is hidden if echo is disabled
if not echo:
echo_prompt = ' (output is hidden)'
# Is 'prompt' a key in 'args'?
if 'prompt' in self._task.args:
prompt = "[%s]\n%s%s:" % (self._task.get_name().strip(), self._task.args['prompt'], echo_prompt)
else:
# If no custom prompt is specified, set a default prompt
prompt = "[%s]\n%s%s:" % (self._task.get_name().strip(), 'Press enter to continue, Ctrl+C to interrupt', echo_prompt)
# Are 'minutes' or 'seconds' keys that exist in 'args'?
if 'minutes' in self._task.args or 'seconds' in self._task.args:
try:
if 'minutes' in self._task.args:
# The time() command operates in seconds so we need to
# recalculate for minutes=X values.
seconds = int(self._task.args['minutes']) * 60
else:
seconds = int(self._task.args['seconds'])
duration_unit = 'seconds'
except ValueError as e:
result['failed'] = True
result['msg'] = u"non-integer value given for prompt duration:\n%s" % to_text(e)
return result
########################################################################
# Begin the hard work!
start = time.time()
result['start'] = to_text(datetime.datetime.now())
result['user_input'] = b''
stdin_fd = None
old_settings = None
try:
if seconds is not None:
if seconds < 1:
seconds = 1
# setup the alarm handler
signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(seconds)
# show the timer and control prompts
display.display("Pausing for %d seconds%s" % (seconds, echo_prompt))
display.display("(ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort)\r"),
# show the prompt specified in the task
if 'prompt' in self._task.args:
display.display(prompt)
else:
display.display(prompt)
# save the attributes on the existing (duped) stdin so
# that we can restore them later after we set raw mode
stdin_fd = None
stdout_fd = None
try:
if PY3:
stdin = self._connection._new_stdin.buffer
stdout = sys.stdout.buffer
else:
stdin = self._connection._new_stdin
stdout = sys.stdout
stdin_fd = stdin.fileno()
stdout_fd = stdout.fileno()
except (ValueError, AttributeError):
# ValueError: someone is using a closed file descriptor as stdin
# AttributeError: someone is using a null file descriptor as stdin on windoez
stdin = None
if stdin_fd is not None:
if isatty(stdin_fd):
# grab actual Ctrl+C sequence
try:
intr = termios.tcgetattr(stdin_fd)[6][termios.VINTR]
except Exception:
# unsupported/not present, use default
intr = b'\x03' # value for Ctrl+C
# get backspace sequences
try:
backspace = termios.tcgetattr(stdin_fd)[6][termios.VERASE]
except Exception:
backspace = [b'\x7f', b'\x08']
old_settings = termios.tcgetattr(stdin_fd)
tty.setraw(stdin_fd)
# Only set stdout to raw mode if it is a TTY. This is needed when redirecting
# stdout to a file since a file cannot be set to raw mode.
if isatty(stdout_fd):
tty.setraw(stdout_fd)
# Only echo input if no timeout is specified
if not seconds and echo:
new_settings = termios.tcgetattr(stdin_fd)
new_settings[3] = new_settings[3] | termios.ECHO
termios.tcsetattr(stdin_fd, termios.TCSANOW, new_settings)
# flush the buffer to make sure no previous key presses
# are read in below
termios.tcflush(stdin, termios.TCIFLUSH)
while True:
try:
if stdin_fd is not None:
key_pressed = stdin.read(1)
if key_pressed == intr: # value for Ctrl+C
clear_line(stdout)
raise KeyboardInterrupt
if not seconds:
if stdin_fd is None or not isatty(stdin_fd):
display.warning("Not waiting for response to prompt as stdin is not interactive")
break
# read key presses and act accordingly
if key_pressed in (b'\r', b'\n'):
clear_line(stdout)
break
elif key_pressed in backspace:
# delete a character if backspace is pressed
result['user_input'] = result['user_input'][:-1]
clear_line(stdout)
if echo:
stdout.write(result['user_input'])
stdout.flush()
else:
result['user_input'] += key_pressed
except KeyboardInterrupt:
signal.alarm(0)
display.display("Press 'C' to continue the play or 'A' to abort \r"),
if self._c_or_a(stdin):
clear_line(stdout)
break
clear_line(stdout)
raise AnsibleError('user requested abort!')
except AnsibleTimeoutExceeded:
# this is the exception we expect when the alarm signal
# fires, so we simply ignore it to move into the cleanup
pass
finally:
# cleanup and save some information
# restore the old settings for the duped stdin stdin_fd
if not(None in (stdin_fd, old_settings)) and isatty(stdin_fd):
termios.tcsetattr(stdin_fd, termios.TCSADRAIN, old_settings)
duration = time.time() - start
result['stop'] = to_text(datetime.datetime.now())
result['delta'] = int(duration)
if duration_unit == 'minutes':
duration = round(duration / 60.0, 2)
else:
duration = round(duration, 2)
result['stdout'] = "Paused for %s %s" % (duration, duration_unit)
result['user_input'] = to_text(result['user_input'], errors='surrogate_or_strict')
return result
def _c_or_a(self, stdin):
while True:
key_pressed = stdin.read(1)
if key_pressed.lower() == b'a':
return False
elif key_pressed.lower() == b'c':
return True
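# Rough playbook usage sketch (argument names match _VALID_ARGS above; the task
# values are illustrative only):
#
#   - name: wait for the operator before continuing
#     pause:
#       prompt: "Check the service logs, then press enter"
#       echo: no
#
#   - name: plain timed pause
#     pause:
#       seconds: 30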
|
AlexandreProenca/django-elasticsearch
|
refs/heads/master
|
django_elasticsearch/tests/test_restframework.py
|
1
|
# -*- coding: utf-8 -*-
import mock
from rest_framework import status
from rest_framework import VERSION
from rest_framework.settings import api_settings
from rest_framework.test import APIClient
from django.test import TestCase
from django.db.models.query import QuerySet
from django.contrib.auth.models import User
from elasticsearch import TransportError
from django_elasticsearch.client import es_client
from django_elasticsearch.tests.utils import withattrs
from django_elasticsearch.contrib.restframework import ElasticsearchFilterBackend
from test_app.models import TestModel
class Fake():
pass
class EsRestFrameworkTestCase(TestCase):
def setUp(self):
TestModel.es.create_index()
self.model1 = TestModel.objects.create(username='1', first_name='test')
self.model1.es.do_index()
self.model2 = TestModel.objects.create(username='2', last_name='test')
self.model2.es.do_index()
self.model3 = TestModel.objects.create(username='whatever')
self.model3.es.do_index()
TestModel.es.do_update()
self.fake_request = Fake()
if int(VERSION[0]) < 3:
self.fake_request.QUERY_PARAMS = {api_settings.SEARCH_PARAM: 'test'}
else:
self.fake_request.query_params = {api_settings.SEARCH_PARAM: 'test'}
self.fake_request.GET = {api_settings.SEARCH_PARAM: 'test'}
self.fake_view = Fake()
self.fake_view.action = 'list'
def tearDown(self):
super(EsRestFrameworkTestCase, self).tearDown()
es_client.indices.delete(index=TestModel.es.get_index())
def _test_filter_backend(self):
queryset = TestModel.es.all()
filter_backend = ElasticsearchFilterBackend()
queryset = filter_backend.filter_queryset(self.fake_request, queryset, self.fake_view)
l = queryset.deserialize()
self.assertTrue(self.model1 in l)
self.assertTrue(self.model2 in l)
self.assertFalse(self.model3 in l)
def test_filter_backend(self):
self._test_filter_backend()
def test_filter_backend_on_normal_model(self):
filter_backend = ElasticsearchFilterBackend()
with self.assertRaises(ValueError):
filter_backend.filter_queryset(self.fake_request, User.objects.all(), self.fake_view)
def test_filter_backend_ordering(self):
queryset = TestModel.es.all()
filter_backend = ElasticsearchFilterBackend()
self.fake_view.ordering = ('-username',)
queryset = filter_backend.filter_queryset(self.fake_request, queryset, self.fake_view).deserialize()
self.assertEqual(queryset[0].id, self.model2.id)
self.assertEqual(queryset[1].id, self.model1.id)
del self.fake_view.ordering
def test_filter_backend_no_list(self):
queryset = TestModel.es.all()
filter_backend = ElasticsearchFilterBackend()
self.fake_view.action = 'create'
queryset = filter_backend.filter_queryset(self.fake_request, queryset, self.fake_view)
# the 'normal' dataflow continues
self.assertTrue(isinstance(queryset, QuerySet))
self.fake_view.action = 'list'
def _test_filter_backend_filters(self):
r = self.client.get('/rf/tests/', {'username': '1'})
self.assertEqual(r.data['count'], 1)
self.assertEqual(r.data['results'][0]['id'], self.model1.id)
def test_filter_backend_filters(self):
self._test_filter_backend_filters()
def test_404(self):
r = self.client.get('/rf/tests/354xyz/', {'username': '1'})
self.assertEqual(r.status_code, 404)
def _test_pagination(self):
r = self.client.get('/rf/tests/', {'ordering': '-id', 'page': 2, 'page_size':1})
self.assertEqual(r.data['count'], 3)
self.assertEqual(r.data['results'][0]['id'], self.model2.id)
def test_pagination(self):
self._test_pagination()
@withattrs(TestModel.Elasticsearch, 'facets_fields', ['first_name',])
def test_facets(self):
queryset = TestModel.es.all()
filter_backend = ElasticsearchFilterBackend()
s = filter_backend.filter_queryset(self.fake_request, queryset, self.fake_view)
expected = {u'doc_count': 3,
u'first_name': {u'buckets': [{u'doc_count': 1,
u'key': u'test'}]}}
self.assertEqual(s.facets, expected)
@withattrs(TestModel.Elasticsearch, 'facets_fields', ['first_name',])
def test_faceted_viewset(self):
r = self.client.get('/rf/tests/', {'q': 'test'})
self.assertTrue('facets' in r.data)
@withattrs(TestModel.Elasticsearch, 'suggest_fields', ['first_name'])
def test_suggestions_viewset(self):
r = self.client.get('/rf/tests/', {'q': 'tset'})
self.assertTrue('suggestions' in r.data)
self.assertEqual(r.data['suggestions']['first_name'][0]['options'][0]['text'], "test")
@withattrs(TestModel.Elasticsearch, 'completion_fields', ['username'])
def test_completion_viewset(self):
# need to re-index :(
TestModel.es.flush()
TestModel.es.do_update()
r = self.client.get('/rf/tests/autocomplete/', {'f': 'username',
'q': 'what'})
self.assertTrue('whatever' in r.data)
r = self.client.get('/rf/tests/autocomplete/', {'f': 'first_name',
'q': 'woo'})
# first_name is NOT in the completion_fields -> 404
self.assertEqual(r.status_code, 404)
def test_post_put_delete(self):
client = APIClient()
# make sure we don't break other methods
r = client.post('/rf/tests/', {
'email': u'test@test.com',
'username': u'test',
'password': u'test'
})
self.assertEqual(r.status_code, status.HTTP_201_CREATED) # created
pk = r.data['id']
r = client.patch('/rf/tests/{0}/'.format(pk), {
'username': u'test2',
'password': u'test'
}, format='json')
self.assertEqual(r.status_code, 200)
self.assertEqual(TestModel.objects.get(pk=pk).username, 'test2')
r = client.delete('/rf/tests/{0}/'.format(pk))
self.assertEqual(r.status_code, status.HTTP_204_NO_CONTENT)
self.assertFalse(TestModel.objects.filter(pk=pk).exists())
def test_fallback_gracefully(self):
# Note: can't use override settings because of how restframework handle settings :(
#from django_elasticsearch.tests.urls import TestViewSet
from rest_framework.filters import DjangoFilterBackend, OrderingFilter
from rest_framework.settings import api_settings
api_settings.DEFAULT_FILTER_BACKENDS = (DjangoFilterBackend, OrderingFilter)
# TODO: better way to fake es cluster's death ?
with mock.patch.object(es_client, 'search') as mock_search:
mock_search.side_effect = TransportError()
with mock.patch.object(es_client, 'count') as mock_count:
mock_count.side_effect = TransportError()
with mock.patch.object(es_client, 'get') as mock_get:
mock_get.side_effect = TransportError()
# should fallback to a regular django queryset / filtering
r = self.client.get('/rf/tests/')
self.assertEqual(r.status_code, 200)
self.assertEqual(r.data['filter_status'], 'Failed')
self.assertEqual(r.data['count'], 3)
self._test_filter_backend_filters()
self._test_pagination()
|
ahuarte47/QGIS
|
refs/heads/master
|
python/console/console_compile_apis.py
|
59
|
# -*- coding:utf-8 -*-
"""
/***************************************************************************
Module to generate prepared APIs for calltips and auto-completion.
-------------------
begin : 2012-09-10
copyright : (C) 2012 Larry Shaffer
email : larrys (at) dakotacarto (dot) com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
Portions of this file contain code from Eric4 APIsManager module.
"""
import os
from qgis.PyQt.Qsci import QsciAPIs, QsciLexerPython
from qgis.PyQt.QtWidgets import QDialog, QDialogButtonBox
from qgis.PyQt.QtCore import QCoreApplication
from .ui_console_compile_apis import Ui_APIsDialogPythonConsole
class PrepareAPIDialog(QDialog):
def __init__(self, api_lexer, api_files, pap_file, parent=None):
QDialog.__init__(self, parent)
self.ui = Ui_APIsDialogPythonConsole()
self.ui.setupUi(self)
self.setWindowTitle(QCoreApplication.translate("PythonConsole", "Compile APIs"))
self.ui.plainTextEdit.setVisible(False)
self.ui.textEdit_Qsci.setVisible(False)
self.adjustSize()
self._api = None
self.ui.buttonBox.rejected.connect(self._stopPreparation)
self._api_files = api_files
self._api_lexer = api_lexer
self._pap_file = pap_file
def _clearLexer(self):
# self.ui.textEdit_Qsci.setLexer(0)
self.qlexer = None
def _stopPreparation(self):
if self._api is not None:
self._api.cancelPreparation()
self._api = None
self._clearLexer()
self.close()
def _preparationFinished(self):
self._clearLexer()
if os.path.exists(self._pap_file):
os.remove(self._pap_file)
self.ui.label.setText(QCoreApplication.translate("PythonConsole", "Saving prepared file…"))
prepd = self._api.savePrepared(self._pap_file)
rslt = self.tr("Error")
if prepd:
rslt = QCoreApplication.translate("PythonConsole", "Saved")
self.ui.label.setText('{0} {1}'.format(self.ui.label.text(), rslt))
self._api = None
self.ui.progressBar.setVisible(False)
self.ui.buttonBox.button(QDialogButtonBox.Cancel).setText(
QCoreApplication.translate("PythonConsole", "Done"))
self.adjustSize()
def prepareAPI(self):
# self.ui.textEdit_Qsci.setLexer(0)
exec('self.qlexer = {0}(self.ui.textEdit_Qsci)'.format(self._api_lexer))
# self.ui.textEdit_Qsci.setLexer(self.qlexer)
self._api = QsciAPIs(self.qlexer)
self._api.apiPreparationFinished.connect(self._preparationFinished)
for api_file in self._api_files:
self._api.load(api_file)
try:
self._api.prepare()
except Exception as err:
self._api = None
self._clearLexer()
self.ui.label.setText(QCoreApplication.translate("PythonConsole", "Error preparing file…"))
self.ui.progressBar.setVisible(False)
self.ui.plainTextEdit.setVisible(True)
self.ui.plainTextEdit.insertPlainText(err)
self.ui.buttonBox.button(QDialogButtonBox.Cancel).setText(self.tr("Done"))
self.adjustSize()
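# Rough usage sketch (paths are hypothetical; in QGIS the console supplies its own
# list of .api files and the target .pap path):
#
#   dlg = PrepareAPIDialog('QsciLexerPython', ['/path/to/qgis.api'], '/path/to/qgis.pap')
#   dlg.show()
#   dlg.prepareAPI()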
|
alfie-max/Publish
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import setup, find_packages
setup(
name = 'python-publish',
version = '1.0.0',
author = 'Alfred Dominic, Shahul Hameed',
author_email = 'alfie.2012@gmail.com',
packages = find_packages(exclude=[]),
scripts = ['publish'],
url = 'https://github.com/alfie-max/publish',
description = 'Program to broadcast an announcement on multiple social media channels',
long_description = open('README.md').read(),
install_requires=[
"argparse==1.2.1",
"beautifulsoup4==4.3.2",
"configobj==5.0.5",
"facebook-sdk==0.4.0",
"mechanize==0.2.5",
"Pillow==2.5.3",
"termcolor==1.1.0",
"tweepy==2.3.0",
"wsgiref==0.1.2",
],
)
|
G0retZ/pjproject
|
refs/heads/master
|
tests/pjsua/scripts-pesq/201_codec_speex_8000.py
|
42
|
# $Id$
#
from inc_cfg import *
# Call with Speex/8000 codec
test_param = TestParam(
"PESQ codec Speex NB (RX side uses snd dev)",
[
InstanceParam("UA1", "--max-calls=1 --add-codec speex/8000 --clock-rate 8000 --play-file wavs/input.8.wav --null-audio"),
InstanceParam("UA2", "--max-calls=1 --add-codec speex/8000 --clock-rate 8000 --rec-file wavs/tmp.8.wav --auto-answer 200")
]
)
if (HAS_SND_DEV == 0):
test_param.skip = True
pesq_threshold = 3.0
|
veltzer/demos-python
|
refs/heads/master
|
src/examples/short/dictionaries/popitem_is_atomic.py
|
1
|
#!/usr/bin/env python
"""
This example shows d.popitem().
The main point is that this method is atomic and can be used
to distribute values of a dictionary between threads or processes
in a multi-threaded or multi-processed python application.
"""
d = {
"one": 1,
"two": 2,
"three": 3,
"four": 4,
}
while d:
key, value = d.popitem()
print(key, '-->', value)
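# --- Illustrative sketch (added for clarity; not part of the original example) ---
# The threaded case: in CPython each popitem() call atomically removes exactly
# one key/value pair, so several worker threads can drain one shared dict
# without a lock and every item is handed to exactly one worker.
import threading
def drain(shared, out):
    while True:
        try:
            out.append(shared.popitem())
        except KeyError:  # dict is empty, nothing left to hand out
            break
shared = {i: str(i) for i in range(100)}
results = []
workers = [threading.Thread(target=drain, args=(shared, results)) for _ in range(4)]
for w in workers:
    w.start()
for w in workers:
    w.join()
print(len(results), 'items consumed; dict drained:', not shared)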
|
specdb/uvqs
|
refs/heads/master
|
uvqs/fuv.py
|
1
|
""" Module to ingest SDSS III (aka BOSS) data products
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import os, json
import pdb
from astropy.table import Table, Column, vstack
from astropy.time import Time
from astropy.io import fits
from linetools import utils as ltu
from linetools.spectra import io as lsio
from specdb.build.utils import chk_for_duplicates
from specdb.build.utils import chk_meta
from specdb.build.utils import init_data
def grab_meta():
""" Grab meta Table
Returns
-------
    uvqs_fuv : Table
"""
#http://www.sdss.org/dr12/algorithms/boss-dr12-quasar-catalog/
uvqs_fuv = Table.read(os.getenv('DROPBOX_DIR')+'/z1QSO/database/uvq_dr1_v1.7.fits')
nuvqs = len(uvqs_fuv)
# DATE-OBS -- Grab from header
#t = Time(list(uvqs_fuv['MJD'].data), format='mjd', out_subfmt='date') # Fixes to YYYY-MM-DD
#boss_meta.add_column(Column(t.iso, name='DATE-OBS'))
# Add columns -- From specfiles
#boss_meta.add_column(Column(['BOSS']*nboss, name='INSTR'))
#boss_meta.add_column(Column(['BOTH']*nboss, name='GRATING'))
#boss_meta.add_column(Column([2100.]*nboss, name='R')) # RESOLUTION
#boss_meta.add_column(Column(['SDSS 2.5-M']*nboss, name='TELESCOPE'))
# Redshift logic
uvqs_fuv['zem_GROUP'] = uvqs_fuv['Z']
uvqs_fuv['sig_zem'] = uvqs_fuv['Z_SIG']
uvqs_fuv['flag_zem'] = [str('UVQS')]*nuvqs
# Rename RA/DEC
uvqs_fuv.rename_column('RA', 'RA_GROUP')
uvqs_fuv.rename_column('DEC', 'DEC_GROUP')
# STYPE
uvqs_fuv['STYPE'] = [str('QSO')]*nuvqs
# Check
assert chk_meta(uvqs_fuv, chk_cat_only=True)
# Return
return uvqs_fuv
def hdf5_adddata(hdf, sname, meta, debug=False, chk_meta_only=False, **kwargs):
""" Add BOSS data to the DB
Parameters
----------
    hdf : hdf5 pointer
    sname : str
      Survey name
    meta : Table
      Meta data Table for the group (as returned by grab_meta())
    debug : bool, optional
    chk_meta_only : bool, optional
      Only check meta file; will not write
Returns
-------
"""
from specdb import defs
from astropy.time import Time
# Init
Rdicts = defs.get_res_dicts()
dpath = os.getenv('DROPBOX_DIR') + 'z1QSO/database/1D_Spectra/'
# Add Survey
print("Adding {:s} survey to DB".format(sname))
uvqs_grp = hdf.create_group(sname)
# Build spectra (and parse for meta)
nspec = len(meta)
max_npix = 30000 # Just needs to be large enough
data = init_data(max_npix, include_co=False)
# Init
spec_set = hdf[sname].create_dataset('spec', data=data, chunks=True,
maxshape=(None,), compression='gzip')
spec_set.resize((nspec,))
wvminlist = []
wvmaxlist = []
speclist = []
npixlist = []
instrlist = []
gratinglist = []
Rlist = []
timelist = []
telelist = []
# Loop
maxpix = 0
for jj,row in enumerate(meta):
# Generate full file
full_file = dpath+row['SPEC_FILE']
#print("Reading {:s}".format(full_file))
# Read
if 'CAHA' in full_file:
spec = lsio.readspec(full_file, masking='none')
else:
spec = lsio.readspec(full_file)
# npix
try:
npix = spec.npix
except ValueError: # Bad CAHA data
npix = spec.flux.size
maxpix = max(npix,maxpix)
# Parse name
fname = full_file.split('/')[-1]
# Fill
for key in ['wave','flux','sig']:
data[key] = 0. # Important to init (for compression too)
data['flux'][0][:npix] = spec.flux.value
data['sig'][0][:npix] = spec.sig.value
data['wave'][0][:npix] = spec.wavelength.value
# Meta
speclist.append(str(fname))
wvminlist.append(np.min(data['wave'][0][:npix]))
wvmaxlist.append(np.max(data['wave'][0][:npix]))
npixlist.append(npix)
if 'LCO' in full_file:
instrlist.append('duPont-BCS')
telelist.append('duPont')
gratinglist.append('600/5000') # Not accurate for all data, I suspect
Rlist.append(1200.)
timelist.append(spec.header['UT-DATE']+'T'+spec.header['UT-TIME'])
elif 'Lick' in full_file:
instrlist.append('Kast')
gratinglist.append('Both')
Rlist.append(1000.)
telelist.append('Lick-3m')
timelist.append(spec.header['DATE-OBS'])
elif 'CAHA' in full_file:
instrlist.append('CAFOS')
telelist.append('CAHA')
gratinglist.append('??')
Rlist.append(1000.)
if 'Aug' in row['OBS_DATE']:
timelist.append(row['OBS_DATE'][-4:]+'-08-01')
else:
pdb.set_trace() # TIME
elif 'Keck' in full_file:
instrlist.append('ESI')
telelist.append('Keck-II')
gratinglist.append('ECH')
Rlist.append(Rdicts['ESI'][spec.header['SLMSKNAM']])
timelist.append(spec.header['DATE-OBS']+'T'+spec.header['UT'])
elif 'MMT' in full_file:
instrlist.append('mmtbluechan')
telelist.append('MMT')
gratinglist.append('??')
Rlist.append(1000.)
timelist.append(spec.header['DATE-OBS']+'T'+spec.header['UT'])
elif 'Magellan' in full_file:
instrlist.append('MagE')
telelist.append('Magellan')
gratinglist.append('N/A')
Rlist.append(5000.)
timelist.append(spec.header['UT-DATE']+'T'+spec.header['UT-TIME'])
else:
pdb.set_trace()
# Only way to set the dataset correctly
spec_set[jj] = data
#
print("Max pix = {:d}".format(maxpix))
# Add columns
t = Time(timelist, out_subfmt='date') # Fixes to YYYY-MM-DD
meta.add_column(Column(t.iso, name='DATE-OBS'))
#meta.add_column(Column(speclist, name='SPEC_FILE'))
meta.add_column(Column(npixlist, name='NPIX'))
meta.add_column(Column(wvminlist, name='WV_MIN'))
meta.add_column(Column(wvmaxlist, name='WV_MAX'))
meta.add_column(Column(Rlist, name='R'))
meta.add_column(Column(gratinglist, name='DISPERSER'))
meta.add_column(Column(telelist, name='TELESCOPE'))
meta.add_column(Column(instrlist, name='INSTR'))
meta.add_column(Column(np.arange(nspec,dtype=int),name='GROUP_ID'))
meta.add_column(Column([2000.]*len(meta), name='EPOCH'))
    # Add UVQS FUV meta to hdf5
if chk_meta(meta):
if chk_meta_only:
pdb.set_trace()
hdf[sname]['meta'] = meta
else:
pdb.set_trace()
raise ValueError("meta file failed")
# References
refs = [dict(url='http://adsabs.harvard.edu/abs/2016AJ....152...25M',
bib='uvqs'),
]
jrefs = ltu.jsonify(refs)
hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
#
return
def add_ssa(hdf, dset):
""" Add SSA info to meta dataset
Parameters
----------
hdf
dset : str
"""
from specdb.ssa import default_fields
Title = '{:s}: UVQS FUV Quasars'.format(dset)
ssa_dict = default_fields(Title, flux='flambda', fxcalib='RELATIVE')
hdf[dset]['meta'].attrs['SSA'] = json.dumps(ltu.jsonify(ssa_dict))
|
hellodata/hellodate
|
refs/heads/master
|
2/site-packages/django/utils/html_parser.py
|
79
|
from django.utils.six.moves import html_parser as _html_parser
import re
import sys
current_version = sys.version_info
use_workaround = (
(current_version < (2, 7, 3)) or
(current_version >= (3, 0) and current_version < (3, 2, 3))
)
HTMLParseError = _html_parser.HTMLParseError
if not use_workaround:
if current_version >= (3, 4):
class HTMLParser(_html_parser.HTMLParser):
"""Explicitly set convert_charrefs to be False.
This silences a deprecation warning on Python 3.4, but we can't do
it at call time because Python 2.7 does not have the keyword
argument.
"""
def __init__(self, convert_charrefs=False, **kwargs):
_html_parser.HTMLParser.__init__(self, convert_charrefs=convert_charrefs, **kwargs)
else:
HTMLParser = _html_parser.HTMLParser
else:
tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
class HTMLParser(_html_parser.HTMLParser):
"""
Patched version of stdlib's HTMLParser with patch from:
http://bugs.python.org/issue670664
"""
def __init__(self):
_html_parser.HTMLParser.__init__(self)
self.cdata_tag = None
def set_cdata_mode(self, tag):
try:
self.interesting = _html_parser.interesting_cdata
except AttributeError:
self.interesting = re.compile(r'</\s*%s\s*>' % tag.lower(), re.I)
self.cdata_tag = tag.lower()
def clear_cdata_mode(self):
self.interesting = _html_parser.interesting_normal
self.cdata_tag = None
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i + 1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = match.group(1).lower()
while k < endpos:
m = _html_parser.attrfind.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif (attrvalue[:1] == '\'' == attrvalue[-1:] or
attrvalue[:1] == '"' == attrvalue[-1:]):
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = (len(self.__starttag_text)
- self.__starttag_text.rfind("\n"))
else:
offset = offset + len(self.__starttag_text)
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag) # <--------------------------- Changed
return endpos
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
rawdata = self.rawdata
assert rawdata[i:i + 2] == "</", "unexpected call to parse_endtag"
match = _html_parser.endendtag.search(rawdata, i + 1) # >
if not match:
return -1
j = match.end()
match = _html_parser.endtagfind.match(rawdata, i) # </ + tag + >
if not match:
if self.cdata_tag is not None: # *** add ***
self.handle_data(rawdata[i:j]) # *** add ***
return j # *** add ***
self.error("bad end tag: %r" % (rawdata[i:j],))
# --- changed start ---------------------------------------------------
tag = match.group(1).strip()
if self.cdata_tag is not None:
if tag.lower() != self.cdata_tag:
self.handle_data(rawdata[i:j])
return j
# --- changed end -----------------------------------------------------
self.handle_endtag(tag.lower())
self.clear_cdata_mode()
return j
|
buckinha/gravity
|
refs/heads/master
|
optimize_ML.py
|
1
|
import MDP, MDP_opt, os, pickle
def optimize_ML(starting_policy_filename, objective_fn="J3"):
"""This function repeats short gradient decents until new pathways need to be generated.
"""
#Load the two pathway sets
print("")
print("Loading training and holdout sets...")
holdout_set = load_pathway_set("ML_holdout_set")
training_set = load_pathway_set("ML_training_set")
print("--- " + str(len(training_set)) + " training set pathways loaded")
print("--- " + str(len(holdout_set)) + " holdout set pathways loaded")
    #when evaluating each new policy against the holdout set, some disimprovement can be allowed
    #this amount will be added (a disimprovement) to the previous best value. New values must fall
    #below the sum, or else the policy will be considered invalid against the holdout set.
holdout_wiggle = 0
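    #(illustrative example: with holdout_wiggle = 0.05, a new holdout value of 1.03 would
    # still be accepted when the previous best was 1.00, because 1.03 < 1.00 + 0.05)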
#flags
opt_fail = False
holdout_fail = False
    #failsafe counter: ML loop will exit if it goes around this many times, regardless of improvements
fail_safe_count = 10
#iterations need to be counted
iter_count = 0
#creating policy objects.
ML_pol = load_policy(starting_policy_filename)
holdout_pol = MDP.MDP_Policy(len(ML_pol.get_params()))
#copying values from ML_pol to holdout_pol without self-references
holdout_pol.set_params(ML_pol.get_params()[:])
#print("Creating optimizer objects...")
#create a FireGirlPolicyOptimizer object and load up the info it needs
opt = MDP_opt.Optimizer(len(ML_pol.b))
opt.pathway_set = training_set
opt.Policy = ML_pol
#populating initial weights
opt.calc_pathway_weights()
opt.set_obj_fn(objective_fn)
#create a second optimizer object
opt_holdout = MDP_opt.Optimizer(len(holdout_pol.b))
opt_holdout.pathway_set = holdout_set
def calc_pathway_average_prob(self, pathway):
"""Returns the average probability this pathway's actions, given the current policy"""
sum_of_probs = 0
for ev in pathway.events:
#use the current policy to calculate a new probability with the original features
sum_of_probs += self.Policy.calc_action_prob(ev)
#now that we've summed all the probabilities, divide by the total number of events
ave_prob = sum_of_probs / len(pathway.events)
return ave_prob
opt_holdout.Policy = holdout_pol
#populating initial weights
opt_holdout.calc_pathway_weights()
opt_holdout.set_obj_fn(objective_fn)
#Get the current values against the given policy
best_holdout_val = opt_holdout.calc_obj_fn()
best_opt_val = opt.calc_obj_fn()
    #how many gradient descent steps to allow?
descent_steps = 1
print("")
print("Initial training set obj.fn. val: " + str(round(best_opt_val)) )
print("Initial holdout set obj.fn. val: " + str(round(best_holdout_val)) )
    #Beginning Machine Learning Loop
print("")
print("Beginning ML loop...")
while True:
#checking failsafe, to avoid infinite loops
iter_count += 1
if iter_count > fail_safe_count:
print("optimize_ML() has reached its failsafe limit on iterations, and is exiting")
break
print(" l-bfgs-b pass " + str(iter_count))
opt_result = opt.optimize_policy(descent_steps)
#pulling out individual results
#(ignoring the original values and just pulling the resultant values)
opt_result_b = opt_result[0][1]
opt_result_val = opt_result[1][1]
opt_result_dict = opt_result[2]
#checking for improvements gained by this policy
if opt_result_val <= best_opt_val:
#improvement was found, so record it
best_opt_val = opt_result_val
else:
#no improvement was found, so exit the loop
opt_fail = True
#checking for improvements gained against the holdout set
#setting params to the new ones
opt_holdout.Policy.set_params(opt_result_b[:])
new_holdout_val = opt_holdout.calc_obj_fn()
if new_holdout_val < best_holdout_val + holdout_wiggle:
#improvement was found, so record it
best_holdout_val = new_holdout_val
else:
#no improvement was found, so exit the loop
holdout_fail = True
#if improvements were found in BOTH the training AND the holdout set, record the new policy and continue
if (not opt_fail) and (not holdout_fail):
#improvements in both, so record the policy for the next iteration
ML_pol.set_params(opt_result_b[:])
else:
#one of the two failed, so exit the loop
break
#Machine Learning Loop has exited
print("")
if opt_fail:
print("scipy.optimize.fmin_l_bfgs_b has found a local optima")
if holdout_fail:
print("The final policy did not improve the expectation on the holdout set")
#print final values
print("Final training set obj.fn. val: " + str(round(best_opt_val)) )
print("Final holdout set obj.fn. val: " + str(round(best_holdout_val)) )
opt.save_policy("ML_policies" + os.sep + "ML_" + objective_fn + "_from_" + str(len(training_set)) + "_pathways.policy")
def load_pathway_set(subfolder):
"""Loads a saved set of pathways from current_directory/subfolder
"""
pathway_set = []
#look at every *.pathways file in the training set folder
for f in os.listdir(subfolder):
if f.endswith(".pathways"):
f_name = subfolder + os.sep + f
pkl_file = open(f_name, 'rb')
this_set = pickle.load(pkl_file)
pkl_file.close()
#force each pathway to update their values
#for pw in this_set:
# pw.update_net_value()
            #and add these to the pathway set
pathway_set = pathway_set + this_set
return pathway_set
def load_policy(filename):
"""Loads a policy from the given folder"""
pkl_file = open(filename, 'rb')
pol = pickle.load(pkl_file)
pkl_file.close()
return pol
|
Backspace-Dev/x920d-jp
|
refs/heads/master
|
tools/perf/util/setup.py
|
4998
|
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
|
assassinen/python_training
|
refs/heads/master
|
data/groups.py
|
1
|
__author__ = 'NovikovII'
from model.group import Group
import random
import string
constant = [
Group(name="name1", header="header1", footer="footer1"),
]
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits #+ string.punctuation + " "*10
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
testdata = [
Group(name=name, header=header, footer=footer)
for name in ["", random_string("name", 10)]
for header in ["", random_string("header", 20)]
for footer in ["", random_string("footer", 20)]
]
# testdata = [Group(name="", header="", footer="")] + [
# Group(name=random_string("name", 10), header=random_string("header", 20), footer=random_string("footer", 20))
# for i in range(n)
# ]
#testdata = [Group(name="namea @RM&j ", header="header!4YJ,mAnXC,xfae B", footer="footerfQ 8C+THgLR#|")]
#testdata = [Group(name="", header="header/IPWt5D'_<Q", footer="")]
# testdata = [Group(name="", header="", footer="")] + [
# Group(name=random_string("name", 10), header=random_string("header", 20), footer=random_string("footer", 20))
# for i in range(5)
# ]
|
Alex-Just/gymlog
|
refs/heads/dev
|
gymlog/users/urls.py
|
57
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from . import views
urlpatterns = [
url(
regex=r'^$',
view=views.UserListView.as_view(),
name='list'
),
url(
regex=r'^~redirect/$',
view=views.UserRedirectView.as_view(),
name='redirect'
),
url(
regex=r'^(?P<username>[\w.@+-]+)/$',
view=views.UserDetailView.as_view(),
name='detail'
),
url(
regex=r'^~update/$',
view=views.UserUpdateView.as_view(),
name='update'
),
]
|
JaDogg/__py_playground
|
refs/heads/master
|
reference/examples-v3/java/python/tests/t2.py
|
3
|
a = []
b = 3 # test end of line
# two in a row
# bar
# all by itself
# before stmt
a = 3
a = 4
# after
|
nelson-liu/scikit-learn
|
refs/heads/master
|
sklearn/utils/tests/test_estimator_checks.py
|
3
|
import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.utils.estimator_checks import check_no_fit_attributes_set_in_init
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import MultiTaskElasticNet
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class ChangesDict(BaseEstimator):
def __init__(self):
self.key = 0
def fit(self, X, y=None):
X, y = check_X_y(X, y)
return self
def predict(self, X):
X = check_array(X)
self.key = 1000
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
class NoSampleWeightPandasSeriesType(BaseEstimator):
def fit(self, X, y, sample_weight=None):
# Convert data
X, y = check_X_y(X, y,
accept_sparse=("csr", "csc"),
multi_output=True,
y_numeric=True)
# Function is only called after we verify that pandas is installed
from pandas import Series
if isinstance(sample_weight, Series):
raise ValueError("Estimator does not accept 'sample_weight'"
"of type pandas.Series")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that sample_weights in fit accepts pandas.Series type
try:
from pandas import Series # noqa
msg = ("Estimator NoSampleWeightPandasSeriesType raises error if "
"'sample_weight' parameter is of type pandas.Series")
assert_raises_regex(
ValueError, msg, check_estimator, NoSampleWeightPandasSeriesType)
except ImportError:
pass
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check that estimator state does not change
# at transform/predict/predict_proba time
msg = 'Estimator changes __dict__ during predict'
assert_raises_regex(AssertionError, msg, check_estimator, ChangesDict)
# check for sparse matrix input handling
name = NoSparseClassifier.__name__
msg = "Estimator " + name + " doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(AdaBoostClassifier)
check_estimator(MultiTaskElasticNet)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
def test_check_no_fit_attributes_set_in_init():
class NonConformantEstimator(object):
def __init__(self):
self.you_should_not_set_this_ = None
msg = ("By convention, attributes ending with '_'.+"
'should not be initialized in the constructor.+'
"Attribute 'you_should_not_set_this_' was found.+"
'in estimator estimator_name')
assert_raises_regex(AssertionError, msg,
check_no_fit_attributes_set_in_init,
'estimator_name',
NonConformantEstimator)
|
taohungyang/cloud-custodian
|
refs/heads/master
|
tests/test_report.py
|
1
|
# Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from c7n.policy import Policy
from c7n.reports.csvout import Formatter
from .common import Config, load_data
EC2_POLICY = Policy({"name": "report-test-ec2", "resource": "ec2"}, Config.empty())
ASG_POLICY = Policy({"name": "report-test-asg", "resource": "asg"}, Config.empty())
ELB_POLICY = Policy({"name": "report-test-elb", "resource": "elb"}, Config.empty())
class TestEC2Report(unittest.TestCase):
def setUp(self):
data = load_data("report.json")
self.records = data["ec2"]["records"]
self.headers = data["ec2"]["headers"]
self.rows = data["ec2"]["rows"]
def test_csv(self):
formatter = Formatter(EC2_POLICY.resource_manager.resource_type)
tests = [
(["full"], ["full"]),
(["minimal"], ["minimal"]),
(["full", "minimal"], ["full", "minimal"]),
(["full", "duplicate", "minimal"], ["full", "minimal"]),
]
for rec_ids, row_ids in tests:
recs = list(map(lambda x: self.records[x], rec_ids))
rows = list(map(lambda x: self.rows[x], row_ids))
self.assertEqual(formatter.to_csv(recs), rows)
def test_custom_fields(self):
# Test the ability to include custom fields.
extra_fields = [
"custom_field=CustomField",
"missing_field=MissingField",
"custom_tag=tag:CustomTag",
]
# First do a test with adding custom fields to the normal ones
formatter = Formatter(
EC2_POLICY.resource_manager.resource_type, extra_fields=extra_fields
)
recs = [self.records["full"]]
rows = [self.rows["full_custom"]]
self.assertEqual(formatter.to_csv(recs), rows)
# Then do a test with only having custom fields
formatter = Formatter(
EC2_POLICY.resource_manager.resource_type,
extra_fields=extra_fields,
include_default_fields=False,
)
recs = [self.records["full"]]
rows = [self.rows["minimal_custom"]]
self.assertEqual(formatter.to_csv(recs), rows)
class TestASGReport(unittest.TestCase):
def setUp(self):
data = load_data("report.json")
self.records = data["asg"]["records"]
self.headers = data["asg"]["headers"]
self.rows = data["asg"]["rows"]
def test_csv(self):
formatter = Formatter(ASG_POLICY.resource_manager.resource_type)
tests = [
(["full"], ["full"]),
(["minimal"], ["minimal"]),
(["full", "minimal"], ["full", "minimal"]),
(["full", "duplicate", "minimal"], ["full", "minimal"]),
]
for rec_ids, row_ids in tests:
recs = list(map(lambda x: self.records[x], rec_ids))
rows = list(map(lambda x: self.rows[x], row_ids))
self.assertEqual(formatter.to_csv(recs), rows)
class TestELBReport(unittest.TestCase):
def setUp(self):
data = load_data("report.json")
self.records = data["elb"]["records"]
self.headers = data["elb"]["headers"]
self.rows = data["elb"]["rows"]
def test_csv(self):
formatter = Formatter(ELB_POLICY.resource_manager.resource_type)
tests = [
(["full"], ["full"]),
(["minimal"], ["minimal"]),
(["full", "minimal"], ["full", "minimal"]),
(["full", "duplicate", "minimal"], ["full", "minimal"]),
]
for rec_ids, row_ids in tests:
recs = list(map(lambda x: self.records[x], rec_ids))
rows = list(map(lambda x: self.rows[x], row_ids))
self.assertEqual(formatter.to_csv(recs), rows)
class TestMultiReport(unittest.TestCase):
def setUp(self):
data = load_data("report.json")
self.records = data["ec2"]["records"]
self.headers = data["ec2"]["headers"]
self.rows = data["ec2"]["rows"]
def test_csv(self):
# Test the extra headers for multi-policy
formatter = Formatter(
EC2_POLICY.resource_manager.resource_type,
include_region=True,
include_policy=True,
)
tests = [(["minimal"], ["minimal_multipolicy"])]
for rec_ids, row_ids in tests:
recs = list(map(lambda x: self.records[x], rec_ids))
rows = list(map(lambda x: self.rows[x], row_ids))
self.assertEqual(formatter.to_csv(recs), rows)
|
andela-earinde/bellatrix-py
|
refs/heads/master
|
app/js/lib/lib/modules/test/test_userdict.py
|
119
|
# Check every path through every method of UserDict
from test import test_support, mapping_tests
import UserDict
d0 = {}
d1 = {"one": 1}
d2 = {"one": 1, "two": 2}
d3 = {"one": 1, "two": 3, "three": 5}
d4 = {"one": None, "two": None}
d5 = {"one": 1, "two": 1}
class UserDictTest(mapping_tests.TestHashMappingProtocol):
type2test = UserDict.IterableUserDict
def test_all(self):
# Test constructors
u = UserDict.UserDict()
u0 = UserDict.UserDict(d0)
u1 = UserDict.UserDict(d1)
u2 = UserDict.IterableUserDict(d2)
uu = UserDict.UserDict(u)
uu0 = UserDict.UserDict(u0)
uu1 = UserDict.UserDict(u1)
uu2 = UserDict.UserDict(u2)
# keyword arg constructor
self.assertEqual(UserDict.UserDict(one=1, two=2), d2)
# item sequence constructor
self.assertEqual(UserDict.UserDict([('one',1), ('two',2)]), d2)
self.assertEqual(UserDict.UserDict(dict=[('one',1), ('two',2)]), d2)
# both together
self.assertEqual(UserDict.UserDict([('one',1), ('two',2)], two=3, three=5), d3)
# alternate constructor
self.assertEqual(UserDict.UserDict.fromkeys('one two'.split()), d4)
self.assertEqual(UserDict.UserDict().fromkeys('one two'.split()), d4)
self.assertEqual(UserDict.UserDict.fromkeys('one two'.split(), 1), d5)
self.assertEqual(UserDict.UserDict().fromkeys('one two'.split(), 1), d5)
self.assertTrue(u1.fromkeys('one two'.split()) is not u1)
self.assertIsInstance(u1.fromkeys('one two'.split()), UserDict.UserDict)
self.assertIsInstance(u2.fromkeys('one two'.split()), UserDict.IterableUserDict)
# Test __repr__
self.assertEqual(str(u0), str(d0))
self.assertEqual(repr(u1), repr(d1))
self.assertEqual(repr(u2), repr(d2))
# Test __cmp__ and __len__
all = [d0, d1, d2, u, u0, u1, u2, uu, uu0, uu1, uu2]
for a in all:
for b in all:
self.assertEqual(cmp(a, b), cmp(len(a), len(b)))
# Test __getitem__
self.assertEqual(u2["one"], 1)
self.assertRaises(KeyError, u1.__getitem__, "two")
# Test __setitem__
u3 = UserDict.UserDict(u2)
u3["two"] = 2
u3["three"] = 3
# Test __delitem__
del u3["three"]
self.assertRaises(KeyError, u3.__delitem__, "three")
# Test clear
u3.clear()
self.assertEqual(u3, {})
# Test copy()
u2a = u2.copy()
self.assertEqual(u2a, u2)
u2b = UserDict.UserDict(x=42, y=23)
u2c = u2b.copy() # making a copy of a UserDict is special cased
self.assertEqual(u2b, u2c)
class MyUserDict(UserDict.UserDict):
def display(self): print self
m2 = MyUserDict(u2)
m2a = m2.copy()
self.assertEqual(m2a, m2)
# SF bug #476616 -- copy() of UserDict subclass shared data
m2['foo'] = 'bar'
self.assertNotEqual(m2a, m2)
# Test keys, items, values
self.assertEqual(u2.keys(), d2.keys())
self.assertEqual(u2.items(), d2.items())
self.assertEqual(u2.values(), d2.values())
# Test has_key and "in".
for i in u2.keys():
self.assertIn(i, u2)
self.assertEqual(i in u1, i in d1)
self.assertEqual(i in u0, i in d0)
with test_support.check_py3k_warnings():
self.assertTrue(u2.has_key(i))
self.assertEqual(u1.has_key(i), d1.has_key(i))
self.assertEqual(u0.has_key(i), d0.has_key(i))
# Test update
t = UserDict.UserDict()
t.update(u2)
self.assertEqual(t, u2)
class Items:
def items(self):
return (("x", 42), ("y", 23))
t = UserDict.UserDict()
t.update(Items())
self.assertEqual(t, {"x": 42, "y": 23})
# Test get
for i in u2.keys():
self.assertEqual(u2.get(i), u2[i])
self.assertEqual(u1.get(i), d1.get(i))
self.assertEqual(u0.get(i), d0.get(i))
# Test "in" iteration.
for i in xrange(20):
u2[i] = str(i)
ikeys = []
for k in u2:
ikeys.append(k)
keys = u2.keys()
self.assertEqual(set(ikeys), set(keys))
# Test setdefault
t = UserDict.UserDict()
self.assertEqual(t.setdefault("x", 42), 42)
self.assertTrue(t.has_key("x"))
self.assertEqual(t.setdefault("x", 23), 42)
# Test pop
t = UserDict.UserDict(x=42)
self.assertEqual(t.pop("x"), 42)
self.assertRaises(KeyError, t.pop, "x")
self.assertEqual(t.pop("x", 1), 1)
t["x"] = 42
self.assertEqual(t.pop("x", 1), 42)
# Test popitem
t = UserDict.UserDict(x=42)
self.assertEqual(t.popitem(), ("x", 42))
self.assertRaises(KeyError, t.popitem)
def test_missing(self):
# Make sure UserDict doesn't have a __missing__ method
self.assertEqual(hasattr(UserDict, "__missing__"), False)
# Test several cases:
# (D) subclass defines __missing__ method returning a value
# (E) subclass defines __missing__ method raising RuntimeError
# (F) subclass sets __missing__ instance variable (no effect)
        # (G) subclass doesn't define __missing__ at all
class D(UserDict.UserDict):
def __missing__(self, key):
return 42
d = D({1: 2, 3: 4})
self.assertEqual(d[1], 2)
self.assertEqual(d[3], 4)
self.assertNotIn(2, d)
self.assertNotIn(2, d.keys())
self.assertEqual(d[2], 42)
class E(UserDict.UserDict):
def __missing__(self, key):
raise RuntimeError(key)
e = E()
try:
e[42]
except RuntimeError, err:
self.assertEqual(err.args, (42,))
else:
self.fail("e[42] didn't raise RuntimeError")
class F(UserDict.UserDict):
def __init__(self):
# An instance variable __missing__ should have no effect
self.__missing__ = lambda key: None
UserDict.UserDict.__init__(self)
f = F()
try:
f[42]
except KeyError, err:
self.assertEqual(err.args, (42,))
else:
self.fail("f[42] didn't raise KeyError")
class G(UserDict.UserDict):
pass
g = G()
try:
g[42]
except KeyError, err:
self.assertEqual(err.args, (42,))
else:
self.fail("g[42] didn't raise KeyError")
##########################
# Test Dict Mixin
class SeqDict(UserDict.DictMixin):
"""Dictionary lookalike implemented with lists.
Used to test and demonstrate DictMixin
"""
def __init__(self, other=None, **kwargs):
self.keylist = []
self.valuelist = []
if other is not None:
for (key, value) in other:
self[key] = value
for (key, value) in kwargs.iteritems():
self[key] = value
def __getitem__(self, key):
try:
i = self.keylist.index(key)
except ValueError:
raise KeyError
return self.valuelist[i]
def __setitem__(self, key, value):
try:
i = self.keylist.index(key)
self.valuelist[i] = value
except ValueError:
self.keylist.append(key)
self.valuelist.append(value)
def __delitem__(self, key):
try:
i = self.keylist.index(key)
except ValueError:
raise KeyError
self.keylist.pop(i)
self.valuelist.pop(i)
def keys(self):
return list(self.keylist)
def copy(self):
d = self.__class__()
for key, value in self.iteritems():
d[key] = value
return d
@classmethod
def fromkeys(cls, keys, value=None):
d = cls()
for key in keys:
d[key] = value
return d
class UserDictMixinTest(mapping_tests.TestMappingProtocol):
type2test = SeqDict
def test_all(self):
## Setup test and verify working of the test class
# check init
s = SeqDict()
# exercise setitem
s[10] = 'ten'
s[20] = 'twenty'
s[30] = 'thirty'
# exercise delitem
del s[20]
# check getitem and setitem
self.assertEqual(s[10], 'ten')
# check keys() and delitem
self.assertEqual(s.keys(), [10, 30])
## Now, test the DictMixin methods one by one
# has_key
self.assertTrue(s.has_key(10))
self.assertTrue(not s.has_key(20))
# __contains__
self.assertIn(10, s)
self.assertNotIn(20, s)
# __iter__
self.assertEqual([k for k in s], [10, 30])
# __len__
self.assertEqual(len(s), 2)
# iteritems
self.assertEqual(list(s.iteritems()), [(10,'ten'), (30, 'thirty')])
# iterkeys
self.assertEqual(list(s.iterkeys()), [10, 30])
# itervalues
self.assertEqual(list(s.itervalues()), ['ten', 'thirty'])
# values
self.assertEqual(s.values(), ['ten', 'thirty'])
# items
self.assertEqual(s.items(), [(10,'ten'), (30, 'thirty')])
# get
self.assertEqual(s.get(10), 'ten')
self.assertEqual(s.get(15,'fifteen'), 'fifteen')
self.assertEqual(s.get(15), None)
# setdefault
self.assertEqual(s.setdefault(40, 'forty'), 'forty')
self.assertEqual(s.setdefault(10, 'null'), 'ten')
del s[40]
# pop
self.assertEqual(s.pop(10), 'ten')
self.assertNotIn(10, s)
s[10] = 'ten'
self.assertEqual(s.pop("x", 1), 1)
s["x"] = 42
self.assertEqual(s.pop("x", 1), 42)
# popitem
k, v = s.popitem()
self.assertNotIn(k, s)
s[k] = v
# clear
s.clear()
self.assertEqual(len(s), 0)
# empty popitem
self.assertRaises(KeyError, s.popitem)
# update
s.update({10: 'ten', 20:'twenty'})
self.assertEqual(s[10], 'ten')
self.assertEqual(s[20], 'twenty')
# cmp
self.assertEqual(s, {10: 'ten', 20:'twenty'})
t = SeqDict()
t[20] = 'twenty'
t[10] = 'ten'
self.assertEqual(s, t)
def test_main():
test_support.run_unittest(
UserDictTest,
UserDictMixinTest
)
if __name__ == "__main__":
test_main()
|
ChenJunor/hue
|
refs/heads/master
|
desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/tag.py
|
173
|
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Tag(object):
"""
A name/value tag on an AutoScalingGroup resource.
:ivar key: The key of the tag.
:ivar value: The value of the tag.
:ivar propagate_at_launch: Boolean value which specifies whether the
new tag will be applied to instances launched after the tag is created.
:ivar resource_id: The name of the autoscaling group.
:ivar resource_type: The only supported resource type at this time
is "auto-scaling-group".
"""
def __init__(self, connection=None, key=None, value=None,
propagate_at_launch=False, resource_id=None,
resource_type='auto-scaling-group'):
self.connection = connection
self.key = key
self.value = value
self.propagate_at_launch = propagate_at_launch
self.resource_id = resource_id
self.resource_type = resource_type
def __repr__(self):
return 'Tag(%s=%s)' % (self.key, self.value)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'Key':
self.key = value
elif name == 'Value':
self.value = value
elif name == 'PropagateAtLaunch':
if value.lower() == 'true':
self.propagate_at_launch = True
else:
self.propagate_at_launch = False
elif name == 'ResourceId':
self.resource_id = value
elif name == 'ResourceType':
self.resource_type = value
def build_params(self, params, i):
"""
Populates a dictionary with the name/value pairs necessary
to identify this Tag in a request.
"""
prefix = 'Tags.member.%d.' % i
params[prefix + 'ResourceId'] = self.resource_id
params[prefix + 'ResourceType'] = self.resource_type
params[prefix + 'Key'] = self.key
params[prefix + 'Value'] = self.value
if self.propagate_at_launch:
params[prefix + 'PropagateAtLaunch'] = 'true'
else:
params[prefix + 'PropagateAtLaunch'] = 'false'
def delete(self):
return self.connection.delete_tags([self])
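# --- Illustrative usage sketch (added for clarity; not part of boto) ---
# build_params() flattens a Tag into the 'Tags.member.N.*' request parameters
# used by the AutoScaling API (e.g. in a CreateOrUpdateTags request); the key,
# value and group name below are hypothetical.
if __name__ == '__main__':
    tag = Tag(key='env', value='prod', propagate_at_launch=True,
              resource_id='my-asg')
    params = {}
    tag.build_params(params, 1)
    # params now holds:
    # {'Tags.member.1.ResourceId': 'my-asg',
    #  'Tags.member.1.ResourceType': 'auto-scaling-group',
    #  'Tags.member.1.Key': 'env',
    #  'Tags.member.1.Value': 'prod',
    #  'Tags.member.1.PropagateAtLaunch': 'true'}
    print(params)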
|
naziris/HomeSecPi
|
refs/heads/master
|
venv/lib/python2.7/site-packages/werkzeug/testsuite/compat.py
|
146
|
# -*- coding: utf-8 -*-
"""
werkzeug.testsuite.compat
~~~~~~~~~~~~~~~~~~~~~~~~~
Ensure that old stuff does not break on update.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
import warnings
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.wrappers import Response
from werkzeug.test import create_environ
class CompatTestCase(WerkzeugTestCase):
def test_old_imports(self):
from werkzeug.utils import Headers, MultiDict, CombinedMultiDict, \
Headers, EnvironHeaders
from werkzeug.http import Accept, MIMEAccept, CharsetAccept, \
LanguageAccept, ETags, HeaderSet, WWWAuthenticate, \
Authorization
def test_exposed_werkzeug_mod(self):
import werkzeug
for key in werkzeug.__all__:
# deprecated, skip it
if key in ('templates', 'Template'):
continue
getattr(werkzeug, key)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(CompatTestCase))
return suite
|
jasonwee/asus-rt-n14uhp-mrtg
|
refs/heads/master
|
tmp/ve_asus-rt-n14uhp-mrtg/lib/python3.4/site-packages/pip/pep425tags.py
|
79
|
"""Generate and work with PEP 425 Compatibility Tags."""
from __future__ import absolute_import
import re
import sys
import warnings
import platform
import logging
import ctypes
try:
import sysconfig
except ImportError: # pragma nocover
# Python < 2.7
import distutils.sysconfig as sysconfig
import distutils.util
from pip.compat import OrderedDict
logger = logging.getLogger(__name__)
_osx_arch_pat = re.compile(r'(.+)_(\d+)_(\d+)_(.+)')
def get_config_var(var):
try:
return sysconfig.get_config_var(var)
except IOError as e: # Issue #1074
warnings.warn("{0}".format(e), RuntimeWarning)
return None
def get_abbr_impl():
"""Return abbreviated implementation name."""
if hasattr(sys, 'pypy_version_info'):
pyimpl = 'pp'
elif sys.platform.startswith('java'):
pyimpl = 'jy'
elif sys.platform == 'cli':
pyimpl = 'ip'
else:
pyimpl = 'cp'
return pyimpl
def get_impl_ver():
"""Return implementation version."""
impl_ver = get_config_var("py_version_nodot")
if not impl_ver or get_abbr_impl() == 'pp':
impl_ver = ''.join(map(str, get_impl_version_info()))
return impl_ver
def get_impl_version_info():
"""Return sys.version_info-like tuple for use in decrementing the minor
version."""
if get_abbr_impl() == 'pp':
# as per https://github.com/pypa/pip/issues/2882
return (sys.version_info[0], sys.pypy_version_info.major,
sys.pypy_version_info.minor)
else:
return sys.version_info[0], sys.version_info[1]
def get_impl_tag():
"""
Returns the Tag for this specific implementation.
"""
return "{0}{1}".format(get_abbr_impl(), get_impl_ver())
def get_flag(var, fallback, expected=True, warn=True):
"""Use a fallback method for determining SOABI flags if the needed config
var is unset or unavailable."""
val = get_config_var(var)
if val is None:
if warn:
logger.debug("Config variable '%s' is unset, Python ABI tag may "
"be incorrect", var)
return fallback()
return val == expected
def get_abi_tag():
"""Return the ABI tag based on SOABI (if available) or emulate SOABI
(CPython 2, PyPy)."""
soabi = get_config_var('SOABI')
impl = get_abbr_impl()
if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'):
d = ''
m = ''
u = ''
if get_flag('Py_DEBUG',
lambda: hasattr(sys, 'gettotalrefcount'),
warn=(impl == 'cp')):
d = 'd'
if get_flag('WITH_PYMALLOC',
lambda: impl == 'cp',
warn=(impl == 'cp')):
m = 'm'
if get_flag('Py_UNICODE_SIZE',
lambda: sys.maxunicode == 0x10ffff,
expected=4,
warn=(impl == 'cp' and
sys.version_info < (3, 3))) \
and sys.version_info < (3, 3):
u = 'u'
abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u)
elif soabi and soabi.startswith('cpython-'):
abi = 'cp' + soabi.split('-')[1]
elif soabi:
abi = soabi.replace('.', '_').replace('-', '_')
else:
abi = None
return abi
def _is_running_32bit():
return sys.maxsize == 2147483647
def get_platform():
"""Return our platform name 'win32', 'linux_x86_64'"""
if sys.platform == 'darwin':
# distutils.util.get_platform() returns the release based on the value
# of MACOSX_DEPLOYMENT_TARGET on which Python was built, which may
        # be significantly older than the user's current machine.
release, _, machine = platform.mac_ver()
split_ver = release.split('.')
if machine == "x86_64" and _is_running_32bit():
machine = "i386"
elif machine == "ppc64" and _is_running_32bit():
machine = "ppc"
return 'macosx_{0}_{1}_{2}'.format(split_ver[0], split_ver[1], machine)
# XXX remove distutils dependency
result = distutils.util.get_platform().replace('.', '_').replace('-', '_')
if result == "linux_x86_64" and _is_running_32bit():
# 32 bit Python program (running on a 64 bit Linux): pip should only
# install and run 32 bit compiled extensions in that case.
result = "linux_i686"
return result
def is_manylinux1_compatible():
# Only Linux, and only x86-64 / i686
if get_platform() not in ("linux_x86_64", "linux_i686"):
return False
# Check for presence of _manylinux module
try:
import _manylinux
return bool(_manylinux.manylinux1_compatible)
except (ImportError, AttributeError):
# Fall through to heuristic check below
pass
# Check glibc version. CentOS 5 uses glibc 2.5.
return have_compatible_glibc(2, 5)
# Separated out from have_compatible_glibc for easier unit testing
def check_glibc_version(version_str, needed_major, needed_minor):
# Parse string and check against requested version.
#
# We use a regexp instead of str.split because we want to discard any
# random junk that might come after the minor version -- this might happen
# in patched/forked versions of glibc (e.g. Linaro's version of glibc
# uses version strings like "2.20-2014.11"). See gh-3588.
m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
if not m:
warnings.warn("Expected glibc version with 2 components major.minor,"
" got: %s" % version_str, RuntimeWarning)
return False
return (int(m.group("major")) == needed_major and
int(m.group("minor")) >= needed_minor)
def have_compatible_glibc(major, minimum_minor):
# ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
# manpage says, "If filename is NULL, then the returned handle is for the
# main program". This way we can let the linker do the work to figure out
# which libc our process is actually using.
process_namespace = ctypes.CDLL(None)
try:
gnu_get_libc_version = process_namespace.gnu_get_libc_version
except AttributeError:
# Symbol doesn't exist -> therefore, we are not linked to
# glibc.
return False
# Call gnu_get_libc_version, which returns a string like "2.5".
gnu_get_libc_version.restype = ctypes.c_char_p
version_str = gnu_get_libc_version()
# py2 / py3 compatibility:
if not isinstance(version_str, str):
version_str = version_str.decode("ascii")
return check_glibc_version(version_str, major, minimum_minor)
def get_darwin_arches(major, minor, machine):
"""Return a list of supported arches (including group arches) for
the given major, minor and machine architecture of an OS X machine.
"""
arches = []
def _supports_arch(major, minor, arch):
# Looking at the application support for OS X versions in the chart
# provided by https://en.wikipedia.org/wiki/OS_X#Versions it appears
# our timeline looks roughly like:
#
# 10.0 - Introduces ppc support.
# 10.4 - Introduces ppc64, i386, and x86_64 support, however the ppc64
# and x86_64 support is CLI only, and cannot be used for GUI
# applications.
# 10.5 - Extends ppc64 and x86_64 support to cover GUI applications.
# 10.6 - Drops support for ppc64
# 10.7 - Drops support for ppc
#
# Given that we do not know if we're installing a CLI or a GUI
# application, we must be conservative and assume it might be a GUI
# application and behave as if ppc64 and x86_64 support did not occur
# until 10.5.
#
# Note: The above information is taken from the "Application support"
# column in the chart not the "Processor support" since I believe
# that we care about what instruction sets an application can use
# not which processors the OS supports.
if arch == 'ppc':
return (major, minor) <= (10, 5)
if arch == 'ppc64':
return (major, minor) == (10, 5)
if arch == 'i386':
return (major, minor) >= (10, 4)
if arch == 'x86_64':
return (major, minor) >= (10, 5)
if arch in groups:
for garch in groups[arch]:
if _supports_arch(major, minor, garch):
return True
return False
groups = OrderedDict([
("fat", ("i386", "ppc")),
("intel", ("x86_64", "i386")),
("fat64", ("x86_64", "ppc64")),
("fat32", ("x86_64", "i386", "ppc")),
])
if _supports_arch(major, minor, machine):
arches.append(machine)
for garch in groups:
if machine in groups[garch] and _supports_arch(major, minor, garch):
arches.append(garch)
arches.append('universal')
return arches
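# Illustrative example (added for clarity): with the groups defined above,
# get_darwin_arches(10, 9, 'x86_64') returns
# ['x86_64', 'intel', 'fat64', 'fat32', 'universal'].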
def get_supported(versions=None, noarch=False):
"""Return a list of supported tags for each version specified in
`versions`.
:param versions: a list of string versions, of the form ["33", "32"],
or None. The first version will be assumed to support our ABI.
"""
supported = []
# Versions must be given with respect to the preference
if versions is None:
versions = []
version_info = get_impl_version_info()
major = version_info[:-1]
# Support all previous minor Python versions.
for minor in range(version_info[-1], -1, -1):
versions.append(''.join(map(str, major + (minor,))))
impl = get_abbr_impl()
abis = []
abi = get_abi_tag()
if abi:
abis[0:0] = [abi]
abi3s = set()
import imp
for suffix in imp.get_suffixes():
if suffix[0].startswith('.abi'):
abi3s.add(suffix[0].split('.', 2)[1])
abis.extend(sorted(list(abi3s)))
abis.append('none')
if not noarch:
arch = get_platform()
if sys.platform == 'darwin':
# support macosx-10.6-intel on macosx-10.9-x86_64
match = _osx_arch_pat.match(arch)
if match:
name, major, minor, actual_arch = match.groups()
tpl = '{0}_{1}_%i_%s'.format(name, major)
arches = []
for m in reversed(range(int(minor) + 1)):
for a in get_darwin_arches(int(major), m, actual_arch):
arches.append(tpl % (m, a))
else:
# arch pattern didn't match (?!)
arches = [arch]
elif is_manylinux1_compatible():
arches = [arch.replace('linux', 'manylinux1'), arch]
else:
arches = [arch]
# Current version, current API (built specifically for our Python):
for abi in abis:
for arch in arches:
supported.append(('%s%s' % (impl, versions[0]), abi, arch))
# Has binaries, does not use the Python API:
for arch in arches:
supported.append(('py%s' % (versions[0][0]), 'none', arch))
# No abi / arch, but requires our implementation:
supported.append(('%s%s' % (impl, versions[0]), 'none', 'any'))
# Tagged specifically as being cross-version compatible
# (with just the major version specified)
supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
# No abi / arch, generic Python
for i, version in enumerate(versions):
supported.append(('py%s' % (version,), 'none', 'any'))
if i == 0:
supported.append(('py%s' % (version[0]), 'none', 'any'))
return supported
supported_tags = get_supported()
supported_tags_noarch = get_supported(noarch=True)
implementation_tag = get_impl_tag()
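# --- Illustrative sketch (added for clarity; not part of pip) ---
# Each entry in supported_tags is an (implementation, abi, platform) triple,
# e.g. ('cp34', 'cp34m', 'linux_x86_64'); a wheel is considered installable
# when the tag triple parsed from its filename appears in this list.
if __name__ == '__main__':
    for tag in supported_tags[:5]:
        print('%s-%s-%s' % tag)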
|
llhe/tensorflow
|
refs/heads/master
|
tensorflow/contrib/distributions/python/kernel_tests/distribution_util_test.py
|
9
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from scipy import special
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.linalg.python.ops import linear_operator_diag
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class AssertCloseTest(test.TestCase):
def testAssertCloseIntegerDtype(self):
x = array_ops.placeholder(dtypes.int32)
y = x
z = array_ops.placeholder(dtypes.int32)
feed_dict = {x: [1, 5, 10, 15, 20], z: [2, 5, 10, 15, 20]}
with self.test_session():
with ops.control_dependencies([distribution_util.assert_close(x, y)]):
array_ops.identity(x).eval(feed_dict=feed_dict)
with ops.control_dependencies([distribution_util.assert_close(y, x)]):
array_ops.identity(x).eval(feed_dict=feed_dict)
with self.assertRaisesOpError("Condition x ~= y"):
with ops.control_dependencies([distribution_util.assert_close(x, z)]):
array_ops.identity(x).eval(feed_dict=feed_dict)
with self.assertRaisesOpError("Condition x ~= y"):
with ops.control_dependencies([distribution_util.assert_close(y, z)]):
array_ops.identity(y).eval(feed_dict=feed_dict)
def testAssertCloseNonIntegerDtype(self):
x = array_ops.placeholder(dtypes.float32)
y = x + 1e-8
z = array_ops.placeholder(dtypes.float32)
feed_dict = {x: [1., 5, 10, 15, 20], z: [2., 5, 10, 15, 20]}
with self.test_session():
with ops.control_dependencies([distribution_util.assert_close(x, y)]):
array_ops.identity(x).eval(feed_dict=feed_dict)
with ops.control_dependencies([distribution_util.assert_close(y, x)]):
array_ops.identity(x).eval(feed_dict=feed_dict)
with self.assertRaisesOpError("Condition x ~= y"):
with ops.control_dependencies([distribution_util.assert_close(x, z)]):
array_ops.identity(x).eval(feed_dict=feed_dict)
with self.assertRaisesOpError("Condition x ~= y"):
with ops.control_dependencies([distribution_util.assert_close(y, z)]):
array_ops.identity(y).eval(feed_dict=feed_dict)
def testAssertCloseEpsilon(self):
x = [0., 5, 10, 15, 20]
# x != y
y = [0.1, 5, 10, 15, 20]
# x = z
z = [1e-8, 5, 10, 15, 20]
with self.test_session():
with ops.control_dependencies([distribution_util.assert_close(x, z)]):
array_ops.identity(x).eval()
with self.assertRaisesOpError("Condition x ~= y"):
with ops.control_dependencies([distribution_util.assert_close(x, y)]):
array_ops.identity(x).eval()
with self.assertRaisesOpError("Condition x ~= y"):
with ops.control_dependencies([distribution_util.assert_close(y, z)]):
array_ops.identity(y).eval()
def testAssertIntegerForm(self):
# This should only be detected as an integer.
x = array_ops.placeholder(dtypes.float32)
y = array_ops.placeholder(dtypes.float32)
# First component isn't less than float32.eps = 1e-7
z = array_ops.placeholder(dtypes.float32)
# This shouldn"t be detected as an integer.
w = array_ops.placeholder(dtypes.float32)
feed_dict = {x: [1., 5, 10, 15, 20], y: [1.1, 5, 10, 15, 20],
z: [1.0001, 5, 10, 15, 20], w: [1e-8, 5, 10, 15, 20]}
with self.test_session():
with ops.control_dependencies([distribution_util.assert_integer_form(x)]):
array_ops.identity(x).eval(feed_dict=feed_dict)
with self.assertRaisesOpError("x has non-integer components"):
with ops.control_dependencies(
[distribution_util.assert_integer_form(y)]):
array_ops.identity(y).eval(feed_dict=feed_dict)
with self.assertRaisesOpError("x has non-integer components"):
with ops.control_dependencies(
[distribution_util.assert_integer_form(z)]):
array_ops.identity(z).eval(feed_dict=feed_dict)
with self.assertRaisesOpError("x has non-integer components"):
with ops.control_dependencies(
[distribution_util.assert_integer_form(w)]):
array_ops.identity(w).eval(feed_dict=feed_dict)
class ShapesFromLocAndScaleTest(test.TestCase):
def test_static_loc_static_scale_non_matching_event_size_raises(self):
loc = constant_op.constant(np.zeros((2, 4)))
scale = linear_operator_diag.LinearOperatorDiag(np.ones((5, 1, 3)))
with self.assertRaisesRegexp(ValueError, "could not be broadcast"):
distribution_util.shapes_from_loc_and_scale(loc, scale)
def test_static_loc_static_scale(self):
loc = constant_op.constant(np.zeros((2, 3)))
scale = linear_operator_diag.LinearOperatorDiag(np.ones((5, 1, 3)))
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
self.assertEqual(tensor_shape.TensorShape([5, 2]), batch_shape)
self.assertEqual(tensor_shape.TensorShape([3]), event_shape)
def test_static_loc_dynamic_scale(self):
loc = constant_op.constant(np.zeros((2, 3)))
diag = array_ops.placeholder(dtypes.float64)
scale = linear_operator_diag.LinearOperatorDiag(diag)
with self.test_session() as sess:
batch_shape, event_shape = sess.run(
distribution_util.shapes_from_loc_and_scale(loc, scale),
feed_dict={diag: np.ones((5, 1, 3))})
self.assertAllEqual([5, 2], batch_shape)
self.assertAllEqual([3], event_shape)
def test_dynamic_loc_static_scale(self):
loc = array_ops.placeholder(dtypes.float64)
diag = constant_op.constant(np.ones((5, 2, 3)))
scale = linear_operator_diag.LinearOperatorDiag(diag)
with self.test_session():
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
# batch_shape depends on both args, and so is dynamic. Since loc did not
# have static shape, we inferred event shape entirely from scale, and this
# is available statically.
self.assertAllEqual(
[5, 2], batch_shape.eval(feed_dict={loc: np.zeros((2, 3))}))
self.assertAllEqual([3], event_shape)
def test_dynamic_loc_dynamic_scale(self):
loc = array_ops.placeholder(dtypes.float64)
diag = array_ops.placeholder(dtypes.float64)
scale = linear_operator_diag.LinearOperatorDiag(diag)
with self.test_session() as sess:
batch_shape, event_shape = sess.run(
distribution_util.shapes_from_loc_and_scale(loc, scale),
feed_dict={diag: np.ones((5, 2, 3)), loc: np.zeros((2, 3))})
self.assertAllEqual([5, 2], batch_shape)
self.assertAllEqual([3], event_shape)
def test_none_loc_static_scale(self):
loc = None
scale = linear_operator_diag.LinearOperatorDiag(np.ones((5, 1, 3)))
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
self.assertEqual(tensor_shape.TensorShape([5, 1]), batch_shape)
self.assertEqual(tensor_shape.TensorShape([3]), event_shape)
def test_none_loc_dynamic_scale(self):
loc = None
diag = array_ops.placeholder(dtypes.float64)
scale = linear_operator_diag.LinearOperatorDiag(diag)
with self.test_session() as sess:
batch_shape, event_shape = sess.run(
distribution_util.shapes_from_loc_and_scale(loc, scale),
feed_dict={diag: np.ones((5, 1, 3))})
self.assertAllEqual([5, 1], batch_shape)
self.assertAllEqual([3], event_shape)
class GetLogitsAndProbsTest(test.TestCase):
def testGetLogitsAndProbsImproperArguments(self):
with self.test_session():
with self.assertRaises(ValueError):
distribution_util.get_logits_and_probs(logits=None, probs=None)
with self.assertRaises(ValueError):
distribution_util.get_logits_and_probs(logits=[0.1], probs=[0.1])
def testGetLogitsAndProbsLogits(self):
p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)
logits = special.logit(p)
with self.test_session():
new_logits, new_p = distribution_util.get_logits_and_probs(
logits=logits, validate_args=True)
self.assertAllClose(p, new_p.eval())
self.assertAllClose(logits, new_logits.eval())
def testGetLogitsAndProbsLogitsMultidimensional(self):
p = np.array([0.2, 0.3, 0.5], dtype=np.float32)
logits = np.log(p)
with self.test_session():
new_logits, new_p = distribution_util.get_logits_and_probs(
logits=logits, multidimensional=True, validate_args=True)
self.assertAllClose(new_p.eval(), p)
self.assertAllClose(new_logits.eval(), logits)
def testGetLogitsAndProbsProbability(self):
p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)
with self.test_session():
new_logits, new_p = distribution_util.get_logits_and_probs(
probs=p, validate_args=True)
self.assertAllClose(special.logit(p), new_logits.eval())
self.assertAllClose(p, new_p.eval())
def testGetLogitsAndProbsProbabilityMultidimensional(self):
p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)
with self.test_session():
new_logits, new_p = distribution_util.get_logits_and_probs(
probs=p, multidimensional=True, validate_args=True)
self.assertAllClose(np.log(p), new_logits.eval())
self.assertAllClose(p, new_p.eval())
def testGetLogitsAndProbsProbabilityValidateArgs(self):
p = [0.01, 0.2, 0.5, 0.7, .99]
# Component less than 0.
p2 = [-1, 0.2, 0.5, 0.3, .2]
# Component greater than 1.
p3 = [2, 0.2, 0.5, 0.3, .2]
with self.test_session():
_, prob = distribution_util.get_logits_and_probs(
probs=p, validate_args=True)
prob.eval()
with self.assertRaisesOpError("Condition x >= 0"):
_, prob = distribution_util.get_logits_and_probs(
probs=p2, validate_args=True)
prob.eval()
_, prob = distribution_util.get_logits_and_probs(
probs=p2, validate_args=False)
prob.eval()
with self.assertRaisesOpError("probs has components greater than 1"):
_, prob = distribution_util.get_logits_and_probs(
probs=p3, validate_args=True)
prob.eval()
_, prob = distribution_util.get_logits_and_probs(
probs=p3, validate_args=False)
prob.eval()
def testGetLogitsAndProbsProbabilityValidateArgsMultidimensional(self):
p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)
# Component less than 0. Still sums to 1.
p2 = np.array([[-.3, 0.4, 0.9], [0.1, 0.5, 0.4]], dtype=np.float32)
# Component greater than 1. Does not sum to 1.
p3 = np.array([[1.3, 0.0, 0.0], [0.1, 0.5, 0.4]], dtype=np.float32)
# Does not sum to 1.
p4 = np.array([[1.1, 0.3, 0.4], [0.1, 0.5, 0.4]], dtype=np.float32)
with self.test_session():
_, prob = distribution_util.get_logits_and_probs(
probs=p, multidimensional=True)
prob.eval()
with self.assertRaisesOpError("Condition x >= 0"):
_, prob = distribution_util.get_logits_and_probs(
probs=p2, multidimensional=True, validate_args=True)
prob.eval()
_, prob = distribution_util.get_logits_and_probs(
probs=p2, multidimensional=True, validate_args=False)
prob.eval()
with self.assertRaisesOpError(
"(probs has components greater than 1|probs does not sum to 1)"):
_, prob = distribution_util.get_logits_and_probs(
probs=p3, multidimensional=True, validate_args=True)
prob.eval()
_, prob = distribution_util.get_logits_and_probs(
probs=p3, multidimensional=True, validate_args=False)
prob.eval()
with self.assertRaisesOpError("probs does not sum to 1"):
_, prob = distribution_util.get_logits_and_probs(
probs=p4, multidimensional=True, validate_args=True)
prob.eval()
_, prob = distribution_util.get_logits_and_probs(
probs=p4, multidimensional=True, validate_args=False)
prob.eval()
class LogCombinationsTest(test.TestCase):
def testLogCombinationsBinomial(self):
n = [2, 5, 12, 15]
k = [1, 2, 4, 11]
log_combs = np.log(special.binom(n, k))
with self.test_session():
n = np.array(n, dtype=np.float32)
counts = [[1., 1], [2., 3], [4., 8], [11, 4]]
log_binom = distribution_util.log_combinations(n, counts)
self.assertEqual([4], log_binom.get_shape())
self.assertAllClose(log_combs, log_binom.eval())
def testLogCombinationsShape(self):
# Shape [2, 2]
n = [[2, 5], [12, 15]]
with self.test_session():
n = np.array(n, dtype=np.float32)
# Shape [2, 2, 4]
counts = [[[1., 1, 0, 0], [2., 2, 1, 0]], [[4., 4, 1, 3], [10, 1, 1, 4]]]
log_binom = distribution_util.log_combinations(n, counts)
self.assertEqual([2, 2], log_binom.get_shape())
class DynamicShapeTest(test.TestCase):
def testSameDynamicShape(self):
with self.test_session():
scalar = constant_op.constant(2.0)
scalar1 = array_ops.placeholder(dtype=dtypes.float32)
vector = [0.3, 0.4, 0.5]
vector1 = array_ops.placeholder(dtype=dtypes.float32, shape=[None])
vector2 = array_ops.placeholder(dtype=dtypes.float32, shape=[None])
multidimensional = [[0.3, 0.4], [0.2, 0.6]]
multidimensional1 = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, None])
multidimensional2 = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, None])
# Scalar
self.assertTrue(
distribution_util.same_dynamic_shape(scalar, scalar1).eval({
scalar1: 2.0
}))
# Vector
self.assertTrue(
distribution_util.same_dynamic_shape(vector, vector1).eval({
vector1: [2.0, 3.0, 4.0]
}))
self.assertTrue(
distribution_util.same_dynamic_shape(vector1, vector2).eval({
vector1: [2.0, 3.0, 4.0],
vector2: [2.0, 3.5, 6.0]
}))
# Multidimensional
self.assertTrue(
distribution_util.same_dynamic_shape(
multidimensional, multidimensional1).eval({
multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
}))
self.assertTrue(
distribution_util.same_dynamic_shape(
multidimensional1, multidimensional2).eval({
multidimensional1: [[2.0, 3.0], [3.0, 4.0]],
multidimensional2: [[1.0, 3.5], [6.3, 2.3]]
}))
# Scalar, X
self.assertFalse(
distribution_util.same_dynamic_shape(scalar, vector1).eval({
vector1: [2.0, 3.0, 4.0]
}))
self.assertFalse(
distribution_util.same_dynamic_shape(scalar1, vector1).eval({
scalar1: 2.0,
vector1: [2.0, 3.0, 4.0]
}))
self.assertFalse(
distribution_util.same_dynamic_shape(scalar, multidimensional1).eval({
multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
}))
self.assertFalse(
distribution_util.same_dynamic_shape(scalar1, multidimensional1).eval(
{
scalar1: 2.0,
multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
}))
# Vector, X
self.assertFalse(
distribution_util.same_dynamic_shape(vector, vector1).eval({
vector1: [2.0, 3.0]
}))
self.assertFalse(
distribution_util.same_dynamic_shape(vector1, vector2).eval({
vector1: [2.0, 3.0, 4.0],
vector2: [6.0]
}))
self.assertFalse(
distribution_util.same_dynamic_shape(vector, multidimensional1).eval({
multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
}))
self.assertFalse(
distribution_util.same_dynamic_shape(vector1, multidimensional1).eval(
{
vector1: [2.0, 3.0, 4.0],
multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
}))
# Multidimensional, X
self.assertFalse(
distribution_util.same_dynamic_shape(
multidimensional, multidimensional1).eval({
multidimensional1: [[1.0, 3.5, 5.0], [6.3, 2.3, 7.1]]
}))
self.assertFalse(
distribution_util.same_dynamic_shape(
multidimensional1, multidimensional2).eval({
multidimensional1: [[2.0, 3.0], [3.0, 4.0]],
multidimensional2: [[1.0, 3.5, 5.0], [6.3, 2.3, 7.1]]
}))
class RotateTransposeTest(test.TestCase):
def _np_rotate_transpose(self, x, shift):
if not isinstance(x, np.ndarray):
x = np.array(x)
return np.transpose(x, np.roll(np.arange(len(x.shape)), shift))
def testRollStatic(self):
with self.test_session():
with self.assertRaisesRegexp(ValueError, "None values not supported."):
distribution_util.rotate_transpose(None, 1)
for x in (np.ones(1), np.ones((2, 1)), np.ones((3, 2, 1))):
for shift in np.arange(-5, 5):
y = distribution_util.rotate_transpose(x, shift)
self.assertAllEqual(self._np_rotate_transpose(x, shift), y.eval())
self.assertAllEqual(np.roll(x.shape, shift), y.get_shape().as_list())
def testRollDynamic(self):
with self.test_session() as sess:
x = array_ops.placeholder(dtypes.float32)
shift = array_ops.placeholder(dtypes.int32)
for x_value in (np.ones(
1, dtype=x.dtype.as_numpy_dtype()), np.ones(
(2, 1), dtype=x.dtype.as_numpy_dtype()), np.ones(
(3, 2, 1), dtype=x.dtype.as_numpy_dtype())):
for shift_value in np.arange(-5, 5):
self.assertAllEqual(
self._np_rotate_transpose(x_value, shift_value),
sess.run(distribution_util.rotate_transpose(x, shift),
feed_dict={x: x_value,
shift: shift_value}))
class PickVectorTest(test.TestCase):
def testCorrectlyPicksVector(self):
with self.test_session():
x = np.arange(10, 12)
y = np.arange(15, 18)
self.assertAllEqual(x,
distribution_util.pick_vector(
math_ops.less(0, 5), x, y).eval())
self.assertAllEqual(y,
distribution_util.pick_vector(
math_ops.less(5, 0), x, y).eval())
self.assertAllEqual(x,
distribution_util.pick_vector(
constant_op.constant(True), x, y)) # No eval.
self.assertAllEqual(y,
distribution_util.pick_vector(
constant_op.constant(False), x, y)) # No eval.
class FillLowerTriangularTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
def _fill_lower_triangular(self, x):
"""Numpy implementation of `fill_lower_triangular`."""
x = np.asarray(x)
d = x.shape[-1]
# d = n(n+1)/2 implies n is:
n = int(0.5 * (math.sqrt(1. + 8. * d) - 1.))
ids = np.tril_indices(n)
y = np.zeros(list(x.shape[:-1]) + [n, n], dtype=x.dtype)
y[..., ids[0], ids[1]] = x
return y
def testCorrectlyMakes1x1LowerTril(self):
with self.test_session():
x = ops.convert_to_tensor(self._rng.randn(3, 1))
expected = self._fill_lower_triangular(tensor_util.constant_value(x))
actual = distribution_util.fill_lower_triangular(x, validate_args=True)
self.assertAllEqual(expected.shape, actual.get_shape())
self.assertAllEqual(expected, actual.eval())
def testCorrectlyMakesNoBatchLowerTril(self):
with self.test_session():
x = ops.convert_to_tensor(self._rng.randn(10))
expected = self._fill_lower_triangular(tensor_util.constant_value(x))
actual = distribution_util.fill_lower_triangular(x, validate_args=True)
self.assertAllEqual(expected.shape, actual.get_shape())
self.assertAllEqual(expected, actual.eval())
g = gradients_impl.gradients(
distribution_util.fill_lower_triangular(x), x)
self.assertAllEqual(np.tri(4).reshape(-1), g[0].values.eval())
def testCorrectlyMakesBatchLowerTril(self):
with self.test_session():
x = ops.convert_to_tensor(self._rng.randn(2, 2, 6))
expected = self._fill_lower_triangular(tensor_util.constant_value(x))
actual = distribution_util.fill_lower_triangular(x, validate_args=True)
self.assertAllEqual(expected.shape, actual.get_shape())
self.assertAllEqual(expected, actual.eval())
self.assertAllEqual(
np.ones((2, 2, 6)),
gradients_impl.gradients(
distribution_util.fill_lower_triangular(x), x)[0].eval())
class GenNewSeedTest(test.TestCase):
def testOnlyNoneReturnsNone(self):
self.assertFalse(distribution_util.gen_new_seed(0, "salt") is None)
self.assertTrue(distribution_util.gen_new_seed(None, "salt") is None)
# TODO(jvdillon): Merge this test back into:
# tensorflow/python/kernel_tests/softplus_op_test.py
# once TF core is accepting new ops.
class SoftplusTest(test.TestCase):
def _npSoftplus(self, np_features):
np_features = np.asarray(np_features)
zero = np.asarray(0).astype(np_features.dtype)
return np.logaddexp(zero, np_features)
def _testSoftplus(self, np_features, use_gpu=False):
np_features = np.asarray(np_features)
np_softplus = self._npSoftplus(np_features)
with self.test_session(use_gpu=use_gpu) as sess:
softplus = nn_ops.softplus(np_features)
softplus_inverse = distribution_util.softplus_inverse(softplus)
[tf_softplus, tf_softplus_inverse] = sess.run([
softplus, softplus_inverse])
self.assertAllCloseAccordingToType(np_softplus, tf_softplus)
rtol = {"float16": 0.07, "float32": 0.003, "float64": 0.002}.get(
str(np_features.dtype), 1e-6)
# This will test that we correctly computed the inverse by verifying we
# recovered the original input.
self.assertAllCloseAccordingToType(
np_features, tf_softplus_inverse,
atol=0., rtol=rtol)
self.assertAllEqual(np.ones_like(tf_softplus).astype(np.bool),
tf_softplus > 0)
self.assertShapeEqual(np_softplus, softplus)
self.assertShapeEqual(np_softplus, softplus_inverse)
self.assertAllEqual(np.ones_like(tf_softplus).astype(np.bool),
np.isfinite(tf_softplus))
self.assertAllEqual(np.ones_like(tf_softplus_inverse).astype(np.bool),
np.isfinite(tf_softplus_inverse))
def testNumbers(self):
for t in [np.float16, np.float32, np.float64]:
lower = {np.float16: -15, np.float32: -50, np.float64: -50}.get(t, -100)
upper = {np.float16: 50, np.float32: 50, np.float64: 50}.get(t, 100)
self._testSoftplus(
np.array(np.linspace(lower, upper, int(1e3)).astype(t)).reshape(
[2, -1]),
use_gpu=False)
self._testSoftplus(
np.array(np.linspace(lower, upper, int(1e3)).astype(t)).reshape(
[2, -1]),
use_gpu=True)
log_eps = np.log(np.finfo(t).eps)
one = t(1)
ten = t(10)
self._testSoftplus(
[
log_eps, log_eps - one, log_eps + one, log_eps - ten,
log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
-log_eps - ten, -log_eps + ten
],
use_gpu=False)
self._testSoftplus(
[
log_eps, log_eps - one, log_eps + one, log_eps - ten,
              log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
-log_eps - ten, -log_eps + ten
],
use_gpu=True)
def testGradient(self):
with self.test_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.softplus(x, name="softplus")
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], y, [2, 5], x_init_value=x_init)
logging.vlog(2, "softplus (float) gradient err = ", err)
self.assertLess(err, 1e-4)
def testInverseSoftplusGradientNeverNan(self):
with self.test_session():
# Note that this range contains both zero and inf.
x = constant_op.constant(np.logspace(-8, 6).astype(np.float16))
y = distribution_util.softplus_inverse(x)
grads = gradients_impl.gradients(y, x)[0].eval()
# Equivalent to `assertAllFalse` (if it existed).
self.assertAllEqual(np.zeros_like(grads).astype(np.bool), np.isnan(grads))
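# Reference sketch (hypothetical helper, not part of the original test file,
# assumes only NumPy): the inverse exercised above satisfies
# softplus_inverse(y) = log(exp(y) - 1), which can be evaluated stably as
# y + log(-expm1(-y)).
def _np_softplus_inverse_sketch(y):
  y = np.asarray(y)
  return y + np.log(-np.expm1(-y))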
if __name__ == "__main__":
test.main()
|
yoghadj/or-tools
|
refs/heads/master
|
data/nonogram_regular/nonogram_car.py
|
74
|
# Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Problem from ECLiPSe
# http://eclipse.crosscoreop.com/eclipse/examples/nono.ecl.txt
# Problem n3 ( http://www.pro.or.jp/~fuji/java/puzzle/nonogram/index-eng.html )
# 'Car'
#
rows = 10;
row_rule_len = 4;
row_rules = [
[0,0,0,4],
[0,1,1,6],
[0,1,1,6],
[0,1,1,6],
[0,0,4,9],
[0,0,1,1],
[0,0,1,1],
[0,2,7,2],
[1,1,1,1],
[0,0,2,2]
]
cols = 15;
col_rule_len = 2;
col_rules = [
[0,4],
[1,2],
[1,1],
[5,1],
[1,2],
[1,1],
[5,1],
[1,1],
[4,1],
[4,1],
[4,2],
[4,1],
[4,1],
[4,2],
[0,4]
]
|
Daniel-CA/odoo-addons
|
refs/heads/8.0
|
stock_inventory_line_ext/models/__init__.py
|
8
|
# -*- coding: utf-8 -*-
# © 2015 Esther Martín - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from . import stock
|
tekulvw/Squid-Plugins
|
refs/heads/master
|
emotes/emotes.py
|
1
|
import discord
from discord.ext import commands
import aiohttp
from cogs.utils import checks
from cogs.utils.dataIO import fileIO
import os
from __main__ import send_cmd_help
from io import BytesIO
try:
import PIL.Image as Image
except Exception as e:
raise RuntimeError("You must `pip3 install pillow` to use emotes") from e
class Emotes:
"""Twitch Emotes commands."""
def __init__(self, bot):
self.bot = bot
self.settings = fileIO("data/emotes/settings.json", "load")
self.emote_list = []
self.available_emotes = fileIO(
"data/emotes/available_emotes.json", "load")
self.emote_url = "https://api.twitch.tv/kraken/chat/emoticons"
self.session = aiohttp.ClientSession()
def __unload(self):
self.session.close()
def save_settings(self):
fileIO("data/emotes/settings.json", "save", self.settings)
def save_available_emotes(self):
fileIO("data/emotes/available_emotes.json",
"save", self.available_emotes)
def get_limit_per_message(self, server):
if server is None:
return 5
if not self._is_enabled(server):
return 5
return self.settings[server.id].get("LIMIT_PER_MESSAGE", 5)
def get_scale(self, server):
try:
return self.settings[server.id]["SCALE"]
except KeyError:
return 1.0
def set_limit_per_message(self, server, value):
if server is None:
return
if self._is_enabled(server):
self.settings[server.id]["LIMIT_PER_MESSAGE"] = int(value)
self.save_settings()
def set_scale(self, server, value):
if self._is_enabled(server):
self.settings[server.id]["SCALE"] = float(value)
self.save_settings()
async def update_emote_list(self):
async with self.session.get(self.emote_url) as r:
resp = await r.json()
data = resp.get("emoticons", {})
self.emote_list = data
def _is_enabled(self, server):
assert isinstance(server, discord.Server)
if server.id not in self.settings:
return False
if not self.settings[server.id]["ENABLED"]:
return False
return True
@commands.group(pass_context=True, no_pm=True)
@checks.mod_or_permissions(manage_messages=True)
async def emoteset(self, ctx):
"""Various emote settings"""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
# TODO server-specific settings
@emoteset.command(name="enabled", pass_context=True)
async def _emoteset_enabled(self, ctx, setting: bool):
"""Bool to see if emotes are enabled on this server."""
server = ctx.message.server
if server.id not in self.settings:
self.settings[server.id] = {}
self.settings[server.id]["ENABLED"] = bool(setting)
self.save_settings()
if server.id not in self.available_emotes:
self.available_emotes[server.id] = []
self.save_available_emotes()
if setting:
await self.bot.reply("emotes are now enabled.")
else:
await self.bot.reply("emotes are now disabled.")
@emoteset.command(name="limit", pass_context=True)
async def _emoteset_limit(self, ctx, limit: int):
"""Emote limit per message."""
        if limit < 0:
            await send_cmd_help(ctx)
            return
if limit > 5:
limit = 5
self.set_limit_per_message(ctx.message.server, limit)
await self.bot.say("Limit set to {}.".format(limit))
@emoteset.command(name="scale", pass_context=True)
async def _emoteset_scale(self, ctx, scale: float):
"""Sets server emote scaling"""
if scale > 5 or scale < 0.5:
await self.bot.say("Scale must be between 0.5 and 3")
return
self.set_scale(ctx.message.server, scale)
await self.bot.say("Emote scale set to {}".format(scale))
def _write_image(self, chan_id, name, image_data):
# Assume channel folder already exists
with open('data/emotes/{}/{}'.format(chan_id, name), 'wb') as f:
f.write(image_data)
async def _remove_all_emotes(self, server, chan_id, name=""):
assert isinstance(server, discord.Server)
if server.id not in self.available_emotes:
return
self.available_emotes[server.id] = \
[emote for emote in self.available_emotes[server.id]
if emote["chan_id"] != chan_id or emote["name"] == name]
self.save_available_emotes()
async def _add_emote(self, server, chan_id):
assert isinstance(server, discord.Server)
if chan_id == -1:
return
if not os.path.exists("data/emotes/{}".format(chan_id)):
os.makedirs("data/emotes/{}".format(chan_id))
await self._remove_all_emotes(server, chan_id)
for emote in self.emote_list:
if chan_id == emote["images"][0].get("emoticon_set", -1):
url = emote["images"][0].get("url", "")
name = emote.get("regex", "")
file_name = url.split('/')[-1]
if url == "" or name == "":
continue
if not os.path.exists('data/emotes/{}/{}'.format(chan_id,
file_name)):
try:
async with aiohttp.get(url) as r:
image = await r.content.read()
except Exception as e:
print(
"Huh, I have no idea what errors aiohttp throws.")
print("This is one of them:")
print(e)
print(dir(e))
print("------")
continue
self._write_image(chan_id, file_name, image)
if server.id not in self.available_emotes:
                    self.available_emotes[server.id] = []
self.available_emotes[server.id].append({
"name": name,
"file_name": file_name,
"chan_id": chan_id
})
self.save_available_emotes()
@commands.group(no_pm=True, pass_context=True,
invoke_without_command=True)
async def emote(self, ctx, emote_name: str):
"""Enabled emote and all emotes from same twitch channel"""
server = ctx.message.server
if not self._is_enabled(server):
await self.bot.say("Emotes are not enabled on this server.")
return
server_emotes = self.available_emotes[server.id]
        if any(emote_name == e.get("name") for e in server_emotes):
await self.bot.say(
"This server already has '{}'".format(emote_name))
return
await self.bot.say("Retrieving emotes from '{}'.".format(emote_name) +
" Please wait a moment.")
for emote in self.emote_list:
if emote_name == emote.get("regex", ""):
chan_id = emote["images"][0].get("emoticon_set", -1)
if chan_id == -1:
await self.bot.say("Yeah, something failed, try again "
"later?")
return
await self._add_emote(server, chan_id)
await self.bot.say("'{}' and other ".format(emote_name) +
"channel emotes added.")
return
await self.bot.say("No such emote '{}' found.".format(emote_name))
@emote.command(pass_context=True, name="update")
async def emote_update(self, ctx):
"""Refreshes list of emotes"""
await self.update_emote_list()
await self.bot.say("Updated emote list.")
async def check_messages(self, message):
if message.author.id == self.bot.user.id:
return
if message.channel.is_private:
return
if not self._is_enabled(message.server):
return
valid_emotes = self.available_emotes[message.server.id]
splitted = message.content.split(' ')
count = 0
for word in splitted:
for emote in valid_emotes:
if word == emote.get("name", ""):
fname = 'data/emotes/{}/{}'.format(
emote["chan_id"], emote["file_name"])
if not os.path.exists(fname):
break
img = Image.open(fname)
if self.get_scale(message.server) != 1.0:
scale = self.get_scale(message.server)
img = img.resize((int(img.width * scale),
int(img.height * scale)),
Image.ANTIALIAS)
tmpfile = BytesIO()
fmt = os.path.splitext(emote["file_name"])[1].replace('.',
'')
img.save(tmpfile, format=fmt)
tmpfile.seek(0)
await self.bot.send_file(message.channel, tmpfile,
filename=emote["file_name"])
tmpfile.close()
count += 1
if self.get_limit_per_message(message.server) != 0 and \
count >= \
self.get_limit_per_message(message.server):
return
break
def check_folders():
if not os.path.exists("data/emotes"):
print("Creating data/emotes folder...")
os.makedirs("data/emotes")
def check_files():
f = "data/emotes/settings.json"
if not fileIO(f, "check"):
print("Creating empty settings.json...")
fileIO(f, "save", {})
f = "data/emotes/available_emotes.json"
if not fileIO(f, "check"):
print("Creating empty available_emotes.json...")
fileIO(f, "save", {})
def setup(bot):
check_folders()
check_files()
n = Emotes(bot)
bot.loop.create_task(n.update_emote_list())
bot.add_listener(n.check_messages, "on_message")
bot.add_cog(n)
|
CrimeaCoin/p2pool
|
refs/heads/master
|
p2pool/util/logging.py
|
287
|
import codecs
import datetime
import os
import sys
from twisted.python import log
class EncodeReplacerPipe(object):
def __init__(self, inner_file):
self.inner_file = inner_file
self.softspace = 0
def write(self, data):
if isinstance(data, unicode):
try:
data = data.encode(self.inner_file.encoding, 'replace')
except:
data = data.encode('ascii', 'replace')
self.inner_file.write(data)
def flush(self):
self.inner_file.flush()
class LogFile(object):
def __init__(self, filename):
self.filename = filename
self.inner_file = None
self.reopen()
def reopen(self):
if self.inner_file is not None:
self.inner_file.close()
open(self.filename, 'a').close()
f = open(self.filename, 'rb')
f.seek(0, os.SEEK_END)
length = f.tell()
if length > 100*1000*1000:
f.seek(-1000*1000, os.SEEK_END)
while True:
if f.read(1) in ('', '\n'):
break
data = f.read()
f.close()
f = open(self.filename, 'wb')
f.write(data)
f.close()
self.inner_file = codecs.open(self.filename, 'a', 'utf-8')
def write(self, data):
self.inner_file.write(data)
def flush(self):
self.inner_file.flush()
class TeePipe(object):
def __init__(self, outputs):
self.outputs = outputs
def write(self, data):
for output in self.outputs:
output.write(data)
def flush(self):
for output in self.outputs:
output.flush()
class TimestampingPipe(object):
def __init__(self, inner_file):
self.inner_file = inner_file
self.buf = ''
self.softspace = 0
def write(self, data):
buf = self.buf + data
lines = buf.split('\n')
for line in lines[:-1]:
self.inner_file.write('%s %s\n' % (datetime.datetime.now(), line))
self.inner_file.flush()
self.buf = lines[-1]
def flush(self):
pass
class AbortPipe(object):
def __init__(self, inner_file):
self.inner_file = inner_file
self.softspace = 0
def write(self, data):
try:
self.inner_file.write(data)
except:
sys.stdout = sys.__stdout__
log.DefaultObserver.stderr = sys.stderr = sys.__stderr__
raise
def flush(self):
self.inner_file.flush()
class PrefixPipe(object):
def __init__(self, inner_file, prefix):
self.inner_file = inner_file
self.prefix = prefix
self.buf = ''
self.softspace = 0
def write(self, data):
buf = self.buf + data
lines = buf.split('\n')
for line in lines[:-1]:
self.inner_file.write(self.prefix + line + '\n')
self.inner_file.flush()
self.buf = lines[-1]
def flush(self):
        pass
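# Illustrative sketch (hypothetical helper, not part of the original module):
# the pipe classes above are designed to be stacked; the composition below
# mirrors how p2pool sends timestamped output both to the console and to a
# size-capped log file.
def _example_setup_logging(filename):
    logfile = LogFile(filename)
    pipe = TimestampingPipe(TeePipe([EncodeReplacerPipe(sys.stderr), logfile]))
    sys.stdout = AbortPipe(pipe)
    sys.stderr = log.DefaultObserver.stderr = AbortPipe(PrefixPipe(pipe, '> '))
    return logfile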
|
sarahgrogan/scikit-learn
|
refs/heads/master
|
sklearn/externals/joblib/pool.py
|
237
|
"""Custom implementation of multiprocessing.Pool with custom pickler
This module provides efficient ways of working with data stored in
shared memory with numpy.memmap arrays without inducing any memory
copy between the parent and child processes.
This module should not be imported if multiprocessing is not
available as it implements subclasses of multiprocessing Pool
that uses a custom alternative to SimpleQueue.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Copyright: 2012, Olivier Grisel
# License: BSD 3 clause
from mmap import mmap
import errno
import os
import stat
import sys
import threading
import atexit
import tempfile
import shutil
try:
    # Python 2 compat
    from cPickle import loads
    from cPickle import dumps
    import copy_reg as copyreg
except ImportError:
    from pickle import loads
    from pickle import dumps
    import copyreg
# Customizable pure Python pickler in Python 2
# customizable C-optimized pickler under Python 3.3+
from pickle import Pickler
from pickle import HIGHEST_PROTOCOL
from io import BytesIO
from ._multiprocessing_helpers import mp, assert_spawning
# We need the class definition to derive from it not the multiprocessing.Pool
# factory function
from multiprocessing.pool import Pool
try:
import numpy as np
from numpy.lib.stride_tricks import as_strided
except ImportError:
np = None
from .numpy_pickle import load
from .numpy_pickle import dump
from .hashing import hash
# Some system have a ramdisk mounted by default, we can use it instead of /tmp
# as the default folder to dump big arrays to share with subprocesses
SYSTEM_SHARED_MEM_FS = '/dev/shm'
# Folder and file permissions to chmod temporary files generated by the
# memmaping pool. Only the owner of the Python process can access the
# temporary files and folder.
FOLDER_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
FILE_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR
###############################################################################
# Support for efficient transient pickling of numpy data structures
def _get_backing_memmap(a):
"""Recursively look up the original np.memmap instance base if any"""
b = getattr(a, 'base', None)
if b is None:
# TODO: check scipy sparse datastructure if scipy is installed
        # a has no base array: it cannot be backed by a memmap
return None
elif isinstance(b, mmap):
# a is already a real memmap instance.
return a
else:
# Recursive exploration of the base ancestry
return _get_backing_memmap(b)
def has_shareable_memory(a):
"""Return True if a is backed by some mmap buffer directly or not"""
return _get_backing_memmap(a) is not None
def _strided_from_memmap(filename, dtype, mode, offset, order, shape, strides,
total_buffer_len):
"""Reconstruct an array view on a memmory mapped file"""
if mode == 'w+':
# Do not zero the original data when unpickling
mode = 'r+'
if strides is None:
# Simple, contiguous memmap
return np.memmap(filename, dtype=dtype, shape=shape, mode=mode,
offset=offset, order=order)
else:
# For non-contiguous data, memmap the total enclosing buffer and then
# extract the non-contiguous view with the stride-tricks API
base = np.memmap(filename, dtype=dtype, shape=total_buffer_len,
mode=mode, offset=offset, order=order)
return as_strided(base, shape=shape, strides=strides)
def _reduce_memmap_backed(a, m):
"""Pickling reduction for memmap backed arrays
a is expected to be an instance of np.ndarray (or np.memmap)
m is expected to be an instance of np.memmap on the top of the ``base``
attribute ancestry of a. ``m.base`` should be the real python mmap object.
"""
# offset that comes from the striding differences between a and m
a_start, a_end = np.byte_bounds(a)
m_start = np.byte_bounds(m)[0]
offset = a_start - m_start
# offset from the backing memmap
offset += m.offset
if m.flags['F_CONTIGUOUS']:
order = 'F'
else:
# The backing memmap buffer is necessarily contiguous hence C if not
# Fortran
order = 'C'
if a.flags['F_CONTIGUOUS'] or a.flags['C_CONTIGUOUS']:
# If the array is a contiguous view, no need to pass the strides
strides = None
total_buffer_len = None
else:
# Compute the total number of items to map from which the strided
# view will be extracted.
strides = a.strides
total_buffer_len = (a_end - a_start) // a.itemsize
return (_strided_from_memmap,
(m.filename, a.dtype, m.mode, offset, order, a.shape, strides,
total_buffer_len))
def reduce_memmap(a):
"""Pickle the descriptors of a memmap instance to reopen on same file"""
m = _get_backing_memmap(a)
if m is not None:
# m is a real mmap backed memmap instance, reduce a preserving striding
# information
return _reduce_memmap_backed(a, m)
else:
# This memmap instance is actually backed by a regular in-memory
# buffer: this can happen when using binary operators on numpy.memmap
# instances
return (loads, (dumps(np.asarray(a), protocol=HIGHEST_PROTOCOL),))
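# Illustrative sketch (hypothetical helper, not part of the original module):
# ``reduce_memmap`` returns a ``(reconstructor, args)`` pair; calling the
# reconstructor reopens the same backing file instead of copying the data,
# which is what makes dispatching memmap-backed arrays to workers cheap.
def _example_reduce_memmap_roundtrip(filename):
    import numpy as np
    a = np.memmap(filename, dtype=np.float64, shape=(4, 3), mode='w+')
    a[:] = np.arange(12, dtype=np.float64).reshape(4, 3)
    view = a[1:, :2]                         # strided, memmap-backed view
    reconstructor, args = reduce_memmap(view)
    b = reconstructor(*args)                 # reopens filename, no data copy
    assert has_shareable_memory(b)
    return b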
class ArrayMemmapReducer(object):
"""Reducer callable to dump large arrays to memmap files.
Parameters
----------
max_nbytes: int
        Threshold to trigger memmaping of large arrays to files created
        in a folder.
temp_folder: str
Path of a folder where files for backing memmaped arrays are created.
mmap_mode: 'r', 'r+' or 'c'
Mode for the created memmap datastructure. See the documentation of
numpy.memmap for more details. Note: 'w+' is coerced to 'r+'
automatically to avoid zeroing the data on unpickling.
verbose: int, optional, 0 by default
If verbose > 0, memmap creations are logged.
If verbose > 1, both memmap creations, reuse and array pickling are
logged.
context_id: int, optional, None by default
Set to a value identifying a call context to spare costly hashing of
the content of the input arrays when it is safe to assume that each
array will not be mutated by the parent process for the duration of the
dispatch process. This is the case when using the high level Parallel
API. It might not be the case when using the MemmapingPool API
directly.
prewarm: bool, optional, False by default.
        Force a read on newly memmaped arrays to make sure that the OS
        pre-caches them in memory. This can be useful to avoid concurrent
        disk access when the same data array is passed to different worker
        processes.
"""
def __init__(self, max_nbytes, temp_folder, mmap_mode, verbose=0,
context_id=None, prewarm=True):
self._max_nbytes = max_nbytes
self._temp_folder = temp_folder
self._mmap_mode = mmap_mode
self.verbose = int(verbose)
self._context_id = context_id
self._prewarm = prewarm
def __call__(self, a):
m = _get_backing_memmap(a)
if m is not None:
# a is already backed by a memmap file, let's reuse it directly
return _reduce_memmap_backed(a, m)
if (not a.dtype.hasobject
and self._max_nbytes is not None
and a.nbytes > self._max_nbytes):
# check that the folder exists (lazily create the pool temp folder
# if required)
try:
os.makedirs(self._temp_folder)
os.chmod(self._temp_folder, FOLDER_PERMISSIONS)
except OSError as e:
if e.errno != errno.EEXIST:
raise e
# Find a unique, concurrent safe filename for writing the
# content of this array only once.
if self._context_id is not None:
marker = self._context_id
else:
marker = hash(a)
basename = "%d-%d-%d-%s.pkl" % (
os.getpid(), id(threading.current_thread()), id(a), marker)
filename = os.path.join(self._temp_folder, basename)
# In case the same array with the same content is passed several
# times to the pool subprocess children, serialize it only once
# XXX: implement an explicit reference counting scheme to make it
# possible to delete temporary files as soon as the workers are
# done processing this data.
if not os.path.exists(filename):
if self.verbose > 0:
print("Memmaping (shape=%r, dtype=%s) to new file %s" % (
a.shape, a.dtype, filename))
for dumped_filename in dump(a, filename):
os.chmod(dumped_filename, FILE_PERMISSIONS)
if self._prewarm:
# Warm up the data to avoid concurrent disk access in
# multiple children processes
load(filename, mmap_mode=self._mmap_mode).max()
elif self.verbose > 1:
print("Memmaping (shape=%s, dtype=%s) to old file %s" % (
a.shape, a.dtype, filename))
# Let's use the memmap reducer
return reduce_memmap(load(filename, mmap_mode=self._mmap_mode))
else:
# do not convert a into memmap, let pickler do its usual copy with
# the default system pickler
if self.verbose > 1:
print("Pickling array (shape=%r, dtype=%s)." % (
a.shape, a.dtype))
return (loads, (dumps(a, protocol=HIGHEST_PROTOCOL),))
###############################################################################
# Enable custom pickling in Pool queues
class CustomizablePickler(Pickler):
"""Pickler that accepts custom reducers.
HIGHEST_PROTOCOL is selected by default as this pickler is used
to pickle ephemeral datastructures for interprocess communication
hence no backward compatibility is required.
    `reducers` is expected to be a dictionary with key/values
    being `(type, callable)` pairs where `callable` is a function that,
    given an instance of `type`, will return a tuple `(constructor,
    tuple_of_objects)` to rebuild an instance out of the pickled
    `tuple_of_objects`, as a `__reduce__` method would. See the
    standard library documentation on pickling for more details.
"""
    # We override the pure Python pickler as it's the only way to be able to
# customize the dispatch table without side effects in Python 2.6
# to 3.2. For Python 3.3+ leverage the new dispatch_table
# feature from http://bugs.python.org/issue14166 that makes it possible
# to use the C implementation of the Pickler which is faster.
def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
Pickler.__init__(self, writer, protocol=protocol)
if reducers is None:
reducers = {}
if hasattr(Pickler, 'dispatch'):
# Make the dispatch registry an instance level attribute instead of
# a reference to the class dictionary under Python 2
self.dispatch = Pickler.dispatch.copy()
else:
# Under Python 3 initialize the dispatch table with a copy of the
# default registry
self.dispatch_table = copyreg.dispatch_table.copy()
for type, reduce_func in reducers.items():
self.register(type, reduce_func)
def register(self, type, reduce_func):
if hasattr(Pickler, 'dispatch'):
# Python 2 pickler dispatching is not explicitly customizable.
# Let us use a closure to workaround this limitation.
def dispatcher(self, obj):
reduced = reduce_func(obj)
self.save_reduce(obj=obj, *reduced)
self.dispatch[type] = dispatcher
else:
self.dispatch_table[type] = reduce_func
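# Illustrative sketch (hypothetical helper, not part of the original module):
# a reducer maps a type to a callable returning ``(constructor, args)``; here
# complex numbers are rebuilt from their real and imaginary parts instead of
# going through the default pickling path.
def _example_customizable_pickler(obj):
    buffer = BytesIO()
    reducers = {complex: lambda c: (complex, (c.real, c.imag))}
    CustomizablePickler(buffer, reducers).dump(obj)
    return loads(buffer.getvalue())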
class CustomizablePicklingQueue(object):
"""Locked Pipe implementation that uses a customizable pickler.
This class is an alternative to the multiprocessing implementation
of SimpleQueue in order to make it possible to pass custom
pickling reducers, for instance to avoid memory copy when passing
    memory mapped datastructures.
    `reducers` is expected to be a dictionary with key/values
    being `(type, callable)` pairs where `callable` is a function that,
    given an instance of `type`, will return a tuple `(constructor,
    tuple_of_objects)` to rebuild an instance out of the pickled
    `tuple_of_objects`, as a `__reduce__` method would. See the
    standard library documentation on pickling for more details.
"""
def __init__(self, context, reducers=None):
self._reducers = reducers
self._reader, self._writer = context.Pipe(duplex=False)
self._rlock = context.Lock()
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = context.Lock()
self._make_methods()
def __getstate__(self):
assert_spawning(self)
return (self._reader, self._writer, self._rlock, self._wlock,
self._reducers)
def __setstate__(self, state):
(self._reader, self._writer, self._rlock, self._wlock,
self._reducers) = state
self._make_methods()
def empty(self):
return not self._reader.poll()
def _make_methods(self):
self._recv = recv = self._reader.recv
racquire, rrelease = self._rlock.acquire, self._rlock.release
def get():
racquire()
try:
return recv()
finally:
rrelease()
self.get = get
if self._reducers:
def send(obj):
buffer = BytesIO()
CustomizablePickler(buffer, self._reducers).dump(obj)
self._writer.send_bytes(buffer.getvalue())
self._send = send
else:
self._send = send = self._writer.send
if self._wlock is None:
# writes to a message oriented win32 pipe are atomic
self.put = send
else:
wlock_acquire, wlock_release = (
self._wlock.acquire, self._wlock.release)
def put(obj):
wlock_acquire()
try:
return send(obj)
finally:
wlock_release()
self.put = put
class PicklingPool(Pool):
"""Pool implementation with customizable pickling reducers.
This is useful to control how data is shipped between processes
and makes it possible to use shared memory without useless
    copies induced by the default pickling methods of the original
objects passed as arguments to dispatch.
`forward_reducers` and `backward_reducers` are expected to be
dictionaries with key/values being `(type, callable)` pairs where
    `callable` is a function that, given an instance of `type`, will return
    a tuple `(constructor, tuple_of_objects)` to rebuild an instance out
    of the pickled `tuple_of_objects`, as a `__reduce__` method would. See
    the standard library documentation on pickling for more details.
"""
def __init__(self, processes=None, forward_reducers=None,
backward_reducers=None, **kwargs):
if forward_reducers is None:
forward_reducers = dict()
if backward_reducers is None:
backward_reducers = dict()
self._forward_reducers = forward_reducers
self._backward_reducers = backward_reducers
poolargs = dict(processes=processes)
poolargs.update(kwargs)
super(PicklingPool, self).__init__(**poolargs)
def _setup_queues(self):
context = getattr(self, '_ctx', mp)
self._inqueue = CustomizablePicklingQueue(context,
self._forward_reducers)
self._outqueue = CustomizablePicklingQueue(context,
self._backward_reducers)
self._quick_put = self._inqueue._send
self._quick_get = self._outqueue._recv
def delete_folder(folder_path):
"""Utility function to cleanup a temporary folder if still existing"""
if os.path.exists(folder_path):
shutil.rmtree(folder_path)
class MemmapingPool(PicklingPool):
"""Process pool that shares large arrays to avoid memory copy.
This drop-in replacement for `multiprocessing.pool.Pool` makes
it possible to work efficiently with shared memory in a numpy
context.
Existing instances of numpy.memmap are preserved: the child
    subprocesses will have access to the same shared memory in the
original mode except for the 'w+' mode that is automatically
transformed as 'r+' to avoid zeroing the original data upon
instantiation.
    Furthermore, large arrays from the parent process are automatically
    dumped to a temporary folder on the filesystem so that child
    processes can access their content via memmaping (file system
    backed shared memory).
Note: it is important to call the terminate method to collect
the temporary folder used by the pool.
Parameters
----------
processes: int, optional
Number of worker processes running concurrently in the pool.
initializer: callable, optional
Callable executed on worker process creation.
initargs: tuple, optional
Arguments passed to the initializer callable.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
    max_nbytes: int or None, optional, 1e6 by default
        Threshold on the size of arrays passed to the workers that
        triggers automated memory mapping in temp_folder.
Use None to disable memmaping of large arrays.
forward_reducers: dictionary, optional
Reducers used to pickle objects passed from master to worker
processes: see below.
backward_reducers: dictionary, optional
Reducers used to pickle return values from workers back to the
master process.
verbose: int, optional
Make it possible to monitor how the communication of numpy arrays
with the subprocess is handled (pickling or memmaping)
context_id: int, optional, None by default
Set to a value identifying a call context to spare costly hashing of
the content of the input arrays when it is safe to assume that each
array will not be mutated by the parent process for the duration of the
dispatch process. This is the case when using the high level Parallel
API.
prewarm: bool or str, optional, "auto" by default.
If True, force a read on newly memmaped array to make sure that OS pre-
cache it in memory. This can be useful to avoid concurrent disk access
when the same data array is passed to different worker processes.
If "auto" (by default), prewarm is set to True, unless the Linux shared
memory partition /dev/shm is available and used as temp_folder.
`forward_reducers` and `backward_reducers` are expected to be
dictionaries with key/values being `(type, callable)` pairs where
    `callable` is a function that, given an instance of `type`, will return
    a tuple `(constructor, tuple_of_objects)` to rebuild an instance out
    of the pickled `tuple_of_objects`, as a `__reduce__` method would. See
    the standard library documentation on pickling for more details.
"""
def __init__(self, processes=None, temp_folder=None, max_nbytes=1e6,
mmap_mode='r', forward_reducers=None, backward_reducers=None,
verbose=0, context_id=None, prewarm=False, **kwargs):
if forward_reducers is None:
forward_reducers = dict()
if backward_reducers is None:
backward_reducers = dict()
# Prepare a sub-folder name for the serialization of this particular
# pool instance (do not create in advance to spare FS write access if
# no array is to be dumped):
use_shared_mem = False
pool_folder_name = "joblib_memmaping_pool_%d_%d" % (
os.getpid(), id(self))
if temp_folder is None:
temp_folder = os.environ.get('JOBLIB_TEMP_FOLDER', None)
if temp_folder is None:
if os.path.exists(SYSTEM_SHARED_MEM_FS):
try:
temp_folder = SYSTEM_SHARED_MEM_FS
pool_folder = os.path.join(temp_folder, pool_folder_name)
if not os.path.exists(pool_folder):
os.makedirs(pool_folder)
use_shared_mem = True
except IOError:
                    # Missing rights in the /dev/shm partition,
# fallback to regular temp folder.
temp_folder = None
if temp_folder is None:
# Fallback to the default tmp folder, typically /tmp
temp_folder = tempfile.gettempdir()
temp_folder = os.path.abspath(os.path.expanduser(temp_folder))
pool_folder = os.path.join(temp_folder, pool_folder_name)
self._temp_folder = pool_folder
# Register the garbage collector at program exit in case caller forgets
# to call terminate explicitly: note we do not pass any reference to
# self to ensure that this callback won't prevent garbage collection of
# the pool instance and related file handler resources such as POSIX
# semaphores and pipes
atexit.register(lambda: delete_folder(pool_folder))
if np is not None:
            # Register smart numpy.ndarray reducers that detect memmap backed
            # arrays and that are also able to dump large in-memory arrays
            # over the max_nbytes threshold to memmap files
if prewarm == "auto":
prewarm = not use_shared_mem
forward_reduce_ndarray = ArrayMemmapReducer(
max_nbytes, pool_folder, mmap_mode, verbose,
context_id=context_id, prewarm=prewarm)
forward_reducers[np.ndarray] = forward_reduce_ndarray
forward_reducers[np.memmap] = reduce_memmap
# Communication from child process to the parent process always
# pickles in-memory numpy.ndarray without dumping them as memmap
# to avoid confusing the caller and make it tricky to collect the
# temporary folder
backward_reduce_ndarray = ArrayMemmapReducer(
None, pool_folder, mmap_mode, verbose)
backward_reducers[np.ndarray] = backward_reduce_ndarray
backward_reducers[np.memmap] = reduce_memmap
poolargs = dict(
processes=processes,
forward_reducers=forward_reducers,
backward_reducers=backward_reducers)
poolargs.update(kwargs)
super(MemmapingPool, self).__init__(**poolargs)
def terminate(self):
super(MemmapingPool, self).terminate()
delete_folder(self._temp_folder)
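# Illustrative sketch (hypothetical helper, not part of the original module):
# arrays bigger than ``max_nbytes`` are dumped once to the pool's temporary
# folder and the workers receive memmap views instead of pickled copies;
# calling ``terminate()`` is what removes that temporary folder.
def _example_memmaping_pool_usage():
    import numpy as np
    data = np.random.rand(int(1e6))      # ~8 MB, above the 1e6 bytes threshold
    pool = MemmapingPool(processes=2, max_nbytes=1e6, mmap_mode='r')
    try:
        sums = pool.map(np.sum, [data, data, data])
    finally:
        pool.terminate()                 # cleans up the memmaped temp files
    return sums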
|
msebire/intellij-community
|
refs/heads/master
|
python/helpers/pydev/pydevd_attach_to_process/winappdbg/search.py
|
102
|
#!~/.wine/drive_c/Python25/python.exe
# -*- coding: utf-8 -*-
# Process memory finder
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Process memory search.
@group Memory search:
Search,
Pattern,
BytePattern,
TextPattern,
RegExpPattern,
HexPattern
"""
__revision__ = "$Id$"
__all__ = [
'Search',
'Pattern',
'BytePattern',
'TextPattern',
'RegExpPattern',
'HexPattern',
]
from winappdbg.textio import HexInput
from winappdbg.util import StaticClass, MemoryAddresses
from winappdbg import win32, compat
import warnings
try:
# http://pypi.python.org/pypi/regex
import regex as re
except ImportError:
import re
#==============================================================================
class Pattern (object):
"""
Base class for search patterns.
The following L{Pattern} subclasses are provided by WinAppDbg:
- L{BytePattern}
- L{TextPattern}
- L{RegExpPattern}
- L{HexPattern}
@see: L{Search.search_process}
"""
def __init__(self, pattern):
"""
Class constructor.
The only mandatory argument should be the pattern string.
This method B{MUST} be reimplemented by subclasses of L{Pattern}.
"""
raise NotImplementedError()
def __len__(self):
"""
Returns the maximum expected length of the strings matched by this
pattern. Exact behavior is implementation dependent.
Ideally it should be an exact value, but in some cases it's not
possible to calculate so an upper limit should be returned instead.
If that's not possible either an exception must be raised.
This value will be used to calculate the required buffer size when
doing buffered searches.
This method B{MUST} be reimplemented by subclasses of L{Pattern}.
"""
raise NotImplementedError()
def read(self, process, address, size):
"""
Reads the requested number of bytes from the process memory at the
given address.
        Subclasses of L{Pattern} typically don't need to reimplement this
method.
"""
return process.read(address, size)
def find(self, buffer, pos = None):
"""
Searches for the pattern in the given buffer, optionally starting at
the given position within the buffer.
This method B{MUST} be reimplemented by subclasses of L{Pattern}.
@type buffer: str
@param buffer: Buffer to search on.
@type pos: int
@param pos:
(Optional) Position within the buffer to start searching from.
@rtype: tuple( int, int )
@return: Tuple containing the following:
- Position within the buffer where a match is found, or C{-1} if
no match was found.
- Length of the matched data if a match is found, or undefined if
no match was found.
"""
raise NotImplementedError()
def found(self, address, size, data):
"""
This method gets called when a match is found.
This allows subclasses of L{Pattern} to filter out unwanted results,
or modify the results before giving them to the caller of
L{Search.search_process}.
If the return value is C{None} the result is skipped.
Subclasses of L{Pattern} don't need to reimplement this method unless
filtering is needed.
@type address: int
@param address: The memory address where the pattern was found.
@type size: int
@param size: The size of the data that matches the pattern.
@type data: str
@param data: The data that matches the pattern.
@rtype: tuple( int, int, str )
@return: Tuple containing the following:
* The memory address where the pattern was found.
* The size of the data that matches the pattern.
* The data that matches the pattern.
"""
return (address, size, data)
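#------------------------------------------------------------------------------
# Illustrative sketch (hypothetical subclass, not part of the original module):
# a minimal Pattern only needs __init__, __len__ and find(), where find()
# returns a (position, length) tuple with position -1 when nothing matches.
class _NullDwordPattern (Pattern):
    """Toy pattern that matches four consecutive NUL bytes."""
    def __init__(self):
        self.length = 4
    def __len__(self):
        return self.length
    def find(self, buffer, pos = None):
        return buffer.find("\x00" * self.length, pos), self.length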
#------------------------------------------------------------------------------
class BytePattern (Pattern):
"""
Fixed byte pattern.
@type pattern: str
@ivar pattern: Byte string to search for.
@type length: int
@ivar length: Length of the byte pattern.
"""
def __init__(self, pattern):
"""
@type pattern: str
@param pattern: Byte string to search for.
"""
self.pattern = str(pattern)
self.length = len(pattern)
def __len__(self):
"""
Returns the exact length of the pattern.
@see: L{Pattern.__len__}
"""
return self.length
def find(self, buffer, pos = None):
return buffer.find(self.pattern, pos), self.length
#------------------------------------------------------------------------------
# FIXME: case insensitive compat.unicode searches are probably buggy!
class TextPattern (BytePattern):
"""
Text pattern.
@type isUnicode: bool
@ivar isUnicode: C{True} if the text to search for is a compat.unicode string,
C{False} otherwise.
@type encoding: str
@ivar encoding: Encoding for the text parameter.
Only used when the text to search for is a Unicode string.
Don't change unless you know what you're doing!
@type caseSensitive: bool
    @ivar caseSensitive: C{True} if the search is case sensitive,
C{False} otherwise.
"""
def __init__(self, text, encoding = "utf-16le", caseSensitive = False):
"""
@type text: str or compat.unicode
@param text: Text to search for.
@type encoding: str
@param encoding: (Optional) Encoding for the text parameter.
Only used when the text to search for is a Unicode string.
Don't change unless you know what you're doing!
@type caseSensitive: bool
        @param caseSensitive: C{True} if the search is case sensitive,
C{False} otherwise.
"""
self.isUnicode = isinstance(text, compat.unicode)
self.encoding = encoding
self.caseSensitive = caseSensitive
        pattern = text
        if not self.caseSensitive:
            pattern = pattern.lower()
        if self.isUnicode:
            pattern = pattern.encode(encoding)
super(TextPattern, self).__init__(pattern)
def read(self, process, address, size):
        data = super(TextPattern, self).read(process, address, size)
if not self.caseSensitive:
if self.isUnicode:
try:
encoding = self.encoding
text = data.decode(encoding, "replace")
text = text.lower()
new_data = text.encode(encoding, "replace")
if len(data) == len(new_data):
data = new_data
else:
data = data.lower()
except Exception:
data = data.lower()
else:
data = data.lower()
return data
def found(self, address, size, data):
if self.isUnicode:
try:
data = compat.unicode(data, self.encoding)
except Exception:
## traceback.print_exc() # XXX DEBUG
return None
return (address, size, data)
#------------------------------------------------------------------------------
class RegExpPattern (Pattern):
"""
Regular expression pattern.
@type pattern: str
@ivar pattern: Regular expression in text form.
@type flags: int
@ivar flags: Regular expression flags.
@type regexp: re.compile
@ivar regexp: Regular expression in compiled form.
@type maxLength: int
@ivar maxLength:
Maximum expected length of the strings matched by this regular
expression.
This value will be used to calculate the required buffer size when
doing buffered searches.
Ideally it should be an exact value, but in some cases it's not
possible to calculate so an upper limit should be given instead.
If that's not possible either, C{None} should be used. That will
cause an exception to be raised if this pattern is used in a
buffered search.
"""
def __init__(self, regexp, flags = 0, maxLength = None):
"""
@type regexp: str
@param regexp: Regular expression string.
@type flags: int
@param flags: Regular expression flags.
@type maxLength: int
@param maxLength: Maximum expected length of the strings matched by
this regular expression.
This value will be used to calculate the required buffer size when
doing buffered searches.
Ideally it should be an exact value, but in some cases it's not
possible to calculate so an upper limit should be given instead.
If that's not possible either, C{None} should be used. That will
cause an exception to be raised if this pattern is used in a
buffered search.
"""
self.pattern = regexp
self.flags = flags
self.regexp = re.compile(regexp, flags)
self.maxLength = maxLength
def __len__(self):
"""
Returns the maximum expected length of the strings matched by this
pattern. This value is taken from the C{maxLength} argument of the
        constructor of this class.
Ideally it should be an exact value, but in some cases it's not
possible to calculate so an upper limit should be returned instead.
If that's not possible either an exception must be raised.
This value will be used to calculate the required buffer size when
doing buffered searches.
"""
if self.maxLength is None:
raise NotImplementedError()
return self.maxLength
def find(self, buffer, pos = None):
if not pos: # make sure pos is an int
pos = 0
match = self.regexp.search(buffer, pos)
if match:
start, end = match.span()
return start, end - start
return -1, 0
#------------------------------------------------------------------------------
class HexPattern (RegExpPattern):
"""
Hexadecimal pattern.
Hex patterns must be in this form::
"68 65 6c 6c 6f 20 77 6f 72 6c 64" # "hello world"
Spaces are optional. Capitalization of hex digits doesn't matter.
This is exactly equivalent to the previous example::
"68656C6C6F20776F726C64" # "hello world"
Wildcards are allowed, in the form of a C{?} sign in any hex digit::
"5? 5? c3" # pop register / pop register / ret
"b8 ?? ?? ?? ??" # mov eax, immediate value
@type pattern: str
@ivar pattern: Hexadecimal pattern.
"""
def __new__(cls, pattern):
"""
If the pattern is completely static (no wildcards are present) a
L{BytePattern} is created instead. That's because searching for a
fixed byte pattern is faster than searching for a regular expression.
"""
if '?' not in pattern:
return BytePattern( HexInput.hexadecimal(pattern) )
return object.__new__(cls, pattern)
def __init__(self, hexa):
"""
Hex patterns must be in this form::
"68 65 6c 6c 6f 20 77 6f 72 6c 64" # "hello world"
Spaces are optional. Capitalization of hex digits doesn't matter.
This is exactly equivalent to the previous example::
"68656C6C6F20776F726C64" # "hello world"
Wildcards are allowed, in the form of a C{?} sign in any hex digit::
"5? 5? c3" # pop register / pop register / ret
"b8 ?? ?? ?? ??" # mov eax, immediate value
@type hexa: str
@param hexa: Pattern to search for.
"""
maxLength = len([x for x in hexa
if x in "?0123456789ABCDEFabcdef"]) / 2
super(HexPattern, self).__init__(HexInput.pattern(hexa),
maxLength = maxLength)
#==============================================================================
class Search (StaticClass):
"""
Static class to group the search functionality.
Do not instance this class! Use its static methods instead.
"""
# TODO: aligned searches
# TODO: method to coalesce search results
# TODO: search memory dumps
# TODO: search non-ascii C strings
@staticmethod
def search_process(process, pattern, minAddr = None,
maxAddr = None,
bufferPages = None,
overlapping = False):
"""
Search for the given pattern within the process memory.
@type process: L{Process}
@param process: Process to search.
@type pattern: L{Pattern}
@param pattern: Pattern to search for.
It must be an instance of a subclass of L{Pattern}.
The following L{Pattern} subclasses are provided by WinAppDbg:
- L{BytePattern}
- L{TextPattern}
- L{RegExpPattern}
- L{HexPattern}
You can also write your own subclass of L{Pattern} for customized
searches.
@type minAddr: int
@param minAddr: (Optional) Start the search at this memory address.
@type maxAddr: int
@param maxAddr: (Optional) Stop the search at this memory address.
@type bufferPages: int
@param bufferPages: (Optional) Number of memory pages to buffer when
performing the search. Valid values are:
- C{0} or C{None}:
Automatically determine the required buffer size. May not give
complete results for regular expressions that match variable
sized strings.
- C{> 0}: Set the buffer size, in memory pages.
- C{< 0}: Disable buffering entirely. This may give you a little
speed gain at the cost of an increased memory usage. If the
target process has very large contiguous memory regions it may
actually be slower or even fail. It's also the only way to
guarantee complete results for regular expressions that match
variable sized strings.
@type overlapping: bool
@param overlapping: C{True} to allow overlapping results, C{False}
otherwise.
Overlapping results yield the maximum possible number of results.
For example, if searching for "AAAA" within "AAAAAAAA" at address
C{0x10000}, when overlapping is turned off the following matches
are yielded::
(0x10000, 4, "AAAA")
(0x10004, 4, "AAAA")
If overlapping is turned on, the following matches are yielded::
(0x10000, 4, "AAAA")
(0x10001, 4, "AAAA")
(0x10002, 4, "AAAA")
(0x10003, 4, "AAAA")
(0x10004, 4, "AAAA")
As you can see, the middle results are overlapping the last two.
@rtype: iterator of tuple( int, int, str )
@return: An iterator of tuples. Each tuple contains the following:
- The memory address where the pattern was found.
- The size of the data that matches the pattern.
- The data that matches the pattern.
@raise WindowsError: An error occurred when querying or reading the
process memory.
"""
# Do some namespace lookups of symbols we'll be using frequently.
MEM_COMMIT = win32.MEM_COMMIT
PAGE_GUARD = win32.PAGE_GUARD
page = MemoryAddresses.pageSize
read = pattern.read
find = pattern.find
# Calculate the address range.
if minAddr is None:
minAddr = 0
if maxAddr is None:
maxAddr = win32.LPVOID(-1).value # XXX HACK
# Calculate the buffer size from the number of pages.
if bufferPages is None:
try:
size = MemoryAddresses.\
align_address_to_page_end(len(pattern)) + page
except NotImplementedError:
size = None
elif bufferPages > 0:
size = page * (bufferPages + 1)
else:
size = None
# Get the memory map of the process.
memory_map = process.iter_memory_map(minAddr, maxAddr)
# Perform search with buffering enabled.
if size:
# Loop through all memory blocks containing data.
buffer = "" # buffer to hold the memory data
prev_addr = 0 # previous memory block address
last = 0 # position of the last match
delta = 0 # delta of last read address and start of buffer
for mbi in memory_map:
# Skip blocks with no data to search on.
if not mbi.has_content():
continue
# Get the address and size of this block.
address = mbi.BaseAddress # current address to search on
block_size = mbi.RegionSize # total size of the block
if address >= maxAddr:
break
end = address + block_size # end address of the block
# If the block is contiguous to the previous block,
# coalesce the new data in the buffer.
if delta and address == prev_addr:
buffer += read(process, address, page)
# If not, clear the buffer and read new data.
else:
buffer = read(process, address, min(size, block_size))
last = 0
delta = 0
# Search for the pattern in this block.
while 1:
# Yield each match of the pattern in the buffer.
pos, length = find(buffer, last)
while pos >= last:
match_addr = address + pos - delta
if minAddr <= match_addr < maxAddr:
result = pattern.found(
match_addr, length,
buffer [ pos : pos + length ] )
if result is not None:
yield result
if overlapping:
last = pos + 1
else:
last = pos + length
pos, length = find(buffer, last)
# Advance to the next page.
address = address + page
block_size = block_size - page
prev_addr = address
# Fix the position of the last match.
last = last - page
if last < 0:
last = 0
# Remove the first page in the buffer.
buffer = buffer[ page : ]
delta = page
# If we haven't reached the end of the block yet,
                    # read the next page in the block and keep searching.
if address < end:
buffer = buffer + read(process, address, page)
# Otherwise, we're done searching this block.
else:
break
# Perform search with buffering disabled.
else:
# Loop through all memory blocks containing data.
for mbi in memory_map:
# Skip blocks with no data to search on.
if not mbi.has_content():
continue
# Get the address and size of this block.
address = mbi.BaseAddress
block_size = mbi.RegionSize
if address >= maxAddr:
                    break
# Read the whole memory region.
buffer = process.read(address, block_size)
# Search for the pattern in this region.
pos, length = find(buffer)
last = 0
while pos >= last:
match_addr = address + pos
if minAddr <= match_addr < maxAddr:
result = pattern.found(
match_addr, length,
buffer [ pos : pos + length ] )
if result is not None:
yield result
if overlapping:
last = pos + 1
else:
last = pos + length
pos, length = find(buffer, last)
@classmethod
def extract_ascii_strings(cls, process, minSize = 4, maxSize = 1024):
"""
Extract ASCII strings from the process memory.
@type process: L{Process}
@param process: Process to search.
@type minSize: int
@param minSize: (Optional) Minimum size of the strings to search for.
@type maxSize: int
@param maxSize: (Optional) Maximum size of the strings to search for.
@rtype: iterator of tuple(int, int, str)
@return: Iterator of strings extracted from the process memory.
Each tuple contains the following:
- The memory address where the string was found.
- The size of the string.
- The string.
"""
regexp = r"[\s\w\!\@\#\$\%%\^\&\*\(\)\{\}\[\]\~\`\'\"\:\;\.\,\\\/\-\+\=\_\<\>]{%d,%d}\0" % (minSize, maxSize)
pattern = RegExpPattern(regexp, 0, maxSize)
return cls.search_process(process, pattern, overlapping = False)
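# --- Usage sketch (editorial addition, illustrative only) ---
# A minimal example of how the classes above fit together. It assumes the
# caller already has an attached winappdbg Process instance; the helper is
# only defined here and nothing runs at import time.
def _example_find_mov_eax(process):
    """Illustrative only: yield addresses of 'mov eax, imm32' opcodes."""
    # Wildcarded hex patterns compile to a regular expression; fully static
    # patterns silently fall back to the faster BytePattern search path.
    pattern = HexPattern("b8 ?? ?? ?? ??")
    for address, size, data in Search.search_process(process, pattern,
                                                     overlapping = False):
        yield address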
|
koobonil/Boss2D
|
refs/heads/master
|
Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/python/kernel_tests/large_concat_op_test.py
|
133
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Concat Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class LargeConcatOpTest(test.TestCase):
"""Tests that belong in concat_op_test.py, but run over large tensors."""
def testConcatLargeTensors(self):
# CPU-only test, because it fails on GPUs with <= 4GB memory.
with ops.device("/cpu:0"):
a = array_ops.ones([2**31 + 6], dtype=dtypes.int8)
b = array_ops.zeros([1024], dtype=dtypes.int8)
onezeros = array_ops.concat([a, b], 0)
with self.test_session(use_gpu=False):
# TODO(dga): Add more depth to this test to validate correctness,
# not just non-crashingness, once other large tensor fixes have gone in.
_ = onezeros.eval()
if __name__ == "__main__":
test.main()
|
praba230890/frappe
|
refs/heads/develop
|
frappe/core/page/modules_setup/modules_setup.py
|
42
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
@frappe.whitelist()
def update(ml):
"""update modules"""
frappe.db.set_global('hidden_modules', ml)
frappe.msgprint(frappe._('Updated'))
frappe.clear_cache()
|
Peddle/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/django/contrib/comments/signals.py
|
311
|
"""
Signals relating to comments.
"""
from django.dispatch import Signal
# Sent just before a comment will be posted (after it's been approved and
# moderated); this can be used to modify the comment (in place) with posting
# details or other such actions. If any receiver returns False the comment will
# be discarded and a 400 response returned. This signal is sent at more or less
# the same time (just before, actually) as the Comment object's pre-save signal,
# except that the HTTP request is sent along with this signal.
comment_will_be_posted = Signal(providing_args=["comment", "request"])
# Sent just after a comment was posted. See above for how this differs
# from the Comment object's post-save signal.
comment_was_posted = Signal(providing_args=["comment", "request"])
# Sent after a comment was "flagged" in some way. Check the flag to see if this
# was a user requesting removal of a comment, a moderator approving/removing a
# comment, or some other custom user flag.
comment_was_flagged = Signal(providing_args=["comment", "flag", "created", "request"])
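# --- Usage sketch (editorial addition, illustrative only) ---
# Receivers get the signal's keyword arguments ("comment" and "request" here);
# returning False from a comment_will_be_posted receiver rejects the comment.
# The receiver below is defined purely as an example and is never connected.
def _example_reject_short_comments(sender, comment, request, **kwargs):
    """Illustrative only: reject comments shorter than ten characters."""
    if len(comment.comment.strip()) < 10:
        return False
# A project would opt in explicitly, for instance:
# comment_will_be_posted.connect(_example_reject_short_comments)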
|
dwf/numpy
|
refs/heads/master
|
numpy/polynomial/tests/test_hermite_e.py
|
5
|
"""Tests for hermite_e module.
"""
from __future__ import division
import numpy as np
import numpy.polynomial.hermite_e as herme
from numpy.polynomial.polynomial import polyval
from numpy.testing import *
He0 = np.array([ 1 ])
He1 = np.array([ 0 , 1 ])
He2 = np.array([ -1 ,0 , 1 ])
He3 = np.array([ 0 , -3 ,0 , 1 ])
He4 = np.array([ 3 ,0 , -6 ,0 , 1 ])
He5 = np.array([ 0 , 15 ,0 , -10 ,0 , 1 ])
He6 = np.array([ -15 ,0 , 45 ,0 , -15 ,0 , 1 ])
He7 = np.array([ 0 , -105 ,0 , 105 ,0 , -21 ,0 , 1 ])
He8 = np.array([ 105 ,0 , -420 ,0 , 210 ,0 , -28 ,0 , 1 ])
He9 = np.array([ 0 , 945 ,0 , -1260 ,0 , 378 ,0 , -36 ,0 , 1 ])
Helist = [He0, He1, He2, He3, He4, He5, He6, He7, He8, He9]
def trim(x) :
return herme.hermetrim(x, tol=1e-6)
class TestConstants(TestCase) :
def test_hermedomain(self) :
assert_equal(herme.hermedomain, [-1, 1])
def test_hermezero(self) :
assert_equal(herme.hermezero, [0])
def test_hermeone(self) :
assert_equal(herme.hermeone, [1])
def test_hermex(self) :
assert_equal(herme.hermex, [0, 1])
class TestArithmetic(TestCase) :
x = np.linspace(-3, 3, 100)
def test_hermeadd(self) :
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
tgt = np.zeros(max(i,j) + 1)
tgt[i] += 1
tgt[j] += 1
res = herme.hermeadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermesub(self) :
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
tgt = np.zeros(max(i,j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = herme.hermesub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermemulx(self):
assert_equal(herme.hermemulx([0]), [0])
assert_equal(herme.hermemulx([1]), [0,1])
for i in range(1, 5):
ser = [0]*i + [1]
tgt = [0]*(i - 1) + [i, 0, 1]
assert_equal(herme.hermemulx(ser), tgt)
def test_hermemul(self) :
# check values of result
for i in range(5) :
pol1 = [0]*i + [1]
val1 = herme.hermeval(self.x, pol1)
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
pol2 = [0]*j + [1]
val2 = herme.hermeval(self.x, pol2)
pol3 = herme.hermemul(pol1, pol2)
val3 = herme.hermeval(self.x, pol3)
assert_(len(pol3) == i + j + 1, msg)
assert_almost_equal(val3, val1*val2, err_msg=msg)
def test_hermediv(self) :
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = herme.hermeadd(ci, cj)
quo, rem = herme.hermediv(tgt, ci)
res = herme.hermeadd(herme.hermemul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation(TestCase) :
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([4., 2., 3.])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
y = polyval(x, [1., 2., 3.])
def test_hermeval(self) :
#check empty input
assert_equal(herme.hermeval([], [1]).size, 0)
        #check normal input
x = np.linspace(-1,1)
y = [polyval(x, c) for c in Helist]
for i in range(10) :
msg = "At i=%d" % i
ser = np.zeros
tgt = y[i]
res = herme.hermeval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
#check that shape is preserved
for i in range(3) :
dims = [2]*i
x = np.zeros(dims)
assert_equal(herme.hermeval(x, [1]).shape, dims)
assert_equal(herme.hermeval(x, [1,0]).shape, dims)
assert_equal(herme.hermeval(x, [1,0,0]).shape, dims)
def test_hermeval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d)
#test values
tgt = y1*y2
res = herme.hermeval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2,3))
res = herme.hermeval2d(z, z, self.c2d)
assert_(res.shape == (2,3))
def test_hermeval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d)
#test values
tgt = y1*y2*y3
res = herme.hermeval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2,3))
res = herme.hermeval3d(z, z, z, self.c3d)
assert_(res.shape == (2,3))
def test_hermegrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j->ij', y1, y2)
res = herme.hermegrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2,3))
res = herme.hermegrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3)*2)
def test_hermegrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = herme.hermegrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2,3))
res = herme.hermegrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3)*3)
class TestIntegral(TestCase):
def test_hermeint(self) :
# check exceptions
assert_raises(ValueError, herme.hermeint, [0], .5)
assert_raises(ValueError, herme.hermeint, [0], -1)
assert_raises(ValueError, herme.hermeint, [0], 1, [0,0])
# test integration of zero polynomial
for i in range(2, 5):
k = [0]*(i - 2) + [1]
res = herme.hermeint([0], m=i, k=k)
assert_almost_equal(res, [0, 1])
# check single integration with integration constant
for i in range(5) :
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
hermepol = herme.poly2herme(pol)
hermeint = herme.hermeint(hermepol, m=1, k=[i])
res = herme.herme2poly(hermeint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5) :
scl = i + 1
pol = [0]*i + [1]
hermepol = herme.poly2herme(pol)
hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1)
assert_almost_equal(herme.hermeval(-1, hermeint), i)
# check single integration with integration constant and scaling
for i in range(5) :
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
hermepol = herme.poly2herme(pol)
hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2)
res = herme.herme2poly(hermeint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = herme.hermeint(tgt, m=1)
res = herme.hermeint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = herme.hermeint(tgt, m=1, k=[k])
res = herme.hermeint(pol, m=j, k=range(j))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1)
res = herme.hermeint(pol, m=j, k=range(j), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = herme.hermeint(tgt, m=1, k=[k], scl=2)
res = herme.hermeint(pol, m=j, k=range(j), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_hermeint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([herme.hermeint(c) for c in c2d.T]).T
res = herme.hermeint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([herme.hermeint(c) for c in c2d])
res = herme.hermeint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([herme.hermeint(c, k=3) for c in c2d])
res = herme.hermeint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
class TestDerivative(TestCase) :
def test_hermeder(self) :
# check exceptions
assert_raises(ValueError, herme.hermeder, [0], .5)
assert_raises(ValueError, herme.hermeder, [0], -1)
# check that zeroth deriviative does nothing
for i in range(5) :
tgt = [0]*i + [1]
res = herme.hermeder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5) :
for j in range(2,5) :
tgt = [0]*i + [1]
res = herme.hermeder(herme.hermeint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5) :
for j in range(2,5) :
tgt = [0]*i + [1]
res = herme.hermeder(herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_hermeder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([herme.hermeder(c) for c in c2d.T]).T
res = herme.hermeder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([herme.hermeder(c) for c in c2d])
res = herme.hermeder(c2d, axis=1)
assert_almost_equal(res, tgt)
class TestVander(TestCase):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
def test_hermevander(self) :
# check for 1d x
x = np.arange(3)
v = herme.hermevander(x, 3)
assert_(v.shape == (3, 4))
for i in range(4) :
coef = [0]*i + [1]
assert_almost_equal(v[..., i], herme.hermeval(x, coef))
# check for 2d x
x = np.array([[1, 2], [3, 4], [5, 6]])
v = herme.hermevander(x, 3)
assert_(v.shape == (3, 2, 4))
for i in range(4) :
coef = [0]*i + [1]
assert_almost_equal(v[..., i], herme.hermeval(x, coef))
def test_hermevander2d(self) :
# also tests hermeval2d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3))
van = herme.hermevander2d(x1, x2, [1, 2])
tgt = herme.hermeval2d(x1, x2, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = herme.hermevander2d([x1], [x2], [1, 2])
assert_(van.shape == (1, 5, 6))
def test_hermevander3d(self) :
# also tests hermeval3d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3, 4))
van = herme.hermevander3d(x1, x2, x3, [1, 2, 3])
tgt = herme.hermeval3d(x1, x2, x3, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = herme.hermevander3d([x1], [x2], [x3], [1, 2, 3])
assert_(van.shape == (1, 5, 24))
class TestFitting(TestCase):
def test_hermefit(self) :
def f(x) :
return x*(x - 1)*(x - 2)
# Test exceptions
assert_raises(ValueError, herme.hermefit, [1], [1], -1)
assert_raises(TypeError, herme.hermefit, [[1]], [1], 0)
assert_raises(TypeError, herme.hermefit, [], [1], 0)
assert_raises(TypeError, herme.hermefit, [1], [[[1]]], 0)
assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0)
assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0)
assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1,1])
# Test fit
x = np.linspace(0,2)
y = f(x)
#
coef3 = herme.hermefit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(herme.hermeval(x, coef3), y)
#
coef4 = herme.hermefit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(herme.hermeval(x, coef4), y)
#
coef2d = herme.hermefit(x, np.array([y,y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3,coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
        yw[0::2] = 0
wcoef3 = herme.hermefit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = herme.hermefit(x, np.array([yw,yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T)
class TestGauss(TestCase):
def test_100(self):
x, w = herme.hermegauss(100)
# test orthogonality. Note that the results need to be normalized,
# otherwise the huge values that can arise from fast growing
# functions like Laguerre can be very confusing.
v = herme.hermevander(x, 99)
vv = np.dot(v.T * w, v)
vd = 1/np.sqrt(vv.diagonal())
vv = vd[:,None] * vv * vd
assert_almost_equal(vv, np.eye(100))
# check that the integral of 1 is correct
tgt = np.sqrt(2*np.pi)
assert_almost_equal(w.sum(), tgt)
class TestMisc(TestCase) :
def test_hermefromroots(self) :
res = herme.hermefromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1,5) :
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
pol = herme.hermefromroots(roots)
res = herme.hermeval(roots, pol)
tgt = 0
assert_(len(pol) == i + 1)
assert_almost_equal(herme.herme2poly(pol)[-1], 1)
assert_almost_equal(res, tgt)
def test_hermeroots(self) :
assert_almost_equal(herme.hermeroots([1]), [])
assert_almost_equal(herme.hermeroots([1, 1]), [-1])
for i in range(2,5) :
tgt = np.linspace(-1, 1, i)
res = herme.hermeroots(herme.hermefromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_hermetrim(self) :
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, herme.hermetrim, coef, -1)
# Test results
assert_equal(herme.hermetrim(coef), coef[:-1])
assert_equal(herme.hermetrim(coef, 1), coef[:-3])
assert_equal(herme.hermetrim(coef, 2), [0])
def test_hermeline(self) :
assert_equal(herme.hermeline(3,4), [3, 4])
def test_herme2poly(self) :
for i in range(10) :
assert_almost_equal(herme.herme2poly([0]*i + [1]), Helist[i])
def test_poly2herme(self) :
for i in range(10) :
assert_almost_equal(herme.poly2herme(Helist[i]), [0]*i + [1])
def test_weight(self):
x = np.linspace(-5, 5, 11)
tgt = np.exp(-.5*x**2)
res = herme.hermeweight(x)
assert_almost_equal(res, tgt)
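# --- Editorial note (illustrative addition, not part of the suite) ---
# TestEvaluation relies on the identity 4*He0 + 2*He1 + 3*He2 = 1 + 2*x + 3*x**2
# (He2 = x**2 - 1), which is why c1d = [4., 2., 3.] while the power-basis target
# is polyval(x, [1., 2., 3.]). The helper below spells that out; it is never run.
def _example_basis_identity():
    """Illustrative only: HermiteE coefficients vs power-basis coefficients."""
    x = np.linspace(-1, 1, 7)
    assert_almost_equal(herme.hermeval(x, [4., 2., 3.]), polyval(x, [1., 2., 3.]))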
if __name__ == "__main__":
run_module_suite()
|
Airphrame/mapnik
|
refs/heads/master
|
deps/mapnik/build.py
|
8
|
import os
from glob import glob
Import('env')
subdirs = {
'sparsehash':'sparsehash',
'sparsehash/internal':'sparsehash/internal',
'../agg/include':'agg',
'../clipper/include':'agg'
}
if 'install' in COMMAND_LINE_TARGETS:
for k,v in subdirs.items():
pathdir = os.path.join(k,'*')
includes = glob(pathdir)
inc_target = os.path.normpath(env['INSTALL_PREFIX']+'/include/mapnik/'+v)
env.Alias(target='install', source=env.Install(inc_target, includes))
|
40223101/w17test
|
refs/heads/master
|
static/Brython3.1.3-20150514-095342/Lib/_dummy_thread.py
|
742
|
"""Drop-in replacement for the thread module.
Meant to be used as a brain-dead substitute so that threaded code does
not need to be rewritten for when the thread module is not present.
Suggested usage is::
try:
import _thread
except ImportError:
import _dummy_thread as _thread
"""
# Exports only things specified by thread documentation;
# skipping obsolete synonyms allocate(), start_new(), exit_thread().
__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
'interrupt_main', 'LockType']
# A dummy value
TIMEOUT_MAX = 2**31
# NOTE: this module can be imported early in the extension building process,
# and so top level imports of other modules should be avoided. Instead, all
# imports are done when needed on a function-by-function basis. Since threads
# are disabled, the import lock should not be an issue anyway (??).
error = RuntimeError
def start_new_thread(function, args, kwargs={}):
"""Dummy implementation of _thread.start_new_thread().
Compatibility is maintained by making sure that ``args`` is a
tuple and ``kwargs`` is a dictionary. If an exception is raised
and it is SystemExit (which can be done by _thread.exit()) it is
caught and nothing is done; all other exceptions are printed out
by using traceback.print_exc().
If the executed function calls interrupt_main the KeyboardInterrupt will be
raised when the function returns.
"""
if type(args) != type(tuple()):
raise TypeError("2nd arg must be a tuple")
if type(kwargs) != type(dict()):
raise TypeError("3rd arg must be a dict")
global _main
_main = False
try:
function(*args, **kwargs)
except SystemExit:
pass
except:
import traceback
traceback.print_exc()
_main = True
global _interrupt
if _interrupt:
_interrupt = False
raise KeyboardInterrupt
def exit():
"""Dummy implementation of _thread.exit()."""
raise SystemExit
def get_ident():
"""Dummy implementation of _thread.get_ident().
Since this module should only be used when _threadmodule is not
available, it is safe to assume that the current process is the
only thread. Thus a constant can be safely returned.
"""
return -1
def allocate_lock():
"""Dummy implementation of _thread.allocate_lock()."""
return LockType()
def stack_size(size=None):
"""Dummy implementation of _thread.stack_size()."""
if size is not None:
raise error("setting thread stack size not supported")
return 0
class LockType(object):
"""Class implementing dummy implementation of _thread.LockType.
Compatibility is maintained by maintaining self.locked_status
which is a boolean that stores the state of the lock. Pickling of
the lock, though, should not be done since if the _thread module is
then used with an unpickled ``lock()`` from here problems could
occur from this class not having atomic methods.
"""
def __init__(self):
self.locked_status = False
def acquire(self, waitflag=None, timeout=-1):
"""Dummy implementation of acquire().
For blocking calls, self.locked_status is automatically set to
True and returned appropriately based on value of
``waitflag``. If it is non-blocking, then the value is
actually checked and not set if it is already acquired. This
is all done so that threading.Condition's assert statements
aren't triggered and throw a little fit.
"""
if waitflag is None or waitflag:
self.locked_status = True
return True
else:
if not self.locked_status:
self.locked_status = True
return True
else:
if timeout > 0:
import time
time.sleep(timeout)
return False
__enter__ = acquire
def __exit__(self, typ, val, tb):
self.release()
def release(self):
"""Release the dummy lock."""
# XXX Perhaps shouldn't actually bother to test? Could lead
# to problems for complex, threaded code.
if not self.locked_status:
raise error
self.locked_status = False
return True
def locked(self):
return self.locked_status
# Used to signal that interrupt_main was called in a "thread"
_interrupt = False
# True when not executing in a "thread"
_main = True
def interrupt_main():
"""Set _interrupt flag to True to have start_new_thread raise
KeyboardInterrupt upon exiting."""
if _main:
raise KeyboardInterrupt
else:
global _interrupt
_interrupt = True
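# --- Usage sketch (editorial addition, illustrative only) ---
# The dummy lock mirrors the _thread.LockType calling conventions, so code
# written against the real module keeps working when threads are unavailable.
# The function below is only defined as an example; nothing runs at import.
def _example_lock_usage():
    """Illustrative only: exercise the dummy lock both ways."""
    lock = allocate_lock()
    if lock.acquire(0):      # non-blocking acquire succeeds while the lock is free
        lock.release()
    with lock:               # context-manager form via __enter__/__exit__
        pass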
|
bgris/ODL_bgris
|
refs/heads/master
|
lib/python3.5/site-packages/networkx/algorithms/isomorphism/isomorph.py
|
30
|
"""
Graph isomorphism functions.
"""
import networkx as nx
from networkx.exception import NetworkXError
__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
'Pieter Swart (swart@lanl.gov)',
                            'Christopher Ellison (cellison@cse.ucdavis.edu)'])
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['could_be_isomorphic',
'fast_could_be_isomorphic',
'faster_could_be_isomorphic',
'is_isomorphic']
def could_be_isomorphic(G1,G2):
"""Returns False if graphs are definitely not isomorphic.
True does NOT guarantee isomorphism.
Parameters
----------
G1, G2 : graphs
The two graphs G1 and G2 must be the same type.
Notes
-----
Checks for matching degree, triangle, and number of cliques sequences.
"""
# Check global properties
if G1.order() != G2.order(): return False
# Check local properties
d1=G1.degree()
t1=nx.triangles(G1)
c1=nx.number_of_cliques(G1)
props1=[ [d1[v], t1[v], c1[v]] for v in d1 ]
props1.sort()
d2=G2.degree()
t2=nx.triangles(G2)
c2=nx.number_of_cliques(G2)
props2=[ [d2[v], t2[v], c2[v]] for v in d2 ]
props2.sort()
if props1 != props2:
return False
# OK...
return True
graph_could_be_isomorphic=could_be_isomorphic
def fast_could_be_isomorphic(G1,G2):
"""Returns False if graphs are definitely not isomorphic.
True does NOT guarantee isomorphism.
Parameters
----------
G1, G2 : graphs
The two graphs G1 and G2 must be the same type.
Notes
-----
Checks for matching degree and triangle sequences.
"""
# Check global properties
if G1.order() != G2.order(): return False
# Check local properties
d1=G1.degree()
t1=nx.triangles(G1)
props1=[ [d1[v], t1[v]] for v in d1 ]
props1.sort()
d2=G2.degree()
t2=nx.triangles(G2)
props2=[ [d2[v], t2[v]] for v in d2 ]
props2.sort()
if props1 != props2: return False
# OK...
return True
fast_graph_could_be_isomorphic=fast_could_be_isomorphic
def faster_could_be_isomorphic(G1,G2):
"""Returns False if graphs are definitely not isomorphic.
True does NOT guarantee isomorphism.
Parameters
----------
G1, G2 : graphs
The two graphs G1 and G2 must be the same type.
Notes
-----
Checks for matching degree sequences.
"""
# Check global properties
if G1.order() != G2.order(): return False
# Check local properties
d1=list(G1.degree().values())
d1.sort()
d2=list(G2.degree().values())
d2.sort()
if d1 != d2: return False
# OK...
return True
faster_graph_could_be_isomorphic=faster_could_be_isomorphic
def is_isomorphic(G1, G2, node_match=None, edge_match=None):
"""Returns True if the graphs G1 and G2 are isomorphic and False otherwise.
Parameters
----------
G1, G2: graphs
The two graphs G1 and G2 must be the same type.
node_match : callable
A function that returns True if node n1 in G1 and n2 in G2 should
be considered equal during the isomorphism test.
If node_match is not specified then node attributes are not considered.
The function will be called like
node_match(G1.node[n1], G2.node[n2]).
That is, the function will receive the node attribute dictionaries
for n1 and n2 as inputs.
edge_match : callable
A function that returns True if the edge attribute dictionary
for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should
be considered equal during the isomorphism test. If edge_match is
not specified then edge attributes are not considered.
The function will be called like
edge_match(G1[u1][v1], G2[u2][v2]).
That is, the function will receive the edge attribute dictionaries
of the edges under consideration.
Notes
-----
Uses the vf2 algorithm [1]_.
Examples
--------
>>> import networkx.algorithms.isomorphism as iso
For digraphs G1 and G2, using 'weight' edge attribute (default: 1)
>>> G1 = nx.DiGraph()
>>> G2 = nx.DiGraph()
>>> G1.add_path([1,2,3,4],weight=1)
>>> G2.add_path([10,20,30,40],weight=2)
>>> em = iso.numerical_edge_match('weight', 1)
>>> nx.is_isomorphic(G1, G2) # no weights considered
True
>>> nx.is_isomorphic(G1, G2, edge_match=em) # match weights
False
For multidigraphs G1 and G2, using 'fill' node attribute (default: '')
>>> G1 = nx.MultiDiGraph()
>>> G2 = nx.MultiDiGraph()
>>> G1.add_nodes_from([1,2,3],fill='red')
>>> G2.add_nodes_from([10,20,30,40],fill='red')
>>> G1.add_path([1,2,3,4],weight=3, linewidth=2.5)
>>> G2.add_path([10,20,30,40],weight=3)
>>> nm = iso.categorical_node_match('fill', 'red')
>>> nx.is_isomorphic(G1, G2, node_match=nm)
True
For multidigraphs G1 and G2, using 'weight' edge attribute (default: 7)
>>> G1.add_edge(1,2, weight=7)
>>> G2.add_edge(10,20)
>>> em = iso.numerical_multiedge_match('weight', 7, rtol=1e-6)
>>> nx.is_isomorphic(G1, G2, edge_match=em)
True
For multigraphs G1 and G2, using 'weight' and 'linewidth' edge attributes
with default values 7 and 2.5. Also using 'fill' node attribute with
default value 'red'.
>>> em = iso.numerical_multiedge_match(['weight', 'linewidth'], [7, 2.5])
>>> nm = iso.categorical_node_match('fill', 'red')
>>> nx.is_isomorphic(G1, G2, edge_match=em, node_match=nm)
True
See Also
--------
numerical_node_match, numerical_edge_match, numerical_multiedge_match
categorical_node_match, categorical_edge_match, categorical_multiedge_match
References
----------
.. [1] L. P. Cordella, P. Foggia, C. Sansone, M. Vento,
"An Improved Algorithm for Matching Large Graphs",
3rd IAPR-TC15 Workshop on Graph-based Representations in
Pattern Recognition, Cuen, pp. 149-159, 2001.
http://amalfi.dis.unina.it/graph/db/papers/vf-algorithm.pdf
"""
if G1.is_directed() and G2.is_directed():
GM = nx.algorithms.isomorphism.DiGraphMatcher
elif (not G1.is_directed()) and (not G2.is_directed()):
GM = nx.algorithms.isomorphism.GraphMatcher
else:
raise NetworkXError("Graphs G1 and G2 are not of the same type.")
gm = GM(G1, G2, node_match=node_match, edge_match=edge_match)
return gm.is_isomorphic()
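# --- Usage sketch (editorial addition, illustrative only) ---
# The could_be_isomorphic family only compares cheap graph invariants, while
# is_isomorphic runs the full VF2 match. The helper below is defined purely as
# an example and is never executed on import.
def _example_invariants_vs_vf2():
    """Illustrative only: same degree sequence, yet not isomorphic."""
    G1 = nx.cycle_graph(6)                                        # one 6-cycle
    G2 = nx.disjoint_union(nx.cycle_graph(3), nx.cycle_graph(3))  # two triangles
    assert faster_could_be_isomorphic(G1, G2)   # degree sequences match
    assert not is_isomorphic(G1, G2)            # VF2 tells them apart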
|
balloob/home-assistant
|
refs/heads/dev
|
homeassistant/components/mobile_app/const.py
|
12
|
"""Constants for mobile_app."""
DOMAIN = "mobile_app"
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
CONF_CLOUDHOOK_URL = "cloudhook_url"
CONF_REMOTE_UI_URL = "remote_ui_url"
CONF_SECRET = "secret"
CONF_USER_ID = "user_id"
DATA_BINARY_SENSOR = "binary_sensor"
DATA_CONFIG_ENTRIES = "config_entries"
DATA_DELETED_IDS = "deleted_ids"
DATA_DEVICES = "devices"
DATA_SENSOR = "sensor"
DATA_STORE = "store"
ATTR_APP_DATA = "app_data"
ATTR_APP_ID = "app_id"
ATTR_APP_NAME = "app_name"
ATTR_APP_VERSION = "app_version"
ATTR_CONFIG_ENTRY_ID = "entry_id"
ATTR_DEVICE_ID = "device_id"
ATTR_DEVICE_NAME = "device_name"
ATTR_MANUFACTURER = "manufacturer"
ATTR_MODEL = "model"
ATTR_OS_NAME = "os_name"
ATTR_OS_VERSION = "os_version"
ATTR_PUSH_TOKEN = "push_token"
ATTR_PUSH_URL = "push_url"
ATTR_PUSH_RATE_LIMITS = "rateLimits"
ATTR_PUSH_RATE_LIMITS_ERRORS = "errors"
ATTR_PUSH_RATE_LIMITS_MAXIMUM = "maximum"
ATTR_PUSH_RATE_LIMITS_RESETS_AT = "resetsAt"
ATTR_PUSH_RATE_LIMITS_SUCCESSFUL = "successful"
ATTR_SUPPORTS_ENCRYPTION = "supports_encryption"
ATTR_EVENT_DATA = "event_data"
ATTR_EVENT_TYPE = "event_type"
ATTR_TEMPLATE = "template"
ATTR_TEMPLATE_VARIABLES = "variables"
ATTR_SPEED = "speed"
ATTR_ALTITUDE = "altitude"
ATTR_COURSE = "course"
ATTR_VERTICAL_ACCURACY = "vertical_accuracy"
ATTR_WEBHOOK_DATA = "data"
ATTR_WEBHOOK_ENCRYPTED = "encrypted"
ATTR_WEBHOOK_ENCRYPTED_DATA = "encrypted_data"
ATTR_WEBHOOK_TYPE = "type"
ERR_ENCRYPTION_ALREADY_ENABLED = "encryption_already_enabled"
ERR_ENCRYPTION_NOT_AVAILABLE = "encryption_not_available"
ERR_ENCRYPTION_REQUIRED = "encryption_required"
ERR_SENSOR_NOT_REGISTERED = "not_registered"
ERR_INVALID_FORMAT = "invalid_format"
ATTR_SENSOR_ATTRIBUTES = "attributes"
ATTR_SENSOR_DEVICE_CLASS = "device_class"
ATTR_SENSOR_ICON = "icon"
ATTR_SENSOR_NAME = "name"
ATTR_SENSOR_STATE = "state"
ATTR_SENSOR_TYPE = "type"
ATTR_SENSOR_TYPE_BINARY_SENSOR = "binary_sensor"
ATTR_SENSOR_TYPE_SENSOR = "sensor"
ATTR_SENSOR_UNIQUE_ID = "unique_id"
ATTR_SENSOR_UOM = "unit_of_measurement"
SIGNAL_SENSOR_UPDATE = f"{DOMAIN}_sensor_update"
SIGNAL_LOCATION_UPDATE = DOMAIN + "_location_update_{}"
ATTR_CAMERA_ENTITY_ID = "camera_entity_id"
|
morucci/repoxplorer
|
refs/heads/master
|
bin/bench/fake-commit-gen.py
|
1
|
#!/usr/bin/python
# Copyright 2016, Fabien Boucher
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
import random
import hashlib
from repoxplorer.index.commits import Commits
from repoxplorer import index
epoch_start = 1476633000
def create_random_str(length=6):
    value = "".join([random.choice(string.ascii_lowercase)
                     for _ in range(length)])
return value
def gen_emails(amount):
ret = []
for i in range(amount):
email = "%s@%s.%s" % (
create_random_str(8),
create_random_str(5),
create_random_str(3),
)
name = "%s %s" % (
create_random_str(8),
create_random_str(8),
)
ret.append((name, email))
return ret
def gen_commit_msg():
return " ".join([create_random_str(random.randint(0, 10))
for _ in range(5)])
def gen_fake_commits(amount=10000):
print("Start generation of %s fake commits" % amount)
email_amount = amount * 0.03
email_amount = int(email_amount)
if not email_amount:
email_amount = 1
emails = gen_emails(email_amount)
project = '%s:%s:%s' % (
'https://github.com/openstack/test',
'test', 'master')
ret = []
for i in range(amount):
author_date = random.randint(
epoch_start, epoch_start + 1000000)
author = emails[random.randint(0, email_amount - 1)]
committer = emails[random.randint(0, email_amount - 1)]
c = {}
c['sha'] = hashlib.sha256(create_random_str(10)).hexdigest()
c['author_name'] = author[0]
c['committer_name'] = committer[0]
c['author_email'] = author[1]
c['committer_email'] = committer[1]
c['author_date'] = author_date
c['committer_date'] = random.randint(
author_date + 1, author_date + 10000)
c['ttl'] = random.randint(0, 10000)
c['commit_msg'] = gen_commit_msg()
c['line_modifieds'] = random.randint(0, 10000)
c['merge_commit'] = False
c['projects'] = [project, ]
ret.append(c)
print("Generation of %s fake commits done." % amount)
return ret
if __name__ == '__main__':
amount = 100000
c = Commits(index.Connector())
c.add_commits(gen_fake_commits(amount))
print("Indexation done.")
|
gravyboat/streamlink
|
refs/heads/master
|
tests/plugins/test_goltelevision.py
|
7
|
import unittest
from streamlink.plugins.goltelevision import GOLTelevision
class TestPluginGOLTelevision(unittest.TestCase):
def test_can_handle_url(self):
# should match
self.assertTrue(GOLTelevision.can_handle_url("http://www.goltelevision.com/live"))
self.assertTrue(GOLTelevision.can_handle_url("http://goltelevision.com/live"))
self.assertTrue(GOLTelevision.can_handle_url("https://goltelevision.com/live"))
self.assertTrue(GOLTelevision.can_handle_url("https://www.goltelevision.com/live"))
# shouldn't match
self.assertFalse(GOLTelevision.can_handle_url("http://www.tvcatchup.com/"))
self.assertFalse(GOLTelevision.can_handle_url("http://www.youtube.com/"))
|
abhi12ravi/newflaskapp
|
refs/heads/master
|
routes.py
|
1
|
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def home():
return render_template('home.html')
@app.route('/about')
def about():
return render_template('about.html')
if __name__ == '__main__':
app.run(debug=True)
|
mramire8/active
|
refs/heads/master
|
datautil/load_data.py
|
1
|
__author__ = 'mramire8'
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.datasets import load_files
from sklearn.datasets import fetch_20newsgroups
from sklearn.cross_validation import train_test_split, ShuffleSplit
import numpy as np
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import XMLParser
from goose import Goose
from lxml import etree
from bs4 import BeautifulSoup
from boilerpipe.extract import Extractor
from os import listdir
# import bunch
# from bunch import Bunch
from sklearn.datasets import base as bunch
import os
import pickle
import json
from sklearn.utils.validation import check_random_state
if "nt" in os.name:
IMDB_HOME = 'C:/Users/mramire8/Documents/Research/Oracle confidence and Interruption/dataset/aclImdb/raw-data'
AVI_HOME = 'C:/Users/mramire8/Documents/Research/Oracle confidence and Interruption/dataset/sraa/sraa/sraa/partition1/data'
# AVI_HOME = 'C:/Users/mramire8/Documents/Research/Oracle confidence and Interruption/dataset/sraa/sraa/sraa/partition1/dummy'
TWITTER_HOME="C:/Users/mramire8/Documents/Datasets/twitter"
else:
IMDB_HOME = '/Users/maru/Dataset/aclImdb'
AVI_HOME = '/Users/maru/Dataset/aviation/data'
TWITTER_HOME="/Users/maru/Dataset/twitter"
def keep_header_subject(text, keep_subject=False):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
sub = [l for l in _before.split("\n") if "Subject:" in l]
if keep_subject:
final = sub[0] + "\n" + after
else:
final = after
return final
def load_20newsgroups(categories=None, vectorizer=CountVectorizer(min_df=5, max_df=1.0, binary=False), min_size=None,
fix_k=None, raw=False):
print "Loading 20 newsgroups dataset for categories:", categories
data = bunch.Bunch()
data.train = fetch_20newsgroups(subset='train', categories=categories, remove=('headers','footers', 'quotes'),
shuffle=True, random_state=42)
data.train.data = [keep_header_subject(text) for text in data.train.data]
data.test = fetch_20newsgroups(subset='test', categories=categories, remove=('headers','footers', 'quotes'),
shuffle=True, random_state=42)
data.test.data = [keep_header_subject(text) for text in data.test.data]
print 'data loaded'
categories = data.train.target_names
print "%d categories" % len(categories)
print
if not raw:
data = process_data(data, fix_k, min_size, vectorizer)
return data
def load_imdb(path, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False):
"""
load text files from IMDB movie reviews from folders to memory
:param path: path of the root directory of the data
:param subset: what data will be loaded, train or test or all
:param shuffle:
    :param rnd: random seed value
:param vct: vectorizer
:return: :raise ValueError:
"""
#analizer = vct.build_tokenizer()
# C:\Users\mramire8\Documents\Research\Oracle confidence and Interruption\dataset\aclImdb\raw-data
data = bunch.Bunch()
if subset in ('train', 'test'):
data[subset] = load_files("{0}/{1}".format(IMDB_HOME, subset), encoding="latin-1", load_content=True,
random_state=rnd)
elif subset == "all":
data["train"] = load_files("{0}/{1}".format(IMDB_HOME, "train"), encoding="latin-1", load_content=True,
random_state=rnd)
data["test"] = load_files("{0}/{1}".format(IMDB_HOME, "test"), encoding="latin-1", load_content=True,
random_state=rnd)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
if not raw:
data = process_data(data, fix_k, min_size, vct)
return data
def load_aviation(path, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False, percent=.5):
"""
    load text files from the Aviation-auto (SRAA) dataset from folders to memory. It will return a train-test split whose test fraction is given by the ``percent`` argument (default 0.5)
:param path: path of the root directory of the data
:param subset: what data will be loaded, train or test or all
:param shuffle:
:param rnd: random seed value
:param vct: vectorizer
:return: :raise ValueError:
"""
data = bunch.Bunch()
if subset in ('train', 'test'):
# data[subset] = load_files("{0}/{1}".format(AVI_HOME, subset), encoding="latin1", load_content=True,
# random_state=rnd)
raise Exception("We are not ready for train test aviation data yet")
elif subset == "all":
data = load_files(AVI_HOME, encoding="latin1", load_content=True,
random_state=rnd)
data.data = [keep_header_subject(text) for text in data.data]
# data["test"] = load_files("{0}/{1}".format(AVI_HOME, "test"), encoding="latin1", load_content=True,
# random_state=rnd)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
# train_x, test_x, train_y, test_y = train_test_split(data.data, data.target, test_size=0.25,
# random_state=rnd)
indices = ShuffleSplit(len(data.data), n_iter=1, test_size=percent, random_state=rnd)
for train_ind, test_ind in indices:
data = bunch.Bunch(train=bunch.Bunch(data=[data.data[i] for i in train_ind], target=data.target[train_ind],
target_names=data.target_names),
test=bunch.Bunch(data=[data.data[i] for i in test_ind], target=data.target[test_ind],
target_names=data.target_names))
# if shuffle:
# random_state = np.random.RandomState(rnd)
# indices = np.arange(data.train.target.shape[0])
# random_state.shuffle(indices)
# data.train.filenames = data.train.filenames[indices]
# data.train.target = data.train.target[indices]
# # Use an object array to shuffle: avoids memory copy
# data_lst = np.array(data.train.data, dtype=object)
# data_lst = data_lst[indices]
# data.train.data = data_lst.tolist()
if not raw:
data = process_data(data, fix_k, min_size, vct)
return data
## convert the tweet into a data format of text documents
# from sklearn.datasets.base import Bunch
def preprocess(string, lowercase, collapse_urls, collapse_mentions):
import re
if not string:
return ""
if lowercase:
string = string.lower()
# tokens = []
if collapse_urls:
string = re.sub('http\S+', 'THIS_IS_A_URL', string)
if collapse_mentions:
string = re.sub('@\S+', 'THIS_IS_A_MENTION', string)
# if prefix:
# tokens = ['%s%s' % (prefix, t) for t in tokens]
return string
def timeline_to_doc(user, *args):
tweets = []
for tw in user:
tweets.append(preprocess(tw['text'], *args))
return tweets
def user_to_doc(users, *args):
timeline = []
user_names = []
user_id = []
for user in users:
timeline.append(timeline_to_doc(user, *args))
user_names.append(user[0]['user']['name'])
user_id.append(user[0]['user']['screen_name'])
return user_id, user_names, timeline
def bunch_users(class1, class2, vct, lowercase, collapse_urls, collapse_mentions, rnd, class_name=None):
labels = None
if labels is None:
labels = [0,1]
user_id, user_names, timeline = user_to_doc(class1, lowercase, collapse_urls, collapse_mentions)
user_id2, user_names2, timeline2 = user_to_doc(class2, lowercase, collapse_urls, collapse_mentions)
target = [labels[0]] * len(user_id)
user_id.extend(user_id2)
user_names.extend(user_names2)
timeline.extend(timeline2)
target.extend([labels[1]] * len(user_id2))
user_text = ["######".join(t) for t in timeline]
data = bunch.Bunch(data=user_text, target=target, user_id=user_id,
user_name=user_names, user_timeline=timeline)
# data = {'data':timeline, 'target':np.array(target), 'user_id':user_id, 'user_name':user_names, 'user_text':user_text}
random_state = np.random.RandomState(rnd)
indices = np.arange(len(data.target))
random_state.shuffle(indices)
data.target = np.array(data.target)[indices]
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
data.user_id = np.array(data.user_id)[indices]
data.user_name = np.array(data.user_name)[indices]
data.user_timeline = np.array(data.user_timeline)[indices]
data.target_names = class_name
return data
import datetime
def get_date(date_str):
return datetime.datetime.strptime(date_str.strip('"'), "%a %b %d %H:%M:%S +0000 %Y")
def convert_tweet_2_data(data_path, vct, rnd):
"""
Convert tweet time lines into dataset
:param data_path:
:param vct:
:return: bunch.Bunch
Bunch with the data in train and test from twitter bots and human accounts
"""
good = get_tweets_file(data_path + "/good.json")
print "Real users %s" % (len(good))
bots = get_tweets_file(data_path + "/bots.json")
print "Bot users %s" % (len(bots))
gds = [g for g in good if get_date(g[0]['created_at']).year > 2013]
bts = [b for b in bots if get_date(b[0]['created_at']).year > 2013]
data = bunch_users(gds,bts, vct, True, True, True, rnd, class_name=['good', 'bots'])
return data
def get_tweets_file(path):
f = open(path)
i = 0
users = []
data=[]
last = 0
for line in f:
data = line.split("]][[")
last = len(data)
for i,tweets in enumerate(data):
if i == 0:
t = json.loads(tweets[1:] + "]")
elif i == (last-1):
t = json.loads("["+tweets[:-1])
else:
t = json.loads("["+tweets+"]")
users.append(t)
return users
def load_twitter(path, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False, percent=.5):
"""
load text files from twitter data
:param path: path of the root directory of the data
:param subset: what data will be loaded, train or test or all
:param shuffle:
:param rnd: random seed value
:param vct: vectorizer
:return: :raise ValueError:
"""
data = bunch.Bunch()
if subset in ('train', 'test'):
raise Exception("We are not ready for train test aviation data yet")
elif subset == "all":
data = convert_tweet_2_data(TWITTER_HOME, vct, rnd)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
indices = ShuffleSplit(len(data.data), n_iter=1, test_size=percent, random_state=rnd)
for train_ind, test_ind in indices:
data = bunch.Bunch(train=bunch.Bunch(data=[data.data[i] for i in train_ind], target=data.target[train_ind],
target_names=data.target_names),
test=bunch.Bunch(data=[data.data[i] for i in test_ind], target=data.target[test_ind],
target_names=data.target_names))
# if shuffle:
# random_state = np.random.RandomState(rnd)
# indices = np.arange(data.train.target.shape[0])
# random_state.shuffle(indices)
# data.train.target = data.train.target[indices]
# # Use an object array to shuffle: avoids memory copy
# data_lst = np.array(data.train.data, dtype=object)
# data_lst = data_lst[indices]
# data.train.data = data_lst.tolist()
if not raw:
data = process_data(data, fix_k, min_size, vct)
return data
ARXIV_HOME = 'C:/Users/mramire8/Documents/Datasets/arxiv'
def load_arxiv(path, categories=None, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False, percent=.5):
"""
    load text files from the arXiv dataset from folders to memory. It will return a train-test split whose test fraction is given by the ``percent`` argument (default 0.5)
:param path: path of the root directory of the data
:param subset: what data will be loaded, train or test or all
:param shuffle:
:param rnd: random seed value
:param vct: vectorizer
:return: :raise ValueError:
"""
data = bunch.Bunch()
if subset in ('train', 'test'):
raise Exception("We are not ready for train test arxiv data yet")
elif subset == "all":
data = load_files(ARXIV_HOME, encoding="latin1", load_content=True,
random_state=rnd, categories=categories)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
indices = ShuffleSplit(len(data.data), n_iter=1, test_size=percent, random_state=rnd)
for train_ind, test_ind in indices:
data = bunch.Bunch(train=bunch.Bunch(data=[data.data[i] for i in train_ind], target=data.target[train_ind],
target_names=data.target_names),
test=bunch.Bunch(data=[data.data[i] for i in test_ind], target=data.target[test_ind],
target_names=data.target_names))
if not raw:
data = process_data(data, fix_k, min_size, vct)
return data
def load_dummy(path, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False):
"""
load text files from IMDB movie reviews from folders to memory
:param path: path of the root directory of the data
:param subset: what data will be loaded, train or test or all
:param shuffle:
:param rnd: random seed value
:param vct: vectorizer
:return: :raise ValueError:
"""
data = bunch.Bunch()
if subset in ('train', 'test'):
data[subset] = load_files("{0}/{1}".format(path, subset), charset="latin1", load_content=True, random_state=rnd)
elif subset == "all":
data["train"] = load_files("{0}/{1}".format(path, "train"), charset="latin1", load_content=True,
random_state=rnd)
data["test"] = load_files("{0}/{1}".format(path, "test"), charset="latin1", load_content=True, random_state=rnd)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
if not raw:
data = process_data(data, fix_k, min_size, vct)
return data
def process_data(data, fix_k, min_size, vct, silent=True):
# create a fixed k version of the data
analizer = vct.build_tokenizer()
fixk = bunch.Bunch()
fixk.all = data.train.data
if fix_k is not None:
# TODO check the size by simple split or by analizer?
fixk.kwords = [" ".join(analizer(doc)[0:fix_k]) for doc in data.train.data]
#fixk.kwords = [" ".join(doc.split(" ")[0:fix_k]) for doc in data.train.data]
else:
fixk.kwords = data.train.data
print "Total Documents: %s" % len(fixk.kwords) if silent else ""
fixk.target = data.train.target
print "Minimum size: %s" % min_size if silent else ""
if min_size is not None:
filtered = [(x, y, z) for x, y, z in zip(data.train.data, fixk.kwords, fixk.target)
if len(analizer(x)) >= min_size]
fixk.all = [x[0] for x in filtered] # all words
fixk.kwords = [x[1] for x in filtered] # k words
fixk.target = np.array([x[2] for x in filtered], dtype=int) # targets
print "Fix k: %s" % fix_k if silent else ""
print "Docs left: %s" % len(fixk.all) if silent else ""
print "Vectorizing ..." if silent else ""
# add the target values
# add a field for the vectorized data
data.train.data = fixk.all # raw documents
try:
data.train.bow = vct.transform(fixk.all) # docs with all the words bow
except ValueError:
data.train.bow = vct.fit_transform(fixk.all) # docs with all the words bow
data.train.bowk = vct.transform(fixk.kwords) # docs with k words bow
data.train.kwords = fixk.kwords # docs with k words
data.train.target = fixk.target
    data.test.bow = vct.transform(data.test.data)  # vectorize the test documents
return data
def load_dataset(name, fixk, categories, vct, min_size, raw=False, percent=.5):
data = bunch.Bunch()
if "imdb" in name:
########## IMDB MOVIE REVIEWS ###########
        # data = bunch.Bunch(load_imdb(name, shuffle=True, rnd=2356, vct=vct, min_size=min_size, fix_k=fixk, raw=raw)) # should bring data as is
        data = load_imdb(name, shuffle=True, rnd=2356, vct=vct, min_size=min_size,
                         fix_k=fixk, raw=raw)  # should bring data as is
elif "aviation" in name:
########## sraa dataset ######
data = load_aviation(name, shuffle=True, rnd=2356, vct=vct, min_size=min_size,
fix_k=fixk, raw=raw, percent=percent)
elif "arxiv" in name:
########## sraa dataset ######
data = load_arxiv(name, categories=categories, shuffle=True, rnd=2356, vct=vct, min_size=None,
fix_k=None, raw=raw, percent=percent)
elif "20news" in name:
########## 20 news groups ######
data = load_20newsgroups(categories=categories, vectorizer=vct, min_size=min_size,
fix_k=fixk, raw=raw)
elif "bgender" in name:
########## 20 news groups ######
data = load_bloggender(name, shuffle=True, rnd=2356, vct=vct, min_size=min_size,
fix_k=fixk, raw=raw, percent=percent)
elif "gmo" in name:
########## article pro-con gmo ######
data = load_gmo(name, shuffle=True, rnd=2356, vct=vct, min_size=min_size,
fix_k=fixk, raw=raw, percent=percent)
elif "evergreen" in name:
########## evergreen content blogs ######
data = load_evergreen(name, shuffle=True, rnd=2356, vct=vct, min_size=min_size,
fix_k=fixk, raw=raw, percent=percent)
elif "pan" in name:
########## author gender classification from blogs ######
data = load_blogpan(name, shuffle=True, rnd=2356, vct=vct, min_size=min_size,
fix_k=fixk, raw=raw, percent=percent)
elif "webkb" in name:
# raise Exception("We are not ready for that data yet")
data = load_webkb(name, categories=categories, shuffle=True, rnd=2356, vct=vct, min_size=min_size,
fix_k=fixk, raw=raw, percent=percent)
elif "biocreative" in name:
# raise Exception("We are not ready for that data yet")
data = load_biocreative(name, shuffle=True, rnd=2356, vct=vct, min_size=min_size,
fix_k=fixk, raw=raw, percent=percent)
elif "twitter" in name:
# raise Exception("We are not ready for that data yet")
data = load_twitter(name, shuffle=True, rnd=2356, vct=vct, min_size=min_size,
fix_k=fixk, raw=raw, percent=percent)
elif "dummy" in name:
########## DUMMY DATA###########
data = load_dummy("C:/Users/mramire8/Documents/code/python/data/dummy", shuffle=True, rnd=2356,
vct=vct, min_size=0, fix_k=fixk, raw=raw)
else:
raise Exception("We do not know that dataset")
return data
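# Illustrative sketch only: the calling convention for load_dataset. The dataset
# name, categories and vectorizer settings are assumptions, and most loaders
# above depend on hard-coded, machine-specific paths, so treat this as a sketch
# rather than an end-to-end runnable script.
def _example_load_dataset_usage():
    vct = CountVectorizer(encoding='latin1', min_df=5)
    data = load_dataset("20news", fixk=25,
                        categories=['alt.atheism', 'talk.religion.misc'],
                        vct=vct, min_size=10, raw=False, percent=.5)
    # data.train.bow / data.train.bowk / data.test.bow feed directly into a classifier
    return data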
def load_dictionary(datafile=""):
f = open(datafile)
with f:
line = f.readlines()
line = [l.strip() for l in line]
return line
def load_documents(datafile="", header=True):
f = open(datafile)
feature_names = []
if header:
feature_names = f.readline().split() # skip the header
# print ('HEADER NAMES: \n %s' % feature_names)
docs = []
with f:
# uniqueid truelabel text words seenwords avgtime
line = f.readlines()
docs = [l.strip().split('\t') for l in line]
#b = [ai for ai in a if ai % 2 == 0] # another way to do filtering when loading the datasets
return docs, feature_names
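# Illustrative sketch: load_documents expects a tab-separated file whose header
# (when present) is only skipped, not validated. The file name below is a
# hypothetical placeholder.
def _example_load_documents_usage():
    docs, header = load_documents("annotated_docs.tsv", header=True)
    # each entry of docs is one row split on tabs, e.g. [uniqueid, truelabel, text, ...]
    first_row = docs[0] if docs else []
    return header, first_row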
def load_from_file(train, categories, fixk, min_size, vct, raw=True):
fixk_saved = "{0}-MIN{1}.p".format(train, min_size)
try:
print "Loading existing file... %s " % train
fixk_file = open(fixk_saved, "rb")
data = pickle.load(fixk_file)
fixk_file.close()
# vectorizer = open("{0}vectorizer.p".format(train), "rb")
# vct = pickle.load(vectorizer)
# vectorizer.close()
except (IOError, ValueError):
print "Loading from scratch..."
data = load_dataset(train, fixk, categories[0], vct, min_size, percent=.5)
fixk_file = open(fixk_saved, "wb")
pickle.dump(data, fixk_file)
fixk_file.close()
# vectorizer = open("{0}vectorizer.p".format(train), "wb")
# pickle.dump(vct, vectorizer)
# vectorizer.close()
return data, vct
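# Illustrative sketch of the caching behaviour above: the first call builds
# "<train>-MIN<min_size>.p" via load_dataset and pickles it; later calls simply
# unpickle that file. The dataset name and parameters are assumptions.
def _example_load_from_file_usage():
    vct = CountVectorizer(min_df=5)
    data, vct = load_from_file("imdb", [None], fixk=25, min_size=10, vct=vct)
    return data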
BLOGGEN_HOME = "C:/Users/mramire8/Documents/Datasets/textclassification/raw data/author-profiling-gender/gender/blog-gender-dataset.tsv"
def load_bloggender(path, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False, percent=.5):
import csv
docs = []
labels = []
clases = ['F', 'M']
with open(BLOGGEN_HOME, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='"')
for row in reader:
if len(row[0])>0 and len(row[1])>0:
docs.append(row[0])
labels.append(clases.index(row[1].strip().upper()))
data = bunch.Bunch()
data.data = docs
data.target=np.array(labels)
indices = ShuffleSplit(len(data.data), n_iter=1, test_size=percent, random_state=rnd)
for train_ind, test_ind in indices:
data = bunch.Bunch(train=bunch.Bunch(data=[data.data[i] for i in train_ind], target=data.target[train_ind], target_names=clases),
test=bunch.Bunch(data=[data.data[i] for i in test_ind], target=data.target[test_ind], target_names=clases))
if not raw:
data = process_data(data, fix_k, min_size, vct)
return data
PAN13_HOME = "C:/Users/mramire8/Documents/Datasets/textclassification/raw data/author-profiling-gender/gender/blogs/blogs"
def load_pan13(path, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False, percent=.5):
data = bunch.Bunch()
if subset in ('train', 'test'):
# data[subset] = load_files("{0}/{1}".format(AVI_HOME, subset), encoding="latin1", load_content=True,
# random_state=rnd)
raise Exception("We are not ready for train test aviation data yet")
elif subset == "all":
data = load_files(PAN13_HOME, encoding="latin1", load_content=True,
random_state=rnd)
data.data = [keep_header_subject(text) for text in data.data]
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
for iDoc in data.data:
pass
if not raw:
data = process_data(data, fix_k, min_size, vct)
return data
EVERGREEN_HOME = "C:/Users/mramire8/Documents/Datasets/textclassification/raw data/evergreen"
def get_content(url):
g = Goose({'enable_image_fetching':False})
article = g.extract(url=url)
# article = g.extract(raw_html=url)
text = "{0} {1}".format(article.title, article.cleaned_text)
return text
def read_evergreenjs(filename):
import csv
docs = []
labels = []
# i =0
## EVERGREEN = 0, NON-EVERGREEN=1
with open(filename, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='"')
header = None
for row in reader:
# print row
if header is None:
header = row
else:
                ## Original boilerplate: the text has no punctuation
# content = json.loads(row[header.index('boilerplate')])
# content['title']
# if len(content)>1 and content['body'] is not None:
# docs.append(content['body'])
# labels.append(int(row[header.index('label')]))
## EXTRACT BODY-ISH OF THE HTML FILE
url = "{0}/raw_content/{1}.".format(EVERGREEN_HOME, row[header.index('urlid')])
text = open(url).read()
soup = BeautifulSoup(text)
# print "*"*50
# remove non-text tags
for tag in ['script', 'style', 'a', 'img']:
for el in soup.find_all(tag):
el.extract()
extractor = Extractor(extractor='ArticleExtractor', html=unicode(soup.get_text()))
## ADD CONTENT AND LABEL TO THE LIST
docs.append(extractor.getText())
# docs.append(get_content(url))
labels.append(int(row[header.index('label')]))
# print i
# i+=1
return docs, labels
def load_evergreen(path, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False, percent=.5):
docs = []
labels = []
## EVERGREEN = 0, NON-EVERGREEN=1
clases = ['EVERGREEN', 'SEASONAL']
filename = "{0}/{1}".format(EVERGREEN_HOME, "train.tsv")
docs, labels = read_evergreenjs(filename)
# filename = "{0}/{1}".format(EVERGREEN_HOME, "test.tsv")
# docst, labelst = read_evergreenjs(filename)
# data = bunch.Bunch(train=bunch.Bunch(data=docs, target=np.array(labels)),
# test=bunch.Bunch(data=docst, target=np.array(labelst)))
data = bunch.Bunch()
data.data = docs
data.target=np.array(labels)
indices = ShuffleSplit(len(data.data), n_iter=1, test_size=percent, random_state=rnd)
for train_ind, test_ind in indices:
data = bunch.Bunch(train=bunch.Bunch(data=[data.data[i] for i in train_ind], target=data.target[train_ind], target_names=clases),
test=bunch.Bunch(data=[data.data[i] for i in test_ind], target=data.target[test_ind], target_names=clases))
if not raw:
data = process_data(data, fix_k, min_size, vct)
return data
def create_gmo(file):
    docs, _ = load_documents(file, header=False)
    content = []
    iDoc = []
    for line in docs:
        text = line[0]
        if "Document Number:" in text and len(iDoc) > 0:
            content.append("\n".join(iDoc))
            iDoc = []
        iDoc.append(text)
    # flush the last accumulated document, which the loop above never appends
    if len(iDoc) > 0:
        content.append("\n".join(iDoc))
    return content
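# Illustrative sketch of the grouping rule used by create_gmo: lines accumulate
# until the next "Document Number:" marker starts a new document. The sample
# lines below are made up.
def _example_document_number_grouping():
    lines = ["Document Number: 1", "first article body",
             "Document Number: 2", "second article body"]
    content, current = [], []
    for text in lines:
        if "Document Number:" in text and len(current) > 0:
            content.append("\n".join(current))
            current = []
        current.append(text)
    if len(current) > 0:
        content.append("\n".join(current))
    return content  # two documents, one per "Document Number:" marker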
def load_gmo(path, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False, percent=.5):
GMO_HOME = "C:/Users/mramire8/Documents/Datasets/textclassification/raw data/gmo-hedging/GMOHedging_v1.0/gmo-anti/{}"
parts = create_gmo(GMO_HOME.format("anti_GMO"))
labels = np.zeros(len(parts))
parts.extend(create_gmo(GMO_HOME.format("pro_GMO")))
labels = np.append(labels, np.ones(len(parts)-len(labels)))
data = bunch.Bunch()
data.data = parts
data.target = labels
indices = ShuffleSplit(len(data.data), n_iter=1, test_size=percent, random_state=rnd)
for train_ind, test_ind in indices:
data = bunch.Bunch(train=bunch.Bunch(data=[data.data[i] for i in train_ind], target=data.target[train_ind]),
test=bunch.Bunch(data=[data.data[i] for i in test_ind], target=data.target[test_ind]))
if not raw:
data = process_data(data, fix_k, min_size, vct)
return data
def clean_xml_pan(xml_text, parser=None):
text = ""
# try:
root = ET.fromstring(xml_text, parser=parser)
for post in root.findall("post"):
text += "\n" + post.text.strip()
# except Exception:
# print xml_text
return text
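# Illustrative sketch: the minimal XML shape clean_xml_pan expects - a root
# element with <post> children. The sample document is made up.
def _example_clean_xml_pan_usage():
    sample = "<blog><post> first entry </post><post> second entry </post></blog>"
    return clean_xml_pan(sample)  # "\nfirst entry\nsecond entry"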
def load_blogpan(path, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False, percent=.5):
"""
    load text files of the author gender profiling dataset from folders into memory.
:param path: path of the root directory of the data
:param subset: what data will be loaded, train or test or all
:param shuffle:
:param rnd: random seed value
:param vct: vectorizer
:return: :raise ValueError:
"""
PAN13_HOME = "C:/Users/mramire8/Documents/Datasets/textclassification/raw data/author-profiling-gender/gender-profiling/blogs/blogs"
data = bunch.Bunch()
if subset in ('train', 'test'):
# data[subset] = load_files("{0}/{1}".format(AVI_HOME, subset), encoding="latin1", load_content=True,
# random_state=rnd)
raise Exception("We are not ready for train test aviation data yet")
elif subset == "all":
data = load_files(PAN13_HOME, encoding="latin1", load_content=True,
random_state=rnd)
# parser = XMLParser(encoding="latin-1", recover=True)
parser = etree.XMLParser(recover=True)
data.data = [clean_xml_pan(text, parser=parser) for text in data.data]
# data["test"] = load_files("{0}/{1}".format(AVI_HOME, "test"), encoding="latin1", load_content=True,
# random_state=rnd)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
# train_x, test_x, train_y, test_y = train_test_split(data.data, data.target, test_size=0.25,
# random_state=rnd)
indices = ShuffleSplit(len(data.data), n_iter=1, test_size=percent, random_state=rnd)
for train_ind, test_ind in indices:
data = bunch.Bunch(train=bunch.Bunch(data=[data.data[i] for i in train_ind], target=data.target[train_ind]),
test=bunch.Bunch(data=[data.data[i] for i in test_ind], target=data.target[test_ind]))
# if shuffle:
# random_state = np.random.RandomState(rnd)
# indices = np.arange(data.train.target.shape[0])
# random_state.shuffle(indices)
# data.train.filenames = data.train.filenames[indices]
# data.train.target = data.train.target[indices]
# # Use an object array to shuffle: avoids memory copy
# data_lst = np.array(data.train.data, dtype=object)
# data_lst = data_lst[indices]
# data.train.data = data_lst.tolist()
if not raw:
data = process_data(data, fix_k, min_size, vct)
return data
# from sklearn.datasets import fetch_mldata
def load_biocreative(path, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False, percent=.5):
# target = []
# target_names = []
# filenames = []
#
# folders = [f for f in sorted(listdir(container_path))
# if isdir(join(container_path, f))]
#
# if categories is not None:
# folders = [f for f in folders if f in categories]
#
# for label, folder in enumerate(folders):
# target_names.append(folder)
# folder_path = join(container_path, folder)
# documents = [join(folder_path, d)
# for d in sorted(listdir(folder_path))]
# target.extend(len(documents) * [label])
# filenames.extend(documents)
#
# # convert to array for fancy indexing
# filenames = np.array(filenames)
# target = np.array(target)
#
# if shuffle:
# random_state = check_random_state(random_state)
# indices = np.arange(filenames.shape[0])
# random_state.shuffle(indices)
# filenames = filenames[indices]
# target = target[indices]
#
# if load_content:
# data = [open(filename, 'rb').read() for filename in filenames]
# if encoding is not None:
# data = [d.decode(encoding, decode_error) for d in data]
# return Bunch(data=data,
# filenames=filenames,
# target_names=target_names,
# target=target,
# DESCR=description)
#
# return Bunch(filenames=filenames,
# target_names=target_names,
# target=target,
# DESCR=description)
raise Exception("We are not ready for that data yet")
WEBKB_HOME='C:/Users/mramire8/Documents/Datasets/webkb/webkb'
def clean_html_text(html_text):
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_text)
return soup.get_text()
def get_sub_filenames(input_dir):
names = []
for path, subdirs, files in os.walk(input_dir):
for filename in files:
names.append(os.path.join(path, filename))
return names
def load_files_sub(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
charset=None, charset_error=None,
decode_error='strict', random_state=0):
"""
    Adapted from sklearn's load_files; this loads files from directories and their subdirectories
:param container_path:
:param description:
:param categories:
:param load_content:
:param shuffle:
:param encoding:
:param charset:
:param charset_error:
:param decode_error:
:param random_state:
:return:
"""
from os.path import isdir
from os import listdir
from os.path import join
target = []
target_names = []
filenames = []
## get the folders
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
# get categories
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
## get all files from subfolders
documents = [join(folder_path, d)
for d in sorted(get_sub_filenames(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = [open(filename, 'rb').read() for filename in filenames]
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return bunch.Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return bunch.Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
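# Illustrative sketch of calling load_files_sub on a category-per-folder layout
# where documents may sit in nested sub-folders. The directory and category
# names are assumptions.
def _example_load_files_sub_usage():
    corpus = load_files_sub("corpus_root", categories=["course", "faculty"],
                            load_content=True, encoding="latin1", random_state=42)
    # corpus.data, corpus.target and corpus.target_names follow the sklearn
    # load_files conventions, but filenames were collected recursively.
    return corpus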
def load_webkb(path, categories=None, subset="all", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False, percent=.5):
"""
    Read and process data from the WebKB dataset. Documents are HTML files.
    :param path: location of the root directory of the data
    :param categories: categories to load: COURSE, DEPARTMENT, FACULTY, OTHER, PROJECT, STAFF, STUDENT
    :param subset: -- unused at the moment --
    :param shuffle: -- unused at the moment --
    :param rnd: random seed value
    :param vct: vectorizer for the feature vector representation
    :param fix_k: truncate each document at the k-th word; None to include all words
    :param min_size: minimum document size acceptable to load
    :param raw: return data without feature vectors
    :param percent: test-set fraction for the train-test split, e.g. .25 produces 75% training and 25% test
    :return: Bunch :
        .train.data     text of training documents
        .train.target   training target vector
        .train.bow      feature vectors of full documents
        .train.bowk     feature vectors of k-word documents
        .train.kwords   text of k-word documents
        .test.data      test text data
        .test.target    test target vector
        .test.bow       feature vectors of test documents
:raise ValueError:
"""
data = bunch.Bunch()
if subset in ('train', 'test'):
# data[subset] = load_files("{0}/{1}".format(AVI_HOME, subset), encoding="latin1", load_content=True,
# random_state=rnd)
raise Exception("We are not ready for train test webkb data yet")
elif subset == "all":
data = load_files_sub(WEBKB_HOME, encoding="latin1", load_content=True, random_state=rnd)
data.data = [clean_html_text(text) for text in data.data]
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(rnd)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
indices = ShuffleSplit(len(data.data), n_iter=1, test_size=percent, random_state=rnd)
for train_ind, test_ind in indices:
data = bunch.Bunch(train=bunch.Bunch(data=[data.data[i] for i in train_ind], target=data.target[train_ind],
target_names=data.target_names),
test=bunch.Bunch(data=[data.data[i] for i in test_ind], target=data.target[test_ind],
target_names=data.target_names))
if not raw:
data = process_data(data, fix_k, min_size, vct)
return data
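# Illustrative sketch of the label re-mapping used above: np.in1d masks the kept
# categories and np.searchsorted renumbers them into contiguous labels. The toy
# target vector and category indices are made up.
def _example_category_remapping():
    target = np.array([0, 3, 5, 3, 0, 5])
    labels = [0, 3, 5]              # sorted indices of the kept categories
    mask = np.in1d(target, labels)  # here every document is kept
    remapped = np.searchsorted(labels, target[mask])
    return remapped                 # array([0, 1, 2, 1, 0, 2])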
def split_data(data, splits=2.0, rnd=987654321):
"""
:param data: is a bunch with data.data and data.target
    :param splits: number of splits (translates into a test-set percentage of 1/splits)
    :param rnd: random seed
:return: two bunches with the split
"""
percent = 1.0 / splits
indices = ShuffleSplit(len(data.data), n_iter=1, test_size=percent, random_state=rnd)
part1 = bunch.Bunch()
part2 = bunch.Bunch()
for train_ind, test_ind in indices:
part1 = bunch.Bunch(train=bunch.Bunch(data=[data.data[i] for i in train_ind], target=data.target[train_ind]))#, target_names=data.target_names))
part2 = bunch.Bunch(train=bunch.Bunch(data=[data.data[i] for i in test_ind], target=data.target[test_ind])) #, target_names=data.target_names))
return part1, part2
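# Illustrative sketch: split_data halves a bunch when splits=2.0; each returned
# part wraps its slice under .train. The toy pool below is made up.
def _example_split_data_usage():
    pool = bunch.Bunch(data=["doc a", "doc b", "doc c", "doc d"],
                       target=np.array([0, 1, 0, 1]))
    part1, part2 = split_data(pool, splits=2.0, rnd=42)
    return len(part1.train.data), len(part2.train.data)  # (2, 2)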
|
MuffinMedic/CloudBot
|
refs/heads/TechSupport
|
cloudbot/event.py
|
3
|
import asyncio
import concurrent.futures
import enum
import logging
import sys
import warnings
from functools import partial
from cloudbot.util.parsers.irc import Message
logger = logging.getLogger("cloudbot")
@enum.unique
class EventType(enum.Enum):
message = 0
action = 1
# TODO: Do we actually want to have a 'notice' event type? Should the NOTICE command be a 'message' type?
notice = 2
join = 3
part = 4
kick = 5
other = 6
class Event:
"""
:type bot: cloudbot.bot.CloudBot
:type conn: cloudbot.client.Client
:type hook: cloudbot.plugin.Hook
:type type: EventType
:type content: str
:type target: str
:type chan: str
:type nick: str
:type user: str
:type host: str
:type mask: str
:type db: sqlalchemy.orm.Session
:type db_executor: concurrent.futures.ThreadPoolExecutor
:type irc_raw: str
:type irc_prefix: str
:type irc_command: str
    :type irc_paramlist: list[str]
:type irc_ctcp_text: str
"""
def __init__(self, *, bot=None, hook=None, conn=None, base_event=None, event_type=EventType.other, content=None,
content_raw=None, target=None, channel=None, nick=None, user=None, host=None, mask=None, irc_raw=None,
irc_prefix=None, irc_command=None, irc_paramlist=None, irc_ctcp_text=None):
"""
All of these parameters except for `bot` and `hook` are optional.
The irc_* parameters should only be specified for IRC events.
Note that the `bot` argument may be left out if you specify a `base_event`.
:param bot: The CloudBot instance this event was triggered from
:param conn: The Client instance this event was triggered from
:param hook: The hook this event will be passed to
:param base_event: The base event that this event is based on. If this parameter is not None, then nick, user,
host, mask, and irc_* arguments are ignored
:param event_type: The type of the event
        :param content: The content of the message, or the reason for a join or part
        :param target: The target of the action, for example the user being kicked or invited
:param channel: The channel that this action took place in
:param nick: The nickname of the sender that triggered this event
:param user: The user of the sender that triggered this event
:param host: The host of the sender that triggered this event
:param mask: The mask of the sender that triggered this event (nick!user@host)
:param irc_raw: The raw IRC line
:param irc_prefix: The raw IRC prefix
:param irc_command: The IRC command
:param irc_paramlist: The list of params for the IRC command. If the last param is a content param, the ':'
should be removed from the front.
:param irc_ctcp_text: CTCP text if this message is a CTCP command
:type bot: cloudbot.bot.CloudBot
:type conn: cloudbot.client.Client
:type hook: cloudbot.plugin.Hook
:type base_event: cloudbot.event.Event
:type content: str
:type target: str
:type event_type: EventType
:type nick: str
:type user: str
:type host: str
:type mask: str
:type irc_raw: str
:type irc_prefix: str
:type irc_command: str
:type irc_paramlist: list[str]
:type irc_ctcp_text: str
"""
self.db = None
self.db_executor = None
self.bot = bot
self.conn = conn
self.hook = hook
if base_event is not None:
# We're copying an event, so inherit values
if self.bot is None and base_event.bot is not None:
self.bot = base_event.bot
if self.conn is None and base_event.conn is not None:
self.conn = base_event.conn
if self.hook is None and base_event.hook is not None:
self.hook = base_event.hook
# If base_event is provided, don't check these parameters, just inherit
self.type = base_event.type
self.content = base_event.content
self.content_raw = base_event.content_raw
self.target = base_event.target
self.chan = base_event.chan
self.nick = base_event.nick
self.user = base_event.user
self.host = base_event.host
self.mask = base_event.mask
# clients-specific parameters
self.irc_raw = base_event.irc_raw
self.irc_prefix = base_event.irc_prefix
self.irc_command = base_event.irc_command
self.irc_paramlist = base_event.irc_paramlist
self.irc_ctcp_text = base_event.irc_ctcp_text
else:
# Since base_event wasn't provided, we can take these parameters
self.type = event_type
self.content = content
self.content_raw = content_raw
self.target = target
self.chan = channel
self.nick = nick
self.user = user
self.host = host
self.mask = mask
# clients-specific parameters
self.irc_raw = irc_raw
self.irc_prefix = irc_prefix
self.irc_command = irc_command
self.irc_paramlist = irc_paramlist
self.irc_ctcp_text = irc_ctcp_text
@asyncio.coroutine
def prepare(self):
"""
        Initializes this event to be run through its hook
Mainly, initializes a database object on this event, if the hook requires it.
This method is for when the hook is *not* threaded (event.hook.threaded is False).
If you need to add a db to a threaded hook, use prepare_threaded.
"""
if self.hook is None:
raise ValueError("event.hook is required to prepare an event")
if "db" in self.hook.required_args:
# logger.debug("Opening database session for {}:threaded=False".format(self.hook.description))
# we're running a coroutine hook with a db, so initialise an executor pool
self.db_executor = concurrent.futures.ThreadPoolExecutor(1)
# be sure to initialize the db in the database executor, so it will be accessible in that thread.
self.db = yield from self.async_call(self.bot.db_session)
def prepare_threaded(self):
"""
        Initializes this event to be run through its hook
Mainly, initializes the database object on this event, if the hook requires it.
This method is for when the hook is threaded (event.hook.threaded is True).
If you need to add a db to a coroutine hook, use prepare.
"""
if self.hook is None:
raise ValueError("event.hook is required to prepare an event")
if "db" in self.hook.required_args:
# logger.debug("Opening database session for {}:threaded=True".format(self.hook.description))
self.db = self.bot.db_session()
@asyncio.coroutine
def close(self):
"""
        Closes this event after running it through its hook.
Mainly, closes the database connection attached to this event (if any).
This method is for when the hook is *not* threaded (event.hook.threaded is False).
If you need to add a db to a threaded hook, use close_threaded.
"""
if self.hook is None:
raise ValueError("event.hook is required to close an event")
if self.db is not None:
# logger.debug("Closing database session for {}:threaded=False".format(self.hook.description))
            # be sure to close the database in the database executor, as it is only accessible in that one thread
yield from self.async_call(self.db.close)
self.db = None
def close_threaded(self):
"""
        Closes this event after running it through its hook.
Mainly, closes the database connection attached to this event (if any).
This method is for when the hook is threaded (event.hook.threaded is True).
If you need to add a db to a coroutine hook, use close.
"""
if self.hook is None:
raise ValueError("event.hook is required to close an event")
if self.db is not None:
# logger.debug("Closing database session for {}:threaded=True".format(self.hook.description))
self.db.close()
self.db = None
@property
def event(self):
"""
:rtype: Event
"""
return self
@property
def loop(self):
"""
:rtype: asyncio.events.AbstractEventLoop
"""
return self.bot.loop
@property
def logger(self):
return logger
def message(self, message, target=None):
"""sends a message to a specific or current channel/user
:type message: str
:type target: str
"""
if target is None:
if self.chan is None:
raise ValueError("Target must be specified when chan is not assigned")
target = self.chan
self.conn.message(target, message)
def admin_log(self, message, broadcast=False):
        Log a message in the current connection's admin log
:type message: str
:type broadcast: bool
:param message: The message to log
:param broadcast: Should this be broadcast to all connections
"""
conns = [self.conn] if not broadcast else self.bot.connections.values()
for conn in conns:
if conn and conn.connected:
conn.admin_log(message, console=not broadcast)
def reply(self, *messages, target=None):
"""sends a message to the current channel/user with a prefix
        :type messages: str
:type target: str
"""
reply_ping = self.conn.config.get("reply_ping", True)
if target is None:
if self.chan is None:
raise ValueError("Target must be specified when chan is not assigned")
target = self.chan
if not messages: # if there are no messages specified, don't do anything
return
if target == self.nick or not reply_ping:
self.conn.message(target, *messages)
else:
self.conn.message(target, "({}) {}".format(self.nick, messages[0]), *messages[1:])
def action(self, message, target=None):
"""sends an action to the current channel/user or a specific channel/user
:type message: str
:type target: str
"""
if target is None:
if self.chan is None:
raise ValueError("Target must be specified when chan is not assigned")
target = self.chan
self.conn.action(target, message)
def ctcp(self, message, ctcp_type, target=None):
        sends a CTCP to the current channel/user or a specific channel/user
:type message: str
:type ctcp_type: str
:type target: str
"""
if target is None:
if self.chan is None:
raise ValueError("Target must be specified when chan is not assigned")
target = self.chan
if not hasattr(self.conn, "ctcp"):
raise ValueError("CTCP can only be used on IRC connections")
# noinspection PyUnresolvedReferences
self.conn.ctcp(target, ctcp_type, message)
def notice(self, message, target=None):
"""sends a notice to the current channel/user or a specific channel/user
:type message: str
:type target: str
"""
avoid_notices = self.conn.config.get("avoid_notices", False)
if target is None:
if self.nick is None:
raise ValueError("Target must be specified when nick is not assigned")
target = self.nick
        # we have a config option to avoid noticing users and PM them instead, so we use it here
if avoid_notices:
self.conn.message(target, message)
else:
self.conn.notice(target, message)
def has_permission(self, permission, notice=True):
""" returns whether or not the current user has a given permission
:type permission: str
:rtype: bool
"""
if not self.mask:
raise ValueError("has_permission requires mask is not assigned")
return self.conn.permissions.has_perm_mask(self.mask, permission, notice=notice)
@asyncio.coroutine
def check_permission(self, permission, notice=True):
""" returns whether or not the current user has a given permission
:type permission: str
:type notice: bool
:rtype: bool
"""
if self.has_permission(permission, notice=notice):
return True
for perm_hook in self.bot.plugin_manager.perm_hooks[permission]:
# noinspection PyTupleAssignmentBalance
ok, res = yield from self.bot.plugin_manager.internal_launch(perm_hook, self)
if ok and res:
return True
return False
@asyncio.coroutine
def check_permissions(self, *perms, notice=True):
for perm in perms:
if (yield from self.check_permission(perm, notice=notice)):
return True
return False
@asyncio.coroutine
def async_call(self, func, *args, **kwargs):
if self.db_executor is not None:
executor = self.db_executor
else:
executor = None
part = partial(func, *args, **kwargs)
result = yield from self.loop.run_in_executor(executor, part)
return result
def is_nick_valid(self, nick):
"""
Returns whether a nick is valid for a given connection
:param nick: The nick to check
:return: Whether or not it is valid
"""
return self.conn.is_nick_valid(nick)
def __getitem__(self, item):
try:
return getattr(self, item)
except AttributeError:
raise KeyError(item)
if sys.version_info < (3, 7, 0):
# noinspection PyCompatibility
@asyncio.coroutine
def async_(self, function, *args, **kwargs):
warnings.warn(
"event.async() is deprecated, use event.async_call() instead.",
DeprecationWarning, stacklevel=2
)
result = yield from self.async_call(function, *args, **kwargs)
return result
# Silence deprecation warnings about use of the 'async' name as a function
try:
setattr(Event, 'async', getattr(Event, 'async_'))
except AttributeError:
pass
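# Illustrative sketch (not part of cloudbot itself): the threaded hook lifecycle
# for an Event - prepare_threaded() opens a db session if the hook declares a
# "db" argument, the hook body runs, then close_threaded() releases the session.
# The bot/hook/conn arguments are placeholders supplied by the plugin manager in
# real use.
def _example_threaded_event_lifecycle(bot, hook, conn):
    event = Event(bot=bot, hook=hook, conn=conn, event_type=EventType.message,
                  content="hello", channel="#example", nick="someone")
    event.prepare_threaded()    # sets event.db when "db" is in hook.required_args
    try:
        pass                    # the threaded hook would be called with this event here
    finally:
        event.close_threaded()  # closes event.db if it was opened
    return event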
class CommandEvent(Event):
"""
:type hook: cloudbot.plugin.CommandHook
:type text: str
:type triggered_command: str
"""
def __init__(self, *, bot=None, hook, text, triggered_command, cmd_prefix, conn=None, base_event=None,
event_type=None, content=None, content_raw=None, target=None, channel=None, nick=None, user=None,
host=None, mask=None, irc_raw=None, irc_prefix=None, irc_command=None, irc_paramlist=None):
"""
:param text: The arguments for the command
:param triggered_command: The command that was triggered
:type text: str
:type triggered_command: str
"""
super().__init__(bot=bot, hook=hook, conn=conn, base_event=base_event, event_type=event_type, content=content,
content_raw=content_raw, target=target, channel=channel, nick=nick, user=user, host=host,
mask=mask, irc_raw=irc_raw, irc_prefix=irc_prefix, irc_command=irc_command,
irc_paramlist=irc_paramlist)
self.hook = hook
self.text = text
self.doc = self.hook.doc
self.triggered_command = triggered_command
self.triggered_prefix = cmd_prefix
def notice_doc(self, target=None):
"""sends a notice containing this command's docstring to the current channel/user or a specific channel/user
:type target: str
"""
if self.triggered_command is None:
raise ValueError("Triggered command not set on this event")
if self.hook.doc is None:
message = "{}{} requires additional arguments.".format(self.triggered_prefix, self.triggered_command)
else:
if self.hook.doc.split()[0].isalpha():
# this is using the old format of `name <args> - doc`
message = "{}{}".format(self.triggered_prefix, self.hook.doc)
else:
# this is using the new format of `<args> - doc`
message = "{}{} {}".format(self.triggered_prefix, self.triggered_command, self.hook.doc)
self.notice(message, target=target)
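# Illustrative sketch (not part of cloudbot itself) of how notice_doc formats
# the help text for the two docstring styles it recognises. The prefix, command
# and doc strings are made up.
def _example_notice_doc_formatting(prefix=".", command="remind",
                                   doc="<time> <message> - sets a reminder"):
    if doc.split()[0].isalpha():
        # old format: "name <args> - doc" already contains the command name
        return "{}{}".format(prefix, doc)
    # new format: "<args> - doc" needs the command name prepended
    return "{}{} {}".format(prefix, command, doc)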
class RegexEvent(Event):
"""
:type hook: cloudbot.plugin.RegexHook
:type match: re.__Match
"""
def __init__(self, *, bot=None, hook, match, conn=None, base_event=None, event_type=None, content=None, content_raw=None,
target=None, channel=None, nick=None, user=None, host=None, mask=None, irc_raw=None, irc_prefix=None,
irc_command=None, irc_paramlist=None):
"""
        :param match: The match object returned by the regex search method
:type match: re.__Match
"""
super().__init__(bot=bot, conn=conn, hook=hook, base_event=base_event, event_type=event_type, content=content,
content_raw=content_raw, target=target, channel=channel, nick=nick, user=user, host=host, mask=mask,
irc_raw=irc_raw, irc_prefix=irc_prefix, irc_command=irc_command, irc_paramlist=irc_paramlist)
self.match = match
class CapEvent(Event):
def __init__(self, *args, cap, cap_param=None, **kwargs):
super().__init__(*args, **kwargs)
self.cap = cap
self.cap_param = cap_param
class IrcOutEvent(Event):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.parsed_line = None
@asyncio.coroutine
def prepare(self):
yield from super().prepare()
if "parsed_line" in self.hook.required_args:
try:
self.parsed_line = Message.parse(self.line)
except Exception:
logger.exception("Unable to parse line requested by hook %s", self.hook)
self.parsed_line = None
def prepare_threaded(self):
super().prepare_threaded()
if "parsed_line" in self.hook.required_args:
try:
self.parsed_line = Message.parse(self.line)
except Exception:
logger.exception("Unable to parse line requested by hook %s", self.hook)
self.parsed_line = None
@property
def line(self):
return str(self.irc_raw)
class PostHookEvent(Event):
def __init__(self, *args, launched_hook=None, launched_event=None, result=None, error=None, **kwargs):
super().__init__(*args, **kwargs)
self.launched_hook = launched_hook
self.launched_event = launched_event
self.result = result
self.error = error
|
Drooids/odoo
|
refs/heads/8.0
|
addons/website_google_map/__init__.py
|
1350
|
import controllers
|
KaranToor/MA450
|
refs/heads/master
|
google-cloud-sdk/lib/googlecloudsdk/third_party/appengine/datastore/datastore_v4_pb.py
|
6
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: apphosting/datastore/datastore_v4.proto
from googlecloudsdk.third_party.appengine.proto import ProtocolBuffer
import array
import thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
_extension_runtime = True
_ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
_extension_runtime = False
_ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
from googlecloudsdk.third_party.appengine.datastore.entity_v4_pb import *
import googlecloudsdk.third_party.appengine.datastore.entity_v4_pb
class Error(ProtocolBuffer.ProtocolMessage):
# ErrorCode values
BAD_REQUEST = 1
CONCURRENT_TRANSACTION = 2
INTERNAL_ERROR = 3
NEED_INDEX = 4
TIMEOUT = 5
PERMISSION_DENIED = 6
BIGTABLE_ERROR = 7
COMMITTED_BUT_STILL_APPLYING = 8
CAPABILITY_DISABLED = 9
TRY_ALTERNATE_BACKEND = 10
SAFE_TIME_TOO_OLD = 11
RESOURCE_EXHAUSTED = 12
NOT_FOUND = 13
ALREADY_EXISTS = 14
FAILED_PRECONDITION = 15
UNAUTHENTICATED = 16
_ErrorCode_NAMES = {
1: "BAD_REQUEST",
2: "CONCURRENT_TRANSACTION",
3: "INTERNAL_ERROR",
4: "NEED_INDEX",
5: "TIMEOUT",
6: "PERMISSION_DENIED",
7: "BIGTABLE_ERROR",
8: "COMMITTED_BUT_STILL_APPLYING",
9: "CAPABILITY_DISABLED",
10: "TRY_ALTERNATE_BACKEND",
11: "SAFE_TIME_TOO_OLD",
12: "RESOURCE_EXHAUSTED",
13: "NOT_FOUND",
14: "ALREADY_EXISTS",
15: "FAILED_PRECONDITION",
16: "UNAUTHENTICATED",
}
def ErrorCode_Name(cls, x): return cls._ErrorCode_NAMES.get(x, "")
ErrorCode_Name = classmethod(ErrorCode_Name)
def __init__(self, contents=None):
pass
if contents is not None: self.MergeFromString(contents)
def MergeFrom(self, x):
assert x is not self
def Equals(self, x):
if x is self: return 1
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
return n
def ByteSizePartial(self):
n = 0
return n
def Clear(self):
pass
def OutputUnchecked(self, out):
pass
def OutputPartial(self, out):
pass
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
}, 0)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
}, 0, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.Error'
class EntityResult(ProtocolBuffer.ProtocolMessage):
# ResultType values
FULL = 1
PROJECTION = 2
KEY_ONLY = 3
_ResultType_NAMES = {
1: "FULL",
2: "PROJECTION",
3: "KEY_ONLY",
}
def ResultType_Name(cls, x): return cls._ResultType_NAMES.get(x, "")
ResultType_Name = classmethod(ResultType_Name)
has_entity_ = 0
has_version_ = 0
version_ = 0
has_cursor_ = 0
cursor_ = ""
def __init__(self, contents=None):
self.entity_ = Entity()
if contents is not None: self.MergeFromString(contents)
def entity(self): return self.entity_
def mutable_entity(self): self.has_entity_ = 1; return self.entity_
def clear_entity(self):self.has_entity_ = 0; self.entity_.Clear()
def has_entity(self): return self.has_entity_
def version(self): return self.version_
def set_version(self, x):
self.has_version_ = 1
self.version_ = x
def clear_version(self):
if self.has_version_:
self.has_version_ = 0
self.version_ = 0
def has_version(self): return self.has_version_
def cursor(self): return self.cursor_
def set_cursor(self, x):
self.has_cursor_ = 1
self.cursor_ = x
def clear_cursor(self):
if self.has_cursor_:
self.has_cursor_ = 0
self.cursor_ = ""
def has_cursor(self): return self.has_cursor_
def MergeFrom(self, x):
assert x is not self
if (x.has_entity()): self.mutable_entity().MergeFrom(x.entity())
if (x.has_version()): self.set_version(x.version())
if (x.has_cursor()): self.set_cursor(x.cursor())
def Equals(self, x):
if x is self: return 1
if self.has_entity_ != x.has_entity_: return 0
if self.has_entity_ and self.entity_ != x.entity_: return 0
if self.has_version_ != x.has_version_: return 0
if self.has_version_ and self.version_ != x.version_: return 0
if self.has_cursor_ != x.has_cursor_: return 0
if self.has_cursor_ and self.cursor_ != x.cursor_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_entity_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: entity not set.')
elif not self.entity_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.entity_.ByteSize())
if (self.has_version_): n += 1 + self.lengthVarInt64(self.version_)
if (self.has_cursor_): n += 1 + self.lengthString(len(self.cursor_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_entity_):
n += 1
n += self.lengthString(self.entity_.ByteSizePartial())
if (self.has_version_): n += 1 + self.lengthVarInt64(self.version_)
if (self.has_cursor_): n += 1 + self.lengthString(len(self.cursor_))
return n
def Clear(self):
self.clear_entity()
self.clear_version()
self.clear_cursor()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.entity_.ByteSize())
self.entity_.OutputUnchecked(out)
if (self.has_version_):
out.putVarInt32(16)
out.putVarInt64(self.version_)
if (self.has_cursor_):
out.putVarInt32(26)
out.putPrefixedString(self.cursor_)
def OutputPartial(self, out):
if (self.has_entity_):
out.putVarInt32(10)
out.putVarInt32(self.entity_.ByteSizePartial())
self.entity_.OutputPartial(out)
if (self.has_version_):
out.putVarInt32(16)
out.putVarInt64(self.version_)
if (self.has_cursor_):
out.putVarInt32(26)
out.putPrefixedString(self.cursor_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_entity().TryMerge(tmp)
continue
if tt == 16:
self.set_version(d.getVarInt64())
continue
if tt == 26:
self.set_cursor(d.getPrefixedString())
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_entity_:
res+=prefix+"entity <\n"
res+=self.entity_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_version_: res+=prefix+("version: %s\n" % self.DebugFormatInt64(self.version_))
if self.has_cursor_: res+=prefix+("cursor: %s\n" % self.DebugFormatString(self.cursor_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kentity = 1
kversion = 2
kcursor = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "entity",
2: "version",
3: "cursor",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.EntityResult'
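# Illustrative sketch (hand-written, unlike the generated classes here) of the
# accessor style these messages expose: set_*/has_*/clear_* plus MergeFrom and
# Equals. The version and cursor values are made up.
def _example_entity_result_usage():
    result = EntityResult()
    result.set_version(7)
    result.set_cursor("opaque-cursor-bytes")
    copy = EntityResult()
    copy.MergeFrom(result)  # copies only the fields that are set
    return copy.version(), copy.has_cursor(), copy.Equals(result)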
class Query(ProtocolBuffer.ProtocolMessage):
has_filter_ = 0
filter_ = None
has_start_cursor_ = 0
start_cursor_ = ""
has_end_cursor_ = 0
end_cursor_ = ""
has_offset_ = 0
offset_ = 0
has_limit_ = 0
limit_ = 0
def __init__(self, contents=None):
self.projection_ = []
self.kind_ = []
self.order_ = []
self.group_by_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def projection_size(self): return len(self.projection_)
def projection_list(self): return self.projection_
def projection(self, i):
return self.projection_[i]
def mutable_projection(self, i):
return self.projection_[i]
def add_projection(self):
x = PropertyExpression()
self.projection_.append(x)
return x
def clear_projection(self):
self.projection_ = []
def kind_size(self): return len(self.kind_)
def kind_list(self): return self.kind_
def kind(self, i):
return self.kind_[i]
def mutable_kind(self, i):
return self.kind_[i]
def add_kind(self):
x = KindExpression()
self.kind_.append(x)
return x
def clear_kind(self):
self.kind_ = []
def filter(self):
if self.filter_ is None:
self.lazy_init_lock_.acquire()
try:
if self.filter_ is None: self.filter_ = Filter()
finally:
self.lazy_init_lock_.release()
return self.filter_
def mutable_filter(self): self.has_filter_ = 1; return self.filter()
def clear_filter(self):
# Warning: this method does not acquire the lock.
if self.has_filter_:
self.has_filter_ = 0;
if self.filter_ is not None: self.filter_.Clear()
def has_filter(self): return self.has_filter_
def order_size(self): return len(self.order_)
def order_list(self): return self.order_
def order(self, i):
return self.order_[i]
def mutable_order(self, i):
return self.order_[i]
def add_order(self):
x = PropertyOrder()
self.order_.append(x)
return x
def clear_order(self):
self.order_ = []
def group_by_size(self): return len(self.group_by_)
def group_by_list(self): return self.group_by_
def group_by(self, i):
return self.group_by_[i]
def mutable_group_by(self, i):
return self.group_by_[i]
def add_group_by(self):
x = PropertyReference()
self.group_by_.append(x)
return x
def clear_group_by(self):
self.group_by_ = []
def start_cursor(self): return self.start_cursor_
def set_start_cursor(self, x):
self.has_start_cursor_ = 1
self.start_cursor_ = x
def clear_start_cursor(self):
if self.has_start_cursor_:
self.has_start_cursor_ = 0
self.start_cursor_ = ""
def has_start_cursor(self): return self.has_start_cursor_
def end_cursor(self): return self.end_cursor_
def set_end_cursor(self, x):
self.has_end_cursor_ = 1
self.end_cursor_ = x
def clear_end_cursor(self):
if self.has_end_cursor_:
self.has_end_cursor_ = 0
self.end_cursor_ = ""
def has_end_cursor(self): return self.has_end_cursor_
def offset(self): return self.offset_
def set_offset(self, x):
self.has_offset_ = 1
self.offset_ = x
def clear_offset(self):
if self.has_offset_:
self.has_offset_ = 0
self.offset_ = 0
def has_offset(self): return self.has_offset_
def limit(self): return self.limit_
def set_limit(self, x):
self.has_limit_ = 1
self.limit_ = x
def clear_limit(self):
if self.has_limit_:
self.has_limit_ = 0
self.limit_ = 0
def has_limit(self): return self.has_limit_
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.projection_size()): self.add_projection().CopyFrom(x.projection(i))
for i in xrange(x.kind_size()): self.add_kind().CopyFrom(x.kind(i))
if (x.has_filter()): self.mutable_filter().MergeFrom(x.filter())
for i in xrange(x.order_size()): self.add_order().CopyFrom(x.order(i))
for i in xrange(x.group_by_size()): self.add_group_by().CopyFrom(x.group_by(i))
if (x.has_start_cursor()): self.set_start_cursor(x.start_cursor())
if (x.has_end_cursor()): self.set_end_cursor(x.end_cursor())
if (x.has_offset()): self.set_offset(x.offset())
if (x.has_limit()): self.set_limit(x.limit())
def Equals(self, x):
if x is self: return 1
if len(self.projection_) != len(x.projection_): return 0
for e1, e2 in zip(self.projection_, x.projection_):
if e1 != e2: return 0
if len(self.kind_) != len(x.kind_): return 0
for e1, e2 in zip(self.kind_, x.kind_):
if e1 != e2: return 0
if self.has_filter_ != x.has_filter_: return 0
if self.has_filter_ and self.filter_ != x.filter_: return 0
if len(self.order_) != len(x.order_): return 0
for e1, e2 in zip(self.order_, x.order_):
if e1 != e2: return 0
if len(self.group_by_) != len(x.group_by_): return 0
for e1, e2 in zip(self.group_by_, x.group_by_):
if e1 != e2: return 0
if self.has_start_cursor_ != x.has_start_cursor_: return 0
if self.has_start_cursor_ and self.start_cursor_ != x.start_cursor_: return 0
if self.has_end_cursor_ != x.has_end_cursor_: return 0
if self.has_end_cursor_ and self.end_cursor_ != x.end_cursor_: return 0
if self.has_offset_ != x.has_offset_: return 0
if self.has_offset_ and self.offset_ != x.offset_: return 0
if self.has_limit_ != x.has_limit_: return 0
if self.has_limit_ and self.limit_ != x.limit_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.projection_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.kind_:
if not p.IsInitialized(debug_strs): initialized=0
if (self.has_filter_ and not self.filter_.IsInitialized(debug_strs)): initialized = 0
for p in self.order_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.group_by_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.projection_)
for i in xrange(len(self.projection_)): n += self.lengthString(self.projection_[i].ByteSize())
n += 1 * len(self.kind_)
for i in xrange(len(self.kind_)): n += self.lengthString(self.kind_[i].ByteSize())
if (self.has_filter_): n += 1 + self.lengthString(self.filter_.ByteSize())
n += 1 * len(self.order_)
for i in xrange(len(self.order_)): n += self.lengthString(self.order_[i].ByteSize())
n += 1 * len(self.group_by_)
for i in xrange(len(self.group_by_)): n += self.lengthString(self.group_by_[i].ByteSize())
if (self.has_start_cursor_): n += 1 + self.lengthString(len(self.start_cursor_))
if (self.has_end_cursor_): n += 1 + self.lengthString(len(self.end_cursor_))
if (self.has_offset_): n += 1 + self.lengthVarInt64(self.offset_)
if (self.has_limit_): n += 1 + self.lengthVarInt64(self.limit_)
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.projection_)
for i in xrange(len(self.projection_)): n += self.lengthString(self.projection_[i].ByteSizePartial())
n += 1 * len(self.kind_)
for i in xrange(len(self.kind_)): n += self.lengthString(self.kind_[i].ByteSizePartial())
if (self.has_filter_): n += 1 + self.lengthString(self.filter_.ByteSizePartial())
n += 1 * len(self.order_)
for i in xrange(len(self.order_)): n += self.lengthString(self.order_[i].ByteSizePartial())
n += 1 * len(self.group_by_)
for i in xrange(len(self.group_by_)): n += self.lengthString(self.group_by_[i].ByteSizePartial())
if (self.has_start_cursor_): n += 1 + self.lengthString(len(self.start_cursor_))
if (self.has_end_cursor_): n += 1 + self.lengthString(len(self.end_cursor_))
if (self.has_offset_): n += 1 + self.lengthVarInt64(self.offset_)
if (self.has_limit_): n += 1 + self.lengthVarInt64(self.limit_)
return n
def Clear(self):
self.clear_projection()
self.clear_kind()
self.clear_filter()
self.clear_order()
self.clear_group_by()
self.clear_start_cursor()
self.clear_end_cursor()
self.clear_offset()
self.clear_limit()
def OutputUnchecked(self, out):
for i in xrange(len(self.projection_)):
out.putVarInt32(18)
out.putVarInt32(self.projection_[i].ByteSize())
self.projection_[i].OutputUnchecked(out)
for i in xrange(len(self.kind_)):
out.putVarInt32(26)
out.putVarInt32(self.kind_[i].ByteSize())
self.kind_[i].OutputUnchecked(out)
if (self.has_filter_):
out.putVarInt32(34)
out.putVarInt32(self.filter_.ByteSize())
self.filter_.OutputUnchecked(out)
for i in xrange(len(self.order_)):
out.putVarInt32(42)
out.putVarInt32(self.order_[i].ByteSize())
self.order_[i].OutputUnchecked(out)
for i in xrange(len(self.group_by_)):
out.putVarInt32(50)
out.putVarInt32(self.group_by_[i].ByteSize())
self.group_by_[i].OutputUnchecked(out)
if (self.has_start_cursor_):
out.putVarInt32(58)
out.putPrefixedString(self.start_cursor_)
if (self.has_end_cursor_):
out.putVarInt32(66)
out.putPrefixedString(self.end_cursor_)
if (self.has_offset_):
out.putVarInt32(80)
out.putVarInt32(self.offset_)
if (self.has_limit_):
out.putVarInt32(88)
out.putVarInt32(self.limit_)
def OutputPartial(self, out):
for i in xrange(len(self.projection_)):
out.putVarInt32(18)
out.putVarInt32(self.projection_[i].ByteSizePartial())
self.projection_[i].OutputPartial(out)
for i in xrange(len(self.kind_)):
out.putVarInt32(26)
out.putVarInt32(self.kind_[i].ByteSizePartial())
self.kind_[i].OutputPartial(out)
if (self.has_filter_):
out.putVarInt32(34)
out.putVarInt32(self.filter_.ByteSizePartial())
self.filter_.OutputPartial(out)
for i in xrange(len(self.order_)):
out.putVarInt32(42)
out.putVarInt32(self.order_[i].ByteSizePartial())
self.order_[i].OutputPartial(out)
for i in xrange(len(self.group_by_)):
out.putVarInt32(50)
out.putVarInt32(self.group_by_[i].ByteSizePartial())
self.group_by_[i].OutputPartial(out)
if (self.has_start_cursor_):
out.putVarInt32(58)
out.putPrefixedString(self.start_cursor_)
if (self.has_end_cursor_):
out.putVarInt32(66)
out.putPrefixedString(self.end_cursor_)
if (self.has_offset_):
out.putVarInt32(80)
out.putVarInt32(self.offset_)
if (self.has_limit_):
out.putVarInt32(88)
out.putVarInt32(self.limit_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_projection().TryMerge(tmp)
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_kind().TryMerge(tmp)
continue
if tt == 34:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_filter().TryMerge(tmp)
continue
if tt == 42:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_order().TryMerge(tmp)
continue
if tt == 50:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_group_by().TryMerge(tmp)
continue
if tt == 58:
self.set_start_cursor(d.getPrefixedString())
continue
if tt == 66:
self.set_end_cursor(d.getPrefixedString())
continue
if tt == 80:
self.set_offset(d.getVarInt32())
continue
if tt == 88:
self.set_limit(d.getVarInt32())
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.projection_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("projection%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
cnt=0
for e in self.kind_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("kind%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_filter_:
res+=prefix+"filter <\n"
res+=self.filter_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt=0
for e in self.order_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("order%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
cnt=0
for e in self.group_by_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("group_by%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_start_cursor_: res+=prefix+("start_cursor: %s\n" % self.DebugFormatString(self.start_cursor_))
if self.has_end_cursor_: res+=prefix+("end_cursor: %s\n" % self.DebugFormatString(self.end_cursor_))
if self.has_offset_: res+=prefix+("offset: %s\n" % self.DebugFormatInt32(self.offset_))
if self.has_limit_: res+=prefix+("limit: %s\n" % self.DebugFormatInt32(self.limit_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kprojection = 2
kkind = 3
kfilter = 4
korder = 5
kgroup_by = 6
kstart_cursor = 7
kend_cursor = 8
koffset = 10
klimit = 11
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
2: "projection",
3: "kind",
4: "filter",
5: "order",
6: "group_by",
7: "start_cursor",
8: "end_cursor",
10: "offset",
11: "limit",
}, 11)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.STRING,
6: ProtocolBuffer.Encoder.STRING,
7: ProtocolBuffer.Encoder.STRING,
8: ProtocolBuffer.Encoder.STRING,
10: ProtocolBuffer.Encoder.NUMERIC,
11: ProtocolBuffer.Encoder.NUMERIC,
}, 11, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.Query'
class KindExpression(ProtocolBuffer.ProtocolMessage):
has_name_ = 0
name_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def name(self): return self.name_
def set_name(self, x):
self.has_name_ = 1
self.name_ = x
def clear_name(self):
if self.has_name_:
self.has_name_ = 0
self.name_ = ""
def has_name(self): return self.has_name_
def MergeFrom(self, x):
assert x is not self
if (x.has_name()): self.set_name(x.name())
def Equals(self, x):
if x is self: return 1
if self.has_name_ != x.has_name_: return 0
if self.has_name_ and self.name_ != x.name_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: name not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.name_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_name_):
n += 1
n += self.lengthString(len(self.name_))
return n
def Clear(self):
self.clear_name()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.name_)
def OutputPartial(self, out):
if (self.has_name_):
out.putVarInt32(10)
out.putPrefixedString(self.name_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_name(d.getPrefixedString())
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kname = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "name",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.KindExpression'
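# PropertyReference (below) identifies a single property by name (required
# string, tag 2). PropertyExpression, PropertyOrder and PropertyFilter below
# all embed it instead of repeating a bare string field.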
class PropertyReference(ProtocolBuffer.ProtocolMessage):
has_name_ = 0
name_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def name(self): return self.name_
def set_name(self, x):
self.has_name_ = 1
self.name_ = x
def clear_name(self):
if self.has_name_:
self.has_name_ = 0
self.name_ = ""
def has_name(self): return self.has_name_
def MergeFrom(self, x):
assert x is not self
if (x.has_name()): self.set_name(x.name())
def Equals(self, x):
if x is self: return 1
if self.has_name_ != x.has_name_: return 0
if self.has_name_ and self.name_ != x.name_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: name not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.name_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_name_):
n += 1
n += self.lengthString(len(self.name_))
return n
def Clear(self):
self.clear_name()
def OutputUnchecked(self, out):
out.putVarInt32(18)
out.putPrefixedString(self.name_)
def OutputPartial(self, out):
if (self.has_name_):
out.putVarInt32(18)
out.putPrefixedString(self.name_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 18:
self.set_name(d.getPrefixedString())
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kname = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
2: "name",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.PropertyReference'
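# PropertyExpression (below) is one entry of a Query projection: a required
# PropertyReference plus an optional aggregation_function, of which only
# FIRST is defined here. A minimal sketch:
#
#   expr = PropertyExpression()
#   expr.mutable_property().set_name('salary')
#   expr.set_aggregation_function(PropertyExpression.FIRST)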
class PropertyExpression(ProtocolBuffer.ProtocolMessage):
# AggregationFunction values
FIRST = 1
_AggregationFunction_NAMES = {
1: "FIRST",
}
def AggregationFunction_Name(cls, x): return cls._AggregationFunction_NAMES.get(x, "")
AggregationFunction_Name = classmethod(AggregationFunction_Name)
has_property_ = 0
has_aggregation_function_ = 0
aggregation_function_ = 0
def __init__(self, contents=None):
self.property_ = PropertyReference()
if contents is not None: self.MergeFromString(contents)
def property(self): return self.property_
def mutable_property(self): self.has_property_ = 1; return self.property_
def clear_property(self):self.has_property_ = 0; self.property_.Clear()
def has_property(self): return self.has_property_
def aggregation_function(self): return self.aggregation_function_
def set_aggregation_function(self, x):
self.has_aggregation_function_ = 1
self.aggregation_function_ = x
def clear_aggregation_function(self):
if self.has_aggregation_function_:
self.has_aggregation_function_ = 0
self.aggregation_function_ = 0
def has_aggregation_function(self): return self.has_aggregation_function_
def MergeFrom(self, x):
assert x is not self
if (x.has_property()): self.mutable_property().MergeFrom(x.property())
if (x.has_aggregation_function()): self.set_aggregation_function(x.aggregation_function())
def Equals(self, x):
if x is self: return 1
if self.has_property_ != x.has_property_: return 0
if self.has_property_ and self.property_ != x.property_: return 0
if self.has_aggregation_function_ != x.has_aggregation_function_: return 0
if self.has_aggregation_function_ and self.aggregation_function_ != x.aggregation_function_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_property_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: property not set.')
elif not self.property_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.property_.ByteSize())
if (self.has_aggregation_function_): n += 1 + self.lengthVarInt64(self.aggregation_function_)
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_property_):
n += 1
n += self.lengthString(self.property_.ByteSizePartial())
if (self.has_aggregation_function_): n += 1 + self.lengthVarInt64(self.aggregation_function_)
return n
def Clear(self):
self.clear_property()
self.clear_aggregation_function()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.property_.ByteSize())
self.property_.OutputUnchecked(out)
if (self.has_aggregation_function_):
out.putVarInt32(16)
out.putVarInt32(self.aggregation_function_)
def OutputPartial(self, out):
if (self.has_property_):
out.putVarInt32(10)
out.putVarInt32(self.property_.ByteSizePartial())
self.property_.OutputPartial(out)
if (self.has_aggregation_function_):
out.putVarInt32(16)
out.putVarInt32(self.aggregation_function_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_property().TryMerge(tmp)
continue
if tt == 16:
self.set_aggregation_function(d.getVarInt32())
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_property_:
res+=prefix+"property <\n"
res+=self.property_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_aggregation_function_: res+=prefix+("aggregation_function: %s\n" % self.DebugFormatInt32(self.aggregation_function_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kproperty = 1
kaggregation_function = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "property",
2: "aggregation_function",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.PropertyExpression'
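# PropertyOrder (below) is one sort key of a Query: a required
# PropertyReference and an optional direction, defaulting to ASCENDING
# (direction_ defaults to 1). A minimal sketch:
#
#   order = PropertyOrder()
#   order.mutable_property().set_name('start_date')
#   order.set_direction(PropertyOrder.DESCENDING)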
class PropertyOrder(ProtocolBuffer.ProtocolMessage):
# Direction values
ASCENDING = 1
DESCENDING = 2
_Direction_NAMES = {
1: "ASCENDING",
2: "DESCENDING",
}
def Direction_Name(cls, x): return cls._Direction_NAMES.get(x, "")
Direction_Name = classmethod(Direction_Name)
has_property_ = 0
has_direction_ = 0
direction_ = 1
def __init__(self, contents=None):
self.property_ = PropertyReference()
if contents is not None: self.MergeFromString(contents)
def property(self): return self.property_
def mutable_property(self): self.has_property_ = 1; return self.property_
def clear_property(self):self.has_property_ = 0; self.property_.Clear()
def has_property(self): return self.has_property_
def direction(self): return self.direction_
def set_direction(self, x):
self.has_direction_ = 1
self.direction_ = x
def clear_direction(self):
if self.has_direction_:
self.has_direction_ = 0
self.direction_ = 1
def has_direction(self): return self.has_direction_
def MergeFrom(self, x):
assert x is not self
if (x.has_property()): self.mutable_property().MergeFrom(x.property())
if (x.has_direction()): self.set_direction(x.direction())
def Equals(self, x):
if x is self: return 1
if self.has_property_ != x.has_property_: return 0
if self.has_property_ and self.property_ != x.property_: return 0
if self.has_direction_ != x.has_direction_: return 0
if self.has_direction_ and self.direction_ != x.direction_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_property_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: property not set.')
elif not self.property_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.property_.ByteSize())
if (self.has_direction_): n += 1 + self.lengthVarInt64(self.direction_)
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_property_):
n += 1
n += self.lengthString(self.property_.ByteSizePartial())
if (self.has_direction_): n += 1 + self.lengthVarInt64(self.direction_)
return n
def Clear(self):
self.clear_property()
self.clear_direction()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.property_.ByteSize())
self.property_.OutputUnchecked(out)
if (self.has_direction_):
out.putVarInt32(16)
out.putVarInt32(self.direction_)
def OutputPartial(self, out):
if (self.has_property_):
out.putVarInt32(10)
out.putVarInt32(self.property_.ByteSizePartial())
self.property_.OutputPartial(out)
if (self.has_direction_):
out.putVarInt32(16)
out.putVarInt32(self.direction_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_property().TryMerge(tmp)
continue
if tt == 16:
self.set_direction(d.getVarInt32())
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_property_:
res+=prefix+"property <\n"
res+=self.property_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_direction_: res+=prefix+("direction: %s\n" % self.DebugFormatInt32(self.direction_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kproperty = 1
kdirection = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "property",
2: "direction",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.PropertyOrder'
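# Filter (below) acts as a tagged union: callers are expected to set exactly
# one of composite_filter or property_filter, although the generated code
# does not enforce that. Both submessages are lazily allocated under
# lazy_init_lock_, and the plain accessors create an empty instance as a
# side effect, so use the has_*() methods when only testing for presence.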
class Filter(ProtocolBuffer.ProtocolMessage):
has_composite_filter_ = 0
composite_filter_ = None
has_property_filter_ = 0
property_filter_ = None
def __init__(self, contents=None):
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def composite_filter(self):
if self.composite_filter_ is None:
self.lazy_init_lock_.acquire()
try:
if self.composite_filter_ is None: self.composite_filter_ = CompositeFilter()
finally:
self.lazy_init_lock_.release()
return self.composite_filter_
def mutable_composite_filter(self): self.has_composite_filter_ = 1; return self.composite_filter()
def clear_composite_filter(self):
# Warning: this method does not acquire the lock.
if self.has_composite_filter_:
self.has_composite_filter_ = 0;
if self.composite_filter_ is not None: self.composite_filter_.Clear()
def has_composite_filter(self): return self.has_composite_filter_
def property_filter(self):
if self.property_filter_ is None:
self.lazy_init_lock_.acquire()
try:
if self.property_filter_ is None: self.property_filter_ = PropertyFilter()
finally:
self.lazy_init_lock_.release()
return self.property_filter_
def mutable_property_filter(self): self.has_property_filter_ = 1; return self.property_filter()
def clear_property_filter(self):
# Warning: this method does not acquire the lock.
if self.has_property_filter_:
self.has_property_filter_ = 0;
if self.property_filter_ is not None: self.property_filter_.Clear()
def has_property_filter(self): return self.has_property_filter_
def MergeFrom(self, x):
assert x is not self
if (x.has_composite_filter()): self.mutable_composite_filter().MergeFrom(x.composite_filter())
if (x.has_property_filter()): self.mutable_property_filter().MergeFrom(x.property_filter())
def Equals(self, x):
if x is self: return 1
if self.has_composite_filter_ != x.has_composite_filter_: return 0
if self.has_composite_filter_ and self.composite_filter_ != x.composite_filter_: return 0
if self.has_property_filter_ != x.has_property_filter_: return 0
if self.has_property_filter_ and self.property_filter_ != x.property_filter_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_composite_filter_ and not self.composite_filter_.IsInitialized(debug_strs)): initialized = 0
if (self.has_property_filter_ and not self.property_filter_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
if (self.has_composite_filter_): n += 1 + self.lengthString(self.composite_filter_.ByteSize())
if (self.has_property_filter_): n += 1 + self.lengthString(self.property_filter_.ByteSize())
return n
def ByteSizePartial(self):
n = 0
if (self.has_composite_filter_): n += 1 + self.lengthString(self.composite_filter_.ByteSizePartial())
if (self.has_property_filter_): n += 1 + self.lengthString(self.property_filter_.ByteSizePartial())
return n
def Clear(self):
self.clear_composite_filter()
self.clear_property_filter()
def OutputUnchecked(self, out):
if (self.has_composite_filter_):
out.putVarInt32(10)
out.putVarInt32(self.composite_filter_.ByteSize())
self.composite_filter_.OutputUnchecked(out)
if (self.has_property_filter_):
out.putVarInt32(18)
out.putVarInt32(self.property_filter_.ByteSize())
self.property_filter_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_composite_filter_):
out.putVarInt32(10)
out.putVarInt32(self.composite_filter_.ByteSizePartial())
self.composite_filter_.OutputPartial(out)
if (self.has_property_filter_):
out.putVarInt32(18)
out.putVarInt32(self.property_filter_.ByteSizePartial())
self.property_filter_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_composite_filter().TryMerge(tmp)
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_property_filter().TryMerge(tmp)
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_composite_filter_:
res+=prefix+"composite_filter <\n"
res+=self.composite_filter_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_property_filter_:
res+=prefix+"property_filter <\n"
res+=self.property_filter_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kcomposite_filter = 1
kproperty_filter = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "composite_filter",
2: "property_filter",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.Filter'
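# CompositeFilter (below) combines repeated sub-Filters with a required
# operator; only AND (1) is defined. Sub-filters are appended in place:
#
#   cf = CompositeFilter()
#   cf.set_operator(CompositeFilter.AND)
#   sub = cf.add_filter()   # appends and returns a new, empty Filter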
class CompositeFilter(ProtocolBuffer.ProtocolMessage):
# Operator values
AND = 1
_Operator_NAMES = {
1: "AND",
}
def Operator_Name(cls, x): return cls._Operator_NAMES.get(x, "")
Operator_Name = classmethod(Operator_Name)
has_operator_ = 0
operator_ = 0
def __init__(self, contents=None):
self.filter_ = []
if contents is not None: self.MergeFromString(contents)
def operator(self): return self.operator_
def set_operator(self, x):
self.has_operator_ = 1
self.operator_ = x
def clear_operator(self):
if self.has_operator_:
self.has_operator_ = 0
self.operator_ = 0
def has_operator(self): return self.has_operator_
def filter_size(self): return len(self.filter_)
def filter_list(self): return self.filter_
def filter(self, i):
return self.filter_[i]
def mutable_filter(self, i):
return self.filter_[i]
def add_filter(self):
x = Filter()
self.filter_.append(x)
return x
def clear_filter(self):
self.filter_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_operator()): self.set_operator(x.operator())
for i in xrange(x.filter_size()): self.add_filter().CopyFrom(x.filter(i))
def Equals(self, x):
if x is self: return 1
if self.has_operator_ != x.has_operator_: return 0
if self.has_operator_ and self.operator_ != x.operator_: return 0
if len(self.filter_) != len(x.filter_): return 0
for e1, e2 in zip(self.filter_, x.filter_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_operator_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: operator not set.')
for p in self.filter_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.operator_)
n += 1 * len(self.filter_)
for i in xrange(len(self.filter_)): n += self.lengthString(self.filter_[i].ByteSize())
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_operator_):
n += 1
n += self.lengthVarInt64(self.operator_)
n += 1 * len(self.filter_)
for i in xrange(len(self.filter_)): n += self.lengthString(self.filter_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_operator()
self.clear_filter()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt32(self.operator_)
for i in xrange(len(self.filter_)):
out.putVarInt32(18)
out.putVarInt32(self.filter_[i].ByteSize())
self.filter_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_operator_):
out.putVarInt32(8)
out.putVarInt32(self.operator_)
for i in xrange(len(self.filter_)):
out.putVarInt32(18)
out.putVarInt32(self.filter_[i].ByteSizePartial())
self.filter_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_operator(d.getVarInt32())
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_filter().TryMerge(tmp)
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_operator_: res+=prefix+("operator: %s\n" % self.DebugFormatInt32(self.operator_))
cnt=0
for e in self.filter_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("filter%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
koperator = 1
kfilter = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "operator",
2: "filter",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.CompositeFilter'
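# PropertyFilter (below) is a single comparison: property, operator
# (LESS_THAN through EQUAL, plus HAS_ANCESTOR) and a Value, all three
# required, so IsInitialized() fails until each is set. A minimal sketch:
#
#   pf = PropertyFilter()
#   pf.mutable_property().set_name('__key__')
#   pf.set_operator(PropertyFilter.HAS_ANCESTOR)
#   pf.mutable_value()      # fill in the ancestor key Value here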
class PropertyFilter(ProtocolBuffer.ProtocolMessage):
# Operator values
LESS_THAN = 1
LESS_THAN_OR_EQUAL = 2
GREATER_THAN = 3
GREATER_THAN_OR_EQUAL = 4
EQUAL = 5
HAS_ANCESTOR = 11
_Operator_NAMES = {
1: "LESS_THAN",
2: "LESS_THAN_OR_EQUAL",
3: "GREATER_THAN",
4: "GREATER_THAN_OR_EQUAL",
5: "EQUAL",
11: "HAS_ANCESTOR",
}
def Operator_Name(cls, x): return cls._Operator_NAMES.get(x, "")
Operator_Name = classmethod(Operator_Name)
has_property_ = 0
has_operator_ = 0
operator_ = 0
has_value_ = 0
def __init__(self, contents=None):
self.property_ = PropertyReference()
self.value_ = Value()
if contents is not None: self.MergeFromString(contents)
def property(self): return self.property_
def mutable_property(self): self.has_property_ = 1; return self.property_
def clear_property(self):self.has_property_ = 0; self.property_.Clear()
def has_property(self): return self.has_property_
def operator(self): return self.operator_
def set_operator(self, x):
self.has_operator_ = 1
self.operator_ = x
def clear_operator(self):
if self.has_operator_:
self.has_operator_ = 0
self.operator_ = 0
def has_operator(self): return self.has_operator_
def value(self): return self.value_
def mutable_value(self): self.has_value_ = 1; return self.value_
def clear_value(self):self.has_value_ = 0; self.value_.Clear()
def has_value(self): return self.has_value_
def MergeFrom(self, x):
assert x is not self
if (x.has_property()): self.mutable_property().MergeFrom(x.property())
if (x.has_operator()): self.set_operator(x.operator())
if (x.has_value()): self.mutable_value().MergeFrom(x.value())
def Equals(self, x):
if x is self: return 1
if self.has_property_ != x.has_property_: return 0
if self.has_property_ and self.property_ != x.property_: return 0
if self.has_operator_ != x.has_operator_: return 0
if self.has_operator_ and self.operator_ != x.operator_: return 0
if self.has_value_ != x.has_value_: return 0
if self.has_value_ and self.value_ != x.value_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_property_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: property not set.')
elif not self.property_.IsInitialized(debug_strs): initialized = 0
if (not self.has_operator_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: operator not set.')
if (not self.has_value_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: value not set.')
elif not self.value_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.property_.ByteSize())
n += self.lengthVarInt64(self.operator_)
n += self.lengthString(self.value_.ByteSize())
return n + 3
def ByteSizePartial(self):
n = 0
if (self.has_property_):
n += 1
n += self.lengthString(self.property_.ByteSizePartial())
if (self.has_operator_):
n += 1
n += self.lengthVarInt64(self.operator_)
if (self.has_value_):
n += 1
n += self.lengthString(self.value_.ByteSizePartial())
return n
def Clear(self):
self.clear_property()
self.clear_operator()
self.clear_value()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.property_.ByteSize())
self.property_.OutputUnchecked(out)
out.putVarInt32(16)
out.putVarInt32(self.operator_)
out.putVarInt32(26)
out.putVarInt32(self.value_.ByteSize())
self.value_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_property_):
out.putVarInt32(10)
out.putVarInt32(self.property_.ByteSizePartial())
self.property_.OutputPartial(out)
if (self.has_operator_):
out.putVarInt32(16)
out.putVarInt32(self.operator_)
if (self.has_value_):
out.putVarInt32(26)
out.putVarInt32(self.value_.ByteSizePartial())
self.value_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_property().TryMerge(tmp)
continue
if tt == 16:
self.set_operator(d.getVarInt32())
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_value().TryMerge(tmp)
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_property_:
res+=prefix+"property <\n"
res+=self.property_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_operator_: res+=prefix+("operator: %s\n" % self.DebugFormatInt32(self.operator_))
if self.has_value_:
res+=prefix+"value <\n"
res+=self.value_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kproperty = 1
koperator = 2
kvalue = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "property",
2: "operator",
3: "value",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.PropertyFilter'
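# GqlQuery (below) carries a GQL query string together with its bindings:
# name_arg holds named bindings and number_arg positional ones, both as
# GqlQueryArg messages; allow_literal presumably controls whether literal
# values may appear inline in the query string. A minimal sketch:
#
#   q = GqlQuery()
#   q.set_query_string('SELECT * FROM Employee WHERE name = @name')
#   q.add_name_arg().set_name('name')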
class GqlQuery(ProtocolBuffer.ProtocolMessage):
has_query_string_ = 0
query_string_ = ""
has_allow_literal_ = 0
allow_literal_ = 0
def __init__(self, contents=None):
self.name_arg_ = []
self.number_arg_ = []
if contents is not None: self.MergeFromString(contents)
def query_string(self): return self.query_string_
def set_query_string(self, x):
self.has_query_string_ = 1
self.query_string_ = x
def clear_query_string(self):
if self.has_query_string_:
self.has_query_string_ = 0
self.query_string_ = ""
def has_query_string(self): return self.has_query_string_
def allow_literal(self): return self.allow_literal_
def set_allow_literal(self, x):
self.has_allow_literal_ = 1
self.allow_literal_ = x
def clear_allow_literal(self):
if self.has_allow_literal_:
self.has_allow_literal_ = 0
self.allow_literal_ = 0
def has_allow_literal(self): return self.has_allow_literal_
def name_arg_size(self): return len(self.name_arg_)
def name_arg_list(self): return self.name_arg_
def name_arg(self, i):
return self.name_arg_[i]
def mutable_name_arg(self, i):
return self.name_arg_[i]
def add_name_arg(self):
x = GqlQueryArg()
self.name_arg_.append(x)
return x
def clear_name_arg(self):
self.name_arg_ = []
def number_arg_size(self): return len(self.number_arg_)
def number_arg_list(self): return self.number_arg_
def number_arg(self, i):
return self.number_arg_[i]
def mutable_number_arg(self, i):
return self.number_arg_[i]
def add_number_arg(self):
x = GqlQueryArg()
self.number_arg_.append(x)
return x
def clear_number_arg(self):
self.number_arg_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_query_string()): self.set_query_string(x.query_string())
if (x.has_allow_literal()): self.set_allow_literal(x.allow_literal())
for i in xrange(x.name_arg_size()): self.add_name_arg().CopyFrom(x.name_arg(i))
for i in xrange(x.number_arg_size()): self.add_number_arg().CopyFrom(x.number_arg(i))
def Equals(self, x):
if x is self: return 1
if self.has_query_string_ != x.has_query_string_: return 0
if self.has_query_string_ and self.query_string_ != x.query_string_: return 0
if self.has_allow_literal_ != x.has_allow_literal_: return 0
if self.has_allow_literal_ and self.allow_literal_ != x.allow_literal_: return 0
if len(self.name_arg_) != len(x.name_arg_): return 0
for e1, e2 in zip(self.name_arg_, x.name_arg_):
if e1 != e2: return 0
if len(self.number_arg_) != len(x.number_arg_): return 0
for e1, e2 in zip(self.number_arg_, x.number_arg_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_query_string_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: query_string not set.')
for p in self.name_arg_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.number_arg_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.query_string_))
if (self.has_allow_literal_): n += 2
n += 1 * len(self.name_arg_)
for i in xrange(len(self.name_arg_)): n += self.lengthString(self.name_arg_[i].ByteSize())
n += 1 * len(self.number_arg_)
for i in xrange(len(self.number_arg_)): n += self.lengthString(self.number_arg_[i].ByteSize())
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_query_string_):
n += 1
n += self.lengthString(len(self.query_string_))
if (self.has_allow_literal_): n += 2
n += 1 * len(self.name_arg_)
for i in xrange(len(self.name_arg_)): n += self.lengthString(self.name_arg_[i].ByteSizePartial())
n += 1 * len(self.number_arg_)
for i in xrange(len(self.number_arg_)): n += self.lengthString(self.number_arg_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_query_string()
self.clear_allow_literal()
self.clear_name_arg()
self.clear_number_arg()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.query_string_)
if (self.has_allow_literal_):
out.putVarInt32(16)
out.putBoolean(self.allow_literal_)
for i in xrange(len(self.name_arg_)):
out.putVarInt32(26)
out.putVarInt32(self.name_arg_[i].ByteSize())
self.name_arg_[i].OutputUnchecked(out)
for i in xrange(len(self.number_arg_)):
out.putVarInt32(34)
out.putVarInt32(self.number_arg_[i].ByteSize())
self.number_arg_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_query_string_):
out.putVarInt32(10)
out.putPrefixedString(self.query_string_)
if (self.has_allow_literal_):
out.putVarInt32(16)
out.putBoolean(self.allow_literal_)
for i in xrange(len(self.name_arg_)):
out.putVarInt32(26)
out.putVarInt32(self.name_arg_[i].ByteSizePartial())
self.name_arg_[i].OutputPartial(out)
for i in xrange(len(self.number_arg_)):
out.putVarInt32(34)
out.putVarInt32(self.number_arg_[i].ByteSizePartial())
self.number_arg_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_query_string(d.getPrefixedString())
continue
if tt == 16:
self.set_allow_literal(d.getBoolean())
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_name_arg().TryMerge(tmp)
continue
if tt == 34:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_number_arg().TryMerge(tmp)
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_query_string_: res+=prefix+("query_string: %s\n" % self.DebugFormatString(self.query_string_))
if self.has_allow_literal_: res+=prefix+("allow_literal: %s\n" % self.DebugFormatBool(self.allow_literal_))
cnt=0
for e in self.name_arg_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("name_arg%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
cnt=0
for e in self.number_arg_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("number_arg%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kquery_string = 1
kallow_literal = 2
kname_arg = 3
knumber_arg = 4
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "query_string",
2: "allow_literal",
3: "name_arg",
4: "number_arg",
}, 4)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.STRING,
}, 4, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.GqlQuery'
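# GqlQueryArg (below) is one GQL binding: an optional name plus either a
# Value or a cursor string. The value submessage is lazily created, so test
# has_value() rather than calling value() when only probing for presence.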
class GqlQueryArg(ProtocolBuffer.ProtocolMessage):
has_name_ = 0
name_ = ""
has_value_ = 0
value_ = None
has_cursor_ = 0
cursor_ = ""
def __init__(self, contents=None):
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def name(self): return self.name_
def set_name(self, x):
self.has_name_ = 1
self.name_ = x
def clear_name(self):
if self.has_name_:
self.has_name_ = 0
self.name_ = ""
def has_name(self): return self.has_name_
def value(self):
if self.value_ is None:
self.lazy_init_lock_.acquire()
try:
if self.value_ is None: self.value_ = Value()
finally:
self.lazy_init_lock_.release()
return self.value_
def mutable_value(self): self.has_value_ = 1; return self.value()
def clear_value(self):
# Warning: this method does not acquire the lock.
if self.has_value_:
self.has_value_ = 0;
if self.value_ is not None: self.value_.Clear()
def has_value(self): return self.has_value_
def cursor(self): return self.cursor_
def set_cursor(self, x):
self.has_cursor_ = 1
self.cursor_ = x
def clear_cursor(self):
if self.has_cursor_:
self.has_cursor_ = 0
self.cursor_ = ""
def has_cursor(self): return self.has_cursor_
def MergeFrom(self, x):
assert x is not self
if (x.has_name()): self.set_name(x.name())
if (x.has_value()): self.mutable_value().MergeFrom(x.value())
if (x.has_cursor()): self.set_cursor(x.cursor())
def Equals(self, x):
if x is self: return 1
if self.has_name_ != x.has_name_: return 0
if self.has_name_ and self.name_ != x.name_: return 0
if self.has_value_ != x.has_value_: return 0
if self.has_value_ and self.value_ != x.value_: return 0
if self.has_cursor_ != x.has_cursor_: return 0
if self.has_cursor_ and self.cursor_ != x.cursor_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_value_ and not self.value_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
if (self.has_name_): n += 1 + self.lengthString(len(self.name_))
if (self.has_value_): n += 1 + self.lengthString(self.value_.ByteSize())
if (self.has_cursor_): n += 1 + self.lengthString(len(self.cursor_))
return n
def ByteSizePartial(self):
n = 0
if (self.has_name_): n += 1 + self.lengthString(len(self.name_))
if (self.has_value_): n += 1 + self.lengthString(self.value_.ByteSizePartial())
if (self.has_cursor_): n += 1 + self.lengthString(len(self.cursor_))
return n
def Clear(self):
self.clear_name()
self.clear_value()
self.clear_cursor()
def OutputUnchecked(self, out):
if (self.has_name_):
out.putVarInt32(10)
out.putPrefixedString(self.name_)
if (self.has_value_):
out.putVarInt32(18)
out.putVarInt32(self.value_.ByteSize())
self.value_.OutputUnchecked(out)
if (self.has_cursor_):
out.putVarInt32(26)
out.putPrefixedString(self.cursor_)
def OutputPartial(self, out):
if (self.has_name_):
out.putVarInt32(10)
out.putPrefixedString(self.name_)
if (self.has_value_):
out.putVarInt32(18)
out.putVarInt32(self.value_.ByteSizePartial())
self.value_.OutputPartial(out)
if (self.has_cursor_):
out.putVarInt32(26)
out.putPrefixedString(self.cursor_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_name(d.getPrefixedString())
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_value().TryMerge(tmp)
continue
if tt == 26:
self.set_cursor(d.getPrefixedString())
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
if self.has_value_:
res+=prefix+"value <\n"
res+=self.value_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_cursor_: res+=prefix+("cursor: %s\n" % self.DebugFormatString(self.cursor_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kname = 1
kvalue = 2
kcursor = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "name",
2: "value",
3: "cursor",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.GqlQueryArg'
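# QueryResultBatch (below) is one page of query results: the repeated
# entity_result list plus paging bookkeeping -- skipped_cursor and
# end_cursor, more_results (NOT_FINISHED / MORE_RESULTS_AFTER_LIMIT /
# NO_MORE_RESULTS), skipped_results, and the snapshot_version the batch was
# read at. entity_result_type and more_results are required.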
class QueryResultBatch(ProtocolBuffer.ProtocolMessage):
# MoreResultsType values
NOT_FINISHED = 1
MORE_RESULTS_AFTER_LIMIT = 2
NO_MORE_RESULTS = 3
_MoreResultsType_NAMES = {
1: "NOT_FINISHED",
2: "MORE_RESULTS_AFTER_LIMIT",
3: "NO_MORE_RESULTS",
}
def MoreResultsType_Name(cls, x): return cls._MoreResultsType_NAMES.get(x, "")
MoreResultsType_Name = classmethod(MoreResultsType_Name)
has_entity_result_type_ = 0
entity_result_type_ = 0
has_skipped_cursor_ = 0
skipped_cursor_ = ""
has_end_cursor_ = 0
end_cursor_ = ""
has_more_results_ = 0
more_results_ = 0
has_skipped_results_ = 0
skipped_results_ = 0
has_snapshot_version_ = 0
snapshot_version_ = 0
def __init__(self, contents=None):
self.entity_result_ = []
if contents is not None: self.MergeFromString(contents)
def entity_result_type(self): return self.entity_result_type_
def set_entity_result_type(self, x):
self.has_entity_result_type_ = 1
self.entity_result_type_ = x
def clear_entity_result_type(self):
if self.has_entity_result_type_:
self.has_entity_result_type_ = 0
self.entity_result_type_ = 0
def has_entity_result_type(self): return self.has_entity_result_type_
def entity_result_size(self): return len(self.entity_result_)
def entity_result_list(self): return self.entity_result_
def entity_result(self, i):
return self.entity_result_[i]
def mutable_entity_result(self, i):
return self.entity_result_[i]
def add_entity_result(self):
x = EntityResult()
self.entity_result_.append(x)
return x
def clear_entity_result(self):
self.entity_result_ = []
def skipped_cursor(self): return self.skipped_cursor_
def set_skipped_cursor(self, x):
self.has_skipped_cursor_ = 1
self.skipped_cursor_ = x
def clear_skipped_cursor(self):
if self.has_skipped_cursor_:
self.has_skipped_cursor_ = 0
self.skipped_cursor_ = ""
def has_skipped_cursor(self): return self.has_skipped_cursor_
def end_cursor(self): return self.end_cursor_
def set_end_cursor(self, x):
self.has_end_cursor_ = 1
self.end_cursor_ = x
def clear_end_cursor(self):
if self.has_end_cursor_:
self.has_end_cursor_ = 0
self.end_cursor_ = ""
def has_end_cursor(self): return self.has_end_cursor_
def more_results(self): return self.more_results_
def set_more_results(self, x):
self.has_more_results_ = 1
self.more_results_ = x
def clear_more_results(self):
if self.has_more_results_:
self.has_more_results_ = 0
self.more_results_ = 0
def has_more_results(self): return self.has_more_results_
def skipped_results(self): return self.skipped_results_
def set_skipped_results(self, x):
self.has_skipped_results_ = 1
self.skipped_results_ = x
def clear_skipped_results(self):
if self.has_skipped_results_:
self.has_skipped_results_ = 0
self.skipped_results_ = 0
def has_skipped_results(self): return self.has_skipped_results_
def snapshot_version(self): return self.snapshot_version_
def set_snapshot_version(self, x):
self.has_snapshot_version_ = 1
self.snapshot_version_ = x
def clear_snapshot_version(self):
if self.has_snapshot_version_:
self.has_snapshot_version_ = 0
self.snapshot_version_ = 0
def has_snapshot_version(self): return self.has_snapshot_version_
def MergeFrom(self, x):
assert x is not self
if (x.has_entity_result_type()): self.set_entity_result_type(x.entity_result_type())
for i in xrange(x.entity_result_size()): self.add_entity_result().CopyFrom(x.entity_result(i))
if (x.has_skipped_cursor()): self.set_skipped_cursor(x.skipped_cursor())
if (x.has_end_cursor()): self.set_end_cursor(x.end_cursor())
if (x.has_more_results()): self.set_more_results(x.more_results())
if (x.has_skipped_results()): self.set_skipped_results(x.skipped_results())
if (x.has_snapshot_version()): self.set_snapshot_version(x.snapshot_version())
def Equals(self, x):
if x is self: return 1
if self.has_entity_result_type_ != x.has_entity_result_type_: return 0
if self.has_entity_result_type_ and self.entity_result_type_ != x.entity_result_type_: return 0
if len(self.entity_result_) != len(x.entity_result_): return 0
for e1, e2 in zip(self.entity_result_, x.entity_result_):
if e1 != e2: return 0
if self.has_skipped_cursor_ != x.has_skipped_cursor_: return 0
if self.has_skipped_cursor_ and self.skipped_cursor_ != x.skipped_cursor_: return 0
if self.has_end_cursor_ != x.has_end_cursor_: return 0
if self.has_end_cursor_ and self.end_cursor_ != x.end_cursor_: return 0
if self.has_more_results_ != x.has_more_results_: return 0
if self.has_more_results_ and self.more_results_ != x.more_results_: return 0
if self.has_skipped_results_ != x.has_skipped_results_: return 0
if self.has_skipped_results_ and self.skipped_results_ != x.skipped_results_: return 0
if self.has_snapshot_version_ != x.has_snapshot_version_: return 0
if self.has_snapshot_version_ and self.snapshot_version_ != x.snapshot_version_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_entity_result_type_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: entity_result_type not set.')
for p in self.entity_result_:
if not p.IsInitialized(debug_strs): initialized=0
if (not self.has_more_results_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: more_results not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.entity_result_type_)
n += 1 * len(self.entity_result_)
for i in xrange(len(self.entity_result_)): n += self.lengthString(self.entity_result_[i].ByteSize())
if (self.has_skipped_cursor_): n += 1 + self.lengthString(len(self.skipped_cursor_))
if (self.has_end_cursor_): n += 1 + self.lengthString(len(self.end_cursor_))
n += self.lengthVarInt64(self.more_results_)
if (self.has_skipped_results_): n += 1 + self.lengthVarInt64(self.skipped_results_)
if (self.has_snapshot_version_): n += 1 + self.lengthVarInt64(self.snapshot_version_)
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_entity_result_type_):
n += 1
n += self.lengthVarInt64(self.entity_result_type_)
n += 1 * len(self.entity_result_)
for i in xrange(len(self.entity_result_)): n += self.lengthString(self.entity_result_[i].ByteSizePartial())
if (self.has_skipped_cursor_): n += 1 + self.lengthString(len(self.skipped_cursor_))
if (self.has_end_cursor_): n += 1 + self.lengthString(len(self.end_cursor_))
if (self.has_more_results_):
n += 1
n += self.lengthVarInt64(self.more_results_)
if (self.has_skipped_results_): n += 1 + self.lengthVarInt64(self.skipped_results_)
if (self.has_snapshot_version_): n += 1 + self.lengthVarInt64(self.snapshot_version_)
return n
def Clear(self):
self.clear_entity_result_type()
self.clear_entity_result()
self.clear_skipped_cursor()
self.clear_end_cursor()
self.clear_more_results()
self.clear_skipped_results()
self.clear_snapshot_version()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt32(self.entity_result_type_)
for i in xrange(len(self.entity_result_)):
out.putVarInt32(18)
out.putVarInt32(self.entity_result_[i].ByteSize())
self.entity_result_[i].OutputUnchecked(out)
if (self.has_skipped_cursor_):
out.putVarInt32(26)
out.putPrefixedString(self.skipped_cursor_)
if (self.has_end_cursor_):
out.putVarInt32(34)
out.putPrefixedString(self.end_cursor_)
out.putVarInt32(40)
out.putVarInt32(self.more_results_)
if (self.has_skipped_results_):
out.putVarInt32(48)
out.putVarInt32(self.skipped_results_)
if (self.has_snapshot_version_):
out.putVarInt32(56)
out.putVarInt64(self.snapshot_version_)
def OutputPartial(self, out):
if (self.has_entity_result_type_):
out.putVarInt32(8)
out.putVarInt32(self.entity_result_type_)
for i in xrange(len(self.entity_result_)):
out.putVarInt32(18)
out.putVarInt32(self.entity_result_[i].ByteSizePartial())
self.entity_result_[i].OutputPartial(out)
if (self.has_skipped_cursor_):
out.putVarInt32(26)
out.putPrefixedString(self.skipped_cursor_)
if (self.has_end_cursor_):
out.putVarInt32(34)
out.putPrefixedString(self.end_cursor_)
if (self.has_more_results_):
out.putVarInt32(40)
out.putVarInt32(self.more_results_)
if (self.has_skipped_results_):
out.putVarInt32(48)
out.putVarInt32(self.skipped_results_)
if (self.has_snapshot_version_):
out.putVarInt32(56)
out.putVarInt64(self.snapshot_version_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_entity_result_type(d.getVarInt32())
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_entity_result().TryMerge(tmp)
continue
if tt == 26:
self.set_skipped_cursor(d.getPrefixedString())
continue
if tt == 34:
self.set_end_cursor(d.getPrefixedString())
continue
if tt == 40:
self.set_more_results(d.getVarInt32())
continue
if tt == 48:
self.set_skipped_results(d.getVarInt32())
continue
if tt == 56:
self.set_snapshot_version(d.getVarInt64())
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_entity_result_type_: res+=prefix+("entity_result_type: %s\n" % self.DebugFormatInt32(self.entity_result_type_))
cnt=0
for e in self.entity_result_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("entity_result%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_skipped_cursor_: res+=prefix+("skipped_cursor: %s\n" % self.DebugFormatString(self.skipped_cursor_))
if self.has_end_cursor_: res+=prefix+("end_cursor: %s\n" % self.DebugFormatString(self.end_cursor_))
if self.has_more_results_: res+=prefix+("more_results: %s\n" % self.DebugFormatInt32(self.more_results_))
if self.has_skipped_results_: res+=prefix+("skipped_results: %s\n" % self.DebugFormatInt32(self.skipped_results_))
if self.has_snapshot_version_: res+=prefix+("snapshot_version: %s\n" % self.DebugFormatInt64(self.snapshot_version_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kentity_result_type = 1
kentity_result = 2
kskipped_cursor = 3
kend_cursor = 4
kmore_results = 5
kskipped_results = 6
ksnapshot_version = 7
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "entity_result_type",
2: "entity_result",
3: "skipped_cursor",
4: "end_cursor",
5: "more_results",
6: "skipped_results",
7: "snapshot_version",
}, 7)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.NUMERIC,
6: ProtocolBuffer.Encoder.NUMERIC,
7: ProtocolBuffer.Encoder.NUMERIC,
}, 7, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.QueryResultBatch'
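# Mutation (below) describes a single write: an Operation enum
# (INSERT/UPDATE/UPSERT/DELETE), an optional Key and an optional Entity;
# typically only the key matters for DELETE while the entity carries the
# data for the other operations. A minimal sketch:
#
#   m = Mutation()
#   m.set_op(Mutation.UPSERT)
#   m.mutable_entity()      # fill in the entity's key and properties here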
class Mutation(ProtocolBuffer.ProtocolMessage):
# Operation values
UNKNOWN = 0
INSERT = 1
UPDATE = 2
UPSERT = 3
DELETE = 4
_Operation_NAMES = {
0: "UNKNOWN",
1: "INSERT",
2: "UPDATE",
3: "UPSERT",
4: "DELETE",
}
def Operation_Name(cls, x): return cls._Operation_NAMES.get(x, "")
Operation_Name = classmethod(Operation_Name)
has_op_ = 0
op_ = 0
has_key_ = 0
key_ = None
has_entity_ = 0
entity_ = None
def __init__(self, contents=None):
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def op(self): return self.op_
def set_op(self, x):
self.has_op_ = 1
self.op_ = x
def clear_op(self):
if self.has_op_:
self.has_op_ = 0
self.op_ = 0
def has_op(self): return self.has_op_
def key(self):
if self.key_ is None:
self.lazy_init_lock_.acquire()
try:
if self.key_ is None: self.key_ = Key()
finally:
self.lazy_init_lock_.release()
return self.key_
def mutable_key(self): self.has_key_ = 1; return self.key()
def clear_key(self):
# Warning: this method does not acquire the lock.
if self.has_key_:
self.has_key_ = 0;
if self.key_ is not None: self.key_.Clear()
def has_key(self): return self.has_key_
def entity(self):
if self.entity_ is None:
self.lazy_init_lock_.acquire()
try:
if self.entity_ is None: self.entity_ = Entity()
finally:
self.lazy_init_lock_.release()
return self.entity_
def mutable_entity(self): self.has_entity_ = 1; return self.entity()
def clear_entity(self):
# Warning: this method does not acquire the lock.
if self.has_entity_:
self.has_entity_ = 0;
if self.entity_ is not None: self.entity_.Clear()
def has_entity(self): return self.has_entity_
def MergeFrom(self, x):
assert x is not self
if (x.has_op()): self.set_op(x.op())
if (x.has_key()): self.mutable_key().MergeFrom(x.key())
if (x.has_entity()): self.mutable_entity().MergeFrom(x.entity())
def Equals(self, x):
if x is self: return 1
if self.has_op_ != x.has_op_: return 0
if self.has_op_ and self.op_ != x.op_: return 0
if self.has_key_ != x.has_key_: return 0
if self.has_key_ and self.key_ != x.key_: return 0
if self.has_entity_ != x.has_entity_: return 0
if self.has_entity_ and self.entity_ != x.entity_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_key_ and not self.key_.IsInitialized(debug_strs)): initialized = 0
if (self.has_entity_ and not self.entity_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
if (self.has_op_): n += 1 + self.lengthVarInt64(self.op_)
if (self.has_key_): n += 1 + self.lengthString(self.key_.ByteSize())
if (self.has_entity_): n += 1 + self.lengthString(self.entity_.ByteSize())
return n
def ByteSizePartial(self):
n = 0
if (self.has_op_): n += 1 + self.lengthVarInt64(self.op_)
if (self.has_key_): n += 1 + self.lengthString(self.key_.ByteSizePartial())
if (self.has_entity_): n += 1 + self.lengthString(self.entity_.ByteSizePartial())
return n
def Clear(self):
self.clear_op()
self.clear_key()
self.clear_entity()
def OutputUnchecked(self, out):
if (self.has_op_):
out.putVarInt32(8)
out.putVarInt32(self.op_)
if (self.has_key_):
out.putVarInt32(18)
out.putVarInt32(self.key_.ByteSize())
self.key_.OutputUnchecked(out)
if (self.has_entity_):
out.putVarInt32(26)
out.putVarInt32(self.entity_.ByteSize())
self.entity_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_op_):
out.putVarInt32(8)
out.putVarInt32(self.op_)
if (self.has_key_):
out.putVarInt32(18)
out.putVarInt32(self.key_.ByteSizePartial())
self.key_.OutputPartial(out)
if (self.has_entity_):
out.putVarInt32(26)
out.putVarInt32(self.entity_.ByteSizePartial())
self.entity_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_op(d.getVarInt32())
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_key().TryMerge(tmp)
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_entity().TryMerge(tmp)
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_op_: res+=prefix+("op: %s\n" % self.DebugFormatInt32(self.op_))
if self.has_key_:
res+=prefix+"key <\n"
res+=self.key_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_entity_:
res+=prefix+"entity <\n"
res+=self.entity_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kop = 1
kkey = 2
kentity = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "op",
2: "key",
3: "entity",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.Mutation'
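# MutationResult (below) reports the outcome of one Mutation: the written
# entity's Key (e.g. when its id was allocated by the datastore) and the
# new_version assigned to it. Both fields are optional.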
class MutationResult(ProtocolBuffer.ProtocolMessage):
has_key_ = 0
key_ = None
has_new_version_ = 0
new_version_ = 0
def __init__(self, contents=None):
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def key(self):
if self.key_ is None:
self.lazy_init_lock_.acquire()
try:
if self.key_ is None: self.key_ = Key()
finally:
self.lazy_init_lock_.release()
return self.key_
def mutable_key(self): self.has_key_ = 1; return self.key()
def clear_key(self):
# Warning: this method does not acquire the lock.
if self.has_key_:
self.has_key_ = 0;
if self.key_ is not None: self.key_.Clear()
def has_key(self): return self.has_key_
def new_version(self): return self.new_version_
def set_new_version(self, x):
self.has_new_version_ = 1
self.new_version_ = x
def clear_new_version(self):
if self.has_new_version_:
self.has_new_version_ = 0
self.new_version_ = 0
def has_new_version(self): return self.has_new_version_
def MergeFrom(self, x):
assert x is not self
if (x.has_key()): self.mutable_key().MergeFrom(x.key())
if (x.has_new_version()): self.set_new_version(x.new_version())
def Equals(self, x):
if x is self: return 1
if self.has_key_ != x.has_key_: return 0
if self.has_key_ and self.key_ != x.key_: return 0
if self.has_new_version_ != x.has_new_version_: return 0
if self.has_new_version_ and self.new_version_ != x.new_version_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_key_ and not self.key_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
if (self.has_key_): n += 1 + self.lengthString(self.key_.ByteSize())
if (self.has_new_version_): n += 1 + self.lengthVarInt64(self.new_version_)
return n
def ByteSizePartial(self):
n = 0
if (self.has_key_): n += 1 + self.lengthString(self.key_.ByteSizePartial())
if (self.has_new_version_): n += 1 + self.lengthVarInt64(self.new_version_)
return n
def Clear(self):
self.clear_key()
self.clear_new_version()
def OutputUnchecked(self, out):
if (self.has_key_):
out.putVarInt32(26)
out.putVarInt32(self.key_.ByteSize())
self.key_.OutputUnchecked(out)
if (self.has_new_version_):
out.putVarInt32(32)
out.putVarInt64(self.new_version_)
def OutputPartial(self, out):
if (self.has_key_):
out.putVarInt32(26)
out.putVarInt32(self.key_.ByteSizePartial())
self.key_.OutputPartial(out)
if (self.has_new_version_):
out.putVarInt32(32)
out.putVarInt64(self.new_version_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_key().TryMerge(tmp)
continue
if tt == 32:
self.set_new_version(d.getVarInt64())
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_key_:
res+=prefix+"key <\n"
res+=self.key_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_new_version_: res+=prefix+("new_version: %s\n" % self.DebugFormatInt64(self.new_version_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kkey = 3
knew_version = 4
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
3: "key",
4: "new_version",
}, 4)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.NUMERIC,
}, 4, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.MutationResult'
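# DeprecatedMutation (below) is the older mutation format that groups whole
# entities by operation -- repeated upsert/update/insert/insert_auto_id
# lists plus a delete list of Keys -- together with a single force flag,
# instead of the per-write Mutation message above.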
class DeprecatedMutation(ProtocolBuffer.ProtocolMessage):
has_force_ = 0
force_ = 0
def __init__(self, contents=None):
self.upsert_ = []
self.update_ = []
self.insert_ = []
self.insert_auto_id_ = []
self.delete_ = []
if contents is not None: self.MergeFromString(contents)
def upsert_size(self): return len(self.upsert_)
def upsert_list(self): return self.upsert_
def upsert(self, i):
return self.upsert_[i]
def mutable_upsert(self, i):
return self.upsert_[i]
def add_upsert(self):
x = Entity()
self.upsert_.append(x)
return x
def clear_upsert(self):
self.upsert_ = []
def update_size(self): return len(self.update_)
def update_list(self): return self.update_
def update(self, i):
return self.update_[i]
def mutable_update(self, i):
return self.update_[i]
def add_update(self):
x = Entity()
self.update_.append(x)
return x
def clear_update(self):
self.update_ = []
def insert_size(self): return len(self.insert_)
def insert_list(self): return self.insert_
def insert(self, i):
return self.insert_[i]
def mutable_insert(self, i):
return self.insert_[i]
def add_insert(self):
x = Entity()
self.insert_.append(x)
return x
def clear_insert(self):
self.insert_ = []
def insert_auto_id_size(self): return len(self.insert_auto_id_)
def insert_auto_id_list(self): return self.insert_auto_id_
def insert_auto_id(self, i):
return self.insert_auto_id_[i]
def mutable_insert_auto_id(self, i):
return self.insert_auto_id_[i]
def add_insert_auto_id(self):
x = Entity()
self.insert_auto_id_.append(x)
return x
def clear_insert_auto_id(self):
self.insert_auto_id_ = []
def delete_size(self): return len(self.delete_)
def delete_list(self): return self.delete_
def delete(self, i):
return self.delete_[i]
def mutable_delete(self, i):
return self.delete_[i]
def add_delete(self):
x = Key()
self.delete_.append(x)
return x
def clear_delete(self):
self.delete_ = []
def force(self): return self.force_
def set_force(self, x):
self.has_force_ = 1
self.force_ = x
def clear_force(self):
if self.has_force_:
self.has_force_ = 0
self.force_ = 0
def has_force(self): return self.has_force_
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.upsert_size()): self.add_upsert().CopyFrom(x.upsert(i))
for i in xrange(x.update_size()): self.add_update().CopyFrom(x.update(i))
for i in xrange(x.insert_size()): self.add_insert().CopyFrom(x.insert(i))
for i in xrange(x.insert_auto_id_size()): self.add_insert_auto_id().CopyFrom(x.insert_auto_id(i))
for i in xrange(x.delete_size()): self.add_delete().CopyFrom(x.delete(i))
if (x.has_force()): self.set_force(x.force())
def Equals(self, x):
if x is self: return 1
if len(self.upsert_) != len(x.upsert_): return 0
for e1, e2 in zip(self.upsert_, x.upsert_):
if e1 != e2: return 0
if len(self.update_) != len(x.update_): return 0
for e1, e2 in zip(self.update_, x.update_):
if e1 != e2: return 0
if len(self.insert_) != len(x.insert_): return 0
for e1, e2 in zip(self.insert_, x.insert_):
if e1 != e2: return 0
if len(self.insert_auto_id_) != len(x.insert_auto_id_): return 0
for e1, e2 in zip(self.insert_auto_id_, x.insert_auto_id_):
if e1 != e2: return 0
if len(self.delete_) != len(x.delete_): return 0
for e1, e2 in zip(self.delete_, x.delete_):
if e1 != e2: return 0
if self.has_force_ != x.has_force_: return 0
if self.has_force_ and self.force_ != x.force_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.upsert_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.update_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.insert_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.insert_auto_id_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.delete_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.upsert_)
for i in xrange(len(self.upsert_)): n += self.lengthString(self.upsert_[i].ByteSize())
n += 1 * len(self.update_)
for i in xrange(len(self.update_)): n += self.lengthString(self.update_[i].ByteSize())
n += 1 * len(self.insert_)
for i in xrange(len(self.insert_)): n += self.lengthString(self.insert_[i].ByteSize())
n += 1 * len(self.insert_auto_id_)
for i in xrange(len(self.insert_auto_id_)): n += self.lengthString(self.insert_auto_id_[i].ByteSize())
n += 1 * len(self.delete_)
for i in xrange(len(self.delete_)): n += self.lengthString(self.delete_[i].ByteSize())
if (self.has_force_): n += 2
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.upsert_)
for i in xrange(len(self.upsert_)): n += self.lengthString(self.upsert_[i].ByteSizePartial())
n += 1 * len(self.update_)
for i in xrange(len(self.update_)): n += self.lengthString(self.update_[i].ByteSizePartial())
n += 1 * len(self.insert_)
for i in xrange(len(self.insert_)): n += self.lengthString(self.insert_[i].ByteSizePartial())
n += 1 * len(self.insert_auto_id_)
for i in xrange(len(self.insert_auto_id_)): n += self.lengthString(self.insert_auto_id_[i].ByteSizePartial())
n += 1 * len(self.delete_)
for i in xrange(len(self.delete_)): n += self.lengthString(self.delete_[i].ByteSizePartial())
if (self.has_force_): n += 2
return n
def Clear(self):
self.clear_upsert()
self.clear_update()
self.clear_insert()
self.clear_insert_auto_id()
self.clear_delete()
self.clear_force()
def OutputUnchecked(self, out):
for i in xrange(len(self.upsert_)):
out.putVarInt32(10)
out.putVarInt32(self.upsert_[i].ByteSize())
self.upsert_[i].OutputUnchecked(out)
for i in xrange(len(self.update_)):
out.putVarInt32(18)
out.putVarInt32(self.update_[i].ByteSize())
self.update_[i].OutputUnchecked(out)
for i in xrange(len(self.insert_)):
out.putVarInt32(26)
out.putVarInt32(self.insert_[i].ByteSize())
self.insert_[i].OutputUnchecked(out)
for i in xrange(len(self.insert_auto_id_)):
out.putVarInt32(34)
out.putVarInt32(self.insert_auto_id_[i].ByteSize())
self.insert_auto_id_[i].OutputUnchecked(out)
for i in xrange(len(self.delete_)):
out.putVarInt32(42)
out.putVarInt32(self.delete_[i].ByteSize())
self.delete_[i].OutputUnchecked(out)
if (self.has_force_):
out.putVarInt32(48)
out.putBoolean(self.force_)
def OutputPartial(self, out):
for i in xrange(len(self.upsert_)):
out.putVarInt32(10)
out.putVarInt32(self.upsert_[i].ByteSizePartial())
self.upsert_[i].OutputPartial(out)
for i in xrange(len(self.update_)):
out.putVarInt32(18)
out.putVarInt32(self.update_[i].ByteSizePartial())
self.update_[i].OutputPartial(out)
for i in xrange(len(self.insert_)):
out.putVarInt32(26)
out.putVarInt32(self.insert_[i].ByteSizePartial())
self.insert_[i].OutputPartial(out)
for i in xrange(len(self.insert_auto_id_)):
out.putVarInt32(34)
out.putVarInt32(self.insert_auto_id_[i].ByteSizePartial())
self.insert_auto_id_[i].OutputPartial(out)
for i in xrange(len(self.delete_)):
out.putVarInt32(42)
out.putVarInt32(self.delete_[i].ByteSizePartial())
self.delete_[i].OutputPartial(out)
if (self.has_force_):
out.putVarInt32(48)
out.putBoolean(self.force_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_upsert().TryMerge(tmp)
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_update().TryMerge(tmp)
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_insert().TryMerge(tmp)
continue
if tt == 34:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_insert_auto_id().TryMerge(tmp)
continue
if tt == 42:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_delete().TryMerge(tmp)
continue
if tt == 48:
self.set_force(d.getBoolean())
continue
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.upsert_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("upsert%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
cnt=0
for e in self.update_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("update%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
cnt=0
for e in self.insert_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("insert%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
cnt=0
for e in self.insert_auto_id_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("insert_auto_id%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
cnt=0
for e in self.delete_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("delete%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_force_: res+=prefix+("force: %s\n" % self.DebugFormatBool(self.force_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kupsert = 1
kupdate = 2
kinsert = 3
kinsert_auto_id = 4
kdelete = 5
kforce = 6
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "upsert",
2: "update",
3: "insert",
4: "insert_auto_id",
5: "delete",
6: "force",
}, 6)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.STRING,
6: ProtocolBuffer.Encoder.NUMERIC,
}, 6, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.DeprecatedMutation'
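# DeprecatedMutationResult reports the outcome of a DeprecatedMutation: the
# required index_updates count, the Keys allocated for insert_auto_id
# entities, and per-operation lists of entity versions.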
class DeprecatedMutationResult(ProtocolBuffer.ProtocolMessage):
has_index_updates_ = 0
index_updates_ = 0
def __init__(self, contents=None):
self.insert_auto_id_key_ = []
self.upsert_version_ = []
self.update_version_ = []
self.insert_version_ = []
self.insert_auto_id_version_ = []
self.delete_version_ = []
if contents is not None: self.MergeFromString(contents)
def index_updates(self): return self.index_updates_
def set_index_updates(self, x):
self.has_index_updates_ = 1
self.index_updates_ = x
def clear_index_updates(self):
if self.has_index_updates_:
self.has_index_updates_ = 0
self.index_updates_ = 0
def has_index_updates(self): return self.has_index_updates_
def insert_auto_id_key_size(self): return len(self.insert_auto_id_key_)
def insert_auto_id_key_list(self): return self.insert_auto_id_key_
def insert_auto_id_key(self, i):
return self.insert_auto_id_key_[i]
def mutable_insert_auto_id_key(self, i):
return self.insert_auto_id_key_[i]
def add_insert_auto_id_key(self):
x = Key()
self.insert_auto_id_key_.append(x)
return x
def clear_insert_auto_id_key(self):
self.insert_auto_id_key_ = []
def upsert_version_size(self): return len(self.upsert_version_)
def upsert_version_list(self): return self.upsert_version_
def upsert_version(self, i):
return self.upsert_version_[i]
def set_upsert_version(self, i, x):
self.upsert_version_[i] = x
def add_upsert_version(self, x):
self.upsert_version_.append(x)
def clear_upsert_version(self):
self.upsert_version_ = []
def update_version_size(self): return len(self.update_version_)
def update_version_list(self): return self.update_version_
def update_version(self, i):
return self.update_version_[i]
def set_update_version(self, i, x):
self.update_version_[i] = x
def add_update_version(self, x):
self.update_version_.append(x)
def clear_update_version(self):
self.update_version_ = []
def insert_version_size(self): return len(self.insert_version_)
def insert_version_list(self): return self.insert_version_
def insert_version(self, i):
return self.insert_version_[i]
def set_insert_version(self, i, x):
self.insert_version_[i] = x
def add_insert_version(self, x):
self.insert_version_.append(x)
def clear_insert_version(self):
self.insert_version_ = []
def insert_auto_id_version_size(self): return len(self.insert_auto_id_version_)
def insert_auto_id_version_list(self): return self.insert_auto_id_version_
def insert_auto_id_version(self, i):
return self.insert_auto_id_version_[i]
def set_insert_auto_id_version(self, i, x):
self.insert_auto_id_version_[i] = x
def add_insert_auto_id_version(self, x):
self.insert_auto_id_version_.append(x)
def clear_insert_auto_id_version(self):
self.insert_auto_id_version_ = []
def delete_version_size(self): return len(self.delete_version_)
def delete_version_list(self): return self.delete_version_
def delete_version(self, i):
return self.delete_version_[i]
def set_delete_version(self, i, x):
self.delete_version_[i] = x
def add_delete_version(self, x):
self.delete_version_.append(x)
def clear_delete_version(self):
self.delete_version_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_index_updates()): self.set_index_updates(x.index_updates())
for i in xrange(x.insert_auto_id_key_size()): self.add_insert_auto_id_key().CopyFrom(x.insert_auto_id_key(i))
for i in xrange(x.upsert_version_size()): self.add_upsert_version(x.upsert_version(i))
for i in xrange(x.update_version_size()): self.add_update_version(x.update_version(i))
for i in xrange(x.insert_version_size()): self.add_insert_version(x.insert_version(i))
for i in xrange(x.insert_auto_id_version_size()): self.add_insert_auto_id_version(x.insert_auto_id_version(i))
for i in xrange(x.delete_version_size()): self.add_delete_version(x.delete_version(i))
def Equals(self, x):
if x is self: return 1
if self.has_index_updates_ != x.has_index_updates_: return 0
if self.has_index_updates_ and self.index_updates_ != x.index_updates_: return 0
if len(self.insert_auto_id_key_) != len(x.insert_auto_id_key_): return 0
for e1, e2 in zip(self.insert_auto_id_key_, x.insert_auto_id_key_):
if e1 != e2: return 0
if len(self.upsert_version_) != len(x.upsert_version_): return 0
for e1, e2 in zip(self.upsert_version_, x.upsert_version_):
if e1 != e2: return 0
if len(self.update_version_) != len(x.update_version_): return 0
for e1, e2 in zip(self.update_version_, x.update_version_):
if e1 != e2: return 0
if len(self.insert_version_) != len(x.insert_version_): return 0
for e1, e2 in zip(self.insert_version_, x.insert_version_):
if e1 != e2: return 0
if len(self.insert_auto_id_version_) != len(x.insert_auto_id_version_): return 0
for e1, e2 in zip(self.insert_auto_id_version_, x.insert_auto_id_version_):
if e1 != e2: return 0
if len(self.delete_version_) != len(x.delete_version_): return 0
for e1, e2 in zip(self.delete_version_, x.delete_version_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_index_updates_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: index_updates not set.')
for p in self.insert_auto_id_key_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.index_updates_)
n += 1 * len(self.insert_auto_id_key_)
for i in xrange(len(self.insert_auto_id_key_)): n += self.lengthString(self.insert_auto_id_key_[i].ByteSize())
n += 1 * len(self.upsert_version_)
for i in xrange(len(self.upsert_version_)): n += self.lengthVarInt64(self.upsert_version_[i])
n += 1 * len(self.update_version_)
for i in xrange(len(self.update_version_)): n += self.lengthVarInt64(self.update_version_[i])
n += 1 * len(self.insert_version_)
for i in xrange(len(self.insert_version_)): n += self.lengthVarInt64(self.insert_version_[i])
n += 1 * len(self.insert_auto_id_version_)
for i in xrange(len(self.insert_auto_id_version_)): n += self.lengthVarInt64(self.insert_auto_id_version_[i])
n += 1 * len(self.delete_version_)
for i in xrange(len(self.delete_version_)): n += self.lengthVarInt64(self.delete_version_[i])
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_index_updates_):
n += 1
n += self.lengthVarInt64(self.index_updates_)
n += 1 * len(self.insert_auto_id_key_)
for i in xrange(len(self.insert_auto_id_key_)): n += self.lengthString(self.insert_auto_id_key_[i].ByteSizePartial())
n += 1 * len(self.upsert_version_)
for i in xrange(len(self.upsert_version_)): n += self.lengthVarInt64(self.upsert_version_[i])
n += 1 * len(self.update_version_)
for i in xrange(len(self.update_version_)): n += self.lengthVarInt64(self.update_version_[i])
n += 1 * len(self.insert_version_)
for i in xrange(len(self.insert_version_)): n += self.lengthVarInt64(self.insert_version_[i])
n += 1 * len(self.insert_auto_id_version_)
for i in xrange(len(self.insert_auto_id_version_)): n += self.lengthVarInt64(self.insert_auto_id_version_[i])
n += 1 * len(self.delete_version_)
for i in xrange(len(self.delete_version_)): n += self.lengthVarInt64(self.delete_version_[i])
return n
def Clear(self):
self.clear_index_updates()
self.clear_insert_auto_id_key()
self.clear_upsert_version()
self.clear_update_version()
self.clear_insert_version()
self.clear_insert_auto_id_version()
self.clear_delete_version()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt32(self.index_updates_)
for i in xrange(len(self.insert_auto_id_key_)):
out.putVarInt32(18)
out.putVarInt32(self.insert_auto_id_key_[i].ByteSize())
self.insert_auto_id_key_[i].OutputUnchecked(out)
for i in xrange(len(self.upsert_version_)):
out.putVarInt32(24)
out.putVarInt64(self.upsert_version_[i])
for i in xrange(len(self.update_version_)):
out.putVarInt32(32)
out.putVarInt64(self.update_version_[i])
for i in xrange(len(self.insert_version_)):
out.putVarInt32(40)
out.putVarInt64(self.insert_version_[i])
for i in xrange(len(self.insert_auto_id_version_)):
out.putVarInt32(48)
out.putVarInt64(self.insert_auto_id_version_[i])
for i in xrange(len(self.delete_version_)):
out.putVarInt32(56)
out.putVarInt64(self.delete_version_[i])
def OutputPartial(self, out):
if (self.has_index_updates_):
out.putVarInt32(8)
out.putVarInt32(self.index_updates_)
for i in xrange(len(self.insert_auto_id_key_)):
out.putVarInt32(18)
out.putVarInt32(self.insert_auto_id_key_[i].ByteSizePartial())
self.insert_auto_id_key_[i].OutputPartial(out)
for i in xrange(len(self.upsert_version_)):
out.putVarInt32(24)
out.putVarInt64(self.upsert_version_[i])
for i in xrange(len(self.update_version_)):
out.putVarInt32(32)
out.putVarInt64(self.update_version_[i])
for i in xrange(len(self.insert_version_)):
out.putVarInt32(40)
out.putVarInt64(self.insert_version_[i])
for i in xrange(len(self.insert_auto_id_version_)):
out.putVarInt32(48)
out.putVarInt64(self.insert_auto_id_version_[i])
for i in xrange(len(self.delete_version_)):
out.putVarInt32(56)
out.putVarInt64(self.delete_version_[i])
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_index_updates(d.getVarInt32())
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_insert_auto_id_key().TryMerge(tmp)
continue
if tt == 24:
self.add_upsert_version(d.getVarInt64())
continue
if tt == 32:
self.add_update_version(d.getVarInt64())
continue
if tt == 40:
self.add_insert_version(d.getVarInt64())
continue
if tt == 48:
self.add_insert_auto_id_version(d.getVarInt64())
continue
if tt == 56:
self.add_delete_version(d.getVarInt64())
continue
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_index_updates_: res+=prefix+("index_updates: %s\n" % self.DebugFormatInt32(self.index_updates_))
cnt=0
for e in self.insert_auto_id_key_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("insert_auto_id_key%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
cnt=0
for e in self.upsert_version_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("upsert_version%s: %s\n" % (elm, self.DebugFormatInt64(e)))
cnt+=1
cnt=0
for e in self.update_version_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("update_version%s: %s\n" % (elm, self.DebugFormatInt64(e)))
cnt+=1
cnt=0
for e in self.insert_version_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("insert_version%s: %s\n" % (elm, self.DebugFormatInt64(e)))
cnt+=1
cnt=0
for e in self.insert_auto_id_version_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("insert_auto_id_version%s: %s\n" % (elm, self.DebugFormatInt64(e)))
cnt+=1
cnt=0
for e in self.delete_version_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("delete_version%s: %s\n" % (elm, self.DebugFormatInt64(e)))
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kindex_updates = 1
kinsert_auto_id_key = 2
kupsert_version = 3
kupdate_version = 4
kinsert_version = 5
kinsert_auto_id_version = 6
kdelete_version = 7
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "index_updates",
2: "insert_auto_id_key",
3: "upsert_version",
4: "update_version",
5: "insert_version",
6: "insert_auto_id_version",
7: "delete_version",
}, 7)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.NUMERIC,
6: ProtocolBuffer.Encoder.NUMERIC,
7: ProtocolBuffer.Encoder.NUMERIC,
}, 7, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.DeprecatedMutationResult'
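# ReadOptions controls how reads are performed: an optional ReadConsistency
# value (DEFAULT, STRONG or EVENTUAL) and/or an opaque transaction handle.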
class ReadOptions(ProtocolBuffer.ProtocolMessage):
# ReadConsistency values
DEFAULT = 0
STRONG = 1
EVENTUAL = 2
_ReadConsistency_NAMES = {
0: "DEFAULT",
1: "STRONG",
2: "EVENTUAL",
}
def ReadConsistency_Name(cls, x): return cls._ReadConsistency_NAMES.get(x, "")
ReadConsistency_Name = classmethod(ReadConsistency_Name)
has_read_consistency_ = 0
read_consistency_ = 0
has_transaction_ = 0
transaction_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def read_consistency(self): return self.read_consistency_
def set_read_consistency(self, x):
self.has_read_consistency_ = 1
self.read_consistency_ = x
def clear_read_consistency(self):
if self.has_read_consistency_:
self.has_read_consistency_ = 0
self.read_consistency_ = 0
def has_read_consistency(self): return self.has_read_consistency_
def transaction(self): return self.transaction_
def set_transaction(self, x):
self.has_transaction_ = 1
self.transaction_ = x
def clear_transaction(self):
if self.has_transaction_:
self.has_transaction_ = 0
self.transaction_ = ""
def has_transaction(self): return self.has_transaction_
def MergeFrom(self, x):
assert x is not self
if (x.has_read_consistency()): self.set_read_consistency(x.read_consistency())
if (x.has_transaction()): self.set_transaction(x.transaction())
def Equals(self, x):
if x is self: return 1
if self.has_read_consistency_ != x.has_read_consistency_: return 0
if self.has_read_consistency_ and self.read_consistency_ != x.read_consistency_: return 0
if self.has_transaction_ != x.has_transaction_: return 0
if self.has_transaction_ and self.transaction_ != x.transaction_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
if (self.has_read_consistency_): n += 1 + self.lengthVarInt64(self.read_consistency_)
if (self.has_transaction_): n += 1 + self.lengthString(len(self.transaction_))
return n
def ByteSizePartial(self):
n = 0
if (self.has_read_consistency_): n += 1 + self.lengthVarInt64(self.read_consistency_)
if (self.has_transaction_): n += 1 + self.lengthString(len(self.transaction_))
return n
def Clear(self):
self.clear_read_consistency()
self.clear_transaction()
def OutputUnchecked(self, out):
if (self.has_read_consistency_):
out.putVarInt32(8)
out.putVarInt32(self.read_consistency_)
if (self.has_transaction_):
out.putVarInt32(18)
out.putPrefixedString(self.transaction_)
def OutputPartial(self, out):
if (self.has_read_consistency_):
out.putVarInt32(8)
out.putVarInt32(self.read_consistency_)
if (self.has_transaction_):
out.putVarInt32(18)
out.putPrefixedString(self.transaction_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_read_consistency(d.getVarInt32())
continue
if tt == 18:
self.set_transaction(d.getPrefixedString())
continue
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_read_consistency_: res+=prefix+("read_consistency: %s\n" % self.DebugFormatInt32(self.read_consistency_))
if self.has_transaction_: res+=prefix+("transaction: %s\n" % self.DebugFormatString(self.transaction_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kread_consistency = 1
ktransaction = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "read_consistency",
2: "transaction",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.ReadOptions'
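# LookupRequest fetches entities by key: optional ReadOptions plus a repeated
# list of Key messages. A minimal usage sketch, assuming the serialization
# helpers inherited from ProtocolBuffer.ProtocolMessage:
#
#   req = LookupRequest()
#   req.mutable_read_options().set_read_consistency(ReadOptions.EVENTUAL)
#   key = req.add_key()        # populate the Key's partition/path as needed
#   if req.IsInitialized([]):  # every added Key must itself be initialized
#     payload = req.Encode()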
class LookupRequest(ProtocolBuffer.ProtocolMessage):
has_read_options_ = 0
read_options_ = None
def __init__(self, contents=None):
self.key_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def read_options(self):
if self.read_options_ is None:
self.lazy_init_lock_.acquire()
try:
if self.read_options_ is None: self.read_options_ = ReadOptions()
finally:
self.lazy_init_lock_.release()
return self.read_options_
def mutable_read_options(self): self.has_read_options_ = 1; return self.read_options()
def clear_read_options(self):
# Warning: this method does not acquire the lock.
if self.has_read_options_:
self.has_read_options_ = 0;
if self.read_options_ is not None: self.read_options_.Clear()
def has_read_options(self): return self.has_read_options_
def key_size(self): return len(self.key_)
def key_list(self): return self.key_
def key(self, i):
return self.key_[i]
def mutable_key(self, i):
return self.key_[i]
def add_key(self):
x = Key()
self.key_.append(x)
return x
def clear_key(self):
self.key_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_read_options()): self.mutable_read_options().MergeFrom(x.read_options())
for i in xrange(x.key_size()): self.add_key().CopyFrom(x.key(i))
def Equals(self, x):
if x is self: return 1
if self.has_read_options_ != x.has_read_options_: return 0
if self.has_read_options_ and self.read_options_ != x.read_options_: return 0
if len(self.key_) != len(x.key_): return 0
for e1, e2 in zip(self.key_, x.key_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_read_options_ and not self.read_options_.IsInitialized(debug_strs)): initialized = 0
for p in self.key_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
if (self.has_read_options_): n += 1 + self.lengthString(self.read_options_.ByteSize())
n += 1 * len(self.key_)
for i in xrange(len(self.key_)): n += self.lengthString(self.key_[i].ByteSize())
return n
def ByteSizePartial(self):
n = 0
if (self.has_read_options_): n += 1 + self.lengthString(self.read_options_.ByteSizePartial())
n += 1 * len(self.key_)
for i in xrange(len(self.key_)): n += self.lengthString(self.key_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_read_options()
self.clear_key()
def OutputUnchecked(self, out):
if (self.has_read_options_):
out.putVarInt32(10)
out.putVarInt32(self.read_options_.ByteSize())
self.read_options_.OutputUnchecked(out)
for i in xrange(len(self.key_)):
out.putVarInt32(26)
out.putVarInt32(self.key_[i].ByteSize())
self.key_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_read_options_):
out.putVarInt32(10)
out.putVarInt32(self.read_options_.ByteSizePartial())
self.read_options_.OutputPartial(out)
for i in xrange(len(self.key_)):
out.putVarInt32(26)
out.putVarInt32(self.key_[i].ByteSizePartial())
self.key_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_read_options().TryMerge(tmp)
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_key().TryMerge(tmp)
continue
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_read_options_:
res+=prefix+"read_options <\n"
res+=self.read_options_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt=0
for e in self.key_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("key%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kread_options = 1
kkey = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "read_options",
3: "key",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.LookupRequest'
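# LookupResponse carries the results of a LookupRequest: EntityResult
# messages for keys that were found and for keys that were missing, plus any
# Keys the call deferred (not processed in this response).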
class LookupResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.found_ = []
self.missing_ = []
self.deferred_ = []
if contents is not None: self.MergeFromString(contents)
def found_size(self): return len(self.found_)
def found_list(self): return self.found_
def found(self, i):
return self.found_[i]
def mutable_found(self, i):
return self.found_[i]
def add_found(self):
x = EntityResult()
self.found_.append(x)
return x
def clear_found(self):
self.found_ = []
def missing_size(self): return len(self.missing_)
def missing_list(self): return self.missing_
def missing(self, i):
return self.missing_[i]
def mutable_missing(self, i):
return self.missing_[i]
def add_missing(self):
x = EntityResult()
self.missing_.append(x)
return x
def clear_missing(self):
self.missing_ = []
def deferred_size(self): return len(self.deferred_)
def deferred_list(self): return self.deferred_
def deferred(self, i):
return self.deferred_[i]
def mutable_deferred(self, i):
return self.deferred_[i]
def add_deferred(self):
x = Key()
self.deferred_.append(x)
return x
def clear_deferred(self):
self.deferred_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.found_size()): self.add_found().CopyFrom(x.found(i))
for i in xrange(x.missing_size()): self.add_missing().CopyFrom(x.missing(i))
for i in xrange(x.deferred_size()): self.add_deferred().CopyFrom(x.deferred(i))
def Equals(self, x):
if x is self: return 1
if len(self.found_) != len(x.found_): return 0
for e1, e2 in zip(self.found_, x.found_):
if e1 != e2: return 0
if len(self.missing_) != len(x.missing_): return 0
for e1, e2 in zip(self.missing_, x.missing_):
if e1 != e2: return 0
if len(self.deferred_) != len(x.deferred_): return 0
for e1, e2 in zip(self.deferred_, x.deferred_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.found_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.missing_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.deferred_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.found_)
for i in xrange(len(self.found_)): n += self.lengthString(self.found_[i].ByteSize())
n += 1 * len(self.missing_)
for i in xrange(len(self.missing_)): n += self.lengthString(self.missing_[i].ByteSize())
n += 1 * len(self.deferred_)
for i in xrange(len(self.deferred_)): n += self.lengthString(self.deferred_[i].ByteSize())
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.found_)
for i in xrange(len(self.found_)): n += self.lengthString(self.found_[i].ByteSizePartial())
n += 1 * len(self.missing_)
for i in xrange(len(self.missing_)): n += self.lengthString(self.missing_[i].ByteSizePartial())
n += 1 * len(self.deferred_)
for i in xrange(len(self.deferred_)): n += self.lengthString(self.deferred_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_found()
self.clear_missing()
self.clear_deferred()
def OutputUnchecked(self, out):
for i in xrange(len(self.found_)):
out.putVarInt32(10)
out.putVarInt32(self.found_[i].ByteSize())
self.found_[i].OutputUnchecked(out)
for i in xrange(len(self.missing_)):
out.putVarInt32(18)
out.putVarInt32(self.missing_[i].ByteSize())
self.missing_[i].OutputUnchecked(out)
for i in xrange(len(self.deferred_)):
out.putVarInt32(26)
out.putVarInt32(self.deferred_[i].ByteSize())
self.deferred_[i].OutputUnchecked(out)
def OutputPartial(self, out):
for i in xrange(len(self.found_)):
out.putVarInt32(10)
out.putVarInt32(self.found_[i].ByteSizePartial())
self.found_[i].OutputPartial(out)
for i in xrange(len(self.missing_)):
out.putVarInt32(18)
out.putVarInt32(self.missing_[i].ByteSizePartial())
self.missing_[i].OutputPartial(out)
for i in xrange(len(self.deferred_)):
out.putVarInt32(26)
out.putVarInt32(self.deferred_[i].ByteSizePartial())
self.deferred_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_found().TryMerge(tmp)
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_missing().TryMerge(tmp)
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_deferred().TryMerge(tmp)
continue
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.found_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("found%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
cnt=0
for e in self.missing_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("missing%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
cnt=0
for e in self.deferred_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("deferred%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kfound = 1
kmissing = 2
kdeferred = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "found",
2: "missing",
3: "deferred",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.LookupResponse'
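# RunQueryRequest starts a query: optional ReadOptions and PartitionId,
# either a Query or a GqlQuery payload, and optional min_safe_time_seconds
# and suggested_batch_size values.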
class RunQueryRequest(ProtocolBuffer.ProtocolMessage):
has_read_options_ = 0
read_options_ = None
has_partition_id_ = 0
partition_id_ = None
has_query_ = 0
query_ = None
has_gql_query_ = 0
gql_query_ = None
has_min_safe_time_seconds_ = 0
min_safe_time_seconds_ = 0
has_suggested_batch_size_ = 0
suggested_batch_size_ = 0
def __init__(self, contents=None):
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def read_options(self):
if self.read_options_ is None:
self.lazy_init_lock_.acquire()
try:
if self.read_options_ is None: self.read_options_ = ReadOptions()
finally:
self.lazy_init_lock_.release()
return self.read_options_
def mutable_read_options(self): self.has_read_options_ = 1; return self.read_options()
def clear_read_options(self):
# Warning: this method does not acquire the lock.
if self.has_read_options_:
self.has_read_options_ = 0;
if self.read_options_ is not None: self.read_options_.Clear()
def has_read_options(self): return self.has_read_options_
def partition_id(self):
if self.partition_id_ is None:
self.lazy_init_lock_.acquire()
try:
if self.partition_id_ is None: self.partition_id_ = PartitionId()
finally:
self.lazy_init_lock_.release()
return self.partition_id_
def mutable_partition_id(self): self.has_partition_id_ = 1; return self.partition_id()
def clear_partition_id(self):
# Warning: this method does not acquire the lock.
if self.has_partition_id_:
self.has_partition_id_ = 0;
if self.partition_id_ is not None: self.partition_id_.Clear()
def has_partition_id(self): return self.has_partition_id_
def query(self):
if self.query_ is None:
self.lazy_init_lock_.acquire()
try:
if self.query_ is None: self.query_ = Query()
finally:
self.lazy_init_lock_.release()
return self.query_
def mutable_query(self): self.has_query_ = 1; return self.query()
def clear_query(self):
# Warning: this method does not acquire the lock.
if self.has_query_:
self.has_query_ = 0;
if self.query_ is not None: self.query_.Clear()
def has_query(self): return self.has_query_
def gql_query(self):
if self.gql_query_ is None:
self.lazy_init_lock_.acquire()
try:
if self.gql_query_ is None: self.gql_query_ = GqlQuery()
finally:
self.lazy_init_lock_.release()
return self.gql_query_
def mutable_gql_query(self): self.has_gql_query_ = 1; return self.gql_query()
def clear_gql_query(self):
# Warning: this method does not acquire the lock.
if self.has_gql_query_:
self.has_gql_query_ = 0;
if self.gql_query_ is not None: self.gql_query_.Clear()
def has_gql_query(self): return self.has_gql_query_
def min_safe_time_seconds(self): return self.min_safe_time_seconds_
def set_min_safe_time_seconds(self, x):
self.has_min_safe_time_seconds_ = 1
self.min_safe_time_seconds_ = x
def clear_min_safe_time_seconds(self):
if self.has_min_safe_time_seconds_:
self.has_min_safe_time_seconds_ = 0
self.min_safe_time_seconds_ = 0
def has_min_safe_time_seconds(self): return self.has_min_safe_time_seconds_
def suggested_batch_size(self): return self.suggested_batch_size_
def set_suggested_batch_size(self, x):
self.has_suggested_batch_size_ = 1
self.suggested_batch_size_ = x
def clear_suggested_batch_size(self):
if self.has_suggested_batch_size_:
self.has_suggested_batch_size_ = 0
self.suggested_batch_size_ = 0
def has_suggested_batch_size(self): return self.has_suggested_batch_size_
def MergeFrom(self, x):
assert x is not self
if (x.has_read_options()): self.mutable_read_options().MergeFrom(x.read_options())
if (x.has_partition_id()): self.mutable_partition_id().MergeFrom(x.partition_id())
if (x.has_query()): self.mutable_query().MergeFrom(x.query())
if (x.has_gql_query()): self.mutable_gql_query().MergeFrom(x.gql_query())
if (x.has_min_safe_time_seconds()): self.set_min_safe_time_seconds(x.min_safe_time_seconds())
if (x.has_suggested_batch_size()): self.set_suggested_batch_size(x.suggested_batch_size())
def Equals(self, x):
if x is self: return 1
if self.has_read_options_ != x.has_read_options_: return 0
if self.has_read_options_ and self.read_options_ != x.read_options_: return 0
if self.has_partition_id_ != x.has_partition_id_: return 0
if self.has_partition_id_ and self.partition_id_ != x.partition_id_: return 0
if self.has_query_ != x.has_query_: return 0
if self.has_query_ and self.query_ != x.query_: return 0
if self.has_gql_query_ != x.has_gql_query_: return 0
if self.has_gql_query_ and self.gql_query_ != x.gql_query_: return 0
if self.has_min_safe_time_seconds_ != x.has_min_safe_time_seconds_: return 0
if self.has_min_safe_time_seconds_ and self.min_safe_time_seconds_ != x.min_safe_time_seconds_: return 0
if self.has_suggested_batch_size_ != x.has_suggested_batch_size_: return 0
if self.has_suggested_batch_size_ and self.suggested_batch_size_ != x.suggested_batch_size_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_read_options_ and not self.read_options_.IsInitialized(debug_strs)): initialized = 0
if (self.has_partition_id_ and not self.partition_id_.IsInitialized(debug_strs)): initialized = 0
if (self.has_query_ and not self.query_.IsInitialized(debug_strs)): initialized = 0
if (self.has_gql_query_ and not self.gql_query_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
if (self.has_read_options_): n += 1 + self.lengthString(self.read_options_.ByteSize())
if (self.has_partition_id_): n += 1 + self.lengthString(self.partition_id_.ByteSize())
if (self.has_query_): n += 1 + self.lengthString(self.query_.ByteSize())
if (self.has_gql_query_): n += 1 + self.lengthString(self.gql_query_.ByteSize())
if (self.has_min_safe_time_seconds_): n += 1 + self.lengthVarInt64(self.min_safe_time_seconds_)
if (self.has_suggested_batch_size_): n += 1 + self.lengthVarInt64(self.suggested_batch_size_)
return n
def ByteSizePartial(self):
n = 0
if (self.has_read_options_): n += 1 + self.lengthString(self.read_options_.ByteSizePartial())
if (self.has_partition_id_): n += 1 + self.lengthString(self.partition_id_.ByteSizePartial())
if (self.has_query_): n += 1 + self.lengthString(self.query_.ByteSizePartial())
if (self.has_gql_query_): n += 1 + self.lengthString(self.gql_query_.ByteSizePartial())
if (self.has_min_safe_time_seconds_): n += 1 + self.lengthVarInt64(self.min_safe_time_seconds_)
if (self.has_suggested_batch_size_): n += 1 + self.lengthVarInt64(self.suggested_batch_size_)
return n
def Clear(self):
self.clear_read_options()
self.clear_partition_id()
self.clear_query()
self.clear_gql_query()
self.clear_min_safe_time_seconds()
self.clear_suggested_batch_size()
def OutputUnchecked(self, out):
if (self.has_read_options_):
out.putVarInt32(10)
out.putVarInt32(self.read_options_.ByteSize())
self.read_options_.OutputUnchecked(out)
if (self.has_partition_id_):
out.putVarInt32(18)
out.putVarInt32(self.partition_id_.ByteSize())
self.partition_id_.OutputUnchecked(out)
if (self.has_query_):
out.putVarInt32(26)
out.putVarInt32(self.query_.ByteSize())
self.query_.OutputUnchecked(out)
if (self.has_min_safe_time_seconds_):
out.putVarInt32(32)
out.putVarInt64(self.min_safe_time_seconds_)
if (self.has_suggested_batch_size_):
out.putVarInt32(40)
out.putVarInt32(self.suggested_batch_size_)
if (self.has_gql_query_):
out.putVarInt32(58)
out.putVarInt32(self.gql_query_.ByteSize())
self.gql_query_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_read_options_):
out.putVarInt32(10)
out.putVarInt32(self.read_options_.ByteSizePartial())
self.read_options_.OutputPartial(out)
if (self.has_partition_id_):
out.putVarInt32(18)
out.putVarInt32(self.partition_id_.ByteSizePartial())
self.partition_id_.OutputPartial(out)
if (self.has_query_):
out.putVarInt32(26)
out.putVarInt32(self.query_.ByteSizePartial())
self.query_.OutputPartial(out)
if (self.has_min_safe_time_seconds_):
out.putVarInt32(32)
out.putVarInt64(self.min_safe_time_seconds_)
if (self.has_suggested_batch_size_):
out.putVarInt32(40)
out.putVarInt32(self.suggested_batch_size_)
if (self.has_gql_query_):
out.putVarInt32(58)
out.putVarInt32(self.gql_query_.ByteSizePartial())
self.gql_query_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_read_options().TryMerge(tmp)
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_partition_id().TryMerge(tmp)
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_query().TryMerge(tmp)
continue
if tt == 32:
self.set_min_safe_time_seconds(d.getVarInt64())
continue
if tt == 40:
self.set_suggested_batch_size(d.getVarInt32())
continue
if tt == 58:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_gql_query().TryMerge(tmp)
continue
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_read_options_:
res+=prefix+"read_options <\n"
res+=self.read_options_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_partition_id_:
res+=prefix+"partition_id <\n"
res+=self.partition_id_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_query_:
res+=prefix+"query <\n"
res+=self.query_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_gql_query_:
res+=prefix+"gql_query <\n"
res+=self.gql_query_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_min_safe_time_seconds_: res+=prefix+("min_safe_time_seconds: %s\n" % self.DebugFormatInt64(self.min_safe_time_seconds_))
if self.has_suggested_batch_size_: res+=prefix+("suggested_batch_size: %s\n" % self.DebugFormatInt32(self.suggested_batch_size_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kread_options = 1
kpartition_id = 2
kquery = 3
kgql_query = 7
kmin_safe_time_seconds = 4
ksuggested_batch_size = 5
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "read_options",
2: "partition_id",
3: "query",
4: "min_safe_time_seconds",
5: "suggested_batch_size",
7: "gql_query",
}, 7)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.NUMERIC,
7: ProtocolBuffer.Encoder.STRING,
}, 7, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.RunQueryRequest'
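# RunQueryResponse returns the first QueryResultBatch (required) and an
# optional opaque query_handle that ContinueQueryRequest accepts.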
class RunQueryResponse(ProtocolBuffer.ProtocolMessage):
has_batch_ = 0
has_query_handle_ = 0
query_handle_ = ""
def __init__(self, contents=None):
self.batch_ = QueryResultBatch()
if contents is not None: self.MergeFromString(contents)
def batch(self): return self.batch_
def mutable_batch(self): self.has_batch_ = 1; return self.batch_
def clear_batch(self):self.has_batch_ = 0; self.batch_.Clear()
def has_batch(self): return self.has_batch_
def query_handle(self): return self.query_handle_
def set_query_handle(self, x):
self.has_query_handle_ = 1
self.query_handle_ = x
def clear_query_handle(self):
if self.has_query_handle_:
self.has_query_handle_ = 0
self.query_handle_ = ""
def has_query_handle(self): return self.has_query_handle_
def MergeFrom(self, x):
assert x is not self
if (x.has_batch()): self.mutable_batch().MergeFrom(x.batch())
if (x.has_query_handle()): self.set_query_handle(x.query_handle())
def Equals(self, x):
if x is self: return 1
if self.has_batch_ != x.has_batch_: return 0
if self.has_batch_ and self.batch_ != x.batch_: return 0
if self.has_query_handle_ != x.has_query_handle_: return 0
if self.has_query_handle_ and self.query_handle_ != x.query_handle_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_batch_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: batch not set.')
elif not self.batch_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.batch_.ByteSize())
if (self.has_query_handle_): n += 1 + self.lengthString(len(self.query_handle_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_batch_):
n += 1
n += self.lengthString(self.batch_.ByteSizePartial())
if (self.has_query_handle_): n += 1 + self.lengthString(len(self.query_handle_))
return n
def Clear(self):
self.clear_batch()
self.clear_query_handle()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.batch_.ByteSize())
self.batch_.OutputUnchecked(out)
if (self.has_query_handle_):
out.putVarInt32(18)
out.putPrefixedString(self.query_handle_)
def OutputPartial(self, out):
if (self.has_batch_):
out.putVarInt32(10)
out.putVarInt32(self.batch_.ByteSizePartial())
self.batch_.OutputPartial(out)
if (self.has_query_handle_):
out.putVarInt32(18)
out.putPrefixedString(self.query_handle_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_batch().TryMerge(tmp)
continue
if tt == 18:
self.set_query_handle(d.getPrefixedString())
continue
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_batch_:
res+=prefix+"batch <\n"
res+=self.batch_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_query_handle_: res+=prefix+("query_handle: %s\n" % self.DebugFormatString(self.query_handle_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kbatch = 1
kquery_handle = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "batch",
2: "query_handle",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.RunQueryResponse'
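# ContinueQueryRequest resumes a query using the query_handle from a prior
# RunQueryResponse. An illustrative sketch, assuming `first` holds a parsed
# RunQueryResponse:
#
#   if first.has_query_handle():
#     nxt = ContinueQueryRequest()
#     nxt.set_query_handle(first.query_handle())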
class ContinueQueryRequest(ProtocolBuffer.ProtocolMessage):
has_query_handle_ = 0
query_handle_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def query_handle(self): return self.query_handle_
def set_query_handle(self, x):
self.has_query_handle_ = 1
self.query_handle_ = x
def clear_query_handle(self):
if self.has_query_handle_:
self.has_query_handle_ = 0
self.query_handle_ = ""
def has_query_handle(self): return self.has_query_handle_
def MergeFrom(self, x):
assert x is not self
if (x.has_query_handle()): self.set_query_handle(x.query_handle())
def Equals(self, x):
if x is self: return 1
if self.has_query_handle_ != x.has_query_handle_: return 0
if self.has_query_handle_ and self.query_handle_ != x.query_handle_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_query_handle_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: query_handle not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.query_handle_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_query_handle_):
n += 1
n += self.lengthString(len(self.query_handle_))
return n
def Clear(self):
self.clear_query_handle()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.query_handle_)
def OutputPartial(self, out):
if (self.has_query_handle_):
out.putVarInt32(10)
out.putPrefixedString(self.query_handle_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_query_handle(d.getPrefixedString())
continue
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_query_handle_: res+=prefix+("query_handle: %s\n" % self.DebugFormatString(self.query_handle_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kquery_handle = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "query_handle",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.ContinueQueryRequest'
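# ContinueQueryResponse carries the next required QueryResultBatch of a
# continued query.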
class ContinueQueryResponse(ProtocolBuffer.ProtocolMessage):
has_batch_ = 0
def __init__(self, contents=None):
self.batch_ = QueryResultBatch()
if contents is not None: self.MergeFromString(contents)
def batch(self): return self.batch_
def mutable_batch(self): self.has_batch_ = 1; return self.batch_
def clear_batch(self):self.has_batch_ = 0; self.batch_.Clear()
def has_batch(self): return self.has_batch_
def MergeFrom(self, x):
assert x is not self
if (x.has_batch()): self.mutable_batch().MergeFrom(x.batch())
def Equals(self, x):
if x is self: return 1
if self.has_batch_ != x.has_batch_: return 0
if self.has_batch_ and self.batch_ != x.batch_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_batch_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: batch not set.')
elif not self.batch_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.batch_.ByteSize())
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_batch_):
n += 1
n += self.lengthString(self.batch_.ByteSizePartial())
return n
def Clear(self):
self.clear_batch()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.batch_.ByteSize())
self.batch_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_batch_):
out.putVarInt32(10)
out.putVarInt32(self.batch_.ByteSizePartial())
self.batch_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_batch().TryMerge(tmp)
continue
      # Tag 0 is special: it is used to indicate an error,
      # so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_batch_:
res+=prefix+"batch <\n"
res+=self.batch_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kbatch = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "batch",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.ContinueQueryResponse'
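# BeginTransactionRequest opens a transaction and carries two optional
# boolean flags, cross_group and cross_request.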
class BeginTransactionRequest(ProtocolBuffer.ProtocolMessage):
has_cross_group_ = 0
cross_group_ = 0
has_cross_request_ = 0
cross_request_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def cross_group(self): return self.cross_group_
def set_cross_group(self, x):
self.has_cross_group_ = 1
self.cross_group_ = x
def clear_cross_group(self):
if self.has_cross_group_:
self.has_cross_group_ = 0
self.cross_group_ = 0
def has_cross_group(self): return self.has_cross_group_
def cross_request(self): return self.cross_request_
def set_cross_request(self, x):
self.has_cross_request_ = 1
self.cross_request_ = x
def clear_cross_request(self):
if self.has_cross_request_:
self.has_cross_request_ = 0
self.cross_request_ = 0
def has_cross_request(self): return self.has_cross_request_
def MergeFrom(self, x):
assert x is not self
if (x.has_cross_group()): self.set_cross_group(x.cross_group())
if (x.has_cross_request()): self.set_cross_request(x.cross_request())
def Equals(self, x):
if x is self: return 1
if self.has_cross_group_ != x.has_cross_group_: return 0
if self.has_cross_group_ and self.cross_group_ != x.cross_group_: return 0
if self.has_cross_request_ != x.has_cross_request_: return 0
if self.has_cross_request_ and self.cross_request_ != x.cross_request_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
if (self.has_cross_group_): n += 2
if (self.has_cross_request_): n += 2
return n
def ByteSizePartial(self):
n = 0
if (self.has_cross_group_): n += 2
if (self.has_cross_request_): n += 2
return n
def Clear(self):
self.clear_cross_group()
self.clear_cross_request()
def OutputUnchecked(self, out):
if (self.has_cross_group_):
out.putVarInt32(8)
out.putBoolean(self.cross_group_)
if (self.has_cross_request_):
out.putVarInt32(16)
out.putBoolean(self.cross_request_)
def OutputPartial(self, out):
if (self.has_cross_group_):
out.putVarInt32(8)
out.putBoolean(self.cross_group_)
if (self.has_cross_request_):
out.putVarInt32(16)
out.putBoolean(self.cross_request_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_cross_group(d.getBoolean())
continue
if tt == 16:
self.set_cross_request(d.getBoolean())
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_cross_group_: res+=prefix+("cross_group: %s\n" % self.DebugFormatBool(self.cross_group_))
if self.has_cross_request_: res+=prefix+("cross_request: %s\n" % self.DebugFormatBool(self.cross_request_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kcross_group = 1
kcross_request = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "cross_group",
2: "cross_request",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.NUMERIC,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.BeginTransactionRequest'
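# Illustrative usage sketch (not part of the generated module): like the
# other messages here, BeginTransactionRequest is populated through its
# accessor methods and copied/compared with MergeFrom/Equals, all of which
# are defined in the class above.
#
#   req = BeginTransactionRequest()
#   req.set_cross_group(1)            # allow a cross-group transaction
#   copy = BeginTransactionRequest()
#   copy.MergeFrom(req)               # field-by-field merge
#   assert copy.Equals(req) == 1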
class BeginTransactionResponse(ProtocolBuffer.ProtocolMessage):
has_transaction_ = 0
transaction_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def transaction(self): return self.transaction_
def set_transaction(self, x):
self.has_transaction_ = 1
self.transaction_ = x
def clear_transaction(self):
if self.has_transaction_:
self.has_transaction_ = 0
self.transaction_ = ""
def has_transaction(self): return self.has_transaction_
def MergeFrom(self, x):
assert x is not self
if (x.has_transaction()): self.set_transaction(x.transaction())
def Equals(self, x):
if x is self: return 1
if self.has_transaction_ != x.has_transaction_: return 0
if self.has_transaction_ and self.transaction_ != x.transaction_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_transaction_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: transaction not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.transaction_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_transaction_):
n += 1
n += self.lengthString(len(self.transaction_))
return n
def Clear(self):
self.clear_transaction()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.transaction_)
def OutputPartial(self, out):
if (self.has_transaction_):
out.putVarInt32(10)
out.putPrefixedString(self.transaction_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_transaction(d.getPrefixedString())
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_transaction_: res+=prefix+("transaction: %s\n" % self.DebugFormatString(self.transaction_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
ktransaction = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "transaction",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.BeginTransactionResponse'
class RollbackRequest(ProtocolBuffer.ProtocolMessage):
has_transaction_ = 0
transaction_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def transaction(self): return self.transaction_
def set_transaction(self, x):
self.has_transaction_ = 1
self.transaction_ = x
def clear_transaction(self):
if self.has_transaction_:
self.has_transaction_ = 0
self.transaction_ = ""
def has_transaction(self): return self.has_transaction_
def MergeFrom(self, x):
assert x is not self
if (x.has_transaction()): self.set_transaction(x.transaction())
def Equals(self, x):
if x is self: return 1
if self.has_transaction_ != x.has_transaction_: return 0
if self.has_transaction_ and self.transaction_ != x.transaction_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_transaction_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: transaction not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.transaction_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_transaction_):
n += 1
n += self.lengthString(len(self.transaction_))
return n
def Clear(self):
self.clear_transaction()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.transaction_)
def OutputPartial(self, out):
if (self.has_transaction_):
out.putVarInt32(10)
out.putPrefixedString(self.transaction_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_transaction(d.getPrefixedString())
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_transaction_: res+=prefix+("transaction: %s\n" % self.DebugFormatString(self.transaction_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
ktransaction = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "transaction",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.RollbackRequest'
class RollbackResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
pass
if contents is not None: self.MergeFromString(contents)
def MergeFrom(self, x):
assert x is not self
def Equals(self, x):
if x is self: return 1
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
return n
def ByteSizePartial(self):
n = 0
return n
def Clear(self):
pass
def OutputUnchecked(self, out):
pass
def OutputPartial(self, out):
pass
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
}, 0)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
}, 0, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.RollbackResponse'
class CommitRequest(ProtocolBuffer.ProtocolMessage):
# Mode values
TRANSACTIONAL = 1
NON_TRANSACTIONAL = 2
_Mode_NAMES = {
1: "TRANSACTIONAL",
2: "NON_TRANSACTIONAL",
}
def Mode_Name(cls, x): return cls._Mode_NAMES.get(x, "")
Mode_Name = classmethod(Mode_Name)
has_transaction_ = 0
transaction_ = ""
has_deprecated_mutation_ = 0
deprecated_mutation_ = None
has_mode_ = 0
mode_ = 1
has_ignore_read_only_ = 0
ignore_read_only_ = 0
def __init__(self, contents=None):
self.mutation_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def transaction(self): return self.transaction_
def set_transaction(self, x):
self.has_transaction_ = 1
self.transaction_ = x
def clear_transaction(self):
if self.has_transaction_:
self.has_transaction_ = 0
self.transaction_ = ""
def has_transaction(self): return self.has_transaction_
def mutation_size(self): return len(self.mutation_)
def mutation_list(self): return self.mutation_
def mutation(self, i):
return self.mutation_[i]
def mutable_mutation(self, i):
return self.mutation_[i]
def add_mutation(self):
x = Mutation()
self.mutation_.append(x)
return x
def clear_mutation(self):
self.mutation_ = []
def deprecated_mutation(self):
if self.deprecated_mutation_ is None:
self.lazy_init_lock_.acquire()
try:
if self.deprecated_mutation_ is None: self.deprecated_mutation_ = DeprecatedMutation()
finally:
self.lazy_init_lock_.release()
return self.deprecated_mutation_
def mutable_deprecated_mutation(self): self.has_deprecated_mutation_ = 1; return self.deprecated_mutation()
def clear_deprecated_mutation(self):
# Warning: this method does not acquire the lock.
if self.has_deprecated_mutation_:
self.has_deprecated_mutation_ = 0
if self.deprecated_mutation_ is not None: self.deprecated_mutation_.Clear()
def has_deprecated_mutation(self): return self.has_deprecated_mutation_
def mode(self): return self.mode_
def set_mode(self, x):
self.has_mode_ = 1
self.mode_ = x
def clear_mode(self):
if self.has_mode_:
self.has_mode_ = 0
self.mode_ = 1
def has_mode(self): return self.has_mode_
def ignore_read_only(self): return self.ignore_read_only_
def set_ignore_read_only(self, x):
self.has_ignore_read_only_ = 1
self.ignore_read_only_ = x
def clear_ignore_read_only(self):
if self.has_ignore_read_only_:
self.has_ignore_read_only_ = 0
self.ignore_read_only_ = 0
def has_ignore_read_only(self): return self.has_ignore_read_only_
def MergeFrom(self, x):
assert x is not self
if (x.has_transaction()): self.set_transaction(x.transaction())
for i in xrange(x.mutation_size()): self.add_mutation().CopyFrom(x.mutation(i))
if (x.has_deprecated_mutation()): self.mutable_deprecated_mutation().MergeFrom(x.deprecated_mutation())
if (x.has_mode()): self.set_mode(x.mode())
if (x.has_ignore_read_only()): self.set_ignore_read_only(x.ignore_read_only())
def Equals(self, x):
if x is self: return 1
if self.has_transaction_ != x.has_transaction_: return 0
if self.has_transaction_ and self.transaction_ != x.transaction_: return 0
if len(self.mutation_) != len(x.mutation_): return 0
for e1, e2 in zip(self.mutation_, x.mutation_):
if e1 != e2: return 0
if self.has_deprecated_mutation_ != x.has_deprecated_mutation_: return 0
if self.has_deprecated_mutation_ and self.deprecated_mutation_ != x.deprecated_mutation_: return 0
if self.has_mode_ != x.has_mode_: return 0
if self.has_mode_ and self.mode_ != x.mode_: return 0
if self.has_ignore_read_only_ != x.has_ignore_read_only_: return 0
if self.has_ignore_read_only_ and self.ignore_read_only_ != x.ignore_read_only_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.mutation_:
if not p.IsInitialized(debug_strs): initialized=0
if (self.has_deprecated_mutation_ and not self.deprecated_mutation_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
if (self.has_transaction_): n += 1 + self.lengthString(len(self.transaction_))
n += 1 * len(self.mutation_)
for i in xrange(len(self.mutation_)): n += self.lengthString(self.mutation_[i].ByteSize())
if (self.has_deprecated_mutation_): n += 1 + self.lengthString(self.deprecated_mutation_.ByteSize())
if (self.has_mode_): n += 1 + self.lengthVarInt64(self.mode_)
if (self.has_ignore_read_only_): n += 2
return n
def ByteSizePartial(self):
n = 0
if (self.has_transaction_): n += 1 + self.lengthString(len(self.transaction_))
n += 1 * len(self.mutation_)
for i in xrange(len(self.mutation_)): n += self.lengthString(self.mutation_[i].ByteSizePartial())
if (self.has_deprecated_mutation_): n += 1 + self.lengthString(self.deprecated_mutation_.ByteSizePartial())
if (self.has_mode_): n += 1 + self.lengthVarInt64(self.mode_)
if (self.has_ignore_read_only_): n += 2
return n
def Clear(self):
self.clear_transaction()
self.clear_mutation()
self.clear_deprecated_mutation()
self.clear_mode()
self.clear_ignore_read_only()
def OutputUnchecked(self, out):
if (self.has_transaction_):
out.putVarInt32(10)
out.putPrefixedString(self.transaction_)
if (self.has_deprecated_mutation_):
out.putVarInt32(18)
out.putVarInt32(self.deprecated_mutation_.ByteSize())
self.deprecated_mutation_.OutputUnchecked(out)
if (self.has_mode_):
out.putVarInt32(32)
out.putVarInt32(self.mode_)
for i in xrange(len(self.mutation_)):
out.putVarInt32(42)
out.putVarInt32(self.mutation_[i].ByteSize())
self.mutation_[i].OutputUnchecked(out)
if (self.has_ignore_read_only_):
out.putVarInt32(48)
out.putBoolean(self.ignore_read_only_)
def OutputPartial(self, out):
if (self.has_transaction_):
out.putVarInt32(10)
out.putPrefixedString(self.transaction_)
if (self.has_deprecated_mutation_):
out.putVarInt32(18)
out.putVarInt32(self.deprecated_mutation_.ByteSizePartial())
self.deprecated_mutation_.OutputPartial(out)
if (self.has_mode_):
out.putVarInt32(32)
out.putVarInt32(self.mode_)
for i in xrange(len(self.mutation_)):
out.putVarInt32(42)
out.putVarInt32(self.mutation_[i].ByteSizePartial())
self.mutation_[i].OutputPartial(out)
if (self.has_ignore_read_only_):
out.putVarInt32(48)
out.putBoolean(self.ignore_read_only_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_transaction(d.getPrefixedString())
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_deprecated_mutation().TryMerge(tmp)
continue
if tt == 32:
self.set_mode(d.getVarInt32())
continue
if tt == 42:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_mutation().TryMerge(tmp)
continue
if tt == 48:
self.set_ignore_read_only(d.getBoolean())
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_transaction_: res+=prefix+("transaction: %s\n" % self.DebugFormatString(self.transaction_))
cnt=0
for e in self.mutation_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("mutation%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_deprecated_mutation_:
res+=prefix+"deprecated_mutation <\n"
res+=self.deprecated_mutation_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_mode_: res+=prefix+("mode: %s\n" % self.DebugFormatInt32(self.mode_))
if self.has_ignore_read_only_: res+=prefix+("ignore_read_only: %s\n" % self.DebugFormatBool(self.ignore_read_only_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
ktransaction = 1
kmutation = 5
kdeprecated_mutation = 2
kmode = 4
kignore_read_only = 6
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "transaction",
2: "deprecated_mutation",
4: "mode",
5: "mutation",
6: "ignore_read_only",
}, 6)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.STRING,
6: ProtocolBuffer.Encoder.NUMERIC,
}, 6, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.CommitRequest'
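# Illustrative usage sketch (not part of the generated module): a commit is
# typically assembled with the mutator helpers defined above; txn_handle is
# a hypothetical placeholder for the bytes returned in a
# BeginTransactionResponse.
#
#   commit = CommitRequest()
#   commit.set_transaction(txn_handle)
#   commit.set_mode(CommitRequest.TRANSACTIONAL)
#   mutation = commit.add_mutation()  # appends a new Mutation and returns it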
class CommitResponse(ProtocolBuffer.ProtocolMessage):
has_deprecated_mutation_result_ = 0
deprecated_mutation_result_ = None
has_index_updates_ = 0
index_updates_ = 0
def __init__(self, contents=None):
self.mutation_result_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def mutation_result_size(self): return len(self.mutation_result_)
def mutation_result_list(self): return self.mutation_result_
def mutation_result(self, i):
return self.mutation_result_[i]
def mutable_mutation_result(self, i):
return self.mutation_result_[i]
def add_mutation_result(self):
x = MutationResult()
self.mutation_result_.append(x)
return x
def clear_mutation_result(self):
self.mutation_result_ = []
def deprecated_mutation_result(self):
if self.deprecated_mutation_result_ is None:
self.lazy_init_lock_.acquire()
try:
if self.deprecated_mutation_result_ is None: self.deprecated_mutation_result_ = DeprecatedMutationResult()
finally:
self.lazy_init_lock_.release()
return self.deprecated_mutation_result_
def mutable_deprecated_mutation_result(self): self.has_deprecated_mutation_result_ = 1; return self.deprecated_mutation_result()
def clear_deprecated_mutation_result(self):
# Warning: this method does not acquire the lock.
if self.has_deprecated_mutation_result_:
self.has_deprecated_mutation_result_ = 0
if self.deprecated_mutation_result_ is not None: self.deprecated_mutation_result_.Clear()
def has_deprecated_mutation_result(self): return self.has_deprecated_mutation_result_
def index_updates(self): return self.index_updates_
def set_index_updates(self, x):
self.has_index_updates_ = 1
self.index_updates_ = x
def clear_index_updates(self):
if self.has_index_updates_:
self.has_index_updates_ = 0
self.index_updates_ = 0
def has_index_updates(self): return self.has_index_updates_
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.mutation_result_size()): self.add_mutation_result().CopyFrom(x.mutation_result(i))
if (x.has_deprecated_mutation_result()): self.mutable_deprecated_mutation_result().MergeFrom(x.deprecated_mutation_result())
if (x.has_index_updates()): self.set_index_updates(x.index_updates())
def Equals(self, x):
if x is self: return 1
if len(self.mutation_result_) != len(x.mutation_result_): return 0
for e1, e2 in zip(self.mutation_result_, x.mutation_result_):
if e1 != e2: return 0
if self.has_deprecated_mutation_result_ != x.has_deprecated_mutation_result_: return 0
if self.has_deprecated_mutation_result_ and self.deprecated_mutation_result_ != x.deprecated_mutation_result_: return 0
if self.has_index_updates_ != x.has_index_updates_: return 0
if self.has_index_updates_ and self.index_updates_ != x.index_updates_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.mutation_result_:
if not p.IsInitialized(debug_strs): initialized=0
if (self.has_deprecated_mutation_result_ and not self.deprecated_mutation_result_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.mutation_result_)
for i in xrange(len(self.mutation_result_)): n += self.lengthString(self.mutation_result_[i].ByteSize())
if (self.has_deprecated_mutation_result_): n += 1 + self.lengthString(self.deprecated_mutation_result_.ByteSize())
if (self.has_index_updates_): n += 1 + self.lengthVarInt64(self.index_updates_)
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.mutation_result_)
for i in xrange(len(self.mutation_result_)): n += self.lengthString(self.mutation_result_[i].ByteSizePartial())
if (self.has_deprecated_mutation_result_): n += 1 + self.lengthString(self.deprecated_mutation_result_.ByteSizePartial())
if (self.has_index_updates_): n += 1 + self.lengthVarInt64(self.index_updates_)
return n
def Clear(self):
self.clear_mutation_result()
self.clear_deprecated_mutation_result()
self.clear_index_updates()
def OutputUnchecked(self, out):
if (self.has_deprecated_mutation_result_):
out.putVarInt32(10)
out.putVarInt32(self.deprecated_mutation_result_.ByteSize())
self.deprecated_mutation_result_.OutputUnchecked(out)
for i in xrange(len(self.mutation_result_)):
out.putVarInt32(26)
out.putVarInt32(self.mutation_result_[i].ByteSize())
self.mutation_result_[i].OutputUnchecked(out)
if (self.has_index_updates_):
out.putVarInt32(32)
out.putVarInt32(self.index_updates_)
def OutputPartial(self, out):
if (self.has_deprecated_mutation_result_):
out.putVarInt32(10)
out.putVarInt32(self.deprecated_mutation_result_.ByteSizePartial())
self.deprecated_mutation_result_.OutputPartial(out)
for i in xrange(len(self.mutation_result_)):
out.putVarInt32(26)
out.putVarInt32(self.mutation_result_[i].ByteSizePartial())
self.mutation_result_[i].OutputPartial(out)
if (self.has_index_updates_):
out.putVarInt32(32)
out.putVarInt32(self.index_updates_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_deprecated_mutation_result().TryMerge(tmp)
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_mutation_result().TryMerge(tmp)
continue
if tt == 32:
self.set_index_updates(d.getVarInt32())
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.mutation_result_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("mutation_result%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_deprecated_mutation_result_:
res+=prefix+"deprecated_mutation_result <\n"
res+=self.deprecated_mutation_result_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_index_updates_: res+=prefix+("index_updates: %s\n" % self.DebugFormatInt32(self.index_updates_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kmutation_result = 3
kdeprecated_mutation_result = 1
kindex_updates = 4
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "deprecated_mutation_result",
3: "mutation_result",
4: "index_updates",
}, 4)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.NUMERIC,
}, 4, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.CommitResponse'
class AllocateIdsRequest(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.allocate_ = []
self.reserve_ = []
if contents is not None: self.MergeFromString(contents)
def allocate_size(self): return len(self.allocate_)
def allocate_list(self): return self.allocate_
def allocate(self, i):
return self.allocate_[i]
def mutable_allocate(self, i):
return self.allocate_[i]
def add_allocate(self):
x = Key()
self.allocate_.append(x)
return x
def clear_allocate(self):
self.allocate_ = []
def reserve_size(self): return len(self.reserve_)
def reserve_list(self): return self.reserve_
def reserve(self, i):
return self.reserve_[i]
def mutable_reserve(self, i):
return self.reserve_[i]
def add_reserve(self):
x = Key()
self.reserve_.append(x)
return x
def clear_reserve(self):
self.reserve_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.allocate_size()): self.add_allocate().CopyFrom(x.allocate(i))
for i in xrange(x.reserve_size()): self.add_reserve().CopyFrom(x.reserve(i))
def Equals(self, x):
if x is self: return 1
if len(self.allocate_) != len(x.allocate_): return 0
for e1, e2 in zip(self.allocate_, x.allocate_):
if e1 != e2: return 0
if len(self.reserve_) != len(x.reserve_): return 0
for e1, e2 in zip(self.reserve_, x.reserve_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.allocate_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.reserve_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.allocate_)
for i in xrange(len(self.allocate_)): n += self.lengthString(self.allocate_[i].ByteSize())
n += 1 * len(self.reserve_)
for i in xrange(len(self.reserve_)): n += self.lengthString(self.reserve_[i].ByteSize())
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.allocate_)
for i in xrange(len(self.allocate_)): n += self.lengthString(self.allocate_[i].ByteSizePartial())
n += 1 * len(self.reserve_)
for i in xrange(len(self.reserve_)): n += self.lengthString(self.reserve_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_allocate()
self.clear_reserve()
def OutputUnchecked(self, out):
for i in xrange(len(self.allocate_)):
out.putVarInt32(10)
out.putVarInt32(self.allocate_[i].ByteSize())
self.allocate_[i].OutputUnchecked(out)
for i in xrange(len(self.reserve_)):
out.putVarInt32(18)
out.putVarInt32(self.reserve_[i].ByteSize())
self.reserve_[i].OutputUnchecked(out)
def OutputPartial(self, out):
for i in xrange(len(self.allocate_)):
out.putVarInt32(10)
out.putVarInt32(self.allocate_[i].ByteSizePartial())
self.allocate_[i].OutputPartial(out)
for i in xrange(len(self.reserve_)):
out.putVarInt32(18)
out.putVarInt32(self.reserve_[i].ByteSizePartial())
self.reserve_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_allocate().TryMerge(tmp)
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_reserve().TryMerge(tmp)
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.allocate_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("allocate%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
cnt=0
for e in self.reserve_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("reserve%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kallocate = 1
kreserve = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "allocate",
2: "reserve",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.AllocateIdsRequest'
class AllocateIdsResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.allocated_ = []
if contents is not None: self.MergeFromString(contents)
def allocated_size(self): return len(self.allocated_)
def allocated_list(self): return self.allocated_
def allocated(self, i):
return self.allocated_[i]
def mutable_allocated(self, i):
return self.allocated_[i]
def add_allocated(self):
x = Key()
self.allocated_.append(x)
return x
def clear_allocated(self):
self.allocated_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.allocated_size()): self.add_allocated().CopyFrom(x.allocated(i))
def Equals(self, x):
if x is self: return 1
if len(self.allocated_) != len(x.allocated_): return 0
for e1, e2 in zip(self.allocated_, x.allocated_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.allocated_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.allocated_)
for i in xrange(len(self.allocated_)): n += self.lengthString(self.allocated_[i].ByteSize())
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.allocated_)
for i in xrange(len(self.allocated_)): n += self.lengthString(self.allocated_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_allocated()
def OutputUnchecked(self, out):
for i in xrange(len(self.allocated_)):
out.putVarInt32(10)
out.putVarInt32(self.allocated_[i].ByteSize())
self.allocated_[i].OutputUnchecked(out)
def OutputPartial(self, out):
for i in xrange(len(self.allocated_)):
out.putVarInt32(10)
out.putVarInt32(self.allocated_[i].ByteSizePartial())
self.allocated_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_allocated().TryMerge(tmp)
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.allocated_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("allocated%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kallocated = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "allocated",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.AllocateIdsResponse'
if _extension_runtime:
pass
__all__ = ['Error','EntityResult','Query','KindExpression','PropertyReference','PropertyExpression','PropertyOrder','Filter','CompositeFilter','PropertyFilter','GqlQuery','GqlQueryArg','QueryResultBatch','Mutation','MutationResult','DeprecatedMutation','DeprecatedMutationResult','ReadOptions','LookupRequest','LookupResponse','RunQueryRequest','RunQueryResponse','ContinueQueryRequest','ContinueQueryResponse','BeginTransactionRequest','BeginTransactionResponse','RollbackRequest','RollbackResponse','CommitRequest','CommitResponse','AllocateIdsRequest','AllocateIdsResponse']
|
forifelse/fispTools
|
refs/heads/master
|
assimp/contrib/gtest/scripts/pump.py
|
2471
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.2.0 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
a variable number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
TOKEN_TABLE = [
(re.compile(r'\$var\s+'), '$var'),
(re.compile(r'\$elif\s+'), '$elif'),
(re.compile(r'\$else\s+'), '$else'),
(re.compile(r'\$for\s+'), '$for'),
(re.compile(r'\$if\s+'), '$if'),
(re.compile(r'\$range\s+'), '$range'),
(re.compile(r'\$[_A-Za-z]\w*'), '$id'),
(re.compile(r'\$\(\$\)'), '$($)'),
(re.compile(r'\$'), '$'),
(re.compile(r'\[\[\n?'), '[['),
(re.compile(r'\]\]\n?'), ']]'),
]
class Cursor:
"""Represents a position (line and column) in a text file."""
def __init__(self, line=-1, column=-1):
self.line = line
self.column = column
def __eq__(self, rhs):
return self.line == rhs.line and self.column == rhs.column
def __ne__(self, rhs):
return not self == rhs
def __lt__(self, rhs):
return self.line < rhs.line or (
self.line == rhs.line and self.column < rhs.column)
def __le__(self, rhs):
return self < rhs or self == rhs
def __gt__(self, rhs):
return rhs < self
def __ge__(self, rhs):
return rhs <= self
def __str__(self):
if self == Eof():
return 'EOF'
else:
return '%s(%s)' % (self.line + 1, self.column)
def __add__(self, offset):
return Cursor(self.line, self.column + offset)
def __sub__(self, offset):
return Cursor(self.line, self.column - offset)
def Clone(self):
"""Returns a copy of self."""
return Cursor(self.line, self.column)
# Special cursor to indicate the end-of-file.
def Eof():
"""Returns the special cursor to denote the end-of-file."""
return Cursor(-1, -1)
class Token:
"""Represents a token in a Pump source file."""
def __init__(self, start=None, end=None, value=None, token_type=None):
if start is None:
self.start = Eof()
else:
self.start = start
if end is None:
self.end = Eof()
else:
self.end = end
self.value = value
self.token_type = token_type
def __str__(self):
return 'Token @%s: \'%s\' type=%s' % (
self.start, self.value, self.token_type)
def Clone(self):
"""Returns a copy of self."""
return Token(self.start.Clone(), self.end.Clone(), self.value,
self.token_type)
def StartsWith(lines, pos, string):
"""Returns True iff the given position in lines starts with 'string'."""
return lines[pos.line][pos.column:].startswith(string)
def FindFirstInLine(line, token_table):
best_match_start = -1
for (regex, token_type) in token_table:
m = regex.search(line)
if m:
# This regex matched; keep the earliest match found in the line.
if best_match_start < 0 or m.start() < best_match_start:
best_match_start = m.start()
best_match_length = m.end() - m.start()
best_match_token_type = token_type
if best_match_start < 0:
return None
return (best_match_start, best_match_length, best_match_token_type)
def FindFirst(lines, token_table, cursor):
"""Finds the first occurrence of any string in strings in lines."""
start = cursor.Clone()
cur_line_number = cursor.line
for line in lines[start.line:]:
if cur_line_number == start.line:
line = line[start.column:]
m = FindFirstInLine(line, token_table)
if m:
# We found a token match in this line.
(start_column, length, token_type) = m
if cur_line_number == start.line:
start_column += start.column
found_start = Cursor(cur_line_number, start_column)
found_end = found_start + length
return MakeToken(lines, found_start, found_end, token_type)
cur_line_number += 1
# No token was found in the remaining lines.
return None
def SubString(lines, start, end):
"""Returns a substring in lines."""
if end == Eof():
end = Cursor(len(lines) - 1, len(lines[-1]))
if start >= end:
return ''
if start.line == end.line:
return lines[start.line][start.column:end.column]
result_lines = ([lines[start.line][start.column:]] +
lines[start.line + 1:end.line] +
[lines[end.line][:end.column]])
return ''.join(result_lines)
def StripMetaComments(str):
"""Strip meta comments from each line in the given string."""
# First, completely remove lines containing nothing but a meta
# comment, including the trailing \n.
str = re.sub(r'^\s*\$\$.*\n', '', str)
# Then, remove meta comments from contentful lines.
return re.sub(r'\s*\$\$.*', '', str)
def MakeToken(lines, start, end, token_type):
"""Creates a new instance of Token."""
return Token(start, end, SubString(lines, start, end), token_type)
def ParseToken(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = regex.search(line)
if m and not m.start():
return MakeToken(lines, pos, pos + m.end(), token_type)
else:
print 'ERROR: %s expected at %s.' % (token_type, pos)
sys.exit(1)
ID_REGEX = re.compile(r'[_A-Za-z]\w*')
EQ_REGEX = re.compile(r'=')
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')
def Skip(lines, pos, regex):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m and not m.start():
return pos + m.end()
else:
return pos
def SkipUntil(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m:
return pos + m.start()
else:
print ('ERROR: %s expected on line %s after column %s.' %
(token_type, pos.line + 1, pos.column))
sys.exit(1)
def ParseExpTokenInParens(lines, pos):
def ParseInParens(pos):
pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
pos = Skip(lines, pos, r'\(')
pos = Parse(pos)
pos = Skip(lines, pos, r'\)')
return pos
def Parse(pos):
pos = SkipUntil(lines, pos, r'\(|\)', ')')
if SubString(lines, pos, pos + 1) == '(':
pos = Parse(pos + 1)
pos = Skip(lines, pos, r'\)')
return Parse(pos)
else:
return pos
start = pos.Clone()
pos = ParseInParens(pos)
return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
if token.value.endswith('\n'):
return Token(token.start, token.end, token.value[:-1], token.token_type)
else:
return token
def TokenizeLines(lines, pos):
while True:
found = FindFirst(lines, TOKEN_TABLE, pos)
if not found:
yield MakeToken(lines, pos, Eof(), 'code')
return
if found.start == pos:
prev_token = None
prev_token_rstripped = None
else:
prev_token = MakeToken(lines, pos, found.start, 'code')
prev_token_rstripped = RStripNewLineFromToken(prev_token)
if found.token_type == '$var':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
yield eq_token
pos = Skip(lines, eq_token.end, r'\s*')
if SubString(lines, pos, pos + 2) != '[[':
exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
yield exp_token
pos = Cursor(exp_token.end.line + 1, 0)
elif found.token_type == '$for':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
elif found.token_type == '$range':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
yield MakeToken(lines, pos, dots_pos, 'exp')
yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
pos = dots_pos + 2
new_pos = Cursor(pos.line + 1, 0)
yield MakeToken(lines, pos, new_pos, 'exp')
pos = new_pos
elif found.token_type == '$':
if prev_token:
yield prev_token
yield found
exp_token = ParseExpTokenInParens(lines, found.end)
yield exp_token
pos = exp_token.end
elif (found.token_type == ']]' or found.token_type == '$if' or
found.token_type == '$elif' or found.token_type == '$else'):
if prev_token_rstripped:
yield prev_token_rstripped
yield found
pos = found.end
else:
if prev_token:
yield prev_token
yield found
pos = found.end
def Tokenize(s):
"""A generator that yields the tokens in the given string."""
if s != '':
lines = s.splitlines(True)
for token in TokenizeLines(lines, Cursor(0, 0)):
yield token
class CodeNode:
def __init__(self, atomic_code_list=None):
self.atomic_code = atomic_code_list
class VarNode:
def __init__(self, identifier=None, atomic_code=None):
self.identifier = identifier
self.atomic_code = atomic_code
class RangeNode:
def __init__(self, identifier=None, exp1=None, exp2=None):
self.identifier = identifier
self.exp1 = exp1
self.exp2 = exp2
class ForNode:
def __init__(self, identifier=None, sep=None, code=None):
self.identifier = identifier
self.sep = sep
self.code = code
class ElseNode:
def __init__(self, else_branch=None):
self.else_branch = else_branch
class IfNode:
def __init__(self, exp=None, then_branch=None, else_branch=None):
self.exp = exp
self.then_branch = then_branch
self.else_branch = else_branch
class RawCodeNode:
def __init__(self, token=None):
self.raw_code = token
class LiteralDollarNode:
def __init__(self, token):
self.token = token
class ExpNode:
def __init__(self, token, python_exp):
self.token = token
self.python_exp = python_exp
def PopFront(a_list):
head = a_list[0]
a_list[:1] = []
return head
def PushFront(a_list, elem):
a_list[:0] = [elem]
def PopToken(a_list, token_type=None):
token = PopFront(a_list)
if token_type is not None and token.token_type != token_type:
print 'ERROR: %s expected at %s' % (token_type, token.start)
print 'ERROR: %s found instead' % (token,)
sys.exit(1)
return token
def PeekToken(a_list):
if not a_list:
return None
return a_list[0]
def ParseExpNode(token):
python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
return ExpNode(token, python_exp)
def ParseElseNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
next = PeekToken(tokens)
if not next:
return None
if next.token_type == '$else':
Pop('$else')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
elif next.token_type == '$elif':
Pop('$elif')
exp = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
inner_else_node = ParseElseNode(tokens)
return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
elif not next.value.strip():
Pop('code')
return ParseElseNode(tokens)
else:
return None
def ParseAtomicCodeNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
head = PopFront(tokens)
t = head.token_type
if t == 'code':
return RawCodeNode(head)
elif t == '$var':
id_token = Pop('id')
Pop('=')
next = PeekToken(tokens)
if next.token_type == 'exp':
exp_token = Pop()
return VarNode(id_token, ParseExpNode(exp_token))
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return VarNode(id_token, code_node)
elif t == '$for':
id_token = Pop('id')
next_token = PeekToken(tokens)
if next_token.token_type == 'code':
sep_token = next_token
Pop('code')
else:
sep_token = None
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return ForNode(id_token, sep_token, code_node)
elif t == '$if':
exp_token = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
else_node = ParseElseNode(tokens)
return IfNode(ParseExpNode(exp_token), code_node, else_node)
elif t == '$range':
id_token = Pop('id')
exp1_token = Pop('exp')
Pop('..')
exp2_token = Pop('exp')
return RangeNode(id_token, ParseExpNode(exp1_token),
ParseExpNode(exp2_token))
elif t == '$id':
return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
elif t == '$($)':
return LiteralDollarNode(head)
elif t == '$':
exp_token = Pop('exp')
return ParseExpNode(exp_token)
elif t == '[[':
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
else:
PushFront(tokens, head)
return None
def ParseCodeNode(tokens):
atomic_code_list = []
while True:
if not tokens:
break
atomic_code_node = ParseAtomicCodeNode(tokens)
if atomic_code_node:
atomic_code_list.append(atomic_code_node)
else:
break
return CodeNode(atomic_code_list)
def ParseToAST(pump_src_text):
"""Convert the given Pump source text into an AST."""
tokens = list(Tokenize(pump_src_text))
code_node = ParseCodeNode(tokens)
return code_node
class Env:
def __init__(self):
self.variables = []
self.ranges = []
def Clone(self):
clone = Env()
clone.variables = self.variables[:]
clone.ranges = self.ranges[:]
return clone
def PushVariable(self, var, value):
# If value looks like an int, store it as an int.
try:
int_value = int(value)
if ('%s' % int_value) == value:
value = int_value
except Exception:
pass
self.variables[:0] = [(var, value)]
def PopVariable(self):
self.variables[:1] = []
def PushRange(self, var, lower, upper):
self.ranges[:0] = [(var, lower, upper)]
def PopRange(self):
self.ranges[:1] = []
def GetValue(self, identifier):
for (var, value) in self.variables:
if identifier == var:
return value
print 'ERROR: meta variable %s is undefined.' % (identifier,)
sys.exit(1)
def EvalExp(self, exp):
try:
result = eval(exp.python_exp)
except Exception, e:
print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
print ('ERROR: failed to evaluate meta expression %s at %s' %
(exp.python_exp, exp.token.start))
sys.exit(1)
return result
def GetRange(self, identifier):
for (var, lower, upper) in self.ranges:
if identifier == var:
return (lower, upper)
print 'ERROR: range %s is undefined.' % (identifier,)
sys.exit(1)
class Output:
def __init__(self):
self.string = ''
def GetLastLine(self):
index = self.string.rfind('\n')
if index < 0:
return ''
return self.string[index + 1:]
def Append(self, s):
self.string += s
def RunAtomicCode(env, node, output):
if isinstance(node, VarNode):
identifier = node.identifier.value.strip()
result = Output()
RunAtomicCode(env.Clone(), node.atomic_code, result)
value = result.string
env.PushVariable(identifier, value)
elif isinstance(node, RangeNode):
identifier = node.identifier.value.strip()
lower = int(env.EvalExp(node.exp1))
upper = int(env.EvalExp(node.exp2))
env.PushRange(identifier, lower, upper)
elif isinstance(node, ForNode):
identifier = node.identifier.value.strip()
if node.sep is None:
sep = ''
else:
sep = node.sep.value
(lower, upper) = env.GetRange(identifier)
for i in range(lower, upper + 1):
new_env = env.Clone()
new_env.PushVariable(identifier, i)
RunCode(new_env, node.code, output)
if i != upper:
output.Append(sep)
elif isinstance(node, RawCodeNode):
output.Append(node.raw_code.value)
elif isinstance(node, IfNode):
cond = env.EvalExp(node.exp)
if cond:
RunCode(env.Clone(), node.then_branch, output)
elif node.else_branch is not None:
RunCode(env.Clone(), node.else_branch, output)
elif isinstance(node, ExpNode):
value = env.EvalExp(node)
output.Append('%s' % (value,))
elif isinstance(node, LiteralDollarNode):
output.Append('$')
elif isinstance(node, CodeNode):
RunCode(env.Clone(), node, output)
else:
print 'BAD'
print node
sys.exit(1)
def RunCode(env, code_node, output):
for atomic_code in code_node.atomic_code:
RunAtomicCode(env, atomic_code, output)
def IsSingleLineComment(cur_line):
return '//' in cur_line
def IsInPreprocessorDirective(prev_lines, cur_line):
if cur_line.lstrip().startswith('#'):
return True
return prev_lines and prev_lines[-1].endswith('\\')
def WrapComment(line, output):
loc = line.find('//')
before_comment = line[:loc].rstrip()
if before_comment == '':
indent = loc
else:
output.append(before_comment)
indent = len(before_comment) - len(before_comment.lstrip())
prefix = indent*' ' + '// '
max_len = 80 - len(prefix)
comment = line[loc + 2:].strip()
segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
cur_line = ''
for seg in segs:
if len((cur_line + seg).rstrip()) < max_len:
cur_line += seg
else:
if cur_line.strip() != '':
output.append(prefix + cur_line.rstrip())
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapCode(line, line_concat, output):
indent = len(line) - len(line.lstrip())
prefix = indent*' ' # Prefix of the current line
max_len = 80 - indent - len(line_concat) # Maximum length of the current line
new_prefix = prefix + 4*' ' # Prefix of a continuation line
new_max_len = max_len - 4 # Maximum length of a continuation line
# Prefers to wrap a line after a ',' or ';'.
segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
cur_line = '' # The current line without leading spaces.
for seg in segs:
# If the line is still too long, wrap at a space.
while cur_line == '' and len(seg.strip()) > max_len:
seg = seg.lstrip()
split_at = seg.rfind(' ', 0, max_len)
output.append(prefix + seg[:split_at].strip() + line_concat)
seg = seg[split_at + 1:]
prefix = new_prefix
max_len = new_max_len
if len((cur_line + seg).rstrip()) < max_len:
cur_line = (cur_line + seg).lstrip()
else:
output.append(prefix + cur_line.rstrip() + line_concat)
prefix = new_prefix
max_len = new_max_len
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapPreprocessorDirective(line, output):
WrapCode(line, ' \\', output)
def WrapPlainCode(line, output):
WrapCode(line, '', output)
def IsMultiLineIWYUPragma(line):
return re.search(r'/\* IWYU pragma: ', line)
def IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
return (re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line) or
re.match(r'^#include\s', line) or
# Don't break IWYU pragmas, either; that causes iwyu.py problems.
re.search(r'// IWYU pragma: ', line))
def WrapLongLine(line, output):
line = line.rstrip()
if len(line) <= 80:
output.append(line)
elif IsSingleLineComment(line):
if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
# The style guide made an exception to allow long header guard lines,
# includes and IWYU pragmas.
output.append(line)
else:
WrapComment(line, output)
elif IsInPreprocessorDirective(output, line):
if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
# The style guide made an exception to allow long header guard lines,
# includes and IWYU pragmas.
output.append(line)
else:
WrapPreprocessorDirective(line, output)
elif IsMultiLineIWYUPragma(line):
output.append(line)
else:
WrapPlainCode(line, output)
def BeautifyCode(string):
lines = string.splitlines()
output = []
for line in lines:
WrapLongLine(line, output)
output2 = [line.rstrip() for line in output]
return '\n'.join(output2) + '\n'
def ConvertFromPumpSource(src_text):
"""Return the text generated from the given Pump source text."""
ast = ParseToAST(StripMetaComments(src_text))
output = Output()
RunCode(Env(), ast, output)
return BeautifyCode(output.string)
def main(argv):
if len(argv) == 1:
print __doc__
sys.exit(1)
file_path = argv[-1]
output_str = ConvertFromPumpSource(file(file_path, 'r').read())
if file_path.endswith('.pump'):
output_file_path = file_path[:-5]
else:
output_file_path = '-'
if output_file_path == '-':
print output_str,
else:
output_file = file(output_file_path, 'w')
output_file.write('// This file was GENERATED by command:\n')
output_file.write('// %s %s\n' %
(os.path.basename(__file__), os.path.basename(file_path)))
output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
output_file.write(output_str)
output_file.close()
if __name__ == '__main__':
main(sys.argv)
|
VishvajitP/readthedocs.org
|
refs/heads/master
|
readthedocs/rtd_tests/tests/test_backend.py
|
30
|
from os.path import exists
from django.contrib.auth.models import User
from readthedocs.projects.models import Project
from readthedocs.rtd_tests.base import RTDTestCase
from readthedocs.rtd_tests.utils import make_test_git, make_test_hg
class TestGitBackend(RTDTestCase):
def setUp(self):
git_repo = make_test_git()
super(TestGitBackend, self).setUp()
self.eric = User(username='eric')
self.eric.set_password('test')
self.eric.save()
self.project = Project.objects.create(
name="Test Project",
repo_type="git",
# Our top-level checkout
repo=git_repo
)
self.project.users.add(self.eric)
def test_parse_branches(self):
data = """
develop
master
release/2.0.0
origin/2.0.X
origin/HEAD -> origin/master
origin/master
origin/release/2.0.0
"""
expected_ids = [
('develop', 'develop'),
('master', 'master'),
('release/2.0.0', 'release-2.0.0'),
('origin/2.0.X', '2.0.X'),
('origin/master', 'master'),
('origin/release/2.0.0', 'release-2.0.0')
]
given_ids = [(x.identifier, x.verbose_name) for x in
self.project.vcs_repo().parse_branches(data)]
self.assertEqual(expected_ids, given_ids)
def test_git_checkout(self):
repo = self.project.vcs_repo()
repo.checkout()
self.assertTrue(exists(repo.working_dir))
def test_parse_git_tags(self):
data = """\
3b32886c8d3cb815df3793b3937b2e91d0fb00f1 refs/tags/2.0.0
bd533a768ff661991a689d3758fcfe72f455435d refs/tags/2.0.1
c0288a17899b2c6818f74e3a90b77e2a1779f96a refs/tags/2.0.2
a63a2de628a3ce89034b7d1a5ca5e8159534eef0 refs/tags/2.1.0.beta2
c7fc3d16ed9dc0b19f0d27583ca661a64562d21e refs/tags/2.1.0.rc1
edc0a2d02a0cc8eae8b67a3a275f65cd126c05b1 refs/tags/2.1.0.rc2
"""
expected_tags = [
('3b32886c8d3cb815df3793b3937b2e91d0fb00f1', '2.0.0'),
('bd533a768ff661991a689d3758fcfe72f455435d', '2.0.1'),
('c0288a17899b2c6818f74e3a90b77e2a1779f96a', '2.0.2'),
('a63a2de628a3ce89034b7d1a5ca5e8159534eef0', '2.1.0.beta2'),
('c7fc3d16ed9dc0b19f0d27583ca661a64562d21e', '2.1.0.rc1'),
('edc0a2d02a0cc8eae8b67a3a275f65cd126c05b1', '2.1.0.rc2'),
]
given_ids = [(x.identifier, x.verbose_name) for x in
self.project.vcs_repo().parse_tags(data)]
self.assertEqual(expected_tags, given_ids)
class TestHgBackend(RTDTestCase):
def setUp(self):
hg_repo = make_test_hg()
super(TestHgBackend, self).setUp()
self.eric = User(username='eric')
self.eric.set_password('test')
self.eric.save()
self.project = Project.objects.create(
name="Test Project",
repo_type="hg",
# Our top-level checkout
repo=hg_repo
)
self.project.users.add(self.eric)
def test_parse_branches(self):
data = """\
stable
default
"""
expected_ids = ['stable', 'default']
given_ids = [x.identifier for x in
self.project.vcs_repo().parse_branches(data)]
self.assertEqual(expected_ids, given_ids)
def test_checkout(self):
repo = self.project.vcs_repo()
repo.checkout()
self.assertTrue(exists(repo.working_dir))
def test_parse_tags(self):
data = """\
tip 13575:8e94a1b4e9a4
1.8.1 13573:aa1f3be38ab1
1.8 13515:2616325766e3
1.7.5 13334:2b2155623ee2
"""
expected_tags = [
('aa1f3be38ab1', '1.8.1'),
('2616325766e3', '1.8'),
('2b2155623ee2', '1.7.5'),
]
given_ids = [(x.identifier, x.verbose_name) for x in
self.project.vcs_repo().parse_tags(data)]
self.assertEqual(expected_tags, given_ids)
|
smnslwl/project_euler
|
refs/heads/master
|
50/50.py
|
1
|
"""
The prime 41 can be written as the sum of six consecutive primes:
41 = 2 + 3 + 5 + 7 + 11 + 13
This is the longest sum of consecutive primes that adds to a prime below
one-hundred.
The longest sum of consecutive primes below one-thousand that adds to a
prime contains 21 terms and is equal to 953.
Which prime, below one-million, can be written as the sum of the most
consecutive primes?
"""
from math import sqrt
def is_prime(n):
"""Returns true if n is a prime number"""
if n == 2:
return True
if not n % 2 or n < 2:
return False
return all(n % x for x in range(3, int(sqrt(n)) + 1, 2))
def get_primes(upper):
"""Generate prime numbers upto upper"""
yield 2
p = 3
while p <= upper:
if is_prime(p):
yield p
p += 2
if __name__ == '__main__':
upper = 1000000
psums = [0]
for count, prime in enumerate(get_primes(upper)):
psums.append(psums[count] + prime)
if psums[count + 1] > upper:
break
max_len, max_prime = 1, 2
for i in range(count + 1):
for j in range(i + max_len, count + 1):
if is_prime(psums[j] - psums[i]) and j - i > max_len:
max_len, max_prime = j - i, psums[j] - psums[i]
print(max_prime)
"""
NOTE:
We first build a list of the sums of nth primes
until the sum exceeds our upper bound. While P(k), where k is an
index in the list of the primes , represents P(0) + ... + P(k),
we can find the sum of P(a) + ... + P(b) by subtracting
the sum P(0) + ... + P(a) from the sum P(0) + ... + P(b).
Then we see if the sum of prime from the ath prime to the bth prime,
where a < b, is a prime number. If it is, and it contains the more terms
than previously found, we update the value of the longest terms and the
prime generated by summing them up.
"""
|
EvanK/ansible
|
refs/heads/devel
|
lib/ansible/modules/web_infrastructure/ansible_tower/tower_inventory.py
|
14
|
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_inventory
version_added: "2.3"
author: "Wayne Witzel III (@wwitzel3)"
short_description: create, update, or destroy Ansible Tower inventory.
description:
- Create, update, or destroy Ansible Tower inventories. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- The name to use for the inventory.
required: True
description:
description:
- The description to use for the inventory.
organization:
description:
- Organization the inventory belongs to.
required: True
variables:
description:
- Inventory variables. Use C(@) to get from file.
kind:
description:
      - The kind field. Cannot be modified after creation.
default: ""
choices: ["", "smart"]
version_added: "2.7"
host_filter:
description:
- The host_filter field. Only useful when C(kind=smart).
version_added: "2.7"
state:
description:
- Desired state of the resource.
default: "present"
choices: ["present", "absent"]
extends_documentation_fragment: tower
'''
EXAMPLES = '''
- name: Add tower inventory
tower_inventory:
name: "Foo Inventory"
description: "Our Foo Cloud Servers"
organization: "Bar Org"
state: present
tower_config_file: "~/tower_cli.cfg"
'''
from ansible.module_utils.ansible_tower import TowerModule, tower_auth_config, tower_check_mode
try:
import tower_cli
import tower_cli.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
def main():
argument_spec = dict(
name=dict(required=True),
description=dict(),
organization=dict(required=True),
variables=dict(),
kind=dict(choices=['', 'smart'], default=''),
host_filter=dict(),
state=dict(choices=['present', 'absent'], default='present'),
)
module = TowerModule(argument_spec=argument_spec, supports_check_mode=True)
name = module.params.get('name')
description = module.params.get('description')
organization = module.params.get('organization')
variables = module.params.get('variables')
state = module.params.get('state')
kind = module.params.get('kind')
host_filter = module.params.get('host_filter')
json_output = {'inventory': name, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
inventory = tower_cli.get_resource('inventory')
try:
org_res = tower_cli.get_resource('organization')
org = org_res.get(name=organization)
if state == 'present':
result = inventory.modify(name=name, organization=org['id'], variables=variables,
description=description, kind=kind, host_filter=host_filter,
create_on_missing=True)
json_output['id'] = result['id']
elif state == 'absent':
result = inventory.delete(name=name, organization=org['id'])
except (exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update inventory, organization not found: {0}'.format(excinfo), changed=False)
except (exc.ConnectionError, exc.BadRequest) as excinfo:
module.fail_json(msg='Failed to update inventory: {0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
if __name__ == '__main__':
main()
|
rehandalal/standup
|
refs/heads/master
|
standup/migrations/versions/004_add_many_to_many_helper_table_for_teams_and_users.py
|
3
|
from sqlalchemy import *
meta = MetaData()
team_users = Table('team_users', meta,
Column('team_id', Integer, ForeignKey('team.id')),
Column('user_id', Integer, ForeignKey('user.id')))
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
meta.bind = migrate_engine
team = Table('team', meta, autoload=True)
user = Table('user', meta, autoload=True)
    # Drop any leftover team_users table from a previous run before recreating it
    try:
        team_users.drop()
    except:
        pass
team_users.create()
result = user.select().execute().fetchall()
for row in result:
if row.team_id:
values = {'team_id': row.team_id, 'user_id': row.id}
team_users.insert(values=values).execute()
user.c.team_id.drop()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta.bind = migrate_engine
team = Table('team', meta, autoload=True)
user = Table('user', meta, autoload=True)
team_id = Column('team_id', Integer, ForeignKey('team.id'))
team_id.create(user)
result = team_users.select().execute().fetchall()
for row in result:
values = {'team_id': row.team_id}
user.update(values=values).where(user.c.id == row.user_id).execute()
team_users.drop()
|
lafranceinsoumise/api-django
|
refs/heads/master
|
agir/events/actions/__init__.py
|
1
|
from .export import *
|
Niranjan-K/andes
|
refs/heads/master
|
modules/andes-core/perftests/bin/processing/processTests.py
|
25
|
#!/usr/bin/env python
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import re
import datetime
import sys
import string
from optparse import OptionParser
from datetime import datetime, timedelta
import shutil
def showUsage():
log("./processTests.py [-b|--broker-log-dir] <dir> [-t|--test-dir] <dir>")
ACCESS="Access"
MODIFY="Modify"
BROKER_LOG="broker.log"
BROKER_PID="broker.pid"
BROKER_CPU="broker_cpu.log"
BROKER_CPU_DATED="broker_cpu.log.dated"
BROKER_STATS="broker.stats"
BROKER_GC="gc.log"
GRAPH_DATA="graph.data"
_verbose = False
_debug = False
_brokerLogs = ""
def exitError(message):
log(message)
sys.exit(1)
def main():
global _log, _verbose, _debug, _brokerLogs
    # Parse the command line options
parser = OptionParser()
parser.add_option("-v", "--verbose", dest="verbose",
action="store_true", default=False, help="enable verbose output")
parser.add_option("-d", "--debug", dest="debug",
action="store_true", default=False, help="enable debug output")
parser.add_option("-b", "--broker-log-dir", dest="brokerLogs",
action="store", default=True, help="Broker Logs")
parser.add_option("-t", "--test-dir", dest="testDir",
action="store", default="", help="Test Results")
(options, args) = parser.parse_args()
_verbose = options.verbose
_debug = options.debug
testDir = options.testDir
_brokerLogs = options.brokerLogs
if testDir == "" or _brokerLogs == "" :
log("Broker Log Dir and Test Dir are both requried.")
showUsage()
if not os.path.exists(testDir):
exitError("Test directory does not exist:" + testDir)
if not os.path.exists(_brokerLogs):
exitError("Broker log directory does not exist:" + _brokerLogs)
# Standardize the format of the broker logs
preProcessBrokerLogs(_brokerLogs)
# Get list of test results from test_dir
processTestResults(testDir)
#
# Process the log files we know of
#
def preProcessBrokerLogs(resultDir):
print "Pre Processing Broker Logs"
# Pre-Process GC - no pre processing required
# Process Log4j - no processing required as file is already time stamped.
# Pre-Process broker_cpu
processCPUUsage(resultDir)
#
# Process the broker CPU log file and create an output file of format
# <Date Time> <CPU Usage>
#
#
def processCPUUsage(resultDir):
logfile=resultDir+os.sep+BROKER_CPU
datedFile=resultDir+os.sep+BROKER_CPU_DATED
start = extractTime(ACCESS, logfile+".stat")
pid = getPID(BROKER_PID)
topRate = getFirstLine(_brokerLogs+os.sep+"top.rate")
#
    # Calculate the time offset to add per processed line of output
#
if topRate.find(".") == -1:
seconds = topRate
millis = 0
else:
split = topRate.split('.')
seconds = split[0]
        # Convert the fractional part of the rate to milliseconds
millis = float("0."+split[1]) * 1000
offset = timedelta(seconds=int(seconds),milliseconds=int(millis))
#
# Process the CPU log file and make a file of format:
# datetime <CPU% usage> <MEM% usage>
#
# Open log CPU file for reading
logfile = open(logfile, "r")
# Open the output file, erasing any existing version
    # Keep track of the min/max, sum and number of entries.
minCPU=float(sys.maxint)
maxCPU=0.0
minMem=float(sys.maxint)
maxMem=0.0
entries=0
sumCPU=0.0
sumMem=0.0
output= open(datedFile, "w")
for line in logfile:
#
# Data format
# 0 1 2 3 4 5 6 7 8 9 10 11
# PID USER PR NI %CPU TIME+ %MEM VIRT RES SHR S COMMAND
# PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
#
        # The %CPU and %MEM column positions vary, probably based on os/version of top.
# lets auto-detect where it is.
#
# Index is 0 based for array usage.
index = 0
if line.find("PID") != -1:
for key in line.split(" "):
strippedKey = key.lstrip()
if len(strippedKey) > 0:
# Record the key index
if (strippedKey == "%CPU"):
cpuIndex=index
if (strippedKey == "%MEM"):
memIndex=index
# Increase count for next key
index = index + 1
# Find lines that contain our broker process
if line.find("QPBRKR") != -1:
# Split line on whitespace
data = line.split()
#Write out the date time (ISO-8601 format)
output.write(str(start))
# Output the %CPU value
output.write(" "+str(data[cpuIndex]))
# Output the %MEM value
output.write(" "+str(data[memIndex]))
output.write('\n')
# Add the offset based on the logging rate
start = start + offset
            # Record entries
entries = entries + 1
# Record Metrics
# Record CPU data
cpu = float(data[cpuIndex])
if (cpu < minCPU):
minCPU = cpu
if (cpu > maxCPU):
maxCPU = cpu
sumCPU = sumCPU + cpu
# Record Mem data
mem = float(data[memIndex])
if (mem < minMem):
minMem = mem
if (mem > maxMem):
maxMem = mem
sumMem = sumMem + mem
#end for
# Close the files
    logfile.close()
    output.close()
# Output stats file
statFile=resultDir+os.sep+BROKER_CPU+".stats"
output= open(statFile, "w")
output.write("#type:min/max/avg")
output.write('\n')
output.write("CPU:"+str(minCPU)+"/"+str(maxCPU)+"/"+str(float(sumCPU)/float(entries)))
output.write('\n')
output.write("MEM:"+str(minMem)+"/"+str(maxMem)+"/"+str(float(sumMem)/float(entries)))
output.write('\n')
    output.close()
log("Pre Process of CPU Log file '"+BROKER_CPU+"' complete")
#
# Given a known process type, get the recorded PID.
#
def getPID(process):
return getFirstLine(_brokerLogs+os.sep+process)
#
# Get the first line of the file without EOL chars.
# NOTE: this will load the entire file into memory to do it.
#
def getFirstLine(fileName):
f = open(fileName,"r")
line = f.read().splitlines()[0]
    f.close()
return line
#
# Walk the directory given and process all csv test results
#
def processTestResults(resultDir):
for root, dirs, files in os.walk(resultDir, topdown=False):
if len(files) == 0:
exitError("Test result directory is empty:" + resultDir)
for file in files:
if file.endswith(".csv"):
processTestResult(root , file)
def processTestResult(root, resultFile):
# Open stat file and extract test times, we determine:
# -start time based on the 'Access' value
# -end time based on the 'Modify' value 'Change' would also work
statFile=root+os.sep+resultFile+".stat"
if not os.path.exists(statFile):
log("Unable to process : Unable to open stat file:" + statFile)
return
createResultSetPackage(root, resultFile)
def extractTime(field, statFile):
stats = open(statFile, "r")
for line in stats:
if line.startswith(field):
if line.find("(") == -1:
dt = lineToDate(" ".join(line.split()[1:]))
#
                # TODO We need to handle time zone issues as I'm sure we will have issues with the
# log4j matching.
                stats.close()
return dt
#
# Given a text line in ISO format convert it to a date object
#
def lineToDate(line):
#2009-06-22 17:04:44,320
#2009-06-22 17:04:44.320
pattern = re.compile(r'(?P<year>^[0-9][0-9][0-9][0-9])-(?P<month>[0-9][0-9])-(?P<day>[0-9][0-9]) (?P<hour>[0-9][0-9]):(?P<minute>[0-9][0-9]):(?P<seconds>[0-9][0-9])')
m = pattern.match(line)
if m:
year = int(m.group('year'))
month = int(m.group('month'))
day = int(m.group('day'))
hour = int(m.group('hour'))
minute = int(m.group('minute'))
seconds = int(m.group('seconds'))
pattern = re.compile(r'(?P<year>^[0-9][0-9][0-9][0-9])-(?P<month>[0-9][0-9])-(?P<day>[0-9][0-9]) (?P<hour>[0-9][0-9]):(?P<minute>[0-9][0-9]):(?P<seconds>[0-9][0-9])[.|,](?P<micro>[0-9]+)')
m = pattern.match(line)
micro = None
if m:
micro = m.group('micro')
if micro == None:
micro = 0
# Correct issue where micros are actually nanos
if int(micro) > 999999:
micro = int(micro) / 1000
return datetime(year,month,day,hour,minute,seconds,int(micro))
else:
# Error we shouldn't get here
        return None
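#
# --- Illustrative sketch (assumption, not an exact drop-in for lineToDate) ---
# The same parse expressed with strptime for the common case of a timestamp
# with a fractional part: normalise the log4j ',' separator to '.' first.
# Note strptime right-pads the fractional digits, so ",320" reads as 320 ms.
#
def _lineToDateSketch(stamp="2009-06-22 17:04:44,320"):
    return datetime.strptime(stamp.replace(',', '.'), "%Y-%m-%d %H:%M:%S.%f")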
def createResultSetPackage(root, resultFile):
# Get the Name of the test to make a directory with said name
testName = resultFile.split(".csv")[0]
resultDir = root+ os.sep + testName
log("Processing Result set for:"+ testName)
mkdir(resultDir)
# Move result file to new directory
shutil.move(root + os.sep + resultFile, resultDir)
# Move stat file to new directory
shutil.move(root + os.sep + resultFile + ".stat", resultDir)
statFile=resultDir + os.sep + resultFile + ".stat"
#
# Get start and end time for test run
#
start = extractTime(ACCESS, statFile)
end = extractTime(MODIFY, statFile)
sliceBrokerLogs(resultDir, start, end)
createGraphData(resultDir, testName)
createTestStatData(resultDir, testName)
log("Created Result Package for:"+ testName)
def sliceBrokerLogs(resultDir, start, end):
sliceCPULog(resultDir, start, end)
sliceLog4j(resultDir, start, end)
sliceGCLog(resultDir, start, end)
def sliceCPULog(resultDir, start, end):
global _brokerLogs
logfilePath=_brokerLogs+os.sep+BROKER_CPU_DATED
cpuSliceFile=resultDir+os.sep+BROKER_CPU
# Process the CPU log file and make a file of format:
# datetime <CPU% usage> <MEM% usage>
#
# Open log CPU file for reading
logFile = open(logfilePath, "r")
# Open the output file, erasing any existing version
    # Keep track of the min/max, sum and number of entries.
minCPU=float(sys.maxint)
maxCPU=0.0
minMem=float(sys.maxint)
maxMem=0.0
entries=0
sumCPU=0.0
sumMem=0.0
#
# Create outputfile
#
cpuslice = open(cpuSliceFile,"w")
for line in logFile:
data = line.split()
#
# //fixme remove tz addition.
#
lineTime = lineToDate(" ".join(data[0:2])+" +0000")
if lineTime > start:
if lineTime < end:
                # Write the data through to the new file
cpuslice.writelines(line)
# Perform stat processing for the min/max/avg
data = line.split()
#
# Data format is
# <Date> <Time> <%CPU> <%MEM>
# 2010-02-19 10:16:17 157 28.1
#
cpuIndex = 2
memIndex = 3
                # Record entries
entries = entries + 1
# Record Metrics
# Record CPU data
cpu = float(data[cpuIndex])
if (cpu < minCPU):
minCPU = cpu
if (cpu > maxCPU):
maxCPU = cpu
sumCPU = sumCPU + cpu
# Record Mem data
mem = float(data[memIndex])
if (mem < minMem):
minMem = mem
if (mem > maxMem):
maxMem = mem
sumMem = sumMem + mem
logFile.close()
cpuslice.close()
log("Sliced CPU log")
# Output stats file
statFile=cpuSliceFile+".stats"
output= open(statFile, "w")
output.write("#type:min/max/avg")
output.write('\n')
output.write("CPU:"+str(minCPU)+"/"+str(maxCPU)+"/"+str(float(sumCPU)/float(entries)))
output.write('\n')
output.write("MEM:"+str(minMem)+"/"+str(maxMem)+"/"+str(float(sumMem)/float(entries)))
output.write('\n')
    output.close()
log("Generated stat data from CPU Log file")
def sliceGCLog(resultDir, start, end):
global _brokerLogs
logfilePath=_brokerLogs+os.sep+BROKER_GC
sliceFile=resultDir+os.sep+BROKER_GC
gcstart = extractTime(ACCESS, logfilePath+".stat")
# Open the output file, erasing any existing version
    # Keep track of the min/max, sum and number of entries.
minGCDuration=float(sys.maxint)
maxGCDuration=0.0
sumGCDuration=0.0
entriesGCDuration = 0
# Open log GC file for reading
logFile = open(logfilePath, "r")
# Open the output file, erasing any existing version
output= open(sliceFile, "w")
# Use a regular expression to pull out the Seconds.Millis values from the
# Start of the gc log line.
pattern = re.compile(r'(?P<seconds>^[0-9]+)\.(?P<millis>[0-9]+):')
for line in logFile:
m = pattern.match(line)
if m:
seconds = m.group('seconds');
millis = m.group('millis');
offset = timedelta(seconds=int(seconds),milliseconds=int(millis))
lineTime = gcstart + offset
if lineTime > start:
if lineTime < end:
output.writelines(line)
# Perform stat processing for the min/max/avg
# Process GC Duration lines in ParNew gc ,
                    # ensure we do not have CMS printed as that means the line has been corrupted
                    if line.find("ParNew") != -1 and line.find("CMS") == -1:
#
# Example data line
# 7.646: [GC 7.646: [ParNew: 14778K->461K(14784K), 0.0026610 secs] 49879K->36609K(73288K), 0.0027560 secs] [Times: user=0.01 sys=0.00, real=0.01 secs]
#
                        # So entry 5 is the ParNew time and 8 is the whole GC cycle. 15 entries in total.
data = line.split()
gcTime = 0
# Check we have a valid ParNew Line
if (len(data) == 15):
                            # Record entries
# Record GC Duration data
entriesGCDuration = entriesGCDuration + 1
gcTime = float(data[8])
if (gcTime < minGCDuration):
minGCDuration = gcTime
if (gcTime > maxGCDuration):
maxGCDuration = gcTime
sumGCDuration = sumGCDuration + gcTime
# Close the files
    logFile.close()
output.close()
log("Sliced gc log")
# Output stats file
statFile=sliceFile+".stats"
output= open(statFile, "w")
output.write("#type:min/max/avg")
output.write('\n')
#
# Only provide GCDuration if it was processed
#
output.write("GC_DUR:%.14f/%.14f/%.14f" % (minGCDuration, maxGCDuration , (sumGCDuration/float(entriesGCDuration))))
output.write('\n')
    output.close()
log("Generated stat data from CPU Log file")
def sliceLog4j(resultDir, start, end):
global _brokerLogs
logfilePath=_brokerLogs+os.sep+BROKER_LOG
log4jSliceFile=resultDir+os.sep+BROKER_LOG
log4jstart = extractTime(ACCESS, logfilePath+".stat")
#
# Say that first line is the start of the file,
# This value will give a time value to the initial
# logging before Log4j kicks in.
#
lineTime = log4jstart
# Process the broker log4j file
# Open log CPU file for reading
logFile = open(logfilePath, "r")
#
# Create outputfile
#
log4jslice = open(log4jSliceFile,"w")
for line in logFile:
data = line.split()
#
# If the line has a time at the start then process it
# otherwise use the previous time. This means if there is
# a stack trace in the middle of the log file then it will
# be copied over to the split file as long as it is in the
# split time.
#
if (hasTime(data)):
#
# //fixme remove tz addition.
#
lineTime = lineToDate(" ".join(data[0:2])+" +0000")
if lineTime > start:
if lineTime < end:
print line
log4jslice.writelines(line)
logFile.close()
log4jslice.close()
log("Sliced broker log")
#
# Check the first two entries of data can make a datetime object
#
def hasTime(data):
date = data[0]
time = data[1]
# Examples:
# 2009-06-22 17:04:44,246
# 2009-06-22 17:04:44.2464
# 2009-06-22 17:04:44
# ISO-8601 '-' format date
dateRE = re.compile('[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]')
#
# Check for times with/out millis
# e.g.
# 10:00:00,000 - log4j
# 10:00:00.0000 - generated in script for cpu time
#
timeRE = re.compile('[0-9][0-9]:[0-9][0-9]:[0-9][0-9]?[0-9]*')
return dateRE.match(date) and timeRE.match(time)
def createGraphData(resultDir, testName):
# Create graph.data file for process.sh
# Format two lines : Title and filename
# $version $type : $volume% volume
# $version-$brokerState-$type-$volume
version=getBrokerVersion()
test= extractTestValue("n",resultDir, testName)
volume = int(float(extractTestResult("Test * Size Throughput", resultDir, testName)) * 1000)
messageSize = extractTestValue("messageSize",resultDir, testName)
ackMode = ackModeToString(extractTestValue("consAckMode",resultDir, testName))
graphDataFile=resultDir+os.sep+GRAPH_DATA
graphData = open(graphDataFile, "w")
#
# Write Title
graphData.write(version+":"+test+":"+str(messageSize)+"kb x "+str(volume)+" msg/sec using "+ackMode)
graphData.write('\n')
#
# Write FileName
graphData.writelines(version+"-"+testName)
graphData.write('\n')
    graphData.close()
log("Created graph.data")
def getBrokerVersion():
global _brokerLogs
READY = "Qpid Broker Ready"
brokerLogFile = _brokerLogs + os.sep + BROKER_LOG
    logFile = open(brokerLogFile, "r")
    dataLine = ""
    for line in logFile:
        if line.find(READY) != -1:
            dataLine = line
            break
    logFile.close()
# Log Entry
#2009-06-19 17:04:02,493 INFO [main] server.Main (Main.java:456) - Qpid Broker Ready :2.3.0.1 build: 727403M
# Split on READY
data = dataLine.split(READY)
# So [1] should be
# :2.3.0.1 build: 727403M
readyEntries = data[1].split()
    # so splitting on white space should give us ':version'
# and a quick split on ':' will give us the version
version = readyEntries[0].split(':')[1]
# Strip to ensure we have no whitespace
return version.strip()
def extractTestValue(property,resultDir,testName):
return extractTestData(property,resultDir,testName," =")
def extractTestResult(property,resultDir,testName):
return extractTestData(property,resultDir,testName,":")
def extractTestData(property,resultDir,testName,type):
resultFile = resultDir + os.sep + testName+".csv"
results = open(resultFile, "r")
dataLine = ""
for line in results:
if line.find("Total Tests:") == 0:
dataLine = line
results.close()
# Data is CSV
data = dataLine.split(',')
found = False
result = ""
searchProperty = property+type
for entry in data:
if found:
result = entry
break
if entry.strip() == searchProperty:
found=True
return result.strip()
def createTestStatData(resultDir, testName):
csvFilePath=resultDir + os.sep + testName + ".csv"
# Open the output file, erasing any existing version
    # Keep track of the min/max, sum and number of entries.
minLatency=float(sys.maxint)
maxLatency=0.0
minThroughput=float(sys.maxint)
maxThroughput=0.0
entries=0
sumLatency=0.0
sumThroughput=0.0
#
# Open csv File
#
csvFile = open(csvFilePath,"r")
for line in csvFile:
# The PingAsyncTestPerf test class outputs the latency and throughput data.
if line.find("PingAsyncTestPerf") != -1:
#
# Data format is
# <Test> <TestName> <Thread> <Status> <Time> <Latency> <Concurrency> <Thread> <TestSize>
#org.wso2.andes.client.ping.PingAsyncTestPerf, testAsyncPingOk, Dispatcher-Channel-1, Pass, 209.074, 219.706, 0, 1, 10
LatencyIndex = 5
ThroughputIndex = 4
# The PingLatencyTestPerf test class just outputs the latency data.
if line.find("PingLatencyTestPerf") != -1:
#
# Data format is
# <Test> <TestName> <Thread> <Status> <Time> <Latency> <Concurrency> <Thread> <TestSize>
# org.wso2.andes.client.ping.PingLatencyTestPerf, testPingLatency, Dispatcher-Channel-1, Pass, 397.05502, 0, 2, 1000
LatencyIndex = 4
ThroughputIndex = -1
# Only process the test lines that have 'org.wso2.andes.client.ping', i.e. skip header and footer.
if line.find("org.wso2.andes.client.ping") != -1:
# Perform stat processing for the min/max/avg
data = line.split(",")
            # Record entries
entries = entries + 1
# Record Metrics
# Record Latency data
latency = float(data[LatencyIndex])
if (latency < minLatency):
minLatency = latency
if (latency > maxLatency):
maxLatency = latency
sumLatency = sumLatency + latency
if (ThroughputIndex != -1):
# Record Latency data
throughput = float(data[ThroughputIndex])
if (throughput < minThroughput):
minThroughput = throughput
if (throughput > maxThroughput):
maxThroughput = throughput
sumThroughput = sumThroughput + throughput
csvFile.close()
# Output stats file
statFile=resultDir + os.sep + testName+".stats"
output= open(statFile, "w")
output.write("#type:min/max/avg")
output.write('\n')
output.write("LATENCY:"+str(minLatency)+"/"+str(maxLatency)+"/"+str(float(sumLatency)/float(entries)))
output.write('\n')
if (ThroughputIndex != -1):
# Output msgs/sec based on time for a batch of msgs
output.write("THROUGHPUT:"+str(float(1000)/maxThroughput)+"/"+str(float(1000)/minThroughput)+"/"+str(float(1000)/(float(sumThroughput)/float(entries))))
output.write('\n')
    output.close()
log("Generated stat data from test "+testName+" CSV file")
def ackModeToString(ackMode):
if ackMode == '0':
return "Transacted"
elif ackMode == '1':
return "AutoAck"
elif ackMode == '2':
return "ClientAck"
elif ackMode == '3':
return "DupsOK"
elif ackMode == '257':
return "NoAck"
elif ackMode == '258':
return "PreAck"
else:
return str(ackMode)
def debug(msg):
global _debug
if _debug:
log(msg)
def log(msg):
print msg
def mkdir(dir):
if not os.path.exists(dir):
os.mkdir(dir)
if __name__ == "__main__":
main()
|
pacificclimate/modelmeta
|
refs/heads/master
|
mm_cataloguer/associate_ensemble.py
|
1
|
import logging
import traceback
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from modelmeta import DataFile, DataFileVariable, \
Ensemble, EnsembleDataFileVariables
formatter = logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s', "%Y-%m-%d %H:%M:%S")
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger(__name__)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
def find_ensemble(sesh, name, version):
"""Find existing Ensemble record matching name and version
:param sesh: modelmeta database session
:param name: (str) name of ensemble
:param version: (float) version of ensemble
:return: existing Ensemble record or None
"""
q = sesh.query(Ensemble)\
.filter(Ensemble.name == name) \
.filter(Ensemble.version == version)
return q.first()
def associate_ensemble_to_data_file_variable(
session, ensemble, data_file_variable
):
"""Associate an ``Ensemble`` to a ``DataFileVariable``.
:param session: database session for access to modelmeta database
:param ensemble: (Ensemble) ensemble to associate
:param data_file_variable: (DataFileVariable) dfv to associate
:return: EnsembleDataFileVariables (association) record
"""
ensemble_dfv = (
session.query(EnsembleDataFileVariables)
.filter(EnsembleDataFileVariables.ensemble_id == ensemble.id)
.filter(EnsembleDataFileVariables.data_file_variable_id ==
data_file_variable.id)
.first()
)
if ensemble_dfv:
logger.info(
            'Association for variable id {} to ensemble already exists'
.format(data_file_variable.id))
else:
logger.info('Associating variable id {} to ensemble'
.format(data_file_variable.id))
ensemble_dfv = EnsembleDataFileVariables(
ensemble_id=ensemble.id,
data_file_variable_id=data_file_variable.id
)
session.add(ensemble_dfv)
return ensemble_dfv
def associate_ensemble_to_data_file(session, ensemble, data_file, var_names):
"""Associate an ``Ensemble`` to ``DataFileVariable``s of a ``DataFile``.
:param session: database session for access to modelmeta database
:param ensemble: (Ensemble) ensemble to associate
:param data_file: (DataFile) data file to associate
:param var_names: (list) names of variables to associate
:return: list of ``DataFileVariable``s associated
"""
logger.info('Associating DataFile: {}'.format(data_file.filename))
associated_dfvs = []
for data_file_variable in data_file.data_file_variables:
if (not var_names
or data_file_variable.netcdf_variable_name in var_names):
associate_ensemble_to_data_file_variable(
session, ensemble, data_file_variable)
associated_dfvs.append(data_file_variable)
return associated_dfvs
def associate_ensemble_to_filepath(
session, ensemble_name, ensemble_ver,
regex_filepath, filepath, var_names
):
"""Associate an ensemble (specified by name and version) to
data file variables of data file(s) matching a given filepath pattern.
:param session: database session access to modelmeta database
:param ensemble_name: (str) name of ensemble
:param ensemble_ver: (float) version of ensemble
:param regex_filepath: (bool) if True, interpret filepath as regex
:param filepath: filepath of file, or regex for such
:param var_names: (list) names of variables to associate
:return: (list) tuple(``DataFile``, list of ``DataFileVariable``s
associated); one for each matching DataFile
"""
if regex_filepath:
logger.info('Processing filepath regex: {}'.format(filepath))
else:
logger.info('Processing filepath: {}'.format(filepath))
# Find the matching ``Ensemble``
ensemble = find_ensemble(session, ensemble_name, ensemble_ver)
if not ensemble:
raise ValueError(
"No existing ensemble matches name = '{}' and version = '{}'"
.format(ensemble_name, ensemble_ver))
# Find all matching ``DataFile``s
df_query = session.query(DataFile)
if regex_filepath:
df_query = df_query.filter(DataFile.filename.op('~')(filepath))
else:
df_query = df_query.filter(DataFile.filename == filepath)
data_files = df_query.all()
if not data_files:
logger.info('No matching DataFile records')
# Associate matching ensemble to matching data files
return [
(data_file,
associate_ensemble_to_data_file(
session, ensemble, data_file, var_names)
)
for data_file in data_files
]
def associate_ensemble_to_filepaths(
Session, ensemble_name, ensemble_ver,
regex_filepaths, filepaths, var_names
):
"""Associate a list of NetCDF files in modelmeta database to a specified
ensemble.
    :param Session: SQLAlchemy session factory bound to the modelmeta database
:param ensemble_name: (str) name of ensemble
:param ensemble_ver: (float) version of ensemble
:param regex_filepaths: (bool) if True, interpret filepaths as regexes
    :param filepaths: list of filepaths (or regexes) to associate
:param var_names: list of names of variables to associate
:return: (list) tuple(id of ``DataFile``,
list of id of ``DataFileVariable``s)
associated; one for each matching DataFile
"""
associated_ids = []
for filepath in filepaths:
session = Session()
try:
associated_items = associate_ensemble_to_filepath(
session, ensemble_name, ensemble_ver,
regex_filepaths, filepath, var_names
)
associated_ids.extend([
(
data_file.id,
[dfv.id for dfv in data_file_variables]
)
for data_file, data_file_variables in associated_items
])
session.commit()
except:
logger.error(traceback.format_exc())
session.rollback()
finally:
session.close()
return associated_ids
def main(
dsn, ensemble_name, ensemble_ver,
regex_filepaths, filepaths, var_names
):
"""Associate a list of NetCDF files in modelmeta database to a specified
ensemble.
:param dsn: connection info for the modelmeta database to update
:param ensemble_name: (str) name of ensemble
:param ensemble_ver: (float) version of ensemble
:param regex_filepaths: (bool) if True, interpret filepaths as regexes
    :param filepaths: list of filepaths (or regexes) to associate
:param var_names: list of names of variables to associate
    :return: (list) tuples of (``DataFile`` id, list of ``DataFileVariable`` ids)
        associated; one for each matching DataFile
"""
engine = create_engine(dsn)
Session = sessionmaker(bind=engine)
return associate_ensemble_to_filepaths(
Session, ensemble_name, ensemble_ver,
regex_filepaths, filepaths, var_names
)
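# --- Illustrative usage sketch (assumption, not part of the original module) ---
# The DSN, ensemble name/version, and file paths below are hypothetical; in
# practice this module is driven by a command-line wrapper that supplies them.
if __name__ == '__main__':
    example_dsn = 'postgresql://user:password@localhost/modelmeta'  # hypothetical
    main(
        example_dsn,
        ensemble_name='my_ensemble',        # hypothetical existing ensemble
        ensemble_ver=1.0,
        regex_filepaths=True,
        filepaths=[r'.*tasmax.*\.nc$'],     # regex matched against DataFile.filename
        var_names=['tasmax'],
    )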
|
computersalat/ansible
|
refs/heads/devel
|
test/units/compat/builtins.py
|
115
|
# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Compat for python2.7
#
# One unittest needs to import builtins via __import__() so we need to have
# the string that represents it
try:
import __builtin__
except ImportError:
BUILTINS = 'builtins'
else:
BUILTINS = '__builtin__'
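# --- Illustrative usage sketch (assumption, not part of the original file) ---
# BUILTINS lets a test patch built-in functions by name on both Python 2 and 3.
def _example_patch_open():
    """Sketch: patch the built-in open() regardless of Python version."""
    try:
        from unittest import mock  # Python 3
    except ImportError:
        import mock  # Python 2 backport, assumed available in the test environment
    with mock.patch('{0}.open'.format(BUILTINS), mock.mock_open(read_data='data')):
        with open('ignored-path') as handle:
            return handle.read()  # returns 'data'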
|
jbonaiuto/perceptual-choice-hysteresis
|
refs/heads/master
|
src/python/perceptchoice/experiment/analysis.py
|
1
|
from datetime import datetime
import os
from matplotlib.mlab import normpdf
from matplotlib.patches import Rectangle
import numpy as np
import pandas as pd
import statsmodels.api as sm
from scipy.stats import wilcoxon, norm, mannwhitneyu
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
from perceptchoice.experiment.subject_info import stim_conditions, read_isi_subjects, read_subjects, isi_conditions
from perceptchoice.utils import FitSigmoid, FitRT, FitWeibull
stim_colors={
'ShamPreAnode': 'b',
'Anode': 'r',
'ShamPreCathode': 'b',
'Cathode': 'g'
}
isi_colors={
'low':'c',
'high':'m'
}
def analyze_subject_accuracy_rt(subject):
"""
Analyze the accuracy and RT of a single subject
"""
# Map of condition - coherence - accuracy (correct or incorrect for each trial)
condition_coherence_accuracy={}
# Map of condition - coherence - RT
condition_coherence_rt={}
# Map of condition - coherence - mean RT difference with control
condition_coherence_rt_diff={}
# Map of condition - accuracy threshold
condition_accuracy_thresh={}
# Iterate through conditions
for condition,trial_data in subject.session_data.iteritems():
# Init accuracy, RT maps
condition_coherence_accuracy[condition]={}
condition_coherence_rt[condition]={}
# For each trial
for trial_idx in range(trial_data.shape[0]):
# Get trial data
coherence=trial_data[trial_idx,2]
correct=trial_data[trial_idx,3]
rt=trial_data[trial_idx,6]
# Update accuracy
if not coherence in condition_coherence_accuracy[condition]:
condition_coherence_accuracy[condition][coherence]=[]
condition_coherence_accuracy[condition][np.abs(coherence)].append(float(correct))
# Update RT
if not coherence in condition_coherence_rt[condition]:
condition_coherence_rt[condition][coherence]=[]
condition_coherence_rt[condition][np.abs(coherence)].append(rt)
# Compute accuracy threshold
coherences = sorted(condition_coherence_accuracy[condition].keys())
accuracy=[]
for coherence in coherences:
accuracy.append(np.mean(condition_coherence_accuracy[condition][coherence]))
acc_fit = FitWeibull(coherences, accuracy, guess=[0.0, 0.2], display=0)
condition_accuracy_thresh[condition]=acc_fit.inverse(0.8)
# Compute RT diff
for stim_condition in ['Anode', 'Cathode']:
condition_coherence_rt_diff[stim_condition]={}
coherences=sorted(condition_coherence_rt[stim_condition].keys())
for coherence in coherences:
condition_coherence_rt_diff[stim_condition][coherence]=np.mean(condition_coherence_rt[stim_condition][coherence])-np.mean(condition_coherence_rt['ShamPre%s' % stim_condition][coherence])
# Compute RT diff for sham conditions - allows to compare sham conditions to each other
condition_coherence_rt_diff['Sham']={}
coherences=sorted(condition_coherence_rt['ShamPreAnode'].keys())
for coherence in coherences:
condition_coherence_rt_diff['Sham'][coherence]=np.mean(condition_coherence_rt['ShamPreAnode'][coherence])-np.mean(condition_coherence_rt['ShamPreCathode'][coherence])
return condition_coherence_accuracy, condition_coherence_rt, condition_coherence_rt_diff, condition_accuracy_thresh
def analyze_subject_isi_accuracy_rt(subject):
"""
    Analyze the accuracy and RT of a single subject, split by low and high ISI
"""
# Map of coherence - accuracy (correct or incorrect for each trial)
isi_coherence_accuracy={
'low':{},
'high':{}
}
# Map of coherence - RT
isi_coherence_rt={
'low':{},
'high':{}
}
# Map of condition - coherence - mean RT difference with control
isi_coherence_rt_diff={
'low':{},
'high':{}
}
# Map of isi - accuracy threshold
isi_accuracy_thresh={}
trial_data=subject.session_data['control']
mean_iti=np.mean(trial_data[1:,7])
# For each trial
for trial_idx in range(trial_data.shape[0]):
isi='low'
if trial_data[trial_idx,7]>mean_iti:
isi='high'
# Get trial data
coherence=trial_data[trial_idx,2]
correct=trial_data[trial_idx,3]
rt=trial_data[trial_idx,6]
# Update accuracy
if not coherence in isi_coherence_accuracy[isi]:
isi_coherence_accuracy[isi][coherence]=[]
isi_coherence_accuracy[isi][np.abs(coherence)].append(float(correct))
# Update RT
if not coherence in isi_coherence_rt[isi]:
isi_coherence_rt[isi][coherence]=[]
isi_coherence_rt[isi][np.abs(coherence)].append(rt)
# Compute accuracy threshold
for isi in ['low','high']:
coherences = sorted(isi_coherence_accuracy[isi].keys())
accuracy=[]
for coherence in coherences:
accuracy.append(np.mean(isi_coherence_accuracy[isi][coherence]))
acc_fit = FitWeibull(coherences, accuracy, guess=[0.0, 0.2], display=0)
isi_accuracy_thresh[isi]=acc_fit.inverse(0.8)
isi_coherence_rt_diff['low']={}
coherences=sorted(isi_coherence_rt['low'].keys())
for coherence in coherences:
isi_coherence_rt_diff['low'][coherence]=np.mean(isi_coherence_rt['low'][coherence])-np.mean(isi_coherence_rt['high'][coherence])
return isi_coherence_accuracy, isi_coherence_rt, isi_coherence_rt_diff, isi_accuracy_thresh
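# --- Illustrative sketch (assumption, not part of the original analysis) ---
# The low/high ISI grouping above is a mean split on the inter-trial interval
# (column 7 of the trial data): at or below the mean is 'low', above it is 'high'.
def _isi_split_demo():
    iti = np.array([0.5, 0.7, 1.2, 2.0, 0.9])
    mean_iti = np.mean(iti)                       # 1.06
    low_trials = np.where(iti <= mean_iti)[0]     # indices 0, 1, 4
    high_trials = np.where(iti > mean_iti)[0]     # indices 2, 3
    return low_trials, high_trials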
def analyze_isi_accuracy_rt(subjects, output_dir):
"""
Analyze accuracy and RT of all subjects
"""
isi_conditions=['low','high']
# Map of condition - coherence - mean accuracy for each subject
isi_coherence_accuracy={
'low':{},
'high':{}
}
# Map of condition - coherence - mean RT for each subject
isi_coherence_rt={
'low':{},
'high':{}
}
# Map of condition - coherence - mean RT difference with control for each subject
isi_coherence_rt_diff={
'low':{},
'high':{}
}
# Map of condition - accuracy threshold for each subject
isi_accuracy_thresh={
'low':[],
'high':[]
}
# For each subject
for subj_id in subjects:
subject=subjects[subj_id]
# Get subject accuracy, RT, RT diff, and accuracy threshold
subj_isi_coherence_accuracy, subj_isi_coherence_rt, subj_isi_coherence_rt_diff, subj_isi_accuracy_thresh=analyze_subject_isi_accuracy_rt(subject)
# Iterate over conditions
for isi in isi_conditions:
            # Add accuracy threshold to list for this condition
isi_accuracy_thresh[isi].append(subj_isi_accuracy_thresh[isi])
# Update accuracy with mean accuracy for this subject
for coherence in subj_isi_coherence_accuracy[isi]:
if not coherence in isi_coherence_accuracy[isi]:
isi_coherence_accuracy[isi][coherence]=[]
isi_coherence_accuracy[isi][coherence].append(np.mean(subj_isi_coherence_accuracy[isi][coherence]))
# Update RT with mean RT for this subject
for coherence in subj_isi_coherence_rt[isi]:
if not coherence in isi_coherence_rt[isi]:
isi_coherence_rt[isi][coherence]=[]
isi_coherence_rt[isi][coherence].append(np.mean(subj_isi_coherence_rt[isi][coherence]))
# Update RT difference
for isi in subj_isi_coherence_rt_diff:
for coherence in subj_isi_coherence_rt_diff[isi]:
if not coherence in isi_coherence_rt_diff[isi]:
isi_coherence_rt_diff[isi][coherence]=[]
isi_coherence_rt_diff[isi][coherence].append(subj_isi_coherence_rt_diff[isi][coherence])
# Compute accuracy threshold spread for each condition
thresh_std={}
for isi in isi_conditions:
thresh_std[isi]=np.std(isi_accuracy_thresh[isi])/np.sqrt(len(subjects))
plot_choice_accuracy(isi_conditions, isi_colors, isi_coherence_accuracy, thresh_std=thresh_std)
plot_choice_rt_scaled(isi_conditions, isi_colors, 'high', isi_coherence_rt)
plot_choice_rt_diff(['low'], isi_colors, isi_coherence_rt_diff)
# Write accuracy data to file
out_file=file(os.path.join(output_dir,'isi_accuracy.csv'),'w')
out_file.write('SubjID')
for isi in isi_coherence_accuracy:
for coherence in sorted(isi_coherence_accuracy[isi]):
out_file.write(',%sCoherence%.4fAccuracy' % (isi,coherence))
out_file.write('\n')
for subj_idx,subj in enumerate(subjects):
out_file.write('%d' % (subj_idx+1))
for isi in isi_coherence_accuracy:
for coherence in sorted(isi_coherence_accuracy[isi]):
out_file.write(',%.4f' % isi_coherence_accuracy[isi][coherence][subj_idx])
out_file.write('\n')
out_file.close()
# Write RT data to file
out_file=file(os.path.join(output_dir,'isi_rt.csv'),'w')
out_file.write('SubjID')
for isi in isi_coherence_rt:
for coherence in sorted(isi_coherence_rt[isi]):
out_file.write(',%sCoherence%.4fRT' % (isi,coherence))
out_file.write('\n')
for subj_idx,subj in enumerate(subjects):
out_file.write('%d' % (subj_idx+1))
for isi in isi_coherence_rt:
for coherence in sorted(isi_coherence_rt[isi]):
out_file.write(',%.4f' % isi_coherence_rt[isi][coherence][subj_idx])
out_file.write('\n')
out_file.close()
# Run stats on accuracy threshold
(W,p)=wilcoxon(isi_accuracy_thresh['low'],isi_accuracy_thresh['high'])
print('Accuracy Threshold: W(%d)=%.4f, p=%.4f' % (len(subjects)-1, W, p))
print('')
# Run stats on RT difference
rtdiff_results={
'coh': {}, 'intercept':{}
}
coh=[]
rt_diff=[]
for coherence in isi_coherence_rt_diff['low']:
for diff in isi_coherence_rt_diff['low'][coherence]:
coh.append(coherence)
rt_diff.append(diff)
data=pd.DataFrame({
'coh': coh,
'rt_diff': rt_diff
})
data['intercept']=1.0
lr = sm.GLM(data['rt_diff'], data[['coh','intercept']])
result = lr.fit()
for param in ['coh','intercept']:
rtdiff_results[param]['x']=result.params[param]
rtdiff_results[param]['t']=result.tvalues[param]
rtdiff_results[param]['p']=result.pvalues[param]
print('RT Diff')
print('B0: x=%.4f, t=%.4f, p=%.4f' % (rtdiff_results['intercept']['x'], rtdiff_results['intercept']['t'], rtdiff_results['intercept']['p']))
print('B1: x=%.4f, t=%.4f, p=%.4f' % (rtdiff_results['coh']['x'], rtdiff_results['coh']['t'], rtdiff_results['coh']['p']))
def analyze_isi_choice_hysteresis(subjects, output_dir):
"""
Analyze choice hysteresis of all subjects
"""
# Map of last response (L or R) - condition - coherence - average choice for each subject
isi_coherence_choices={
'L*': {
'low':{},
'high':{}
},
'R*': {
'low':{},
'high':{}
}
}
# Map of last response (L or R) - condition - coherence - sigmoid offset of each subject
isi_sigmoid_offsets={
'L*': {
'low':[],
'high':[]
},
'R*': {
'low':[],
'high':[]
}
}
# Map of logistic parameter (a1 or a2) - condition - parameter value for each subject
isi_logistic_params={
'a1': {
'low':[],
'high':[]
},
'a2': {
'low':[],
'high':[]
}
}
# For each subject
for subj_id in subjects:
subject=subjects[subj_id]
# Get choices, sigmoid offsets, logistic params
subj_isi_coherence_choices, subj_isi_sigmoid_offsets, subj_isi_logistic_params=analyze_subject_isi_choice_hysteresis(subject)
# For each condition
for isi in isi_conditions:
# Update sigmoid offsets
isi_sigmoid_offsets['L*'][isi].append(subj_isi_sigmoid_offsets['L*'][isi])
isi_sigmoid_offsets['R*'][isi].append(subj_isi_sigmoid_offsets['R*'][isi])
# Update logistic params
isi_logistic_params['a1'][isi].append(subj_isi_logistic_params['a1'][isi])
isi_logistic_params['a2'][isi].append(subj_isi_logistic_params['a2'][isi])
# Update L* choices
for coherence in subj_isi_coherence_choices['L*'][isi]:
if not coherence in isi_coherence_choices['L*'][isi]:
isi_coherence_choices['L*'][isi][coherence]=[]
isi_coherence_choices['L*'][isi][coherence].append(np.mean(subj_isi_coherence_choices['L*'][isi][coherence]))
# Update R* choices
for coherence in subj_isi_coherence_choices['R*'][isi]:
if not coherence in isi_coherence_choices['R*'][isi]:
isi_coherence_choices['R*'][isi][coherence]=[]
isi_coherence_choices['R*'][isi][coherence].append(np.mean(subj_isi_coherence_choices['R*'][isi][coherence]))
# Plot histograms
plot_indecision_hist(isi_conditions, isi_colors, isi_sigmoid_offsets, xlim=[-.2,.6], ylim=[0,35])
plot_logistic_parameter_ratio(isi_conditions, isi_colors, isi_logistic_params, xlim=[-0.1,0.35],ylim=[0,35])
# Output indecision point data (sigmoid offsets)
output_file=file(os.path.join(output_dir,'isi_indecision.csv'),'w')
output_file.write('SubjID')
for direction in ['L*','R*']:
for isi in isi_sigmoid_offsets[direction]:
output_file.write(',%s%s' % (direction, isi))
output_file.write('\n')
for subj_idx, subj in enumerate(subjects):
output_file.write('%d' % (subj_idx+1))
for direction in ['L*','R*']:
for isi in isi_sigmoid_offsets[direction]:
output_file.write(',%.4f' % isi_sigmoid_offsets[direction][isi][subj_idx])
output_file.write('\n')
output_file.close()
# Output logistic params
output_file=file(os.path.join(output_dir,'isi_logistic.csv'),'w')
output_file.write('SubjID')
for param in ['a1','a2']:
for isi in isi_logistic_params[param]:
output_file.write(',%s%s' % (param, isi))
output_file.write('\n')
for subj_idx, subj in enumerate(subjects):
output_file.write('%d' % (subj_idx+1))
for param in ['a1','a2']:
for isi in isi_logistic_params[param]:
output_file.write(',%.4f' % isi_logistic_params[param][isi][subj_idx])
output_file.write('\n')
output_file.close()
# Run stats on indecision point
indec_results={}
(indec_results['W'],indec_results['p'])=wilcoxon(np.array(isi_sigmoid_offsets['L*']['low'])-np.array(isi_sigmoid_offsets['R*']['low']),
np.array(isi_sigmoid_offsets['L*']['high'])-np.array(isi_sigmoid_offsets['R*']['high']))
print('Indecision Point: W=%.4f, p=%.4f' % (indec_results['W'], indec_results['p']))
(indec_results['W'],indec_results['p'])=wilcoxon(np.array(isi_sigmoid_offsets['L*']['low'])-np.array(isi_sigmoid_offsets['R*']['low']))
print('Indecision Point, low: W=%.4f, p=%.4f' % (indec_results['W'], indec_results['p']))
(indec_results['W'],indec_results['p'])=wilcoxon(np.array(isi_sigmoid_offsets['L*']['high'])-np.array(isi_sigmoid_offsets['R*']['high']))
print('Indecision Point, high: W=%.4f, p=%.4f' % (indec_results['W'], indec_results['p']))
print('')
# Run stats on logistic parameters
log_results={}
low_ratio=np.array(isi_logistic_params['a2']['low'])/np.array(isi_logistic_params['a1']['low'])
high_ratio=np.array(isi_logistic_params['a2']['high'])/np.array(isi_logistic_params['a1']['high'])
(log_results['W'],log_results['p'])=wilcoxon(low_ratio, high_ratio)
print('Logistic Regression: W=%.4f, p=%.4f' % (log_results['W'],log_results['p']))
(log_results['W'],log_results['p'])=wilcoxon(low_ratio)
print('Logistic Regression, low: W=%.4f, p=%.4f' % (log_results['W'],log_results['p']))
(log_results['W'],log_results['p'])=wilcoxon(high_ratio)
print('Logistic Regression, high: W=%.4f, p=%.4f' % (log_results['W'],log_results['p']))
print('')
def analyze_subject_isi_choice_hysteresis(subject):
"""
    Analyze choice hysteresis for a single subject, split by low and high ISI
"""
# Map of last response (L or R) - condition - coherence - choice
isi_coherence_choices={
'L*': {
'low':{},
'high':{}
},
'R*': {
'low':{},
'high':{}
}
}
# Map of last response (L or R) - condition - sigmoid offset
isi_sigmoid_offsets={
'L*': {},
'R*': {}
}
# Map of logistic parameter (a1 or a2) - condition
isi_logistic_params={
'a1': {},
'a2': {}
}
    # Use only the control condition; trials are split into low/high ISI below
trial_data=subject.session_data['control']
mean_iti=np.mean(trial_data[1:,7])
# For each trial
for trial_idx in range(trial_data.shape[0]):
isi='low'
if trial_data[trial_idx,7]>mean_iti:
isi='high'
# Get coherence - negative coherences when direction is to the left
coherence=trial_data[trial_idx,2]*trial_data[trial_idx,1]
last_resp=trial_data[trial_idx,5]
resp=trial_data[trial_idx,4]
# Last response was left
if last_resp<0:
if not coherence in isi_coherence_choices['L*'][isi]:
isi_coherence_choices['L*'][isi][coherence]=[]
# Append 0 to list if left (-1) or 1 if right
isi_coherence_choices['L*'][isi][coherence].append(np.max([0,resp]))
# Last response was right
elif last_resp>0:
# List of rightward choices (0=left, 1=right)
if not coherence in isi_coherence_choices['R*'][isi]:
isi_coherence_choices['R*'][isi][coherence]=[]
# Append 0 to list if left (-1) or 1 if right
isi_coherence_choices['R*'][isi][coherence].append(np.max([0,resp]))
# Compute sigmoid offsets
for isi in ['low','high']:
for dir in ['L*','R*']:
choice_probs=[]
full_coherences=[]
for coherence in isi_coherence_choices[dir][isi]:
choice_probs.append(np.mean(isi_coherence_choices[dir][isi][coherence]))
full_coherences.append(coherence)
acc_fit=FitSigmoid(full_coherences, choice_probs, guess=[0.0, 0.2], display=0)
isi_sigmoid_offsets[dir][isi]=acc_fit.inverse(0.5)
low_trials=np.where(trial_data[:,7]<=mean_iti)[0]
# Prepare data for logistic
data=pd.DataFrame({
'resp': np.clip(trial_data[low_trials,4],0,1),
# negative coherences when direction is to the left
'coh': trial_data[low_trials,2]*trial_data[low_trials,1],
'last_resp': trial_data[low_trials,5]
})
# Fit intercept
data['intercept']=1.0
# Run logistic regression and get params
data=data[np.isfinite(data['last_resp'])]
logit = sm.Logit(data['resp'], data[['coh','last_resp','intercept']])
result = logit.fit(disp=False)
isi_logistic_params['a1']['low']=result.params['coh']
isi_logistic_params['a2']['low']=result.params['last_resp']
high_trials=np.where(trial_data[:,7]>mean_iti)[0]
# Prepare data for logistic
data=pd.DataFrame({
'resp': np.clip(trial_data[high_trials,4],0,1),
# negative coherences when direction is to the left
'coh': trial_data[high_trials,2]*trial_data[high_trials,1],
'last_resp': trial_data[high_trials,5]
})
# Fit intercept
data['intercept']=1.0
# Run logistic regression and get params
data=data[np.isfinite(data['last_resp'])]
logit = sm.Logit(data['resp'], data[['coh','last_resp','intercept']])
result = logit.fit(disp=False)
isi_logistic_params['a1']['high']=result.params['coh']
isi_logistic_params['a2']['high']=result.params['last_resp']
return isi_coherence_choices, isi_sigmoid_offsets, isi_logistic_params
def analyze_subject_choice_hysteresis(subject):
"""
Analyze choice hysteresis for a single subject
"""
# Map of last response (L or R) - condition - coherence - choice
condition_coherence_choices={
'L*': {},
'R*': {}
}
# Map of last response (L or R) - condition - sigmoid offset
condition_sigmoid_offsets={
'L*': {},
'R*': {}
}
# Map of logistic parameter (a1 or a2) - condition
condition_logistic_params={
'a1': {},
'a2': {}
}
# Iterate over conditions
for condition,trial_data in subject.session_data.iteritems():
# Dict of coherence levels
condition_coherence_choices['L*'][condition]={}
condition_coherence_choices['R*'][condition]={}
# For each trial
for trial_idx in range(trial_data.shape[0]):
# Get coherence - negative coherences when direction is to the left
coherence=trial_data[trial_idx,2]*trial_data[trial_idx,1]
last_resp=trial_data[trial_idx,5]
resp=trial_data[trial_idx,4]
# Last response was left
if last_resp<0:
if not coherence in condition_coherence_choices['L*'][condition]:
condition_coherence_choices['L*'][condition][coherence]=[]
# Append 0 to list if left (-1) or 1 if right
condition_coherence_choices['L*'][condition][coherence].append(np.max([0,resp]))
# Last response was right
elif last_resp>0:
# List of rightward choices (0=left, 1=right)
if not coherence in condition_coherence_choices['R*'][condition]:
condition_coherence_choices['R*'][condition][coherence]=[]
# Append 0 to list if left (-1) or 1 if right
condition_coherence_choices['R*'][condition][coherence].append(np.max([0,resp]))
# Compute sigmoid offsets
for dir in ['L*','R*']:
choice_probs=[]
full_coherences=[]
for coherence in condition_coherence_choices[dir][condition]:
choice_probs.append(np.mean(condition_coherence_choices[dir][condition][coherence]))
full_coherences.append(coherence)
acc_fit=FitSigmoid(full_coherences, choice_probs, guess=[0.0, 0.2], display=0)
condition_sigmoid_offsets[dir][condition]=acc_fit.inverse(0.5)
# Prepare data for logistic
data=pd.DataFrame({
'resp': np.clip(trial_data[1:,4],0,1),
# negative coherences when direction is to the left
'coh': trial_data[1:,2]*trial_data[1:,1],
'last_resp': trial_data[1:,5]
})
# Fit intercept
data['intercept']=1.0
# Run logistic regression and get params
logit = sm.Logit(data['resp'], data[['coh','last_resp','intercept']])
result = logit.fit(disp=False)
condition_logistic_params['a1'][condition]=result.params['coh']
condition_logistic_params['a2'][condition]=result.params['last_resp']
return condition_coherence_choices, condition_sigmoid_offsets, condition_logistic_params
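# --- Illustrative sketch (assumption, not part of the original analysis) ---
# The same logistic model as above, fit to synthetic trials: choice depends on
# signed coherence (a1) and on the previous response (a2). A positive a2 is the
# signature of choice hysteresis (a bias toward repeating the last choice).
def _hysteresis_logit_sketch(n_trials=500, seed=0):
    rng = np.random.RandomState(seed)
    coh = rng.uniform(-0.5, 0.5, n_trials)
    last_resp = rng.choice([-1.0, 1.0], n_trials)
    # Synthetic decision rule: coherence plus a small repetition bias plus noise
    latent = 6.0 * coh + 0.8 * last_resp + rng.normal(0.0, 1.0, n_trials)
    data = pd.DataFrame({
        'resp': (latent > 0).astype(float),
        'coh': coh,
        'last_resp': last_resp
    })
    data['intercept'] = 1.0
    result = sm.Logit(data['resp'], data[['coh', 'last_resp', 'intercept']]).fit(disp=False)
    # result.params['last_resp'] (a2) should come out positive here
    return result.params['coh'], result.params['last_resp']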
def analyze_accuracy_rt(subjects, output_dir):
"""
Analyze accuracy and RT of all subjects
"""
# Map of condition - coherence - mean accuracy for each subject
condition_coherence_accuracy={}
# Map of condition - coherence - mean RT for each subject
condition_coherence_rt={}
# Map of condition - coherence - mean RT difference with control for each subject
condition_coherence_rt_diff={}
# Map of condition - accuracy threshold for each subject
condition_accuracy_thresh={}
# For each subject
for subj_id in subjects:
subject=subjects[subj_id]
# Get subject accuracy, RT, RT diff, and accuracy threshold
subj_condition_coherence_accuracy, subj_condition_coherence_rt, subj_condition_coherence_rt_diff,\
subj_condition_accuracy_thresh=analyze_subject_accuracy_rt(subject)
# Iterate over conditions
for condition in stim_conditions:
# Init maps
if not condition in condition_coherence_accuracy:
condition_coherence_accuracy[condition]={}
condition_coherence_rt[condition]={}
condition_accuracy_thresh[condition]=[]
            # Add accuracy threshold to list for this condition
condition_accuracy_thresh[condition].append(subj_condition_accuracy_thresh[condition])
# Update accuracy with mean accuracy for this subject
for coherence in subj_condition_coherence_accuracy[condition]:
if not coherence in condition_coherence_accuracy[condition]:
condition_coherence_accuracy[condition][coherence]=[]
condition_coherence_accuracy[condition][coherence].append(np.mean(subj_condition_coherence_accuracy[condition][coherence]))
# Update RT with mean RT for this subject
for coherence in subj_condition_coherence_rt[condition]:
if not coherence in condition_coherence_rt[condition]:
condition_coherence_rt[condition][coherence]=[]
condition_coherence_rt[condition][coherence].append(np.mean(subj_condition_coherence_rt[condition][coherence]))
# Update RT difference
for condition in subj_condition_coherence_rt_diff:
if not condition in condition_coherence_rt_diff:
condition_coherence_rt_diff[condition]={}
for coherence in subj_condition_coherence_rt_diff[condition]:
if not coherence in condition_coherence_rt_diff[condition]:
condition_coherence_rt_diff[condition][coherence]=[]
condition_coherence_rt_diff[condition][coherence].append(subj_condition_coherence_rt_diff[condition][coherence])
# Compute accuracy threshold spread for each condition
thresh_std={}
for condition in stim_conditions:
thresh_std[condition]=np.std(condition_accuracy_thresh[condition])/np.sqrt(len(subjects))
# Generate plots
# One figure for each stimulation condition
for cond_idx, stim_condition in enumerate(['Anode', 'Cathode']):
sham_condition='ShamPre%s' % stim_condition
conditions=[sham_condition, stim_condition]
plot_choice_accuracy(conditions, stim_colors, condition_coherence_accuracy, thresh_std=thresh_std)
plot_choice_rt_scaled(conditions, stim_colors, sham_condition, condition_coherence_rt)
plot_choice_rt_diff(['Anode','Cathode'],stim_colors, condition_coherence_rt_diff)
# Write accuracy data to file
out_file=file(os.path.join(output_dir,'accuracy.csv'),'w')
out_file.write('SubjID')
for condition in condition_coherence_accuracy:
for coherence in sorted(condition_coherence_accuracy[condition]):
out_file.write(',%sCoherence%.4fAccuracy' % (condition,coherence))
out_file.write('\n')
for subj_idx,subj in enumerate(subjects):
out_file.write('%d' % (subj_idx+1))
for condition in condition_coherence_accuracy:
for coherence in sorted(condition_coherence_accuracy[condition]):
out_file.write(',%.4f' % condition_coherence_accuracy[condition][coherence][subj_idx])
out_file.write('\n')
out_file.close()
# Write RT data to file
out_file=file(os.path.join(output_dir,'rt.csv'),'w')
out_file.write('SubjID')
for condition in condition_coherence_rt:
for coherence in sorted(condition_coherence_rt[condition]):
out_file.write(',%sCoherence%.4fRT' % (condition,coherence))
out_file.write('\n')
for subj_idx,subj in enumerate(subjects):
out_file.write('%d' % (subj_idx+1))
for condition in condition_coherence_rt:
for coherence in sorted(condition_coherence_rt[condition]):
out_file.write(',%.4f' % condition_coherence_rt[condition][coherence][subj_idx])
out_file.write('\n')
out_file.close()
# Write RT diff data to file
out_file=file(os.path.join(output_dir,'rt_diff.csv'),'w')
out_file.write('SubjID')
for condition in condition_coherence_rt_diff:
for coherence in sorted(condition_coherence_rt_diff[condition]):
out_file.write(',%sCoherence%.4fRTDiff' % (condition,coherence))
out_file.write('\n')
for subj_idx,subj in enumerate(subjects):
out_file.write('%d' % (subj_idx+1))
for condition in condition_coherence_rt_diff:
for coherence in sorted(condition_coherence_rt_diff[condition]):
out_file.write(',%.4f' % condition_coherence_rt_diff[condition][coherence][subj_idx])
out_file.write('\n')
out_file.close()
# Run stats on accuracy threshold
thresh_results={
'sham': {},
'cathode': {},
'anode': {},
}
(thresh_results['sham']['W'],thresh_results['sham']['p'])=wilcoxon(condition_accuracy_thresh['ShamPreCathode'],condition_accuracy_thresh['ShamPreAnode'])
(thresh_results['cathode']['W'],thresh_results['cathode']['p'])=wilcoxon(condition_accuracy_thresh['ShamPreCathode'],condition_accuracy_thresh['Cathode'])
(thresh_results['anode']['W'],thresh_results['anode']['p'])=wilcoxon(condition_accuracy_thresh['ShamPreAnode'],condition_accuracy_thresh['Anode'])
print('Accuracy Threshold')
for condition, results in thresh_results.iteritems():
N=len(subjects)
print('%s: W(%d)=%.4f, p=%.4f' % (condition, N-1, results['W'], results['p']))
print('')
# Run stats on RT difference
rtdiff_results={
'sham': {'coh': {}, 'intercept':{}},
'anode': {'coh': {}, 'intercept':{}},
'cathode': {'coh': {}, 'intercept':{}}
}
for condition in ['Sham','Anode','Cathode']:
coh=[]
rt_diff=[]
for coherence in condition_coherence_rt_diff[condition]:
for diff in condition_coherence_rt_diff[condition][coherence]:
coh.append(coherence)
rt_diff.append(diff)
data=pd.DataFrame({
'coh': coh,
'rt_diff': rt_diff
})
data['intercept']=1.0
lr = sm.GLM(data['rt_diff'], data[['coh','intercept']])
result = lr.fit()
for param in ['coh','intercept']:
rtdiff_results[condition.lower()][param]['x']=result.params[param]
rtdiff_results[condition.lower()][param]['t']=result.tvalues[param]
rtdiff_results[condition.lower()][param]['p']=result.pvalues[param]
print('RT Diff')
for condition, results in rtdiff_results.iteritems():
print('%s, B1: x=%.4f, t=%.4f, p=%.4f' % (condition, results['coh']['x'], results['coh']['t'],
results['coh']['p']))
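# Note: the GLM above fits the linear model rt_diff ~ B1*coh + intercept for
# each condition, so the reported B1 ('coh') captures how the RT difference
# scales with coherence. For example, a fit of roughly
#   rt_diff = -50*coh + 30
# would mean the RT difference shrinks by about 5 for every 0.1 increase in
# coherence (values here are purely illustrative, not from the data).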
def analyze_choice_hysteresis(subjects, output_dir):
"""
Analyze choice hysteresis of all subjects
"""
# Map of last response (L or R) - condition - coherence - average choice for each subject
condition_coherence_choices={
'L*': {},
'R*': {}
}
# Map of last response (L or R) - condition - sigmoid offset of each subject
condition_sigmoid_offsets={
'L*': {},
'R*': {}
}
# Map of logistic parameter (a1 or a2) - condition - parameter value for each subject
condition_logistic_params={
'a1': {},
'a2': {}
}
# For each subject
for subj_id in subjects:
subject=subjects[subj_id]
# Get choices, sigmoid offsets, logistic params
subj_condition_coherence_choices, subj_condition_sigmoid_offsets, subj_condition_logistic_params=analyze_subject_choice_hysteresis(subject)
# For each condition
for condition in stim_conditions:
# Init maps
if not condition in condition_coherence_choices['L*']:
condition_coherence_choices['L*'][condition]={}
condition_coherence_choices['R*'][condition]={}
condition_sigmoid_offsets['L*'][condition]=[]
condition_sigmoid_offsets['R*'][condition]=[]
condition_logistic_params['a1'][condition]=[]
condition_logistic_params['a2'][condition]=[]
# Update sigmoid offsets
condition_sigmoid_offsets['L*'][condition].append(subj_condition_sigmoid_offsets['L*'][condition])
condition_sigmoid_offsets['R*'][condition].append(subj_condition_sigmoid_offsets['R*'][condition])
# Update logistic params
condition_logistic_params['a1'][condition].append(subj_condition_logistic_params['a1'][condition])
condition_logistic_params['a2'][condition].append(subj_condition_logistic_params['a2'][condition])
# Update L* choices
for coherence in subj_condition_coherence_choices['L*'][condition]:
if not coherence in condition_coherence_choices['L*'][condition]:
condition_coherence_choices['L*'][condition][coherence]=[]
condition_coherence_choices['L*'][condition][coherence].append(np.mean(subj_condition_coherence_choices['L*'][condition][coherence]))
# Update R* choices
for coherence in subj_condition_coherence_choices['R*'][condition]:
if not coherence in condition_coherence_choices['R*'][condition]:
condition_coherence_choices['R*'][condition][coherence]=[]
condition_coherence_choices['R*'][condition][coherence].append(np.mean(subj_condition_coherence_choices['R*'][condition][coherence]))
# Plot histograms
# One plot for each stimulation condition
for cond_idx, stim_condition in enumerate(['Anode', 'Cathode']):
conditions=['ShamPre%s' % stim_condition, stim_condition]
plot_indecision_hist(conditions, stim_colors, condition_sigmoid_offsets)
plot_logistic_parameter_ratio(conditions, stim_colors, condition_logistic_params)
# Output indecision point data (sigmoid offsets)
output_file=file(os.path.join(output_dir,'indecision.csv'),'w')
output_file.write('SubjID')
for direction in ['L*','R*']:
for condition in condition_sigmoid_offsets[direction]:
output_file.write(',%s%s' % (direction, condition))
output_file.write('\n')
for subj_idx, subj in enumerate(subjects):
output_file.write('%d' % (subj_idx+1))
for direction in ['L*','R*']:
for condition in condition_sigmoid_offsets[direction]:
output_file.write(',%.4f' % condition_sigmoid_offsets[direction][condition][subj_idx])
output_file.write('\n')
output_file.close()
# Output logistic params
output_file=file(os.path.join(output_dir,'logistic.csv'),'w')
output_file.write('SubjID')
for param in ['a1','a2']:
for condition in condition_logistic_params[param]:
output_file.write(',%s%s' % (param, condition))
output_file.write('\n')
for subj_idx, subj in enumerate(subjects):
output_file.write('%d' % (subj_idx+1))
for param in ['a1','a2']:
for condition in condition_logistic_params[param]:
output_file.write(',%.4f' % condition_logistic_params[param][condition][subj_idx])
output_file.write('\n')
output_file.close()
# Run stats on indecision point
indec_results={
'sham': {},
'anode': {},
'cathode': {},
'sham_anode': {},
'sham_cathode': {},
}
(indec_results['sham']['W'],indec_results['sham']['p'])=wilcoxon(np.array(condition_sigmoid_offsets['L*']['ShamPreCathode'])-np.array(condition_sigmoid_offsets['R*']['ShamPreCathode']),
np.array(condition_sigmoid_offsets['L*']['ShamPreAnode'])-np.array(condition_sigmoid_offsets['R*']['ShamPreAnode']))
(indec_results['cathode']['W'],indec_results['cathode']['p'])=wilcoxon(np.array(condition_sigmoid_offsets['L*']['ShamPreCathode'])-np.array(condition_sigmoid_offsets['R*']['ShamPreCathode']),
np.array(condition_sigmoid_offsets['L*']['Cathode'])-np.array(condition_sigmoid_offsets['R*']['Cathode']))
(indec_results['anode']['W'],indec_results['anode']['p'])=wilcoxon(np.array(condition_sigmoid_offsets['L*']['ShamPreAnode'])-np.array(condition_sigmoid_offsets['R*']['ShamPreAnode']),
np.array(condition_sigmoid_offsets['L*']['Anode'])-np.array(condition_sigmoid_offsets['R*']['Anode']))
(indec_results['sham_anode']['W'],indec_results['sham_anode']['p'])=wilcoxon(np.array(condition_sigmoid_offsets['L*']['ShamPreAnode'])-np.array(condition_sigmoid_offsets['R*']['ShamPreAnode']))
(indec_results['sham_cathode']['W'],indec_results['sham_cathode']['p'])=wilcoxon(np.array(condition_sigmoid_offsets['L*']['ShamPreCathode'])-np.array(condition_sigmoid_offsets['R*']['ShamPreCathode']))
print('Indecision Points')
for condition, results in indec_results.iteritems():
print('%s: W=%.4f, p=%.4f' % (condition, results['W'], results['p']))
print('')
# Run stats on logistic parameters
log_results={
'sham': {},
'anode': {},
'cathode': {},
'sham_anode': {},
'sham_cathode': {},
}
sham_anode_ratio=np.array(condition_logistic_params['a2']['ShamPreAnode'])/np.array(condition_logistic_params['a1']['ShamPreAnode'])
sham_cathode_ratio=np.array(condition_logistic_params['a2']['ShamPreCathode'])/np.array(condition_logistic_params['a1']['ShamPreCathode'])
(log_results['sham']['W'],log_results['sham']['p'])=wilcoxon(sham_anode_ratio, sham_cathode_ratio)
(log_results['sham_anode']['W'],log_results['sham_anode']['p'])=wilcoxon(sham_anode_ratio)
(log_results['sham_cathode']['W'],log_results['sham_cathode']['p'])=wilcoxon(sham_cathode_ratio)
anode_ratio=np.array(condition_logistic_params['a2']['Anode'])/np.array(condition_logistic_params['a1']['Anode'])
(log_results['anode']['W'],log_results['anode']['p'])=wilcoxon(sham_anode_ratio, anode_ratio)
cathode_ratio=np.array(condition_logistic_params['a2']['Cathode'])/np.array(condition_logistic_params['a1']['Cathode'])
(log_results['cathode']['W'],log_results['cathode']['p'])=wilcoxon(sham_cathode_ratio, cathode_ratio)
print('Logistic Regression')
for condition, results in log_results.iteritems():
print('%s: W=%.4f, p=%.4f' % (condition, results['W'],results['p']))
print('')
def plot_choice_accuracy(plot_conditions, plot_colors, condition_coherence_accuracy, thresh_std=None):
"""
Plot choice accuracy over coherence level for each condition
colors = map (condition, color)
condition_coherence_accuracy - map of condition - coherence - mean accuracy for each subject
thresh_std = accuracy threshold spread for each condition
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# Plot stimulation condition and preceding sham condition
for condition in plot_conditions:
# Fit accuracy to Weibull
coherences = sorted(condition_coherence_accuracy[condition].keys())
mean_accuracy = [np.mean(condition_coherence_accuracy[condition][coherence]) for coherence in coherences]
stderr_accuracy = [np.std(condition_coherence_accuracy[condition][coherence])/np.sqrt(len(condition_coherence_accuracy[condition][coherence])) for coherence in coherences]
acc_fit = FitWeibull(coherences, mean_accuracy, guess=[0.0, 0.2], display=0)
# Plot with error
smoothInt = np.arange(.01, 1.0, 0.001)
smoothResp = acc_fit.eval(smoothInt)
ax.semilogx(smoothInt, smoothResp, plot_colors[condition], label=condition)
ax.errorbar(coherences, mean_accuracy, yerr=stderr_accuracy, fmt='o%s' % plot_colors[condition])
# Plot threshold and spread
thresh=acc_fit.inverse(0.8)
ax.plot([thresh,thresh],[0.5,1],'--%s' % plot_colors[condition])
if thresh_std is not None:
rect=Rectangle((thresh-.5*thresh_std[condition],0.5),thresh_std[condition], .5, alpha=0.25,
facecolor=plot_colors[condition], edgecolor='none')
ax.add_patch(rect)
ax.set_xlim([0.01,1.0])
ax.set_ylim([0.5,1.0])
ax.legend(loc='best')
ax.set_xlabel('Coherence')
ax.set_ylabel('% Correct')
def plot_choice_rt_scaled(plot_conditions, plot_colors, control_condition, condition_coherence_rt):
"""
Plot RT over coherence level for each condition - scaled to min and max during sham
colors = map (condition, color)
condition_coherence_rt - map of condition - coherence - mean RT for each subject
"""
# Single figure comparing the plotted conditions (stimulation condition and its preceding sham)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# Compute scale based on min and max RT during corresponding sham condition
control_coherences=sorted(condition_coherence_rt[control_condition].keys())
control_mean_rt = [np.mean(condition_coherence_rt[control_condition][x]) for x in control_coherences]
scale=1/(np.max(control_mean_rt)-np.min(control_mean_rt))
# Plot stimulation condition and preceding sham condition
for condition in plot_conditions:
# Scale and fit RT
coherences = sorted(condition_coherence_rt[condition].keys())
mean_rt = [scale*(np.mean(condition_coherence_rt[condition][coherence])-np.min(control_mean_rt)) for coherence in coherences]
stderr_rt = [scale*np.std(condition_coherence_rt[condition][coherence])/np.sqrt(len(condition_coherence_rt[condition][coherence])) for coherence in coherences]
rt_fit = FitRT(coherences, mean_rt, guess=[1,1,1], display=0)
# Plot with error
smoothInt = np.arange(.01, 1.0, 0.001)
smoothRT = rt_fit.eval(smoothInt)
ax.semilogx(smoothInt, smoothRT, plot_colors[condition], label=condition)
ax.errorbar(coherences, mean_rt, yerr=stderr_rt, fmt='o%s' % plot_colors[condition])
ax.legend(loc='best')
ax.set_xlabel('Coherence')
ax.set_ylabel('RT')
ax.set_xlim([0.01,1])
ax.set_ylim([-0.2, 1.6])
def plot_choice_rt_diff(plot_conditions, plot_colors, condition_coherence_rt_diff):
"""\
Plot RT difference over coherence level for each stimulation condition
colors = map (condition, color)
condition_coherence_rt - map of condition - coherence - mean RT difference for each subject
"""
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
# Plot each stimulation condition
for condition in plot_conditions:
# Fit difference to a line
coherences = np.array(sorted(condition_coherence_rt_diff[condition].keys()))
mean_diff=np.array([np.mean(condition_coherence_rt_diff[condition][coherence]) for coherence in coherences])
stderr_diff=[np.std(condition_coherence_rt_diff[condition][coherence])/np.sqrt(len(condition_coherence_rt_diff[condition][coherence])) for coherence in coherences]
clf = LinearRegression()
clf.fit(np.expand_dims(coherences,axis=1),np.expand_dims(mean_diff,axis=1))
a = clf.coef_[0][0]
b = clf.intercept_[0]
r_sqr=clf.score(np.expand_dims(coherences,axis=1), np.expand_dims(mean_diff,axis=1))
# Plot line with error
ax.plot([np.min(coherences), np.max(coherences)], [a * np.min(coherences) + b, a * np.max(coherences) + b], '--%s' % plot_colors[condition],
label='r^2=%.3f' % r_sqr)
ax.errorbar(coherences, mean_diff, yerr=stderr_diff, fmt='o%s' % plot_colors[condition], label=condition)
ax.legend(loc='best')
ax.set_xlim([0,0.55])
ax.set_ylim([-80,100])
ax.set_xlabel('Coherence')
ax.set_ylabel('RT Difference')
def plot_logistic_parameter_ratio(plot_conditions, plot_colors, condition_logistic_params, xlim=[-.1,.2], ylim=[0,35]):
"""
Plot logistic parameter ratio (a2/a1) as a histogram
colors = map (condition, color)
condition_logistic_params = map of logistic parameter (a1 or a2) - condition - parameter value for each subject
"""
# Plot limits and bin width
xx=np.arange(xlim[0],xlim[1],0.001)
binwidth=.02
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# For stimulation condition and preceding sham condition
for condition in plot_conditions:
# Compute ratio
ratio=np.array(condition_logistic_params['a2'][condition]) / np.array(condition_logistic_params['a1'][condition])
# Plot histogram
bins=np.arange(min(ratio), max(ratio) + binwidth, binwidth)
hist,edges=np.histogram(ratio, bins=bins)
center = (bins[:-1] + bins[1:]) / 2
ax.bar(center, hist/float(len(ratio))*100.0, color=plot_colors[condition], alpha=0.75, label=condition, width=binwidth)
# Fit and plot Gaussian
(mu, sigma) = norm.fit(ratio)
y = normpdf(xx, mu, sigma)*binwidth*100.0
ax.plot(xx, y, '%s--' % plot_colors[condition], linewidth=2)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.legend(loc='best')
ax.set_xlabel('a2/a1')
ax.set_ylabel('% subjects')
def plot_indecision_hist(plot_conditions, plot_colors, condition_sigmoid_offsets, xlim=[-.2,.4], ylim=[0,30]):
"""
Plot indecision point histogram
colors = map (condition, color)
condition_sigmoid_offsets = map of last response (L or R) - condition - sigmoid offset of each subject
"""
# Plot limits and bin width
xx=np.arange(xlim[0],xlim[1],0.001)
binwidth=0.03
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# For stimulation condition and preceding sham condition
for condition in plot_conditions:
# Compute difference (shift in indecision point)
diff=np.array(condition_sigmoid_offsets['L*'][condition])-np.array(condition_sigmoid_offsets['R*'][condition])
# Plot histogram
bins=np.arange(min(diff), max(diff)+binwidth, binwidth)
hist,edges=np.histogram(diff, bins=bins)
center = (bins[:-1] + bins[1:]) / 2
ax.bar(center, hist/float(len(diff))*100.0, color=plot_colors[condition], alpha=0.75, label=condition, width=binwidth)
# Fit and plot Gaussian
(mu, sigma) = norm.fit(diff)
y = normpdf(xx, mu, sigma)*binwidth*100.0
ax.plot(xx, y,'%s--' % plot_colors[condition], linewidth=2)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xlabel('Left*-Right* Indifference')
ax.set_ylabel('% subjects')
ax.legend(loc='best')
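# For reference, condition_sigmoid_offsets is expected to be nested as
# last-response -> condition -> list of per-subject offsets, e.g. (illustrative
# values only):
#   condition_sigmoid_offsets['L*']['Anode'] = [0.08, 0.12, ...]
#   condition_sigmoid_offsets['R*']['Anode'] = [0.02, 0.05, ...]
# so the histogram above is built from the per-subject L* - R* differences.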
def compare_stim_isi_hysteresis(stim_subjects, isi_subjects):
# Map of last response (L or R) - condition - sigmoid offset of each subject
stim_condition_sigmoid_offsets={
'L*': {},
'R*': {}
}
# Map of logistic parameter (a1 or a2) - condition - parameter value for each subject
stim_condition_logistic_params={
'a1': {},
'a2': {}
}
# For each subject
for subj_id in stim_subjects:
subject=stim_subjects[subj_id]
# Get choices, sigmoid offsets, logistic params
subj_condition_coherence_choices, subj_condition_sigmoid_offsets, subj_condition_logistic_params=analyze_subject_choice_hysteresis(subject)
# For each condition
for condition in stim_conditions:
# Init maps
if not condition in stim_condition_sigmoid_offsets['L*']:
stim_condition_sigmoid_offsets['L*'][condition]=[]
stim_condition_sigmoid_offsets['R*'][condition]=[]
stim_condition_logistic_params['a1'][condition]=[]
stim_condition_logistic_params['a2'][condition]=[]
# Update sigmoid offsets
stim_condition_sigmoid_offsets['L*'][condition].append(subj_condition_sigmoid_offsets['L*'][condition])
stim_condition_sigmoid_offsets['R*'][condition].append(subj_condition_sigmoid_offsets['R*'][condition])
# Update logistic params
stim_condition_logistic_params['a1'][condition].append(subj_condition_logistic_params['a1'][condition])
stim_condition_logistic_params['a2'][condition].append(subj_condition_logistic_params['a2'][condition])
# Map of last response (L or R) - condition - sigmoid offset of each subject
isi_sigmoid_offsets={
'L*': {
'low':[],
'high':[]
},
'R*': {
'low':[],
'high':[]
}
}
# Map of logistic parameter (a1 or a2) - condition - parameter value for each subject
isi_logistic_params={
'a1': {
'low':[],
'high':[]
},
'a2': {
'low':[],
'high':[]
}
}
# For each subject
for subj_id in isi_subjects:
subject=isi_subjects[subj_id]
# Get choices, sigmoid offsets, logistic params
subj_isi_coherence_choices, subj_isi_sigmoid_offsets, subj_isi_logistic_params=analyze_subject_isi_choice_hysteresis(subject)
# For each condition
for isi in isi_conditions:
# Update sigmoid offsets
isi_sigmoid_offsets['L*'][isi].append(subj_isi_sigmoid_offsets['L*'][isi])
isi_sigmoid_offsets['R*'][isi].append(subj_isi_sigmoid_offsets['R*'][isi])
# Update logistic params
isi_logistic_params['a1'][isi].append(subj_isi_logistic_params['a1'][isi])
isi_logistic_params['a2'][isi].append(subj_isi_logistic_params['a2'][isi])
indec_results={
'sham_anode': {},
'sham_cathode': {},
}
(indec_results['sham_anode']['U'],indec_results['sham_anode']['p'])=mannwhitneyu(np.array(stim_condition_sigmoid_offsets['L*']['ShamPreAnode'])-np.array(stim_condition_sigmoid_offsets['R*']['ShamPreAnode']),np.array(isi_sigmoid_offsets['L*']['low'])-np.array(isi_sigmoid_offsets['R*']['low']))
(indec_results['sham_cathode']['U'],indec_results['sham_cathode']['p'])=mannwhitneyu(np.array(stim_condition_sigmoid_offsets['L*']['ShamPreCathode'])-np.array(stim_condition_sigmoid_offsets['R*']['ShamPreCathode']),np.array(isi_sigmoid_offsets['L*']['low'])-np.array(isi_sigmoid_offsets['R*']['low']))
print('Indecision Points')
for condition, results in indec_results.iteritems():
print('%s: U=%.4f, p=%.4f' % (condition, results['U'], results['p']))
print('')
# Run stats on logistic parameters
log_results={
'sham_anode': {},
'sham_cathode': {},
}
sham_anode_ratio=np.array(stim_condition_logistic_params['a2']['ShamPreAnode'])/np.array(stim_condition_logistic_params['a1']['ShamPreAnode'])
sham_cathode_ratio=np.array(stim_condition_logistic_params['a2']['ShamPreCathode'])/np.array(stim_condition_logistic_params['a1']['ShamPreCathode'])
isi_ratio=np.array(isi_logistic_params['a2']['low'])/np.array(isi_logistic_params['a1']['low'])
(log_results['sham_anode']['U'],log_results['sham_anode']['p'])=mannwhitneyu(sham_anode_ratio,isi_ratio)
(log_results['sham_cathode']['U'],log_results['sham_cathode']['p'])=mannwhitneyu(sham_cathode_ratio,isi_ratio)
print('Logistic Regression')
for condition, results in log_results.iteritems():
print('%s: U=%.4f, p=%.4f' % (condition, results['U'],results['p']))
if __name__=='__main__':
print('*** Main analysis ***')
data_dir='../../../rdmd/data/stim'
stim_subjects=read_subjects(data_dir)
analyze_choice_hysteresis(stim_subjects, data_dir)
analyze_accuracy_rt(stim_subjects, data_dir)
print('\n*** ISI analysis ***')
data_dir='../../../rdmd/data/isi'
isi_subjects=read_isi_subjects(data_dir)
analyze_isi_choice_hysteresis(isi_subjects, data_dir)
analyze_isi_accuracy_rt(isi_subjects, data_dir)
plt.show()
print('\n*** Compare ISI analysis ***')
compare_stim_isi_hysteresis(stim_subjects, isi_subjects)
|
kwikadi/orange3
|
refs/heads/master
|
Orange/widgets/tests/test_owselectcolumns.py
|
11
|
from unittest import TestCase
from unittest.mock import Mock
from Orange.data import ContinuousVariable, DiscreteVariable, Domain
from Orange.widgets.data.contexthandlers import \
SelectAttributesDomainContextHandler
from Orange.widgets.settings import ContextSetting
from Orange.widgets.utils import vartype
Continuous = vartype(ContinuousVariable())
Discrete = vartype(DiscreteVariable())
class TestSelectAttributesDomainContextHandler(TestCase):
def setUp(self):
self.domain = Domain(
attributes=[ContinuousVariable('c1'),
DiscreteVariable('d1', values='abc'),
DiscreteVariable('d2', values='def')],
class_vars=[DiscreteVariable('d3', values='ghi')],
metas=[ContinuousVariable('c2'),
DiscreteVariable('d4', values='jkl')]
)
self.args = (self.domain,
{'c1': Continuous, 'd1': Discrete,
'd2': Discrete, 'd3': Discrete},
{'c2': Continuous, 'd4': Discrete, })
self.handler = SelectAttributesDomainContextHandler(metas_in_res=True)
self.handler.read_defaults = lambda: None
def test_open_context(self):
self.handler.bind(SimpleWidget)
context = Mock(
attributes=self.args[1], metas=self.args[2], values=dict(
domain_role_hints=({('d1', Discrete): ('available', 0),
('d2', Discrete): ('meta', 0),
('c1', Continuous): ('attribute', 0),
('d3', Discrete): ('attribute', 1),
('d4', Discrete): ('attribute', 2),
('c2', Continuous): ('class', 0)}, -2),
with_metas=[('d1', Discrete), ('d2', Discrete)]
))
self.handler.global_contexts = \
[Mock(values={}), context, Mock(values={})]
widget = SimpleWidget()
self.handler.initialize(widget)
self.handler.open_context(widget, self.args[0])
self.assertEqual(widget.domain_role_hints,
{('d1', Discrete): ('available', 0),
('d2', Discrete): ('meta', 0),
('c1', Continuous): ('attribute', 0),
('d3', Discrete): ('attribute', 1),
('d4', Discrete): ('attribute', 2),
('c2', Continuous): ('class', 0)})
def test_open_context_with_imperfect_match(self):
self.handler.bind(SimpleWidget)
context = Mock(values=dict(
domain_role_hints=({('d1', Discrete): ('available', 0),
('d2', Discrete): ('meta', 0),
('c1', Continuous): ('attribute', 0),
('d6', Discrete): ('attribute', 1),
('d7', Discrete): ('attribute', 2),
('c2', Continuous): ('class', 0)}, -2)
))
self.handler.global_contexts = \
[Mock(values={}), context, Mock(values={})]
widget = SimpleWidget()
self.handler.initialize(widget)
self.handler.open_context(widget, self.args[0])
self.assertEqual(widget.domain_role_hints,
{('d1', Discrete): ('available', 0),
('d2', Discrete): ('meta', 0),
('c1', Continuous): ('attribute', 0),
('c2', Continuous): ('class', 0)})
def test_open_context_with_no_match(self):
self.handler.bind(SimpleWidget)
context = Mock(values=dict(
domain_role_hints=({('d1', Discrete): ('available', 0),
('d2', Discrete): ('meta', 0),
('c1', Continuous): ('attribute', 0),
('d3', Discrete): ('attribute', 1),
('d4', Discrete): ('attribute', 2),
('c2', Continuous): ('class', 0)}, -2),
required=('g1', Continuous),
))
self.handler.global_contexts = [context]
widget = SimpleWidget()
self.handler.initialize(widget)
self.handler.open_context(widget, self.args[0])
self.assertEqual(widget.domain_role_hints, {})
class SimpleWidget:
domain_role_hints = ContextSetting({}, exclude_metas=False)
required = ContextSetting("", required=ContextSetting.REQUIRED)
def retrieveSpecificSettings(self):
pass
def storeSpecificSettings(self):
pass
|
mirac/msite2
|
refs/heads/master
|
msite2/settings.py
|
1
|
"""
Django settings for msite2 project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0dlg5&pb^@1om5vqkr-fyncy1=oc99*zwt-q)*%ksucxqx4nf*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'msite2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'msite2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
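# Illustrative alternative (not used by this project): the same setting could
# point at PostgreSQL instead of SQLite; database name and environment
# variable names below are placeholders.
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.postgresql_psycopg2',
#         'NAME': 'msite2',
#         'USER': os.environ.get('DB_USER', 'msite2'),
#         'PASSWORD': os.environ.get('DB_PASSWORD', ''),
#         'HOST': 'localhost',
#         'PORT': '5432',
#     }
# }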
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
|
camilonova/sentry
|
refs/heads/master
|
src/sentry/api/endpoints/group_markseen.py
|
1
|
from __future__ import absolute_import
from django.utils import timezone
from rest_framework.response import Response
from sentry.api.base import Endpoint
from sentry.api.permissions import assert_perm
from sentry.db.models import create_or_update
from sentry.models import Project, Group, GroupSeen
from sentry.utils.functional import extract_lazy_object
class GroupMarkSeenEndpoint(Endpoint):
def post(self, request, group_id):
group = Group.objects.get(
id=group_id,
)
assert_perm(group, request.user, request.auth)
if group.project not in Project.objects.get_for_user(
team=group.project.team, user=request.user):
return Response(status=400)
instance, created = create_or_update(
GroupSeen,
group=group,
user=extract_lazy_object(request.user),
project=group.project,
defaults={
'last_seen': timezone.now(),
}
)
if created:
return Response(status=201)
return Response(status=204)
|
Suninus/NewsBlur
|
refs/heads/master
|
apps/analyzer/tests.py
|
19
|
from django.test.client import Client
from apps.rss_feeds.models import MStory
from django.test import TestCase
from django.core import management
# from apps.analyzer.classifier import FisherClassifier
import nltk
from itertools import groupby
from apps.analyzer.tokenizer import Tokenizer
from vendor.reverend.thomas import Bayes
from apps.analyzer.phrase_filter import PhraseFilter
class QuadgramCollocationFinder(nltk.collocations.AbstractCollocationFinder):
"""A tool for the finding and ranking of quadgram collocations or other association measures.
It is often useful to use from_words() rather than constructing an instance directly.
"""
def __init__(self, word_fd, quadgram_fd, trigram_fd, bigram_fd, wildcard_fd):
"""Construct a TrigramCollocationFinder, given FreqDists for appearances of words, bigrams, two words with any word between them,and trigrams."""
nltk.collocations.AbstractCollocationFinder.__init__(self, word_fd, quadgram_fd)
self.trigram_fd = trigram_fd
self.bigram_fd = bigram_fd
self.wildcard_fd = wildcard_fd
@classmethod
def from_words(cls, words):
wfd = nltk.probability.FreqDist()
qfd = nltk.probability.FreqDist()
tfd = nltk.probability.FreqDist()
bfd = nltk.probability.FreqDist()
wildfd = nltk.probability.FreqDist()
for w1, w2, w3 ,w4 in nltk.util.ingrams(words, 4, pad_right=True):
wfd.inc(w1)
if w4 is None:
continue
else:
qfd.inc((w1,w2,w3,w4))
bfd.inc((w1,w2))
tfd.inc((w1,w2,w3))
wildfd.inc((w1,w3,w4))
wildfd.inc((w1,w2,w4))
return cls(wfd, qfd, tfd, bfd, wildfd)
def score_ngram(self, score_fn, w1, w2, w3, w4):
n_all = self.word_fd.N()
n_iiii = self.ngram_fd[(w1, w2, w3, w4)]
if not n_iiii:
return
n_iiix = self.bigram_fd[(w1, w2)]
n_iixi = self.bigram_fd[(w2, w3)]
n_ixii = self.bigram_fd[(w3, w4)]
n_xiii = self.bigram_fd[(w3, w4)]
n_iixx = self.word_fd[w1]
n_ixix = self.word_fd[w2]
n_ixxi = self.word_fd[w3]
n_ixxx = self.word_fd[w4]
n_xiix = self.trigram_fd[(w1, w2)]
n_xixi = self.trigram_fd[(w2, w3)]
n_xxii = self.trigram_fd[(w3, w4)]
n_xxxi = self.trigram_fd[(w3, w4)]
return score_fn(n_iiii,
(n_iiix, n_iixi, n_ixii, n_xiii),
(n_iixx, n_ixix, n_ixxi, n_ixxx),
(n_xiix, n_xixi, n_xxii, n_xxxi),
n_all)
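# Sketch of the intended usage (assuming the old NLTK API this class was
# written against, where nltk.util.ingrams and FreqDist.inc() exist):
#   finder = QuadgramCollocationFinder.from_words(tokens)
#   finder.apply_freq_filter(2)
# after which finder.ngram_fd holds quadgram frequencies, mirroring how the
# built-in TrigramCollocationFinder is used in the test below.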
class CollocationTest(TestCase):
fixtures = ['brownstoner.json']
def setUp(self):
self.client = Client()
def test_bigrams(self):
# bigram_measures = nltk.collocations.BigramAssocMeasures()
trigram_measures = nltk.collocations.TrigramAssocMeasures()
tokens = [
'Co-op', 'of', 'the', 'day',
'House', 'of', 'the', 'day',
'Condo', 'of', 'the', 'day',
'Development', 'Watch',
'Co-op', 'of', 'the', 'day',
]
finder = nltk.collocations.TrigramCollocationFinder.from_words(tokens)
finder.apply_freq_filter(2)
# return the 10 n-grams with the highest PMI
print finder.nbest(trigram_measures.pmi, 10)
titles = [
'Co-op of the day',
'Condo of the day',
'Co-op of the day',
'House of the day',
'Development Watch',
'Streetlevel',
]
tokens = nltk.tokenize.word_tokenize(' '.join(titles))
ngrams = nltk.ngrams(tokens, 4)
d = [key for key, group in groupby(sorted(ngrams)) if len(list(group)) >= 2]
print d
class ClassifierTest(TestCase):
fixtures = ['classifiers.json', 'brownstoner.json']
def setUp(self):
self.client = Client()
#
# def test_filter(self):
# user = User.objects.all()
# feed = Feed.objects.all()
#
# management.call_command('loaddata', 'brownstoner.json', verbosity=0)
# response = self.client.get('/reader/refresh_feed', { "feed_id": 1, "force": True })
# management.call_command('loaddata', 'brownstoner2.json', verbosity=0)
# response = self.client.get('/reader/refresh_feed', { "feed_id": 1, "force": True })
# management.call_command('loaddata', 'gothamist1.json', verbosity=0)
# response = self.client.get('/reader/refresh_feed', { "feed_id": 4, "force": True })
# management.call_command('loaddata', 'gothamist2.json', verbosity=0)
# response = self.client.get('/reader/refresh_feed', { "feed_id": 4, "force": True })
#
# stories = Story.objects.filter(story_feed=feed[1]).order_by('-story_date')[:100]
#
# phrasefilter = PhraseFilter()
# for story in stories:
# # print story.story_title, story.id
# phrasefilter.run(story.story_title, story.id)
#
# phrasefilter.pare_phrases()
# phrasefilter.print_phrases()
#
def test_train(self):
# user = User.objects.all()
# feed = Feed.objects.all()
management.call_command('loaddata', 'brownstoner.json', verbosity=0)
management.call_command('refresh_feed', force=1, feed=1, single_threaded=True, daemonize=False)
management.call_command('loaddata', 'brownstoner2.json', verbosity=0)
management.call_command('refresh_feed', force=1, feed=1, single_threaded=True, daemonize=False)
stories = MStory.objects(story_feed_id=1)[:53]
phrasefilter = PhraseFilter()
for story in stories:
# print story.story_title, story.id
phrasefilter.run(story.story_title, story.id)
phrasefilter.pare_phrases()
phrases = phrasefilter.get_phrases()
print phrases
tokenizer = Tokenizer(phrases)
classifier = Bayes(tokenizer) # FisherClassifier(user[0], feed[0], phrases)
classifier.train('good', 'House of the Day: 393 Pacific St.')
classifier.train('good', 'House of the Day: 393 Pacific St.')
classifier.train('good', 'Condo of the Day: 393 Pacific St.')
classifier.train('good', 'Co-op of the Day: 393 Pacific St. #3')
classifier.train('good', 'Co-op of the Day: 393 Pacific St. #3')
classifier.train('good', 'Development Watch: 393 Pacific St. #3')
classifier.train('bad', 'Development Watch: 393 Pacific St. #3')
classifier.train('bad', 'Development Watch: 393 Pacific St. #3')
classifier.train('bad', 'Development Watch: 393 Pacific St. #3')
classifier.train('bad', 'Streetlevel: 393 Pacific St. #3')
guess = dict(classifier.guess('Co-op of the Day: 413 Atlantic'))
self.assertTrue(guess['good'] > .99)
self.assertTrue('bad' not in guess)
guess = dict(classifier.guess('House of the Day: 413 Atlantic'))
self.assertTrue(guess['good'] > .99)
self.assertTrue('bad' not in guess)
guess = dict(classifier.guess('Development Watch: Yatta'))
self.assertTrue(guess['bad'] > .7)
self.assertTrue(guess['good'] < .3)
guess = dict(classifier.guess('Development Watch: 393 Pacific St.'))
self.assertTrue(guess['bad'] > .7)
self.assertTrue(guess['good'] < .3)
guess = dict(classifier.guess('Streetlevel: 123 Carlton St.'))
self.assertTrue(guess['bad'] > .99)
self.assertTrue('good' not in guess)
guess = classifier.guess('Extra, Extra')
self.assertTrue('bad' not in guess)
self.assertTrue('good' not in guess)
guess = classifier.guess('Nothing doing: 393 Pacific St.')
self.assertTrue('bad' not in guess)
self.assertTrue('good' not in guess)
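# Note on the assertions above: Bayes.guess() from reverend returns
# (label, score) pairs, which is why each result is wrapped in dict(),
# e.g. something like [('good', 0.99), ('bad', 0.01)] (values illustrative,
# inferred from the dict() wrapping); labels with no evidence are simply
# absent from the resulting dict.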
|
sbryan12144/BeastMode-Elite
|
refs/heads/master
|
scripts/build-all.py
|
1250
|
#! /usr/bin/env python
# Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Code Aurora nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import errno
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import shutil
import sys
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
'KCONFIG_NOTIMESTAMP': 'true' })
all_options = {}
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
defconfig = open(file, 'a')
defconfig.write(str + '\n')
defconfig.close()
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
for n in glob.glob('arch/arm/configs/[fm]sm[0-9-]*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/qsd*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/apq*_defconfig'):
names[os.path.basename(n)[:-10]] = n
return names
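# For example, a file such as 'arch/arm/configs/msm8660_defconfig' would end
# up in the map as {'msm8660': 'arch/arm/configs/msm8660_defconfig'}: the
# trailing '_defconfig' (10 characters) is stripped to form the key.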
class Builder:
def __init__(self, logname):
self.logname = logname
self.fd = open(logname, 'w')
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
# for line in proc.stdout:
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.close()
return result
failed_targets = []
def build(target):
dest_dir = os.path.join(build_dir, target)
log_name = '%s/log-%s.log' % (build_dir, target)
print 'Building %s in %s log %s' % (target, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = 'arch/arm/configs/%s_defconfig' % target
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
shutil.copyfile(defconfig, dotconfig)
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
build = Builder(log_name)
result = build.run(['make', 'O=%s' % dest_dir] + make_command)
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" % (target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=make_env, stdin=devnull)
devnull.close()
shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs.keys():
print " %s" % target
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs, configs.keys())
elif args == ['perf']:
targets = []
for t in configs.keys():
if "perf" in t:
targets.append(t)
build_many(configs, targets)
elif args == ['noperf']:
targets = []
for t in configs.keys():
if "perf" not in t:
targets.append(t)
build_many(configs, targets)
elif len(args) > 0:
targets = []
for t in args:
if t not in configs.keys():
parser.error("Target '%s' not one of %s" % (t, configs.keys()))
targets.append(t)
build_many(configs, targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
|
HoliestCow/ece692_deeplearning
|
refs/heads/master
|
project5/gru/crnn.py
|
1
|
import tensorflow as tf
import numpy as np
import time
import h5py
# import matplotlib.pyplot as plt
# from sklearn.metrics import confusion_matrix
# import itertools
# from copy import deepcopy
# import os
# import os.path
from collections import OrderedDict
# import pickle
from itertools import islice
# NOTE: For python2
# import cPickle as pickle
# from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
class cnnMNIST(object):
def __init__(self):
self.use_gpu = False
self.lr = 1e-3
self.epochs = 31
self.runname = 'meh'
self.training_keep_prob = 1.0
self.build_graph()
self.dataset_filename = 'sequential_dataset_relabel_allseconds.h5'
def onehot_labels(self, labels):
out = np.zeros((labels.shape[0], 7))
for i in range(labels.shape[0]):
out[i, :] = np.eye(7, dtype=int)[int(labels[i])]
return out
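# Example: with 7 classes, a label of 2 becomes the row [0, 0, 1, 0, 0, 0, 0].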
def onenothot_labels(self, labels):
out = np.zeros((labels.shape[0],))
for i in range(labels.shape[0]):
out[i] = np.argmax(labels[i, :])
return out
def get_data(self):
# data_norm = True
# data_augmentation = False
f = h5py.File('../data/{}'.format(self.dataset_filename), 'r')
X = f['train']
X_test = f['test']
self.x_train = X
self.x_test = X_test
# NOTE: always use the keylist to get data
self.data_keylist = list(X.keys())
return
def batch(self, iterable, n=1, shuffle=True, small_test=True, usethesekeys = None, shortset=False):
if shuffle:
self.shuffle()
if usethesekeys is None:
keylist = self.data_keylist
else:
keylist = usethesekeys
if shortset:
keylist = usethesekeys[:50]
sequence_length = 16
# l = len(iterable)
for i in range(len(keylist)):
self.current_key = keylist[i]
x = np.array(iterable[keylist[i]]['measured_spectra'])
y = np.array(iterable[keylist[i]]['labels'])
index = np.arange(x.shape[0])
index_generator = self.window(index, n=sequence_length)
# tostore_spectra = np.zeros((0, sequence_length, 1024))
tostore_spectra = []
tostore_labels = []
for index_list in index_generator:
# tostore_spectra = np.concatenate((tostore_spectra, x[index_list, :].reshape((1, sequence_length, 1024))))
tostore_spectra += [x[index_list, :].reshape((1, sequence_length, 1024))]
tostore_labels += [y[list(index_list)[int(sequence_length / 2) - 1]]]
tostore_spectra = np.concatenate(tostore_spectra, axis=0)
tostore_labels = np.array(tostore_labels)
x = tostore_spectra
y = self.onehot_labels(tostore_labels)
self.current_batch_length = x.shape[0]
yield x, y
def memory_batch(self, iterable, n=1, shuffle=True, small_test=True, usethesekeys = None, shortset=False):
if shuffle:
self.shuffle()
if usethesekeys is None:
keylist = self.data_keylist
else:
keylist = usethesekeys
if shortset:
keylist = usethesekeys[:100]
max_batch_size = 32
sequence_length = 16
# l = len(iterable)
for i in range(len(keylist)):
self.current_key = keylist[i]
x = np.array(iterable[keylist[i]]['measured_spectra'])
y = np.array(iterable[keylist[i]]['labels'])
# mask = y >= 0.5
# y[mask] = 1
index = np.arange(x.shape[0])
index_generator = self.window(index, n=sequence_length)
# tostore_spectra = np.zeros((0, sequence_length, 1024))
tostore_spectra = []
tostore_labels = []
for index_list in index_generator:
# tostore_spectra = np.concatenate((tostore_spectra, x[index_list, :].reshape((1, sequence_length, 1024))))
tostore_spectra += [x[index_list, :].reshape((1, sequence_length, 1024))]
tostore_labels += [y[list(index_list)[-1]]]
tostore_spectra = np.concatenate(tostore_spectra, axis=0)
tostore_labels = np.array(tostore_labels)
self.howmanytimes = int(np.ceil(tostore_spectra.shape[0] / max_batch_size))
# self.remainder = tostore_spectra.shape[0] % max_batch_size
for j in range(self.howmanytimes + 1):
start = j * max_batch_size
end = ((j + 1) * max_batch_size)
if end > tostore_spectra.shape[0]:
end = tostore_spectra.shape[0]
x = tostore_spectra[start:end, :, :]
if x.shape[0] == 0:
continue
y = self.onehot_labels(tostore_labels[start:end])
yield x, y
# for j in range(self.current_batch_length):
# stuff = y[j,:]
# stuff = stuff.reshape((1, 7))
# yield x[j, :], stuff, z[j]
def memory_validation_batcher(self):
# f = h5py.File('./sequential_dataset_validation.h5', 'r')
# NOTE: for using cnnfeatures sequential dataset
# f = h5py.File('sequential_dataset_validation.h5', 'r')
f = h5py.File('../data/{}'.format('sequential_dataset_relabel_testset_validationonly.h5'), 'r')
g = f['validate']
samplelist = list(g.keys())
# samplelist = samplelist[:100]
sequence_length = 16
max_batch_size = 64
for i in range(len(samplelist)):
self.current_sample_name = samplelist[i]
data = np.array(g[samplelist[i]])
index = np.arange(data.shape[0])
index_generator = self.window(index, n=sequence_length)
# tostore_spectra = np.zeros((0, sequence_length, 1024))
tostore_spectra = []
for index_list in index_generator:
# tostore_spectra = np.concatenate((tostore_spectra, data[index_list, :].reshape((1, sequence_length, 1024))))
tostore_spectra += [data[index_list, :].reshape((1, sequence_length, 1024))]
# yield tostore_spectra, samplelist[i]
tostore_spectra = np.concatenate(tostore_spectra, axis=0)
self.howmanytimes = int(np.ceil(tostore_spectra.shape[0] / max_batch_size))
for j in range(self.howmanytimes + 1):
start = j * max_batch_size
end = (j + 1) * max_batch_size
if end > tostore_spectra.shape[0]:
end = tostore_spectra.shape[0]
x = tostore_spectra[start:end, :, :]
if x.shape[0] == 0:
continue
yield x
def archived_validation_batcher(self):
# f = h5py.File('./sequential_dataset_validation.h5', 'r')
# NOTE: for using cnnfeatures sequential dataset
# f = h5py.File('sequential_dataset_validation.h5', 'r')
try:
f = h5py.File('../data/sequential_dataset_relabel_testset_validationonly.h5', 'r')
except:
pass
g = f['validate']
samplelist = list(g.keys())
# samplelist = samplelist[:10]
sequence_length = 16
max_batch_size = 64
for i in range(len(samplelist)):
self.current_sample_name = samplelist[i]
data = np.array(g[samplelist[i]])
index = np.arange(data.shape[0])
index_generator = self.window(index, n=sequence_length)
tostore_spectra = np.zeros((0, sequence_length, 1024))
for index_list in index_generator:
tostore_spectra = np.concatenate((tostore_spectra, data[index_list, :].reshape((1, sequence_length, 1024))))
# yield tostore_spectra, samplelist[i]
self.howmanytimes = int(np.ceil(tostore_spectra.shape[0] / max_batch_size))
for j in range(self.howmanytimes + 1):
start = j * max_batch_size
end = (j + 1) * max_batch_size
if end > tostore_spectra.shape[0]:
end = tostore_spectra.shape[0]
x = tostore_spectra[start:end, :, :]
if x.shape[0] == 0:
continue
# validation samples carry no labels here, so only the spectra are yielded
yield x
def window(self, seq, n=2):
"Returns a sliding window (of width n) over data from the iterable"
" s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ... "
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
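# Example of the sliding window above: window([0, 1, 2, 3, 4], n=3) yields
# (0, 1, 2), (1, 2, 3), (2, 3, 4); each tuple overlaps the previous one by
# n-1 elements, which is how the 16-step spectra sequences are assembled.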
def build_graph(self):
# NOTE: CNN
# self.x = tf.placeholder(tf.float32, shape=[None, 1024])
# self.y_ = tf.placeholder(tf.float32, shape=[None, 7])
self.x = tf.placeholder(tf.float32, shape=[None, 16, 1024])
self.y_ = tf.placeholder(tf.float32, shape=[None, 7])
self.keep_prob = tf.placeholder(tf.float32, shape=[])
# self.weights = tf.placeholder(tf.float32, shape=[30])
feature_map1 = 10
feature_map2 = 20
feature_map3 = 20
feature_map4 = 20
num_units = 15
num_layers = 2
# x_image = self.hack_1dreshape(self.x)
# print(x_image.shape)
# define conv-layer variables
W_conv1 = self.weight_variable([3, 3, 1, feature_map1]) # first conv-layer has 32 kernels, size=5
b_conv1 = self.bias_variable([feature_map1])
x_expanded = tf.expand_dims(self.x, 3)
W_conv2 = self.weight_variable([3, 3, feature_map1, feature_map2])
b_conv2 = self.bias_variable([feature_map2])
W_conv3 = self.weight_variable([3, 3, feature_map2, feature_map3])
b_conv3 = self.bias_variable([feature_map3])
W_conv4 = self.weight_variable([3, 3, feature_map3, feature_map4])
b_conv4 = self.bias_variable([feature_map4])
# W_conv5 = self.weight_variable([3, 3, feature_map4, feature_map5])
# b_conv5 = self.bias_variable([feature_map5])
# W_conv6 = self.weight_variable([3, 3, feature_map5, feature_map6])
# b_conv6 = self.bias_variable([feature_map6])
# W_conv7 = self.weight_variable([3, 3, feature_map6, feature_map7])
# b_conv7 = self.bias_variable([feature_map7])
# W_conv8 = self.weight_variable([3, 3, feature_map7, feature_map8])
# b_conv8 = self.bias_variable([feature_map8])
# x_image = tf.reshape(self.x, [-1, 78, 78, 1])
h_conv1 = tf.nn.relu(self.conv2d(x_expanded, W_conv1) + b_conv1)
h_pool1 = self.max_pool(h_conv1, [1, 1, 4, 1], [1, 1, 4, 1])
h_pool1_dropped = tf.nn.dropout(h_pool1, self.keep_prob)
h_conv2 = tf.nn.relu(self.conv2d(h_pool1_dropped, W_conv2) + b_conv2)
h_pool2 = self.max_pool(h_conv2, [1, 1, 4, 1], [1, 1, 4, 1])
h_pool2_dropped = tf.nn.dropout(h_pool2, self.keep_prob)
h_conv3 = tf.nn.relu(self.conv2d(h_pool2_dropped, W_conv3) + b_conv3)
h_pool3 = self.max_pool(h_conv3, [1, 1, 4, 1], [1, 1, 4, 1])
h_pool3_dropped = tf.nn.dropout(h_pool3, self.keep_prob)
h_conv4 = tf.nn.relu(self.conv2d(h_pool3_dropped, W_conv4) + b_conv4)
h_pool4 = self.max_pool(h_conv4, [1, 1, 4, 1], [1, 1, 4, 1])
# h_pool4_dropped = tf.nn.dropout(h_pool4, self.keep_prob)
# h_conv5 = tf.nn.relu(self.conv2d(h_pool4_dropped, W_conv5) + b_conv5)
# h_pool5 = self.max_pool(h_conv5, [1, 1, 4, 1], [1, 1, 4, 1])
# h_pool5_dropped = tf.nn.dropout(h_pool5, self.keep_prob)
# h_conv6 = tf.nn.relu(self.conv2d(h_pool5_dropped, W_conv6) + b_conv6)
# h_pool6 = self.max_pool(h_conv6, [1, 1, 4, 1], [1, 1, 2, 1])
# h_pool6_dropped = tf.nn.dropout(h_pool6, self.keep_prob)
# h_conv7 = tf.nn.relu(self.conv2d(h_pool6_dropped, W_conv7) + b_conv7)
# h_pool7 = self.max_pool(h_conv7, [1, 1, 4, 1], [1, 1, 2, 1])
# h_pool7_dropped = tf.nn.dropout(h_pool7, self.keep_prob)
# h_conv8 = tf.nn.relu(self.conv2d(h_pool7_dropped, W_conv8) + b_conv8)
# h_pool8 = self.max_pool(h_conv8, [1, 1, 4, 1], [1, 1, 2, 1])
# h_pool8_dropped = tf.nn.dropout(h_pool8, self.keep_prob)
# h_conv9 = tf.nn.relu(self.conv2d(h_pool8_dropped, W_conv2) + b_conv2)
# h_pool9 = self.max_pool_2x2(h_conv9)
# h_pool9_dropped = tf.nn.dropout(h_pool9, self.keep_prob)
# h_pool8_flat = tf.reshape(h_pool8_dropped, [-1, 8, feature_map8 * 4]) # * 64])
h_pool4_flat = tf.reshape(h_pool4, [-1, 16, feature_map4 * 4])
cnn_output = h_pool4_flat
cells = []
for _ in range(num_layers):
cell = tf.contrib.rnn.GRUCell(num_units) # Or LSTMCell(num_units)
# cell = tf.contrib.rnn.DropoutWrapper(
# cell, output_keep_prob=self.keep_prob2)
cells.append(cell)
cell = tf.contrib.rnn.MultiRNNCell(cells)
# output, state = tf.nn.dynamic_rnn(cell, cnn_output, dtype=tf.float32)
output, _ = tf.nn.dynamic_rnn(cell, cnn_output, dtype=tf.float32)
output = tf.transpose(output, [1, 0, 2])
last = tf.gather(output, int(output.get_shape()[0]) - 1)
out_size = self.y_.get_shape()[1].value
# logit = tf.contrib.layers.fully_connected(
# last, out_size, activation_fn=None)
# self.y_conv = tf.nn.softmax(logit)
# self.loss = tf.reduce_sum(tf.losses.softmax_cross_entropy(self.y_, self.y_conv))
self.y_conv = tf.contrib.layers.fully_connected(last, out_size, activation_fn=None)
# classes_weights = tf.constant([1.0, 1.0])
# classes_weights = tf.constant([0.1, 0.6])
# classes_weights = tf.constant([0.1, 1.0]) # works ok after 300 epochs
# classes_weights = tf.constant([0.1, 1.5]) # I haven't tried this one yet.
# cross_entropy = tf.nn.weighted_cross_entropy_with_logits(logits=self.y_conv, targets=self.y_, pos_weight=classes_weights)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.y_conv, labels=self.y_)
# self.loss = tf.reduce_sum(cross_entropy)
self.loss = tf.reduce_mean(cross_entropy)
self.train_step = tf.train.AdamOptimizer(self.lr).minimize(self.loss)
# self.train_step = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)
def shuffle(self):
np.random.shuffle(self.data_keylist)
return
def train(self):
if self.use_gpu:
# use half of the gpu memory
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
else:
self.sess = tf.Session()
init = tf.global_variables_initializer()
self.sess.run(init)
self.eval() # creating evaluation
a = time.time()
for i in range(self.epochs):
if i % 5 == 0 and i != 0:
x_generator_test = self.memory_batch(self.x_test,
usethesekeys=list(self.x_test.keys()), shortset=True, shuffle=False)
# first, second = next(x_generator_test)
# for outerloop in range(self.howmanytimes):
first, second = next(x_generator_test)
test_acc = self.sess.run(self.accuracy, feed_dict={self.x: first,
self.y_: second, self.keep_prob: 1.0})
# self.weights: z})
train_acc, train_loss = self.sess.run([self.accuracy, self.loss],
feed_dict={self.x: x, self.y_: y, self.keep_prob: 1.0})
b = time.time()
print('step {}:\n train acc {} | test acc {}\ntime elapsed: {} s'.format(i, train_acc, test_acc, b - a))
# NOTE: QUick and dirty preprocessing. normalize to counts
# x = x / x.sum(axis=-1, keepdims=True)
x_generator = self.memory_batch(self.x_train, shuffle=True)
for x, y in x_generator:
self.sess.run([self.train_step], feed_dict={
self.x: x,
self.y_: y,
self.keep_prob: self.training_keep_prob})
# self.weights: z})
# self.shuffle()
def eval(self):
# self.time_index = np.arange(self.y_conv.get_shape()[0])
self.prediction = tf.argmax(self.y_conv, 1)
truth = tf.argmax(self.y_, 1)
correct_prediction = tf.equal(self.prediction, truth)
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# def test_eval(self):
# self.eval()
# x_generator = self.batch(self.x_test, n=100, shuffle=False)
# y_generator = self.batch(self.y_test, n=100, shuffle=False)
# test_acc = []
# counter = 0
# for data in x_generator:
# test_acc += [self.sess.run(self.accuracy, feed_dict={
# self.x: data, self.y_: next(y_generator), self.keep_prob: 1.0})]
# total_test_acc = sum(test_acc) / float(len(test_acc))
# print('test accuracy %g' % total_test_acc)
def weight_variable(self, shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(self, shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def hack_1dreshape(self, x):
# expand its dimensionality to fit into conv2d
tensor_expand = tf.expand_dims(x, -1)
# tensor_expand = tf.expand_dims(tensor_expand, 1)
return tensor_expand
def conv2d(self, x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool(self, x, ksize, strides):
return tf.nn.max_pool(x, ksize=ksize,
strides=strides, padding='SAME')
def max_pool_spectra(self, x):
return tf.nn.max_pool(x, ksize=[1, 1, 2, 1],
strides=[1, 1, 2, 1], padding='SAME')
def get_label_predictions(self):
x_batcher = self.batch(self.x_test, n=1000, shuffle=False,
usethesekeys=list(self.x_test.keys()))
# y_batcher = self.batch(self.y_test, n=1000, shuffle=False)
predictions = []
correct_predictions = np.zeros((0, 7))
counter = 0
a = time.time()
for x, y in x_batcher:
counter += 1
# x_features = x / x.sum(axis=-1, keepdims=True)
if counter % 1000 == 0:
print('label predictions done: {} in {} s'.format(counter, time.time() - a))
x_features = x
temp_predictions, score = self.sess.run(
[self.prediction, self.y_conv],
feed_dict={self.x: x_features,
self.keep_prob: 1.0})
predictions += temp_predictions.tolist()
correct_predictions = np.vstack((correct_predictions, y))
return predictions, correct_predictions, score
def save_obj(obj, name ):
with open('obj/'+ name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name ):
with open('obj/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
def group_consecutives(vals, step=1):
"""Return list of consecutive lists of numbers from vals (number list)."""
run = []
result = [run]
expect = None
for v in vals:
if (v == expect) or (expect is None):
run.append(v)
else:
run = [v]
result.append(run)
expect = v + step
return result
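# Example: group_consecutives([1, 2, 3, 7, 8, 10]) returns
# [[1, 2, 3], [7, 8], [10]], i.e. runs of consecutive integers stay together.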
def longest(l):
if len(l) == 0:
return None, None
# if(not isinstance(l, list)): return(0)
# return(max([len(l),] + [len(subl) for subl in l if isinstance(subl, list)] +
# [longest(subl) for subl in l]))
max_index = -1
max_length = 0
counter = 0
for item in l:
current_index = counter
current_length = len(item)
if current_length > max_length:
max_index = current_index
max_length = current_length
counter += 1
return max_index, max_length
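# Example: longest([[1, 2], [3, 4, 5]]) returns (1, 3): the index of the
# longest sub-list and its length; an empty input returns (None, None).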
def main():
cnn = cnnMNIST()
validate_please = True
# characterize = True
cnn.use_gpu = True
cnn.lr = 1e-3
cnn.epochs = 16
# slow on 0.25
cnn.training_keep_prob = 1.0
cnn.dataset_filename = 'sequential_dataset_relabel_allseconds.h5'
cnn.runname = 'cnndetalt3_relabel_lr{}_ep{}_data{}'.format(cnn.lr, cnn.epochs, cnn.dataset_filename)
runname = cnn.runname
a = time.time()
print('Retrieving data')
cnn.get_data()
b = time.time()
print('Built the data in {} s'.format(b-a))
a = time.time()
cnn.train()
b = time.time()
print('Training time: {} s'.format(b-a))
# cnn.test_eval()
# if characterize:
# predictions, y, score = cnn.get_label_predictions()
# predictions_decode = predictions
# labels_decode = cnn.onenothot_labels(y)
# np.save('{}_predictions.npy'.format(runname), predictions_decode)
# np.save('{}_ground_truth.npy'.format(runname), labels_decode)
# print('Confusion matrix data saved')
if validate_please:
validation_data = cnn.memory_validation_batcher()
answers = open('approach3_answers_crnn_{}.csv'.format(cnn.epochs), 'w')
answers.write('RunID,SourceID,SourceTime,Comment\n')
counter = 0
toggle = 0
temp_x = []
for sample in validation_data:
x = sample
temp_spectra = np.squeeze(x[:, 8, :])
if len(temp_spectra.shape) == 1:
temp_spectra = np.expand_dims(temp_spectra, axis=0)
temp_x += [temp_spectra]
if toggle == 0:
predictions = cnn.sess.run(
cnn.prediction,
feed_dict = {cnn.x: x,
cnn.keep_prob: 1.0})
else:
predictions = np.concatenate((predictions, cnn.sess.run(
cnn.prediction,
feed_dict = {cnn.x: x,
cnn.keep_prob: 1.0})))
toggle += 1
if toggle == cnn.howmanytimes:
temp_x = np.array(temp_x)
temp_x = np.concatenate(temp_x, axis=0)
predictions = np.array(predictions)
predictions = predictions.flatten()
time_index = np.arange(predictions.shape[0])
mask = predictions >= 0.5
if np.sum(mask) != 0:
machine = np.argwhere(mask == True)
grouping = group_consecutives(machine)
indicies = max(grouping, key=len)
counts = np.sum(temp_x, axis=1)
indicies = [int(i) for i in indicies]
if len(indicies) > 5:
t = time_index[indicies]
current_predictions = predictions[indicies]
t = [int(i) for i in t]
index_guess = np.argmax(counts[t])
current_time = t[index_guess] + 8
answers.write('{},{},{},\n'.format(
cnn.current_sample_name, current_predictions[index_guess], t[index_guess]))
else:
answers.write('{},0,0,\n'.format(cnn.current_sample_name))
else:
answers.write('{},0,0,\n'.format(cnn.current_sample_name))
predictions = []
temp_x = []
toggle = 0
counter += 1
answers.close()
return
main()
|
webrecorder/webrecorder
|
refs/heads/master
|
webrecorder/test/test_no_anon.py
|
1
|
import os
from .testutils import BaseWRTests
import pytest
# ============================================================================
@pytest.fixture(params=['/record/http://example.com/',
'/_new/foo/rec-sesh/record/http://example.com/',
'/_new/foo/rec-sesh/record/mp_/http://example.com/',
'/_new/temp/rec-sesh/extract/mp_/http://example.com/'])
def url(request):
return request.param
# ============================================================================
class TestNoAnon(BaseWRTests):
@classmethod
def setup_class(cls):
os.environ['ANON_DISABLED'] = '1'
super(TestNoAnon, cls).setup_class()
@classmethod
def teardown_class(cls):
super(TestNoAnon, cls).teardown_class()
os.environ['ANON_DISABLED'] = '0'
def test_anon_rec_disabled(self, url):
res = self.testapp.get(url)
assert res.status_code == 302
assert res.headers['Location'] == 'http://localhost:80/'
res = res.follow()
assert 'anonymous recording is not available' in res.text
assert '<input>' not in res.text
|
tushar7795/MicroBlog
|
refs/heads/master
|
flask/lib/python2.7/site-packages/babel/dates.py
|
8
|
# -*- coding: utf-8 -*-
"""
babel.dates
~~~~~~~~~~~
Locale dependent formatting and parsing of dates and times.
The default locale for the functions in this module is determined by the
following environment variables, in that order:
* ``LC_TIME``,
* ``LC_ALL``, and
* ``LANG``
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
from __future__ import division
import re
import warnings
import pytz as _pytz
from datetime import date, datetime, time, timedelta
from bisect import bisect_right
from babel.core import default_locale, get_global, Locale
from babel.util import UTC, LOCALTZ
from babel._compat import string_types, integer_types, number_types
LC_TIME = default_locale('LC_TIME')
# Aliases for use in scopes where the modules are shadowed by local variables
date_ = date
datetime_ = datetime
time_ = time
def get_timezone(zone=None):
"""Looks up a timezone by name and returns it. The timezone object
returned comes from ``pytz`` and corresponds to the `tzinfo` interface and
can be used with all of the functions of Babel that operate with dates.
If a timezone is not known a :exc:`LookupError` is raised. If `zone`
is ``None`` a local zone object is returned.
:param zone: the name of the timezone to look up. If a timezone object
                 itself is passed in, it's returned unchanged.
"""
if zone is None:
return LOCALTZ
if not isinstance(zone, string_types):
return zone
try:
return _pytz.timezone(zone)
except _pytz.UnknownTimeZoneError:
raise LookupError('Unknown timezone %s' % zone)
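# Usage sketch (comment added for clarity; not part of the upstream babel source):
#     get_timezone('Europe/Berlin')   # -> pytz tzinfo whose .zone is 'Europe/Berlin'
#     get_timezone(None)              # -> LOCALTZ, the local timezone object
#     get_timezone('Not/A_Zone')      # -> raises LookupError('Unknown timezone Not/A_Zone')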
def get_next_timezone_transition(zone=None, dt=None):
"""Given a timezone it will return a :class:`TimezoneTransition` object
that holds the information about the next timezone transition that's going
to happen. For instance this can be used to detect when the next DST
    change is going to happen and what it looks like.
The transition is calculated relative to the given datetime object. The
next transition that follows the date is used. If a transition cannot
be found the return value will be `None`.
Transition information can only be provided for timezones returned by
the :func:`get_timezone` function.
:param zone: the timezone for which the transition should be looked up.
If not provided the local timezone is used.
:param dt: the date after which the next transition should be found.
If not given the current time is assumed.
"""
zone = get_timezone(zone)
if dt is None:
dt = datetime.utcnow()
else:
dt = dt.replace(tzinfo=None)
if not hasattr(zone, '_utc_transition_times'):
raise TypeError('Given timezone does not have UTC transition '
'times. This can happen because the operating '
'system fallback local timezone is used or a '
'custom timezone object')
try:
idx = max(0, bisect_right(zone._utc_transition_times, dt))
old_trans = zone._transition_info[idx - 1]
new_trans = zone._transition_info[idx]
old_tz = zone._tzinfos[old_trans]
new_tz = zone._tzinfos[new_trans]
except (LookupError, ValueError):
return None
return TimezoneTransition(
activates=zone._utc_transition_times[idx],
from_tzinfo=old_tz,
to_tzinfo=new_tz,
reference_date=dt
)
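# Usage sketch (comment added for clarity; not part of the upstream babel source):
#     tz = get_timezone('Europe/Berlin')
#     trans = get_next_timezone_transition(tz, datetime(2007, 1, 15))
#     # trans.from_tz == 'CET', trans.to_tz == 'CEST', and trans.activates is the
#     # UTC instant of the spring DST switch (see TimezoneTransition below).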
class TimezoneTransition(object):
"""A helper object that represents the return value from
:func:`get_next_timezone_transition`.
"""
def __init__(self, activates, from_tzinfo, to_tzinfo, reference_date=None):
#: the time of the activation of the timezone transition in UTC.
self.activates = activates
#: the timezone from where the transition starts.
self.from_tzinfo = from_tzinfo
#: the timezone for after the transition.
self.to_tzinfo = to_tzinfo
#: the reference date that was provided. This is the `dt` parameter
#: to the :func:`get_next_timezone_transition`.
self.reference_date = reference_date
@property
def from_tz(self):
"""The name of the timezone before the transition."""
return self.from_tzinfo._tzname
@property
def to_tz(self):
"""The name of the timezone after the transition."""
return self.to_tzinfo._tzname
@property
def from_offset(self):
"""The UTC offset in seconds before the transition."""
return int(self.from_tzinfo._utcoffset.total_seconds())
@property
def to_offset(self):
"""The UTC offset in seconds after the transition."""
return int(self.to_tzinfo._utcoffset.total_seconds())
def __repr__(self):
return '<TimezoneTransition %s -> %s (%s)>' % (
self.from_tz,
self.to_tz,
self.activates,
)
def get_period_names(locale=LC_TIME):
"""Return the names for day periods (AM/PM) used by the locale.
>>> get_period_names(locale='en_US')['am']
u'AM'
:param locale: the `Locale` object, or a locale string
"""
return Locale.parse(locale).periods
def get_day_names(width='wide', context='format', locale=LC_TIME):
"""Return the day names used by the locale for the specified format.
>>> get_day_names('wide', locale='en_US')[1]
u'Tuesday'
>>> get_day_names('abbreviated', locale='es')[1]
u'mar.'
>>> get_day_names('narrow', context='stand-alone', locale='de_DE')[1]
u'D'
:param width: the width to use, one of "wide", "abbreviated", or "narrow"
:param context: the context, either "format" or "stand-alone"
:param locale: the `Locale` object, or a locale string
"""
return Locale.parse(locale).days[context][width]
def get_month_names(width='wide', context='format', locale=LC_TIME):
"""Return the month names used by the locale for the specified format.
>>> get_month_names('wide', locale='en_US')[1]
u'January'
>>> get_month_names('abbreviated', locale='es')[1]
u'ene.'
>>> get_month_names('narrow', context='stand-alone', locale='de_DE')[1]
u'J'
:param width: the width to use, one of "wide", "abbreviated", or "narrow"
:param context: the context, either "format" or "stand-alone"
:param locale: the `Locale` object, or a locale string
"""
return Locale.parse(locale).months[context][width]
def get_quarter_names(width='wide', context='format', locale=LC_TIME):
"""Return the quarter names used by the locale for the specified format.
>>> get_quarter_names('wide', locale='en_US')[1]
u'1st quarter'
>>> get_quarter_names('abbreviated', locale='de_DE')[1]
u'Q1'
:param width: the width to use, one of "wide", "abbreviated", or "narrow"
:param context: the context, either "format" or "stand-alone"
:param locale: the `Locale` object, or a locale string
"""
return Locale.parse(locale).quarters[context][width]
def get_era_names(width='wide', locale=LC_TIME):
"""Return the era names used by the locale for the specified format.
>>> get_era_names('wide', locale='en_US')[1]
u'Anno Domini'
>>> get_era_names('abbreviated', locale='de_DE')[1]
u'n. Chr.'
:param width: the width to use, either "wide", "abbreviated", or "narrow"
:param locale: the `Locale` object, or a locale string
"""
return Locale.parse(locale).eras[width]
def get_date_format(format='medium', locale=LC_TIME):
"""Return the date formatting patterns used by the locale for the specified
format.
>>> get_date_format(locale='en_US')
<DateTimePattern u'MMM d, y'>
>>> get_date_format('full', locale='de_DE')
<DateTimePattern u'EEEE, d. MMMM y'>
:param format: the format to use, one of "full", "long", "medium", or
"short"
:param locale: the `Locale` object, or a locale string
"""
return Locale.parse(locale).date_formats[format]
def get_datetime_format(format='medium', locale=LC_TIME):
"""Return the datetime formatting patterns used by the locale for the
specified format.
>>> get_datetime_format(locale='en_US')
u'{1}, {0}'
:param format: the format to use, one of "full", "long", "medium", or
"short"
:param locale: the `Locale` object, or a locale string
"""
patterns = Locale.parse(locale).datetime_formats
if format not in patterns:
format = None
return patterns[format]
def get_time_format(format='medium', locale=LC_TIME):
"""Return the time formatting patterns used by the locale for the specified
format.
>>> get_time_format(locale='en_US')
<DateTimePattern u'h:mm:ss a'>
>>> get_time_format('full', locale='de_DE')
<DateTimePattern u'HH:mm:ss zzzz'>
:param format: the format to use, one of "full", "long", "medium", or
"short"
:param locale: the `Locale` object, or a locale string
"""
return Locale.parse(locale).time_formats[format]
def get_timezone_gmt(datetime=None, width='long', locale=LC_TIME):
"""Return the timezone associated with the given `datetime` object formatted
    as a string indicating the offset from GMT.
>>> dt = datetime(2007, 4, 1, 15, 30)
>>> get_timezone_gmt(dt, locale='en')
u'GMT+00:00'
>>> tz = get_timezone('America/Los_Angeles')
>>> dt = tz.localize(datetime(2007, 4, 1, 15, 30))
>>> get_timezone_gmt(dt, locale='en')
u'GMT-07:00'
>>> get_timezone_gmt(dt, 'short', locale='en')
u'-0700'
The long format depends on the locale, for example in France the acronym
UTC string is used instead of GMT:
>>> get_timezone_gmt(dt, 'long', locale='fr_FR')
u'UTC-07:00'
.. versionadded:: 0.9
:param datetime: the ``datetime`` object; if `None`, the current date and
time in UTC is used
:param width: either "long" or "short"
:param locale: the `Locale` object, or a locale string
"""
if datetime is None:
datetime = datetime_.utcnow()
elif isinstance(datetime, integer_types):
datetime = datetime_.utcfromtimestamp(datetime).time()
if datetime.tzinfo is None:
datetime = datetime.replace(tzinfo=UTC)
locale = Locale.parse(locale)
offset = datetime.tzinfo.utcoffset(datetime)
seconds = offset.days * 24 * 60 * 60 + offset.seconds
hours, seconds = divmod(seconds, 3600)
if width == 'short':
pattern = u'%+03d%02d'
else:
pattern = locale.zone_formats['gmt'] % '%+03d:%02d'
return pattern % (hours, seconds // 60)
def get_timezone_location(dt_or_tzinfo=None, locale=LC_TIME):
u"""Return a representation of the given timezone using "location format".
The result depends on both the local display name of the country and the
city associated with the time zone:
>>> tz = get_timezone('America/St_Johns')
>>> print(get_timezone_location(tz, locale='de_DE'))
Kanada (St. John’s) Zeit
>>> tz = get_timezone('America/Mexico_City')
>>> get_timezone_location(tz, locale='de_DE')
u'Mexiko (Mexiko-Stadt) Zeit'
If the timezone is associated with a country that uses only a single
timezone, just the localized country name is returned:
>>> tz = get_timezone('Europe/Berlin')
>>> get_timezone_name(tz, locale='de_DE')
u'Mitteleurop\\xe4ische Zeit'
.. versionadded:: 0.9
:param dt_or_tzinfo: the ``datetime`` or ``tzinfo`` object that determines
the timezone; if `None`, the current date and time in
UTC is assumed
:param locale: the `Locale` object, or a locale string
:return: the localized timezone name using location format
"""
if dt_or_tzinfo is None:
dt = datetime.now()
tzinfo = LOCALTZ
elif isinstance(dt_or_tzinfo, string_types):
dt = None
tzinfo = get_timezone(dt_or_tzinfo)
elif isinstance(dt_or_tzinfo, integer_types):
dt = None
tzinfo = UTC
elif isinstance(dt_or_tzinfo, (datetime, time)):
dt = dt_or_tzinfo
if dt.tzinfo is not None:
tzinfo = dt.tzinfo
else:
tzinfo = UTC
else:
dt = None
tzinfo = dt_or_tzinfo
locale = Locale.parse(locale)
if hasattr(tzinfo, 'zone'):
zone = tzinfo.zone
else:
zone = tzinfo.tzname(dt or datetime.utcnow())
# Get the canonical time-zone code
zone = get_global('zone_aliases').get(zone, zone)
info = locale.time_zones.get(zone, {})
# Otherwise, if there is only one timezone for the country, return the
# localized country name
region_format = locale.zone_formats['region']
territory = get_global('zone_territories').get(zone)
if territory not in locale.territories:
territory = 'ZZ' # invalid/unknown
territory_name = locale.territories[territory]
if territory and len(get_global('territory_zones').get(territory, [])) == 1:
return region_format % (territory_name)
# Otherwise, include the city in the output
fallback_format = locale.zone_formats['fallback']
if 'city' in info:
city_name = info['city']
else:
metazone = get_global('meta_zones').get(zone)
metazone_info = locale.meta_zones.get(metazone, {})
if 'city' in metazone_info:
city_name = metazone_info['city']
elif '/' in zone:
city_name = zone.split('/', 1)[1].replace('_', ' ')
else:
city_name = zone.replace('_', ' ')
return region_format % (fallback_format % {
'0': city_name,
'1': territory_name
})
def get_timezone_name(dt_or_tzinfo=None, width='long', uncommon=False,
locale=LC_TIME, zone_variant=None):
r"""Return the localized display name for the given timezone. The timezone
may be specified using a ``datetime`` or `tzinfo` object.
>>> dt = time(15, 30, tzinfo=get_timezone('America/Los_Angeles'))
>>> get_timezone_name(dt, locale='en_US')
u'Pacific Standard Time'
>>> get_timezone_name(dt, width='short', locale='en_US')
u'PST'
If this function gets passed only a `tzinfo` object and no concrete
    `datetime`, the returned display name is independent of daylight savings
time. This can be used for example for selecting timezones, or to set the
time of events that recur across DST changes:
>>> tz = get_timezone('America/Los_Angeles')
>>> get_timezone_name(tz, locale='en_US')
u'Pacific Time'
>>> get_timezone_name(tz, 'short', locale='en_US')
u'PT'
If no localized display name for the timezone is available, and the timezone
is associated with a country that uses only a single timezone, the name of
that country is returned, formatted according to the locale:
>>> tz = get_timezone('Europe/Berlin')
>>> get_timezone_name(tz, locale='de_DE')
u'Mitteleurop\xe4ische Zeit'
>>> get_timezone_name(tz, locale='pt_BR')
u'Hor\xe1rio da Europa Central'
On the other hand, if the country uses multiple timezones, the city is also
included in the representation:
>>> tz = get_timezone('America/St_Johns')
>>> get_timezone_name(tz, locale='de_DE')
u'Neufundland-Zeit'
Note that short format is currently not supported for all timezones and
all locales. This is partially because not every timezone has a short
code in every locale. In that case it currently falls back to the long
format.
For more information see `LDML Appendix J: Time Zone Display Names
<http://www.unicode.org/reports/tr35/#Time_Zone_Fallback>`_
.. versionadded:: 0.9
.. versionchanged:: 1.0
Added `zone_variant` support.
:param dt_or_tzinfo: the ``datetime`` or ``tzinfo`` object that determines
the timezone; if a ``tzinfo`` object is used, the
resulting display name will be generic, i.e.
independent of daylight savings time; if `None`, the
current date in UTC is assumed
:param width: either "long" or "short"
:param uncommon: deprecated and ignored
:param zone_variant: defines the zone variation to return. By default the
variation is defined from the datetime object
passed in. If no datetime object is passed in, the
``'generic'`` variation is assumed. The following
values are valid: ``'generic'``, ``'daylight'`` and
``'standard'``.
:param locale: the `Locale` object, or a locale string
"""
if dt_or_tzinfo is None:
dt = datetime.now()
tzinfo = LOCALTZ
elif isinstance(dt_or_tzinfo, string_types):
dt = None
tzinfo = get_timezone(dt_or_tzinfo)
elif isinstance(dt_or_tzinfo, integer_types):
dt = None
tzinfo = UTC
elif isinstance(dt_or_tzinfo, (datetime, time)):
dt = dt_or_tzinfo
if dt.tzinfo is not None:
tzinfo = dt.tzinfo
else:
tzinfo = UTC
else:
dt = None
tzinfo = dt_or_tzinfo
locale = Locale.parse(locale)
if hasattr(tzinfo, 'zone'):
zone = tzinfo.zone
else:
zone = tzinfo.tzname(dt)
if zone_variant is None:
if dt is None:
zone_variant = 'generic'
else:
dst = tzinfo.dst(dt)
if dst:
zone_variant = 'daylight'
else:
zone_variant = 'standard'
else:
if zone_variant not in ('generic', 'standard', 'daylight'):
raise ValueError('Invalid zone variation')
# Get the canonical time-zone code
zone = get_global('zone_aliases').get(zone, zone)
info = locale.time_zones.get(zone, {})
# Try explicitly translated zone names first
if width in info:
if zone_variant in info[width]:
return info[width][zone_variant]
metazone = get_global('meta_zones').get(zone)
if metazone:
metazone_info = locale.meta_zones.get(metazone, {})
if width in metazone_info:
if zone_variant in metazone_info[width]:
return metazone_info[width][zone_variant]
# If we have a concrete datetime, we assume that the result can't be
# independent of daylight savings time, so we return the GMT offset
if dt is not None:
return get_timezone_gmt(dt, width=width, locale=locale)
return get_timezone_location(dt_or_tzinfo, locale=locale)
def format_date(date=None, format='medium', locale=LC_TIME):
"""Return a date formatted according to the given pattern.
>>> d = date(2007, 4, 1)
>>> format_date(d, locale='en_US')
u'Apr 1, 2007'
>>> format_date(d, format='full', locale='de_DE')
u'Sonntag, 1. April 2007'
If you don't want to use the locale default formats, you can specify a
custom date pattern:
>>> format_date(d, "EEE, MMM d, ''yy", locale='en')
u"Sun, Apr 1, '07"
:param date: the ``date`` or ``datetime`` object; if `None`, the current
date is used
:param format: one of "full", "long", "medium", or "short", or a custom
date/time pattern
:param locale: a `Locale` object or a locale identifier
"""
if date is None:
date = date_.today()
elif isinstance(date, datetime):
date = date.date()
locale = Locale.parse(locale)
if format in ('full', 'long', 'medium', 'short'):
format = get_date_format(format, locale=locale)
pattern = parse_pattern(format)
return pattern.apply(date, locale)
def format_datetime(datetime=None, format='medium', tzinfo=None,
locale=LC_TIME):
r"""Return a date formatted according to the given pattern.
>>> dt = datetime(2007, 4, 1, 15, 30)
>>> format_datetime(dt, locale='en_US')
u'Apr 1, 2007, 3:30:00 PM'
For any pattern requiring the display of the time-zone, the third-party
``pytz`` package is needed to explicitly specify the time-zone:
>>> format_datetime(dt, 'full', tzinfo=get_timezone('Europe/Paris'),
... locale='fr_FR')
u'dimanche 1 avril 2007 \xe0 17:30:00 heure d\u2019\xe9t\xe9 d\u2019Europe centrale'
>>> format_datetime(dt, "yyyy.MM.dd G 'at' HH:mm:ss zzz",
... tzinfo=get_timezone('US/Eastern'), locale='en')
u'2007.04.01 AD at 11:30:00 EDT'
:param datetime: the `datetime` object; if `None`, the current date and
time is used
:param format: one of "full", "long", "medium", or "short", or a custom
date/time pattern
:param tzinfo: the timezone to apply to the time for display
:param locale: a `Locale` object or a locale identifier
"""
if datetime is None:
datetime = datetime_.utcnow()
elif isinstance(datetime, number_types):
datetime = datetime_.utcfromtimestamp(datetime)
elif isinstance(datetime, time):
datetime = datetime_.combine(date.today(), datetime)
if datetime.tzinfo is None:
datetime = datetime.replace(tzinfo=UTC)
if tzinfo is not None:
datetime = datetime.astimezone(get_timezone(tzinfo))
if hasattr(tzinfo, 'normalize'): # pytz
datetime = tzinfo.normalize(datetime)
locale = Locale.parse(locale)
if format in ('full', 'long', 'medium', 'short'):
return get_datetime_format(format, locale=locale) \
.replace("'", "") \
.replace('{0}', format_time(datetime, format, tzinfo=None,
locale=locale)) \
.replace('{1}', format_date(datetime, format, locale=locale))
else:
return parse_pattern(format).apply(datetime, locale)
def format_time(time=None, format='medium', tzinfo=None, locale=LC_TIME):
r"""Return a time formatted according to the given pattern.
>>> t = time(15, 30)
>>> format_time(t, locale='en_US')
u'3:30:00 PM'
>>> format_time(t, format='short', locale='de_DE')
u'15:30'
If you don't want to use the locale default formats, you can specify a
custom time pattern:
>>> format_time(t, "hh 'o''clock' a", locale='en')
u"03 o'clock PM"
For any pattern requiring the display of the time-zone a
timezone has to be specified explicitly:
>>> t = datetime(2007, 4, 1, 15, 30)
>>> tzinfo = get_timezone('Europe/Paris')
>>> t = tzinfo.localize(t)
>>> format_time(t, format='full', tzinfo=tzinfo, locale='fr_FR')
u'15:30:00 heure d\u2019\xe9t\xe9 d\u2019Europe centrale'
>>> format_time(t, "hh 'o''clock' a, zzzz", tzinfo=get_timezone('US/Eastern'),
... locale='en')
u"09 o'clock AM, Eastern Daylight Time"
As that example shows, when this function gets passed a
``datetime.datetime`` value, the actual time in the formatted string is
adjusted to the timezone specified by the `tzinfo` parameter. If the
``datetime`` is "naive" (i.e. it has no associated timezone information),
it is assumed to be in UTC.
These timezone calculations are **not** performed if the value is of type
``datetime.time``, as without date information there's no way to determine
what a given time would translate to in a different timezone without
information about whether daylight savings time is in effect or not. This
means that time values are left as-is, and the value of the `tzinfo`
parameter is only used to display the timezone name if needed:
>>> t = time(15, 30)
>>> format_time(t, format='full', tzinfo=get_timezone('Europe/Paris'),
... locale='fr_FR')
u'15:30:00 heure normale d\u2019Europe centrale'
>>> format_time(t, format='full', tzinfo=get_timezone('US/Eastern'),
... locale='en_US')
u'3:30:00 PM Eastern Standard Time'
:param time: the ``time`` or ``datetime`` object; if `None`, the current
time in UTC is used
:param format: one of "full", "long", "medium", or "short", or a custom
date/time pattern
:param tzinfo: the time-zone to apply to the time for display
:param locale: a `Locale` object or a locale identifier
"""
if time is None:
time = datetime.utcnow()
elif isinstance(time, number_types):
time = datetime.utcfromtimestamp(time)
if time.tzinfo is None:
time = time.replace(tzinfo=UTC)
if isinstance(time, datetime):
if tzinfo is not None:
time = time.astimezone(tzinfo)
if hasattr(tzinfo, 'normalize'): # pytz
time = tzinfo.normalize(time)
time = time.timetz()
elif tzinfo is not None:
time = time.replace(tzinfo=tzinfo)
locale = Locale.parse(locale)
if format in ('full', 'long', 'medium', 'short'):
format = get_time_format(format, locale=locale)
return parse_pattern(format).apply(time, locale)
TIMEDELTA_UNITS = (
('year', 3600 * 24 * 365),
('month', 3600 * 24 * 30),
('week', 3600 * 24 * 7),
('day', 3600 * 24),
('hour', 3600),
('minute', 60),
('second', 1)
)
def format_timedelta(delta, granularity='second', threshold=.85,
add_direction=False, format='long',
locale=LC_TIME):
"""Return a time delta according to the rules of the given locale.
>>> format_timedelta(timedelta(weeks=12), locale='en_US')
u'3 months'
>>> format_timedelta(timedelta(seconds=1), locale='es')
u'1 segundo'
The granularity parameter can be provided to alter the lowest unit
presented, which defaults to a second.
>>> format_timedelta(timedelta(hours=3), granularity='day',
... locale='en_US')
u'1 day'
The threshold parameter can be used to determine at which value the
presentation switches to the next higher unit. A higher threshold factor
means the presentation will switch later. For example:
>>> format_timedelta(timedelta(hours=23), threshold=0.9, locale='en_US')
u'1 day'
>>> format_timedelta(timedelta(hours=23), threshold=1.1, locale='en_US')
u'23 hours'
In addition directional information can be provided that informs
the user if the date is in the past or in the future:
>>> format_timedelta(timedelta(hours=1), add_direction=True, locale='en')
u'in 1 hour'
>>> format_timedelta(timedelta(hours=-1), add_direction=True, locale='en')
u'1 hour ago'
The format parameter controls how compact or wide the presentation is:
>>> format_timedelta(timedelta(hours=3), format='short', locale='en')
u'3 hr'
>>> format_timedelta(timedelta(hours=3), format='narrow', locale='en')
u'3h'
:param delta: a ``timedelta`` object representing the time difference to
format, or the delta in seconds as an `int` value
:param granularity: determines the smallest unit that should be displayed,
the value can be one of "year", "month", "week", "day",
"hour", "minute" or "second"
:param threshold: factor that determines at which point the presentation
switches to the next higher unit
:param add_direction: if this flag is set to `True` the return value will
include directional information. For instance a
positive timedelta will include the information about
it being in the future, a negative will be information
about the value being in the past.
:param format: the format, can be "narrow", "short" or "long". (
"medium" is deprecated, currently converted to "long" to
maintain compatibility)
:param locale: a `Locale` object or a locale identifier
"""
if format not in ('narrow', 'short', 'medium', 'long'):
raise TypeError('Format must be one of "narrow", "short" or "long"')
if format == 'medium':
warnings.warn('"medium" value for format param of format_timedelta'
' is deprecated. Use "long" instead',
category=DeprecationWarning)
format = 'long'
if isinstance(delta, timedelta):
seconds = int((delta.days * 86400) + delta.seconds)
else:
seconds = delta
locale = Locale.parse(locale)
def _iter_patterns(a_unit):
if add_direction:
unit_rel_patterns = locale._data['date_fields'][a_unit]
if seconds >= 0:
yield unit_rel_patterns['future']
else:
yield unit_rel_patterns['past']
a_unit = 'duration-' + a_unit
yield locale._data['unit_patterns'].get(a_unit + ':' + format)
yield locale._data['unit_patterns'].get(a_unit)
for unit, secs_per_unit in TIMEDELTA_UNITS:
value = abs(seconds) / secs_per_unit
if value >= threshold or unit == granularity:
if unit == granularity and value > 0:
value = max(1, value)
value = int(round(value))
plural_form = locale.plural_form(value)
pattern = None
for patterns in _iter_patterns(unit):
if patterns is not None:
pattern = patterns[plural_form]
break
# This really should not happen
if pattern is None:
return u''
return pattern.replace('{0}', str(value))
return u''
def parse_date(string, locale=LC_TIME):
"""Parse a date from a string.
This function uses the date format for the locale as a hint to determine
the order in which the date fields appear in the string.
>>> parse_date('4/1/04', locale='en_US')
datetime.date(2004, 4, 1)
>>> parse_date('01.04.2004', locale='de_DE')
datetime.date(2004, 4, 1)
:param string: the string containing the date
:param locale: a `Locale` object or a locale identifier
"""
# TODO: try ISO format first?
format = get_date_format(locale=locale).pattern.lower()
year_idx = format.index('y')
month_idx = format.index('m')
if month_idx < 0:
month_idx = format.index('l')
day_idx = format.index('d')
indexes = [(year_idx, 'Y'), (month_idx, 'M'), (day_idx, 'D')]
indexes.sort()
indexes = dict([(item[1], idx) for idx, item in enumerate(indexes)])
# FIXME: this currently only supports numbers, but should also support month
# names, both in the requested locale, and english
numbers = re.findall('(\d+)', string)
year = numbers[indexes['Y']]
if len(year) == 2:
year = 2000 + int(year)
else:
year = int(year)
month = int(numbers[indexes['M']])
day = int(numbers[indexes['D']])
if month > 12:
month, day = day, month
return date(year, month, day)
def parse_time(string, locale=LC_TIME):
"""Parse a time from a string.
This function uses the time format for the locale as a hint to determine
the order in which the time fields appear in the string.
>>> parse_time('15:30:00', locale='en_US')
datetime.time(15, 30)
:param string: the string containing the time
:param locale: a `Locale` object or a locale identifier
:return: the parsed time
:rtype: `time`
"""
# TODO: try ISO format first?
format = get_time_format(locale=locale).pattern.lower()
hour_idx = format.index('h')
if hour_idx < 0:
hour_idx = format.index('k')
min_idx = format.index('m')
sec_idx = format.index('s')
indexes = [(hour_idx, 'H'), (min_idx, 'M'), (sec_idx, 'S')]
indexes.sort()
indexes = dict([(item[1], idx) for idx, item in enumerate(indexes)])
# FIXME: support 12 hour clock, and 0-based hour specification
# and seconds should be optional, maybe minutes too
# oh, and time-zones, of course
numbers = re.findall('(\d+)', string)
hour = int(numbers[indexes['H']])
minute = int(numbers[indexes['M']])
second = int(numbers[indexes['S']])
return time(hour, minute, second)
class DateTimePattern(object):
def __init__(self, pattern, format):
self.pattern = pattern
self.format = format
def __repr__(self):
return '<%s %r>' % (type(self).__name__, self.pattern)
def __unicode__(self):
return self.pattern
def __mod__(self, other):
if type(other) is not DateTimeFormat:
return NotImplemented
return self.format % other
def apply(self, datetime, locale):
return self % DateTimeFormat(datetime, locale)
class DateTimeFormat(object):
def __init__(self, value, locale):
assert isinstance(value, (date, datetime, time))
if isinstance(value, (datetime, time)) and value.tzinfo is None:
value = value.replace(tzinfo=UTC)
self.value = value
self.locale = Locale.parse(locale)
def __getitem__(self, name):
char = name[0]
num = len(name)
if char == 'G':
return self.format_era(char, num)
elif char in ('y', 'Y', 'u'):
return self.format_year(char, num)
elif char in ('Q', 'q'):
return self.format_quarter(char, num)
elif char in ('M', 'L'):
return self.format_month(char, num)
elif char in ('w', 'W'):
return self.format_week(char, num)
elif char == 'd':
return self.format(self.value.day, num)
elif char == 'D':
return self.format_day_of_year(num)
elif char == 'F':
return self.format_day_of_week_in_month()
elif char in ('E', 'e', 'c'):
return self.format_weekday(char, num)
elif char == 'a':
return self.format_period(char)
elif char == 'h':
if self.value.hour % 12 == 0:
return self.format(12, num)
else:
return self.format(self.value.hour % 12, num)
elif char == 'H':
return self.format(self.value.hour, num)
elif char == 'K':
return self.format(self.value.hour % 12, num)
elif char == 'k':
if self.value.hour == 0:
return self.format(24, num)
else:
return self.format(self.value.hour, num)
elif char == 'm':
return self.format(self.value.minute, num)
elif char == 's':
return self.format(self.value.second, num)
elif char == 'S':
return self.format_frac_seconds(num)
elif char == 'A':
return self.format_milliseconds_in_day(num)
elif char in ('z', 'Z', 'v', 'V'):
return self.format_timezone(char, num)
else:
raise KeyError('Unsupported date/time field %r' % char)
def format_era(self, char, num):
width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[max(3, num)]
era = int(self.value.year >= 0)
return get_era_names(width, self.locale)[era]
def format_year(self, char, num):
value = self.value.year
if char.isupper():
week = self.get_week_number(self.get_day_of_year())
if week == 0:
value -= 1
year = self.format(value, num)
if num == 2:
year = year[-2:]
return year
def format_quarter(self, char, num):
quarter = (self.value.month - 1) // 3 + 1
if num <= 2:
return ('%%0%dd' % num) % quarter
width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num]
context = {'Q': 'format', 'q': 'stand-alone'}[char]
return get_quarter_names(width, context, self.locale)[quarter]
def format_month(self, char, num):
if num <= 2:
return ('%%0%dd' % num) % self.value.month
width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num]
context = {'M': 'format', 'L': 'stand-alone'}[char]
return get_month_names(width, context, self.locale)[self.value.month]
def format_week(self, char, num):
if char.islower(): # week of year
day_of_year = self.get_day_of_year()
week = self.get_week_number(day_of_year)
if week == 0:
date = self.value - timedelta(days=day_of_year)
week = self.get_week_number(self.get_day_of_year(date),
date.weekday())
return self.format(week, num)
else: # week of month
week = self.get_week_number(self.value.day)
if week == 0:
date = self.value - timedelta(days=self.value.day)
week = self.get_week_number(date.day, date.weekday())
return '%d' % week
def format_weekday(self, char, num):
if num < 3:
if char.islower():
value = 7 - self.locale.first_week_day + self.value.weekday()
return self.format(value % 7 + 1, num)
num = 3
weekday = self.value.weekday()
width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num]
context = {3: 'format', 4: 'format', 5: 'stand-alone'}[num]
return get_day_names(width, context, self.locale)[weekday]
def format_day_of_year(self, num):
return self.format(self.get_day_of_year(), num)
def format_day_of_week_in_month(self):
return '%d' % ((self.value.day - 1) // 7 + 1)
def format_period(self, char):
period = {0: 'am', 1: 'pm'}[int(self.value.hour >= 12)]
return get_period_names(locale=self.locale)[period]
def format_frac_seconds(self, num):
value = str(self.value.microsecond)
return self.format(round(float('.%s' % value), num) * 10**num, num)
def format_milliseconds_in_day(self, num):
msecs = self.value.microsecond // 1000 + self.value.second * 1000 + \
self.value.minute * 60000 + self.value.hour * 3600000
return self.format(msecs, num)
def format_timezone(self, char, num):
width = {3: 'short', 4: 'long'}[max(3, num)]
if char == 'z':
return get_timezone_name(self.value, width, locale=self.locale)
elif char == 'Z':
return get_timezone_gmt(self.value, width, locale=self.locale)
elif char == 'v':
return get_timezone_name(self.value.tzinfo, width,
locale=self.locale)
elif char == 'V':
if num == 1:
return get_timezone_name(self.value.tzinfo, width,
uncommon=True, locale=self.locale)
return get_timezone_location(self.value.tzinfo, locale=self.locale)
def format(self, value, length):
return ('%%0%dd' % length) % value
def get_day_of_year(self, date=None):
if date is None:
date = self.value
return (date - date.replace(month=1, day=1)).days + 1
def get_week_number(self, day_of_period, day_of_week=None):
"""Return the number of the week of a day within a period. This may be
the week number in a year or the week number in a month.
Usually this will return a value equal to or greater than 1, but if the
first week of the period is so short that it actually counts as the last
week of the previous period, this function will return 0.
>>> format = DateTimeFormat(date(2006, 1, 8), Locale.parse('de_DE'))
>>> format.get_week_number(6)
1
>>> format = DateTimeFormat(date(2006, 1, 8), Locale.parse('en_US'))
>>> format.get_week_number(6)
2
:param day_of_period: the number of the day in the period (usually
either the day of month or the day of year)
        :param day_of_week: the week day; if omitted, the week day of the
current date is assumed
"""
if day_of_week is None:
day_of_week = self.value.weekday()
first_day = (day_of_week - self.locale.first_week_day -
day_of_period + 1) % 7
if first_day < 0:
first_day += 7
week_number = (day_of_period + first_day - 1) // 7
if 7 - first_day >= self.locale.min_week_days:
week_number += 1
return week_number
PATTERN_CHARS = {
'G': [1, 2, 3, 4, 5], # era
'y': None, 'Y': None, 'u': None, # year
'Q': [1, 2, 3, 4], 'q': [1, 2, 3, 4], # quarter
'M': [1, 2, 3, 4, 5], 'L': [1, 2, 3, 4, 5], # month
'w': [1, 2], 'W': [1], # week
'd': [1, 2], 'D': [1, 2, 3], 'F': [1], 'g': None, # day
'E': [1, 2, 3, 4, 5], 'e': [1, 2, 3, 4, 5], 'c': [1, 3, 4, 5], # week day
'a': [1], # period
'h': [1, 2], 'H': [1, 2], 'K': [1, 2], 'k': [1, 2], # hour
'm': [1, 2], # minute
's': [1, 2], 'S': None, 'A': None, # second
'z': [1, 2, 3, 4], 'Z': [1, 2, 3, 4], 'v': [1, 4], 'V': [1, 4] # zone
}
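# For illustration (comment added; not part of the upstream babel source): the lists
# above bound the allowed repeat counts per field, so parse_pattern('MMMM') is valid
# (a width of 4 is allowed for 'M'), while parse_pattern('mmm') raises
# ValueError: Invalid length for field: 'mmm' (only widths 1 and 2 are allowed for 'm').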
def parse_pattern(pattern):
"""Parse date, time, and datetime format patterns.
>>> parse_pattern("MMMMd").format
u'%(MMMM)s%(d)s'
>>> parse_pattern("MMM d, yyyy").format
u'%(MMM)s %(d)s, %(yyyy)s'
Pattern can contain literal strings in single quotes:
>>> parse_pattern("H:mm' Uhr 'z").format
u'%(H)s:%(mm)s Uhr %(z)s'
An actual single quote can be used by using two adjacent single quote
characters:
>>> parse_pattern("hh' o''clock'").format
u"%(hh)s o'clock"
:param pattern: the formatting pattern to parse
"""
if type(pattern) is DateTimePattern:
return pattern
result = []
quotebuf = None
charbuf = []
fieldchar = ['']
fieldnum = [0]
def append_chars():
result.append(''.join(charbuf).replace('%', '%%'))
del charbuf[:]
def append_field():
limit = PATTERN_CHARS[fieldchar[0]]
if limit and fieldnum[0] not in limit:
raise ValueError('Invalid length for field: %r'
% (fieldchar[0] * fieldnum[0]))
result.append('%%(%s)s' % (fieldchar[0] * fieldnum[0]))
fieldchar[0] = ''
fieldnum[0] = 0
for idx, char in enumerate(pattern.replace("''", '\0')):
if quotebuf is None:
if char == "'": # quote started
if fieldchar[0]:
append_field()
elif charbuf:
append_chars()
quotebuf = []
elif char in PATTERN_CHARS:
if charbuf:
append_chars()
if char == fieldchar[0]:
fieldnum[0] += 1
else:
if fieldchar[0]:
append_field()
fieldchar[0] = char
fieldnum[0] = 1
else:
if fieldchar[0]:
append_field()
charbuf.append(char)
elif quotebuf is not None:
if char == "'": # end of quote
charbuf.extend(quotebuf)
quotebuf = None
else: # inside quote
quotebuf.append(char)
if fieldchar[0]:
append_field()
elif charbuf:
append_chars()
return DateTimePattern(pattern, u''.join(result).replace('\0', "'"))
|
tinchoss/Python_Android
|
refs/heads/master
|
python/src/Misc/BeOS-setup.py
|
24
|
# Autodetecting setup.py script for building the Python extensions
#
# Modified for BeOS build. Donn Cave, March 27 2001.
__version__ = "special BeOS after 1.37"
import sys, os
from distutils import sysconfig
from distutils import text_file
from distutils.errors import *
from distutils.core import Extension, setup
from distutils.command.build_ext import build_ext
# This global variable is used to hold the list of modules to be disabled.
disabled_module_list = ['dbm', 'mmap', 'resource', 'nis']
def find_file(filename, std_dirs, paths):
"""Searches for the directory where a given file is located,
and returns a possibly-empty list of additional directories, or None
if the file couldn't be found at all.
'filename' is the name of a file, such as readline.h or libcrypto.a.
'std_dirs' is the list of standard system directories; if the
file is found in one of them, no additional directives are needed.
'paths' is a list of additional locations to check; if the file is
found in one of them, the resulting list will contain the directory.
"""
# Check the standard locations
for dir in std_dirs:
f = os.path.join(dir, filename)
if os.path.exists(f): return []
# Check the additional directories
for dir in paths:
f = os.path.join(dir, filename)
if os.path.exists(f):
return [dir]
# Not found anywhere
return None
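# Example with hypothetical paths (comment added for clarity; not in the original file):
#     find_file('zlib.h', ['/boot/develop/headers/posix'], ['/boot/home/config/include'])
# returns [] if the header lives in the standard directory, ['/boot/home/config/include']
# if only the additional directory has it, and None if it cannot be found at all.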
def find_library_file(compiler, libname, std_dirs, paths):
filename = compiler.library_filename(libname, lib_type='shared')
result = find_file(filename, std_dirs, paths)
if result is not None: return result
filename = compiler.library_filename(libname, lib_type='static')
result = find_file(filename, std_dirs, paths)
return result
def module_enabled(extlist, modname):
"""Returns whether the module 'modname' is present in the list
of extensions 'extlist'."""
extlist = [ext for ext in extlist if ext.name == modname]
return len(extlist)
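# For example (comment added for clarity; not in the original file), once the zlib
# Extension has been appended in detect_modules() below, module_enabled(exts, 'zlib')
# evaluates truthy.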
class PyBuildExt(build_ext):
def build_extensions(self):
# Detect which modules should be compiled
self.detect_modules()
# Remove modules that are present on the disabled list
self.extensions = [ext for ext in self.extensions
if ext.name not in disabled_module_list]
# Fix up the autodetected modules, prefixing all the source files
# with Modules/ and adding Python's include directory to the path.
(srcdir,) = sysconfig.get_config_vars('srcdir')
# Figure out the location of the source code for extension modules
moddir = os.path.join(os.getcwd(), srcdir, 'Modules')
moddir = os.path.normpath(moddir)
srcdir, tail = os.path.split(moddir)
srcdir = os.path.normpath(srcdir)
moddir = os.path.normpath(moddir)
# Fix up the paths for scripts, too
self.distribution.scripts = [os.path.join(srcdir, filename)
for filename in self.distribution.scripts]
for ext in self.extensions[:]:
ext.sources = [ os.path.join(moddir, filename)
for filename in ext.sources ]
ext.include_dirs.append( '.' ) # to get config.h
ext.include_dirs.append( os.path.join(srcdir, './Include') )
# If a module has already been built statically,
# don't build it here
if ext.name in sys.builtin_module_names:
self.extensions.remove(ext)
# Parse Modules/Setup to figure out which modules are turned
# on in the file.
input = text_file.TextFile('Modules/Setup', join_lines=1)
remove_modules = []
while 1:
line = input.readline()
if not line: break
line = line.split()
remove_modules.append( line[0] )
input.close()
for ext in self.extensions[:]:
if ext.name in remove_modules:
self.extensions.remove(ext)
# When you run "make CC=altcc" or something similar, you really want
# those environment variables passed into the setup.py phase. Here's
# a small set of useful ones.
compiler = os.environ.get('CC')
linker_so = os.environ.get('LDSHARED')
args = {}
# unfortunately, distutils doesn't let us provide separate C and C++
# compilers
if compiler is not None:
args['compiler_so'] = compiler
if linker_so is not None:
args['linker_so'] = linker_so + ' -shared'
self.compiler.set_executables(**args)
build_ext.build_extensions(self)
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except (CCompilerError, DistutilsError), why:
self.announce('WARNING: building of extension "%s" failed: %s' %
(ext.name, sys.exc_info()[1]))
def get_platform (self):
# Get value of sys.platform
platform = sys.platform
if platform[:6] =='cygwin':
platform = 'cygwin'
elif platform[:4] =='beos':
platform = 'beos'
return platform
def detect_modules(self):
try:
belibs = os.environ['BELIBRARIES'].split(';')
except KeyError:
belibs = ['/boot/beos/system/lib']
belibs.append('/boot/home/config/lib')
self.compiler.library_dirs.append('/boot/home/config/lib')
try:
beincl = os.environ['BEINCLUDES'].split(';')
except KeyError:
beincl = []
beincl.append('/boot/home/config/include')
self.compiler.include_dirs.append('/boot/home/config/include')
# lib_dirs and inc_dirs are used to search for files;
# if a file is found in one of those directories, it can
# be assumed that no additional -I,-L directives are needed.
lib_dirs = belibs
inc_dirs = beincl
exts = []
platform = self.get_platform()
# Check for MacOS X, which doesn't need libm.a at all
math_libs = ['m']
if platform in ['Darwin1.2', 'beos']:
math_libs = []
# XXX Omitted modules: gl, pure, dl, SGI-specific modules
#
# The following modules are all pretty straightforward, and compile
# on pretty much any POSIXish platform.
#
# Some modules that are normally always on:
exts.append( Extension('_weakref', ['_weakref.c']) )
exts.append( Extension('_symtable', ['symtablemodule.c']) )
# array objects
exts.append( Extension('array', ['arraymodule.c']) )
# complex math library functions
exts.append( Extension('cmath', ['cmathmodule.c'],
libraries=math_libs) )
# math library functions, e.g. sin()
exts.append( Extension('math', ['mathmodule.c'],
libraries=math_libs) )
# fast string operations implemented in C
exts.append( Extension('strop', ['stropmodule.c']) )
# time operations and variables
exts.append( Extension('time', ['timemodule.c'],
libraries=math_libs) )
# operator.add() and similar goodies
exts.append( Extension('operator', ['operator.c']) )
# access to the builtin codecs and codec registry
exts.append( Extension('_codecs', ['_codecsmodule.c']) )
# Python C API test module
exts.append( Extension('_testcapi', ['_testcapimodule.c']) )
# static Unicode character database
exts.append( Extension('unicodedata', ['unicodedata.c']) )
# access to ISO C locale support
exts.append( Extension('_locale', ['_localemodule.c']) )
# Modules with some UNIX dependencies -- on by default:
# (If you have a really backward UNIX, select and socket may not be
# supported...)
# fcntl(2) and ioctl(2)
exts.append( Extension('fcntl', ['fcntlmodule.c']) )
# pwd(3)
exts.append( Extension('pwd', ['pwdmodule.c']) )
# grp(3)
exts.append( Extension('grp', ['grpmodule.c']) )
# posix (UNIX) errno values
exts.append( Extension('errno', ['errnomodule.c']) )
# select(2); not on ancient System V
exts.append( Extension('select', ['selectmodule.c']) )
# The md5 module implements the RSA Data Security, Inc. MD5
# Message-Digest Algorithm, described in RFC 1321. The necessary files
# md5c.c and md5.h are included here.
exts.append( Extension('md5', ['md5module.c', 'md5c.c']) )
# The sha module implements the SHA checksum algorithm.
# (NIST's Secure Hash Algorithm.)
exts.append( Extension('sha', ['shamodule.c']) )
# Helper module for various ascii-encoders
exts.append( Extension('binascii', ['binascii.c']) )
# Fred Drake's interface to the Python parser
exts.append( Extension('parser', ['parsermodule.c']) )
# cStringIO and cPickle
exts.append( Extension('cStringIO', ['cStringIO.c']) )
exts.append( Extension('cPickle', ['cPickle.c']) )
# Memory-mapped files (also works on Win32).
exts.append( Extension('mmap', ['mmapmodule.c']) )
# Lance Ellinghaus's syslog daemon interface
exts.append( Extension('syslog', ['syslogmodule.c']) )
# George Neville-Neil's timing module:
exts.append( Extension('timing', ['timingmodule.c']) )
#
# Here ends the simple stuff. From here on, modules need certain
# libraries, are platform-specific, or present other surprises.
#
# Multimedia modules
# These don't work for 64-bit platforms!!!
# These represent audio samples or images as strings:
# Disabled on 64-bit platforms
if sys.maxint != 9223372036854775807L:
# Operations on audio samples
exts.append( Extension('audioop', ['audioop.c']) )
# Operations on images
exts.append( Extension('imageop', ['imageop.c']) )
# Read SGI RGB image files (but coded portably)
exts.append( Extension('rgbimg', ['rgbimgmodule.c']) )
# readline
if self.compiler.find_library_file(lib_dirs, 'readline'):
readline_libs = ['readline']
if self.compiler.find_library_file(lib_dirs +
['/usr/lib/termcap'],
'termcap'):
readline_libs.append('termcap')
exts.append( Extension('readline', ['readline.c'],
library_dirs=['/usr/lib/termcap'],
libraries=readline_libs) )
# The crypt module is now disabled by default because it breaks builds
# on many systems (where -lcrypt is needed), e.g. Linux (I believe).
if self.compiler.find_library_file(lib_dirs, 'crypt'):
libs = ['crypt']
else:
libs = []
exts.append( Extension('crypt', ['cryptmodule.c'], libraries=libs) )
# socket(2)
# Detect SSL support for the socket module
ssl_incs = find_file('openssl/ssl.h', inc_dirs,
['/usr/local/ssl/include',
'/usr/contrib/ssl/include/'
]
)
ssl_libs = find_library_file(self.compiler, 'ssl',lib_dirs,
['/usr/local/ssl/lib',
'/usr/contrib/ssl/lib/'
] )
if (ssl_incs is not None and
ssl_libs is not None):
exts.append( Extension('_socket', ['socketmodule.c'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto'],
define_macros = [('USE_SSL',1)] ) )
else:
exts.append( Extension('_socket', ['socketmodule.c']) )
# Modules that provide persistent dictionary-like semantics. You will
# probably want to arrange for at least one of them to be available on
# your machine, though none are defined by default because of library
# dependencies. The Python module anydbm.py provides an
# implementation independent wrapper for these; dumbdbm.py provides
# similar functionality (but slower of course) implemented in Python.
# The standard Unix dbm module:
if platform not in ['cygwin']:
if (self.compiler.find_library_file(lib_dirs, 'ndbm')):
exts.append( Extension('dbm', ['dbmmodule.c'],
libraries = ['ndbm'] ) )
else:
exts.append( Extension('dbm', ['dbmmodule.c']) )
# Anthony Baxter's gdbm module. GNU dbm(3) will require -lgdbm:
if (self.compiler.find_library_file(lib_dirs, 'gdbm')):
exts.append( Extension('gdbm', ['gdbmmodule.c'],
libraries = ['gdbm'] ) )
# Berkeley DB interface.
#
# This requires the Berkeley DB code, see
# ftp://ftp.cs.berkeley.edu/pub/4bsd/db.1.85.tar.gz
#
# Edit the variables DB and DBPORT to point to the db top directory
# and the subdirectory of PORT where you built it.
#
# (See http://electricrain.com/greg/python/bsddb3/ for an interface to
# BSD DB 3.x.)
dblib = []
if self.compiler.find_library_file(lib_dirs, 'db'):
dblib = ['db']
db185_incs = find_file('db_185.h', inc_dirs,
['/usr/include/db3', '/usr/include/db2'])
db_inc = find_file('db.h', inc_dirs, ['/usr/include/db1'])
if db185_incs is not None:
exts.append( Extension('bsddb', ['bsddbmodule.c'],
include_dirs = db185_incs,
define_macros=[('HAVE_DB_185_H',1)],
libraries = dblib ) )
elif db_inc is not None:
exts.append( Extension('bsddb', ['bsddbmodule.c'],
include_dirs = db_inc,
libraries = dblib) )
# Unix-only modules
if platform not in ['mac', 'win32']:
# Steen Lumholt's termios module
exts.append( Extension('termios', ['termios.c']) )
# Jeremy Hylton's rlimit interface
if platform not in ['cygwin']:
exts.append( Extension('resource', ['resource.c']) )
# Generic dynamic loading module
#exts.append( Extension('dl', ['dlmodule.c']) )
# Sun yellow pages. Some systems have the functions in libc.
if platform not in ['cygwin']:
if (self.compiler.find_library_file(lib_dirs, 'nsl')):
libs = ['nsl']
else:
libs = []
exts.append( Extension('nis', ['nismodule.c'],
libraries = libs) )
        # Curses support, requiring the System V version of curses, often
# provided by the ncurses library.
if (self.compiler.find_library_file(lib_dirs, 'ncurses')):
curses_libs = ['ncurses']
exts.append( Extension('_curses', ['_cursesmodule.c'],
libraries = curses_libs) )
elif (self.compiler.find_library_file(lib_dirs, 'curses')):
if (self.compiler.find_library_file(lib_dirs, 'terminfo')):
curses_libs = ['curses', 'terminfo']
else:
curses_libs = ['curses', 'termcap']
exts.append( Extension('_curses', ['_cursesmodule.c'],
libraries = curses_libs) )
# If the curses module is enabled, check for the panel module
if (os.path.exists('Modules/_curses_panel.c') and
module_enabled(exts, '_curses') and
self.compiler.find_library_file(lib_dirs, 'panel')):
exts.append( Extension('_curses_panel', ['_curses_panel.c'],
libraries = ['panel'] + curses_libs) )
# Lee Busby's SIGFPE modules.
# The library to link fpectl with is platform specific.
# Choose *one* of the options below for fpectl:
if platform == 'irix5':
# For SGI IRIX (tested on 5.3):
exts.append( Extension('fpectl', ['fpectlmodule.c'],
libraries=['fpe']) )
elif 0: # XXX how to detect SunPro?
# For Solaris with SunPro compiler (tested on Solaris 2.5 with SunPro C 4.2):
# (Without the compiler you don't have -lsunmath.)
#fpectl fpectlmodule.c -R/opt/SUNWspro/lib -lsunmath -lm
pass
else:
# For other systems: see instructions in fpectlmodule.c.
#fpectl fpectlmodule.c ...
exts.append( Extension('fpectl', ['fpectlmodule.c']) )
# Andrew Kuchling's zlib module.
        # This requires zlib 1.1.3 (or later).
# See http://www.gzip.org/zlib/
if (self.compiler.find_library_file(lib_dirs, 'z')):
exts.append( Extension('zlib', ['zlibmodule.c'],
libraries = ['z']) )
# Interface to the Expat XML parser
#
# Expat is written by James Clark and must be downloaded separately
# (see below). The pyexpat module was written by Paul Prescod after a
# prototype by Jack Jansen.
#
# The Expat dist includes Windows .lib and .dll files. Home page is
# at http://www.jclark.com/xml/expat.html, the current production
# release is always ftp://ftp.jclark.com/pub/xml/expat.zip.
#
# EXPAT_DIR, below, should point to the expat/ directory created by
# unpacking the Expat source distribution.
#
# Note: the expat build process doesn't yet build a libexpat.a; you
        # can do this manually while we try to convince the author to add it. To
# do so, cd to EXPAT_DIR, run "make" if you have not done so, then
# run:
#
# ar cr libexpat.a xmltok/*.o xmlparse/*.o
#
expat_defs = []
expat_incs = find_file('expat.h', inc_dirs, [])
if expat_incs is not None:
# expat.h was found
expat_defs = [('HAVE_EXPAT_H', 1)]
else:
expat_incs = find_file('xmlparse.h', inc_dirs, [])
if (expat_incs is not None and
self.compiler.find_library_file(lib_dirs, 'expat')):
exts.append( Extension('pyexpat', ['pyexpat.c'],
define_macros = expat_defs,
libraries = ['expat']) )
# Platform-specific libraries
if platform == 'linux2':
# Linux-specific modules
exts.append( Extension('linuxaudiodev', ['linuxaudiodev.c']) )
if platform == 'sunos5':
# SunOS specific modules
exts.append( Extension('sunaudiodev', ['sunaudiodev.c']) )
self.extensions.extend(exts)
# Call the method for detecting whether _tkinter can be compiled
self.detect_tkinter(inc_dirs, lib_dirs)
def detect_tkinter(self, inc_dirs, lib_dirs):
# The _tkinter module.
# Assume we haven't found any of the libraries or include files
tcllib = tklib = tcl_includes = tk_includes = None
for version in ['8.4', '8.3', '8.2', '8.1', '8.0']:
tklib = self.compiler.find_library_file(lib_dirs,
'tk' + version )
tcllib = self.compiler.find_library_file(lib_dirs,
'tcl' + version )
if tklib and tcllib:
# Exit the loop when we've found the Tcl/Tk libraries
break
# Now check for the header files
if tklib and tcllib:
# Check for the include files on Debian, where
# they're put in /usr/include/{tcl,tk}X.Y
debian_tcl_include = [ '/usr/include/tcl' + version ]
debian_tk_include = [ '/usr/include/tk' + version ] + debian_tcl_include
tcl_includes = find_file('tcl.h', inc_dirs, debian_tcl_include)
tk_includes = find_file('tk.h', inc_dirs, debian_tk_include)
if (tcllib is None or tklib is None and
tcl_includes is None or tk_includes is None):
# Something's missing, so give up
return
# OK... everything seems to be present for Tcl/Tk.
include_dirs = [] ; libs = [] ; defs = [] ; added_lib_dirs = []
for dir in tcl_includes + tk_includes:
if dir not in include_dirs:
include_dirs.append(dir)
# Check for various platform-specific directories
platform = self.get_platform()
if platform == 'sunos5':
include_dirs.append('/usr/openwin/include')
added_lib_dirs.append('/usr/openwin/lib')
elif os.path.exists('/usr/X11R6/include'):
include_dirs.append('/usr/X11R6/include')
added_lib_dirs.append('/usr/X11R6/lib')
elif os.path.exists('/usr/X11R5/include'):
include_dirs.append('/usr/X11R5/include')
added_lib_dirs.append('/usr/X11R5/lib')
else:
# Assume default location for X11
include_dirs.append('/usr/X11/include')
added_lib_dirs.append('/usr/X11/lib')
# Check for BLT extension
if self.compiler.find_library_file(lib_dirs + added_lib_dirs, 'BLT8.0'):
defs.append( ('WITH_BLT', 1) )
libs.append('BLT8.0')
# Add the Tcl/Tk libraries
libs.append('tk'+version)
libs.append('tcl'+version)
if platform in ['aix3', 'aix4']:
libs.append('ld')
# Finally, link with the X11 libraries
libs.append('X11')
ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
define_macros=[('WITH_APPINIT', 1)] + defs,
include_dirs = include_dirs,
libraries = libs,
library_dirs = added_lib_dirs,
)
self.extensions.append(ext)
# XXX handle these, but how to detect?
# *** Uncomment and edit for PIL (TkImaging) extension only:
# -DWITH_PIL -I../Extensions/Imaging/libImaging tkImaging.c \
# *** Uncomment and edit for TOGL extension only:
# -DWITH_TOGL togl.c \
# *** Uncomment these for TOGL extension only:
# -lGL -lGLU -lXext -lXmu \
def main():
setup(name = 'Python standard library',
version = '%d.%d' % sys.version_info[:2],
cmdclass = {'build_ext':PyBuildExt},
# The struct module is defined here, because build_ext won't be
# called unless there's at least one extension module defined.
ext_modules=[Extension('struct', ['structmodule.c'])],
# Scripts to install
scripts = ['Tools/scripts/pydoc']
)
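    # Typical invocation (assumed here, as for any distutils-based setup
    # script): "python setup.py build".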
# --install-platlib
if __name__ == '__main__':
sysconfig.set_python_build()
main()
|
abetusk/bostontraintrack
|
refs/heads/release
|
experimental/protob/protobuf-2.6.1/python/google/protobuf/internal/descriptor_pool_test.py
|
73
|
#! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.descriptor_pool."""
__author__ = 'matthewtoia@google.com (Matt Toia)'
import os
import unittest
from google.apputils import basetest
from google.protobuf import unittest_pb2
from google.protobuf import descriptor_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import descriptor_pool_test1_pb2
from google.protobuf.internal import descriptor_pool_test2_pb2
from google.protobuf.internal import factory_test1_pb2
from google.protobuf.internal import factory_test2_pb2
from google.protobuf import descriptor
from google.protobuf import descriptor_database
from google.protobuf import descriptor_pool
class DescriptorPoolTest(basetest.TestCase):
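  # Exercises DescriptorPool lookups (FindFileByName,
  # FindFileContainingSymbol, FindMessageTypeByName, FindEnumTypeByName)
  # against a pool populated from the serialized FileDescriptorProtos of
  # the generated factory_test1/factory_test2 modules.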
def setUp(self):
self.pool = descriptor_pool.DescriptorPool()
self.factory_test1_fd = descriptor_pb2.FileDescriptorProto.FromString(
factory_test1_pb2.DESCRIPTOR.serialized_pb)
self.factory_test2_fd = descriptor_pb2.FileDescriptorProto.FromString(
factory_test2_pb2.DESCRIPTOR.serialized_pb)
self.pool.Add(self.factory_test1_fd)
self.pool.Add(self.factory_test2_fd)
def testFindFileByName(self):
name1 = 'google/protobuf/internal/factory_test1.proto'
file_desc1 = self.pool.FindFileByName(name1)
self.assertIsInstance(file_desc1, descriptor.FileDescriptor)
self.assertEquals(name1, file_desc1.name)
self.assertEquals('google.protobuf.python.internal', file_desc1.package)
self.assertIn('Factory1Message', file_desc1.message_types_by_name)
name2 = 'google/protobuf/internal/factory_test2.proto'
file_desc2 = self.pool.FindFileByName(name2)
self.assertIsInstance(file_desc2, descriptor.FileDescriptor)
self.assertEquals(name2, file_desc2.name)
self.assertEquals('google.protobuf.python.internal', file_desc2.package)
self.assertIn('Factory2Message', file_desc2.message_types_by_name)
def testFindFileByNameFailure(self):
with self.assertRaises(KeyError):
self.pool.FindFileByName('Does not exist')
def testFindFileContainingSymbol(self):
file_desc1 = self.pool.FindFileContainingSymbol(
'google.protobuf.python.internal.Factory1Message')
self.assertIsInstance(file_desc1, descriptor.FileDescriptor)
self.assertEquals('google/protobuf/internal/factory_test1.proto',
file_desc1.name)
self.assertEquals('google.protobuf.python.internal', file_desc1.package)
self.assertIn('Factory1Message', file_desc1.message_types_by_name)
file_desc2 = self.pool.FindFileContainingSymbol(
'google.protobuf.python.internal.Factory2Message')
self.assertIsInstance(file_desc2, descriptor.FileDescriptor)
self.assertEquals('google/protobuf/internal/factory_test2.proto',
file_desc2.name)
self.assertEquals('google.protobuf.python.internal', file_desc2.package)
self.assertIn('Factory2Message', file_desc2.message_types_by_name)
def testFindFileContainingSymbolFailure(self):
with self.assertRaises(KeyError):
self.pool.FindFileContainingSymbol('Does not exist')
def testFindMessageTypeByName(self):
msg1 = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory1Message')
self.assertIsInstance(msg1, descriptor.Descriptor)
self.assertEquals('Factory1Message', msg1.name)
self.assertEquals('google.protobuf.python.internal.Factory1Message',
msg1.full_name)
self.assertEquals(None, msg1.containing_type)
nested_msg1 = msg1.nested_types[0]
self.assertEquals('NestedFactory1Message', nested_msg1.name)
self.assertEquals(msg1, nested_msg1.containing_type)
nested_enum1 = msg1.enum_types[0]
self.assertEquals('NestedFactory1Enum', nested_enum1.name)
self.assertEquals(msg1, nested_enum1.containing_type)
self.assertEquals(nested_msg1, msg1.fields_by_name[
'nested_factory_1_message'].message_type)
self.assertEquals(nested_enum1, msg1.fields_by_name[
'nested_factory_1_enum'].enum_type)
msg2 = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory2Message')
self.assertIsInstance(msg2, descriptor.Descriptor)
self.assertEquals('Factory2Message', msg2.name)
self.assertEquals('google.protobuf.python.internal.Factory2Message',
msg2.full_name)
self.assertIsNone(msg2.containing_type)
nested_msg2 = msg2.nested_types[0]
self.assertEquals('NestedFactory2Message', nested_msg2.name)
self.assertEquals(msg2, nested_msg2.containing_type)
nested_enum2 = msg2.enum_types[0]
self.assertEquals('NestedFactory2Enum', nested_enum2.name)
self.assertEquals(msg2, nested_enum2.containing_type)
self.assertEquals(nested_msg2, msg2.fields_by_name[
'nested_factory_2_message'].message_type)
self.assertEquals(nested_enum2, msg2.fields_by_name[
'nested_factory_2_enum'].enum_type)
self.assertTrue(msg2.fields_by_name['int_with_default'].has_default_value)
self.assertEquals(
1776, msg2.fields_by_name['int_with_default'].default_value)
self.assertTrue(
msg2.fields_by_name['double_with_default'].has_default_value)
self.assertEquals(
9.99, msg2.fields_by_name['double_with_default'].default_value)
self.assertTrue(
msg2.fields_by_name['string_with_default'].has_default_value)
self.assertEquals(
'hello world', msg2.fields_by_name['string_with_default'].default_value)
self.assertTrue(msg2.fields_by_name['bool_with_default'].has_default_value)
self.assertFalse(msg2.fields_by_name['bool_with_default'].default_value)
self.assertTrue(msg2.fields_by_name['enum_with_default'].has_default_value)
self.assertEquals(
1, msg2.fields_by_name['enum_with_default'].default_value)
msg3 = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory2Message.NestedFactory2Message')
self.assertEquals(nested_msg2, msg3)
self.assertTrue(msg2.fields_by_name['bytes_with_default'].has_default_value)
self.assertEquals(
b'a\xfb\x00c',
msg2.fields_by_name['bytes_with_default'].default_value)
self.assertEqual(1, len(msg2.oneofs))
self.assertEqual(1, len(msg2.oneofs_by_name))
self.assertEqual(2, len(msg2.oneofs[0].fields))
for name in ['oneof_int', 'oneof_string']:
self.assertEqual(msg2.oneofs[0],
msg2.fields_by_name[name].containing_oneof)
self.assertIn(msg2.fields_by_name[name], msg2.oneofs[0].fields)
def testFindMessageTypeByNameFailure(self):
with self.assertRaises(KeyError):
self.pool.FindMessageTypeByName('Does not exist')
def testFindEnumTypeByName(self):
enum1 = self.pool.FindEnumTypeByName(
'google.protobuf.python.internal.Factory1Enum')
self.assertIsInstance(enum1, descriptor.EnumDescriptor)
self.assertEquals(0, enum1.values_by_name['FACTORY_1_VALUE_0'].number)
self.assertEquals(1, enum1.values_by_name['FACTORY_1_VALUE_1'].number)
nested_enum1 = self.pool.FindEnumTypeByName(
'google.protobuf.python.internal.Factory1Message.NestedFactory1Enum')
self.assertIsInstance(nested_enum1, descriptor.EnumDescriptor)
self.assertEquals(
0, nested_enum1.values_by_name['NESTED_FACTORY_1_VALUE_0'].number)
self.assertEquals(
1, nested_enum1.values_by_name['NESTED_FACTORY_1_VALUE_1'].number)
enum2 = self.pool.FindEnumTypeByName(
'google.protobuf.python.internal.Factory2Enum')
self.assertIsInstance(enum2, descriptor.EnumDescriptor)
self.assertEquals(0, enum2.values_by_name['FACTORY_2_VALUE_0'].number)
self.assertEquals(1, enum2.values_by_name['FACTORY_2_VALUE_1'].number)
nested_enum2 = self.pool.FindEnumTypeByName(
'google.protobuf.python.internal.Factory2Message.NestedFactory2Enum')
self.assertIsInstance(nested_enum2, descriptor.EnumDescriptor)
self.assertEquals(
0, nested_enum2.values_by_name['NESTED_FACTORY_2_VALUE_0'].number)
self.assertEquals(
1, nested_enum2.values_by_name['NESTED_FACTORY_2_VALUE_1'].number)
def testFindEnumTypeByNameFailure(self):
with self.assertRaises(KeyError):
self.pool.FindEnumTypeByName('Does not exist')
def testUserDefinedDB(self):
db = descriptor_database.DescriptorDatabase()
self.pool = descriptor_pool.DescriptorPool(db)
db.Add(self.factory_test1_fd)
db.Add(self.factory_test2_fd)
self.testFindMessageTypeByName()
def testComplexNesting(self):
test1_desc = descriptor_pb2.FileDescriptorProto.FromString(
descriptor_pool_test1_pb2.DESCRIPTOR.serialized_pb)
test2_desc = descriptor_pb2.FileDescriptorProto.FromString(
descriptor_pool_test2_pb2.DESCRIPTOR.serialized_pb)
self.pool.Add(test1_desc)
self.pool.Add(test2_desc)
TEST1_FILE.CheckFile(self, self.pool)
TEST2_FILE.CheckFile(self, self.pool)
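# The helper classes below (ProtoFile, EnumType, MessageType and the
# *Field checkers) describe the expected shape of the test .proto files
# declaratively; their CheckFile/CheckType/CheckField methods walk the
# descriptors returned by the pool and assert that they match.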
class ProtoFile(object):
def __init__(self, name, package, messages, dependencies=None):
self.name = name
self.package = package
self.messages = messages
self.dependencies = dependencies or []
def CheckFile(self, test, pool):
file_desc = pool.FindFileByName(self.name)
test.assertEquals(self.name, file_desc.name)
test.assertEquals(self.package, file_desc.package)
dependencies_names = [f.name for f in file_desc.dependencies]
test.assertEqual(self.dependencies, dependencies_names)
for name, msg_type in self.messages.items():
msg_type.CheckType(test, None, name, file_desc)
class EnumType(object):
def __init__(self, values):
self.values = values
def CheckType(self, test, msg_desc, name, file_desc):
enum_desc = msg_desc.enum_types_by_name[name]
test.assertEqual(name, enum_desc.name)
expected_enum_full_name = '.'.join([msg_desc.full_name, name])
test.assertEqual(expected_enum_full_name, enum_desc.full_name)
test.assertEqual(msg_desc, enum_desc.containing_type)
test.assertEqual(file_desc, enum_desc.file)
for index, (value, number) in enumerate(self.values):
value_desc = enum_desc.values_by_name[value]
test.assertEqual(value, value_desc.name)
test.assertEqual(index, value_desc.index)
test.assertEqual(number, value_desc.number)
test.assertEqual(enum_desc, value_desc.type)
test.assertIn(value, msg_desc.enum_values_by_name)
class MessageType(object):
def __init__(self, type_dict, field_list, is_extendable=False,
extensions=None):
self.type_dict = type_dict
self.field_list = field_list
self.is_extendable = is_extendable
self.extensions = extensions or []
def CheckType(self, test, containing_type_desc, name, file_desc):
if containing_type_desc is None:
desc = file_desc.message_types_by_name[name]
expected_full_name = '.'.join([file_desc.package, name])
else:
desc = containing_type_desc.nested_types_by_name[name]
expected_full_name = '.'.join([containing_type_desc.full_name, name])
test.assertEqual(name, desc.name)
test.assertEqual(expected_full_name, desc.full_name)
test.assertEqual(containing_type_desc, desc.containing_type)
test.assertEqual(desc.file, file_desc)
test.assertEqual(self.is_extendable, desc.is_extendable)
for name, subtype in self.type_dict.items():
subtype.CheckType(test, desc, name, file_desc)
for index, (name, field) in enumerate(self.field_list):
field.CheckField(test, desc, name, index)
for index, (name, field) in enumerate(self.extensions):
field.CheckField(test, desc, name, index)
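# Each *Field helper below asserts the FieldDescriptor properties (name,
# full name, index, number, type, cpp_type and default value) for a
# single declared field or extension.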
class EnumField(object):
def __init__(self, number, type_name, default_value):
self.number = number
self.type_name = type_name
self.default_value = default_value
def CheckField(self, test, msg_desc, name, index):
field_desc = msg_desc.fields_by_name[name]
enum_desc = msg_desc.enum_types_by_name[self.type_name]
test.assertEqual(name, field_desc.name)
expected_field_full_name = '.'.join([msg_desc.full_name, name])
test.assertEqual(expected_field_full_name, field_desc.full_name)
test.assertEqual(index, field_desc.index)
test.assertEqual(self.number, field_desc.number)
test.assertEqual(descriptor.FieldDescriptor.TYPE_ENUM, field_desc.type)
test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_ENUM,
field_desc.cpp_type)
test.assertTrue(field_desc.has_default_value)
test.assertEqual(enum_desc.values_by_name[self.default_value].index,
field_desc.default_value)
test.assertEqual(msg_desc, field_desc.containing_type)
test.assertEqual(enum_desc, field_desc.enum_type)
class MessageField(object):
def __init__(self, number, type_name):
self.number = number
self.type_name = type_name
def CheckField(self, test, msg_desc, name, index):
field_desc = msg_desc.fields_by_name[name]
field_type_desc = msg_desc.nested_types_by_name[self.type_name]
test.assertEqual(name, field_desc.name)
expected_field_full_name = '.'.join([msg_desc.full_name, name])
test.assertEqual(expected_field_full_name, field_desc.full_name)
test.assertEqual(index, field_desc.index)
test.assertEqual(self.number, field_desc.number)
test.assertEqual(descriptor.FieldDescriptor.TYPE_MESSAGE, field_desc.type)
test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_MESSAGE,
field_desc.cpp_type)
test.assertFalse(field_desc.has_default_value)
test.assertEqual(msg_desc, field_desc.containing_type)
test.assertEqual(field_type_desc, field_desc.message_type)
class StringField(object):
def __init__(self, number, default_value):
self.number = number
self.default_value = default_value
def CheckField(self, test, msg_desc, name, index):
field_desc = msg_desc.fields_by_name[name]
test.assertEqual(name, field_desc.name)
expected_field_full_name = '.'.join([msg_desc.full_name, name])
test.assertEqual(expected_field_full_name, field_desc.full_name)
test.assertEqual(index, field_desc.index)
test.assertEqual(self.number, field_desc.number)
test.assertEqual(descriptor.FieldDescriptor.TYPE_STRING, field_desc.type)
test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_STRING,
field_desc.cpp_type)
test.assertTrue(field_desc.has_default_value)
test.assertEqual(self.default_value, field_desc.default_value)
class ExtensionField(object):
def __init__(self, number, extended_type):
self.number = number
self.extended_type = extended_type
def CheckField(self, test, msg_desc, name, index):
field_desc = msg_desc.extensions_by_name[name]
test.assertEqual(name, field_desc.name)
expected_field_full_name = '.'.join([msg_desc.full_name, name])
test.assertEqual(expected_field_full_name, field_desc.full_name)
test.assertEqual(self.number, field_desc.number)
test.assertEqual(index, field_desc.index)
test.assertEqual(descriptor.FieldDescriptor.TYPE_MESSAGE, field_desc.type)
test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_MESSAGE,
field_desc.cpp_type)
test.assertFalse(field_desc.has_default_value)
test.assertTrue(field_desc.is_extension)
test.assertEqual(msg_desc, field_desc.extension_scope)
test.assertEqual(msg_desc, field_desc.message_type)
test.assertEqual(self.extended_type, field_desc.containing_type.name)
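# AddDescriptorTest covers the explicit registration APIs (AddDescriptor,
# AddEnumDescriptor, AddFileDescriptor). As the tests below assert, none
# of them registers nested types recursively, so nested messages and
# enums must be added separately.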
class AddDescriptorTest(basetest.TestCase):
def _TestMessage(self, prefix):
pool = descriptor_pool.DescriptorPool()
pool.AddDescriptor(unittest_pb2.TestAllTypes.DESCRIPTOR)
self.assertEquals(
'protobuf_unittest.TestAllTypes',
pool.FindMessageTypeByName(
prefix + 'protobuf_unittest.TestAllTypes').full_name)
# AddDescriptor is not recursive.
with self.assertRaises(KeyError):
pool.FindMessageTypeByName(
prefix + 'protobuf_unittest.TestAllTypes.NestedMessage')
pool.AddDescriptor(unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR)
self.assertEquals(
'protobuf_unittest.TestAllTypes.NestedMessage',
pool.FindMessageTypeByName(
prefix + 'protobuf_unittest.TestAllTypes.NestedMessage').full_name)
# Files are implicitly also indexed when messages are added.
self.assertEquals(
'google/protobuf/unittest.proto',
pool.FindFileByName(
'google/protobuf/unittest.proto').name)
self.assertEquals(
'google/protobuf/unittest.proto',
pool.FindFileContainingSymbol(
prefix + 'protobuf_unittest.TestAllTypes.NestedMessage').name)
def testMessage(self):
self._TestMessage('')
self._TestMessage('.')
def _TestEnum(self, prefix):
pool = descriptor_pool.DescriptorPool()
pool.AddEnumDescriptor(unittest_pb2.ForeignEnum.DESCRIPTOR)
self.assertEquals(
'protobuf_unittest.ForeignEnum',
pool.FindEnumTypeByName(
prefix + 'protobuf_unittest.ForeignEnum').full_name)
# AddEnumDescriptor is not recursive.
with self.assertRaises(KeyError):
pool.FindEnumTypeByName(
prefix + 'protobuf_unittest.ForeignEnum.NestedEnum')
pool.AddEnumDescriptor(unittest_pb2.TestAllTypes.NestedEnum.DESCRIPTOR)
self.assertEquals(
'protobuf_unittest.TestAllTypes.NestedEnum',
pool.FindEnumTypeByName(
prefix + 'protobuf_unittest.TestAllTypes.NestedEnum').full_name)
# Files are implicitly also indexed when enums are added.
self.assertEquals(
'google/protobuf/unittest.proto',
pool.FindFileByName(
'google/protobuf/unittest.proto').name)
self.assertEquals(
'google/protobuf/unittest.proto',
pool.FindFileContainingSymbol(
prefix + 'protobuf_unittest.TestAllTypes.NestedEnum').name)
def testEnum(self):
self._TestEnum('')
self._TestEnum('.')
def testFile(self):
pool = descriptor_pool.DescriptorPool()
pool.AddFileDescriptor(unittest_pb2.DESCRIPTOR)
self.assertEquals(
'google/protobuf/unittest.proto',
pool.FindFileByName(
'google/protobuf/unittest.proto').name)
# AddFileDescriptor is not recursive; messages and enums within files must
# be explicitly registered.
with self.assertRaises(KeyError):
pool.FindFileContainingSymbol(
'protobuf_unittest.TestAllTypes')
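# TEST1_FILE and TEST2_FILE mirror descriptor_pool_test1.proto and
# descriptor_pool_test2.proto; testComplexNesting above loads both files
# into the pool and validates them against these expectations.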
TEST1_FILE = ProtoFile(
'google/protobuf/internal/descriptor_pool_test1.proto',
'google.protobuf.python.internal',
{
'DescriptorPoolTest1': MessageType({
'NestedEnum': EnumType([('ALPHA', 1), ('BETA', 2)]),
'NestedMessage': MessageType({
'NestedEnum': EnumType([('EPSILON', 5), ('ZETA', 6)]),
'DeepNestedMessage': MessageType({
'NestedEnum': EnumType([('ETA', 7), ('THETA', 8)]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'ETA')),
('nested_field', StringField(2, 'theta')),
]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'ZETA')),
('nested_field', StringField(2, 'beta')),
('deep_nested_message', MessageField(3, 'DeepNestedMessage')),
])
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'BETA')),
('nested_message', MessageField(2, 'NestedMessage')),
], is_extendable=True),
'DescriptorPoolTest2': MessageType({
'NestedEnum': EnumType([('GAMMA', 3), ('DELTA', 4)]),
'NestedMessage': MessageType({
'NestedEnum': EnumType([('IOTA', 9), ('KAPPA', 10)]),
'DeepNestedMessage': MessageType({
'NestedEnum': EnumType([('LAMBDA', 11), ('MU', 12)]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'MU')),
('nested_field', StringField(2, 'lambda')),
]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'IOTA')),
('nested_field', StringField(2, 'delta')),
('deep_nested_message', MessageField(3, 'DeepNestedMessage')),
])
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'GAMMA')),
('nested_message', MessageField(2, 'NestedMessage')),
]),
})
TEST2_FILE = ProtoFile(
'google/protobuf/internal/descriptor_pool_test2.proto',
'google.protobuf.python.internal',
{
'DescriptorPoolTest3': MessageType({
'NestedEnum': EnumType([('NU', 13), ('XI', 14)]),
'NestedMessage': MessageType({
'NestedEnum': EnumType([('OMICRON', 15), ('PI', 16)]),
'DeepNestedMessage': MessageType({
'NestedEnum': EnumType([('RHO', 17), ('SIGMA', 18)]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'RHO')),
('nested_field', StringField(2, 'sigma')),
]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'PI')),
('nested_field', StringField(2, 'nu')),
('deep_nested_message', MessageField(3, 'DeepNestedMessage')),
])
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'XI')),
('nested_message', MessageField(2, 'NestedMessage')),
], extensions=[
('descriptor_pool_test',
ExtensionField(1001, 'DescriptorPoolTest1')),
]),
},
dependencies=['google/protobuf/internal/descriptor_pool_test1.proto'])
if __name__ == '__main__':
basetest.main()
|