| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, may be null) |
|---|---|---|---|---|
pepetreshere/odoo
|
refs/heads/patch-2
|
addons/membership/__init__.py
|
114
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import models
from . import wizard
from . import report
|
kerr-huang/SL4A
|
refs/heads/master
|
python-build/python-libs/gdata/samples/oauth/oauth_on_appengine/appengine_utilities/sessions.py
|
128
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2008, appengine-utilities project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the appengine-utilities project nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# main python imports
import os
import time
import datetime
import random
import md5
import Cookie
import pickle
import __main__
from time import strftime
import logging
# google appengine imports
from google.appengine.ext import db
from google.appengine.api import memcache
#django simplejson import, used for flash
from django.utils import simplejson
from rotmodel import ROTModel
# settings, if you have these set elsewhere, such as your django settings file,
# you'll need to adjust the values to pull from there.
class _AppEngineUtilities_Session(db.Model):
"""
Model for the sessions in the datastore. This contains the identifier and
validation information for the session.
"""
sid = db.StringListProperty()
session_key = db.FloatProperty()
ip = db.StringProperty()
ua = db.StringProperty()
last_activity = db.DateTimeProperty()
dirty = db.BooleanProperty(default=False)
working = db.BooleanProperty(default=False)
deleted = db.BooleanProperty(default=False) # used for cases where
# datastore delete doesn't
# work
def put(self):
"""
Extend put so that it writes values to memcache as well as the datastore,
and keeps them in sync, even when datastore writes fail.
"""
if self.session_key:
memcache.set("_AppEngineUtilities_Session_" + str(self.session_key), self)
else:
# new session, generate a new key, which will handle the put and set the memcache
self.create_key()
self.last_activity = datetime.datetime.now()
try:
self.dirty = False
logging.info("doing a put")
db.put(self)
memcache.set("_AppEngineUtilities_Session_" + str(self.session_key), self)
except:
self.dirty = True
memcache.set("_AppEngineUtilities_Session_" + str(self.session_key), self)
return self
@classmethod
def get_session(cls, session_obj=None):
"""
Uses the passed sid to get a session object from memcache, or datastore
if a valid one exists.
"""
if session_obj.sid == None:
return None
session_key = session_obj.sid.split('_')[0]
session = memcache.get("_AppEngineUtilities_Session_" + str(session_key))
if session:
if session.deleted == True:
session.delete()
return None
if session.dirty == True and session.working == False:
# the working bit is used to make sure multiple requests, which can happen
# with ajax oriented sites, don't try to put at the same time
session.working = True
memcache.set("_AppEngineUtilities_Session_" + str(session_key), session)
session.put()
if session_obj.sid in session.sid:
logging.info('grabbed session from memcache')
sessionAge = datetime.datetime.now() - session.last_activity
if sessionAge.seconds > session_obj.session_expire_time:
session.delete()
return None
return session
else:
return None
# Not in memcache, check datastore
query = _AppEngineUtilities_Session.all()
query.filter("sid = ", session_obj.sid)
results = query.fetch(1)
if len(results) > 0:
sessionAge = datetime.datetime.now() - results[0].last_activity
if sessionAge.seconds > session_obj.session_expire_time:
results[0].delete()
return None
memcache.set("_AppEngineUtilities_Session_" + str(session_key), results[0])
memcache.set("_AppEngineUtilities_SessionData_" + str(session_key), results[0].get_items_ds())
logging.info('grabbed session from datastore')
return results[0]
else:
return None
def get_items(self):
"""
Returns all the items stored in a session
"""
items = memcache.get("_AppEngineUtilities_SessionData_" + str(self.session_key))
if items:
for item in items:
if item.deleted == True:
item.delete()
items.remove(item)
return items
query = _AppEngineUtilities_SessionData.all()
query.filter('session_key', self.session_key)
results = query.fetch(1000)
return results
def get_item(self, keyname = None):
"""
Returns a single item from the memcache or datastore
"""
mc = memcache.get("_AppEngineUtilities_SessionData_" + str(self.session_key))
if mc:
for item in mc:
if item.keyname == keyname:
if item.deleted == True:
item.delete()
return None
return item
query = _AppEngineUtilities_SessionData.all()
query.filter("session_key = ", self.session_key)
query.filter("keyname = ", keyname)
results = query.fetch(1)
if len(results) > 0:
memcache.set("_AppEngineUtilities_SessionData_" + str(self.session_key), self.get_items_ds())
return results[0]
return None
def get_items_ds(self):
"""
This gets all the items straight from the datastore, does not
interact with the memcache.
"""
query = _AppEngineUtilities_SessionData.all()
query.filter('session_key', self.session_key)
results = query.fetch(1000)
return results
def delete(self):
try:
query = _AppEngineUtilities_SessionData.all()
query.filter("session_key = ", self.session_key)
results = query.fetch(1000)
db.delete(results)
db.delete(self)
memcache.delete_multi(["_AppEngineUtilities_Session_" + str(self.session_key), "_AppEngineUtilities_SessionData_" + str(self.session_key)])
except:
mc = memcache.get("_AppEngineUtilities_Session_" + str(self.session_key))
mc.deleted = True
memcache.set("_AppEngineUtilities_Session_" + str(self.session_key), mc)
def create_key(self):
"""
Creates a unique key for the session.
"""
self.session_key = time.time()
valid = False
while valid == False:
# verify session_key is unique
if memcache.get("_AppEngineUtilities_Session_" + str(self.session_key)):
self.session_key = self.session_key + 0.001
else:
query = _AppEngineUtilities_Session.all()
query.filter("session_key = ", self.session_key)
results = query.fetch(1)
if len(results) > 0:
self.session_key = self.session_key + 0.001
else:
try:
self.put()
memcache.set("_AppEngineUtilities_Session_" + str(self.session_key), self)
except:
self.dirty = True
memcache.set("_AppEngineUtilities_Session_" + str(self.session_key), self)
valid = True
class _AppEngineUtilities_SessionData(db.Model):
"""
Model for the session data in the datastore.
"""
session_key = db.FloatProperty()
keyname = db.StringProperty()
content = db.BlobProperty()
dirty = db.BooleanProperty(default=False)
deleted = db.BooleanProperty(default=False)
def put(self):
"""
Adds a keyname/value for session to the datastore and memcache
"""
# update or insert in datastore
try:
db.put(self)
self.dirty = False
except:
self.dirty = True
# update or insert in memcache
mc_items = memcache.get("_AppEngineUtilities_SessionData_" + str(self.session_key))
if mc_items:
value_updated = False
for item in mc_items:
if value_updated == True:
break
if item.keyname == self.keyname:
logging.info("updating " + self.keyname)
item.content = self.content
memcache.set("_AppEngineUtilities_SessionData_" + str(self.session_key), mc_items)
value_updated = True
break
if value_updated == False:
#logging.info("adding " + self.keyname)
mc_items.append(self)
memcache.set("_AppEngineUtilities_SessionData_" + str(self.session_key), mc_items)
def delete(self):
"""
Deletes an entity from the session in memcache and the datastore
"""
try:
db.delete(self)
except:
self.deleted = True
mc_items = memcache.get("_AppEngineUtilities_SessionData_" + str(self.session_key))
value_handled = False
for item in mc_items:
if value_handled == True:
break
if item.keyname == self.keyname:
if self.deleted == True:
item.deleted = True
else:
mc_items.remove(item)
memcache.set("_AppEngineUtilities_SessionData_" + str(self.session_key), mc_items)
class _DatastoreWriter(object):
def put(self, keyname, value, session):
"""
Insert a keyname/value pair into the datastore for the session.
Args:
keyname: The keyname of the mapping.
value: The value of the mapping.
"""
keyname = session._validate_key(keyname)
if value is None:
raise ValueError('You must pass a value to put.')
# datastore write trumps cookie. If there is a cookie value
# with this keyname, delete it so we don't have conflicting
# entries.
if session.cookie_vals.has_key(keyname):
del(session.cookie_vals[keyname])
session.output_cookie[session.cookie_name + '_data'] = \
simplejson.dumps(session.cookie_vals)
print session.output_cookie.output()
sessdata = session._get(keyname=keyname)
if sessdata is None:
sessdata = _AppEngineUtilities_SessionData()
sessdata.session_key = session.session.session_key
sessdata.keyname = keyname
sessdata.content = pickle.dumps(value)
# UNPICKLING CACHE session.cache[keyname] = pickle.dumps(value)
session.cache[keyname] = value
sessdata.put()
# todo _set_memcache() should be going away when this is done
# session._set_memcache()
class _CookieWriter(object):
def put(self, keyname, value, session):
"""
Insert a keyname/value pair into the session cookie.
Args:
keyname: The keyname of the mapping.
value: The value of the mapping.
"""
keyname = session._validate_key(keyname)
if value is None:
raise ValueError('You must pass a value to put.')
# Use simplejson for cookies instead of pickle.
session.cookie_vals[keyname] = value
# update the requests session cache as well.
session.cache[keyname] = value
session.output_cookie[session.cookie_name + '_data'] = \
simplejson.dumps(session.cookie_vals)
print session.output_cookie.output()
class Session(object):
"""
Sessions are used to maintain user presence between requests.
A session stores a unique id as a cookie in the browser and
references it in a datastore object, maintaining user presence
by validating requests as visits from the same browser.
You can add extra data to the session object by using it
as a dictionary object; values can be any python object that
can be pickled. (A brief usage sketch appears at the end of
this module.)
For extra performance, session objects are also stored in
memcache and kept consistent with the datastore, which
speeds up read requests for session data.
"""
COOKIE_NAME = 'appengine-utilities-session-sid' # session token
DEFAULT_COOKIE_PATH = '/'
SESSION_EXPIRE_TIME = 7200 # sessions are valid for 7200 seconds (2 hours)
CLEAN_CHECK_PERCENT = 50 # By default, 50% of all requests will clean the database
INTEGRATE_FLASH = True # integrate functionality from flash module?
CHECK_IP = True # validate sessions by IP
CHECK_USER_AGENT = True # validate sessions by user agent
SET_COOKIE_EXPIRES = True # Set to True to add expiration field to cookie
SESSION_TOKEN_TTL = 5 # Number of seconds a session token is valid for.
UPDATE_LAST_ACTIVITY = 60 # Number of seconds that may pass before
# last_activity is updated
WRITER = "datastore" # Use the datastore writer by default. cookie is the
# other option.
def __init__(self, cookie_path=DEFAULT_COOKIE_PATH,
cookie_name=COOKIE_NAME,
session_expire_time=SESSION_EXPIRE_TIME,
clean_check_percent=CLEAN_CHECK_PERCENT,
integrate_flash=INTEGRATE_FLASH, check_ip=CHECK_IP,
check_user_agent=CHECK_USER_AGENT,
set_cookie_expires=SET_COOKIE_EXPIRES,
session_token_ttl=SESSION_TOKEN_TTL,
last_activity_update=UPDATE_LAST_ACTIVITY,
writer=WRITER):
"""
Initializer
Args:
cookie_name: The name for the session cookie stored in the browser.
session_expire_time: The amount of time between requests before the
session expires.
clean_check_percent: The percentage of requests that will fire off a
cleaning routine that deletes stale session data.
integrate_flash: If the appengine-utilities flash utility should be
integrated into the session object.
check_ip: If the browser IP should be used for session validation.
check_user_agent: If the browser user agent should be used for
session validation.
set_cookie_expires: True adds an expires field to the cookie so
it persists even after the browser is closed.
session_token_ttl: Number of seconds a session token is valid
for before it should be regenerated.
"""
self.cookie_path = cookie_path
self.cookie_name = cookie_name
self.session_expire_time = session_expire_time
self.integrate_flash = integrate_flash
self.check_user_agent = check_user_agent
self.check_ip = check_ip
self.set_cookie_expires = set_cookie_expires
self.session_token_ttl = session_token_ttl
self.last_activity_update = last_activity_update
self.writer = writer
# make sure the page is not cached in the browser
self.no_cache_headers()
# Check the cookie and, if necessary, create a new one.
self.cache = {}
string_cookie = os.environ.get('HTTP_COOKIE', '')
self.cookie = Cookie.SimpleCookie()
self.output_cookie = Cookie.SimpleCookie()
self.cookie.load(string_cookie)
try:
self.cookie_vals = \
simplejson.loads(self.cookie[self.cookie_name + '_data'].value)
# sync self.cache and self.cookie_vals which will make those
# values available for all gets immediately.
for k in self.cookie_vals:
self.cache[k] = self.cookie_vals[k]
self.output_cookie[self.cookie_name + '_data'] = self.cookie[self.cookie_name + '_data']
# sync the input cookie with the output cookie
except:
self.cookie_vals = {}
if writer == "cookie":
pass
else:
self.sid = None
new_session = True
# do_put is used to determine if a datastore write should
# happen on this request.
do_put = False
# check for existing cookie
if self.cookie.get(cookie_name):
self.sid = self.cookie[cookie_name].value
self.session = _AppEngineUtilities_Session.get_session(self) # will return None if
# sid expired
if self.session:
new_session = False
if new_session:
# start a new session
self.session = _AppEngineUtilities_Session()
self.session.put()
self.sid = self.new_sid()
if 'HTTP_USER_AGENT' in os.environ:
self.session.ua = os.environ['HTTP_USER_AGENT']
else:
self.session.ua = None
if 'REMOTE_ADDR' in os.environ:
self.session.ip = os.environ['REMOTE_ADDR']
else:
self.session.ip = None
self.session.sid = [self.sid]
# do put() here to get the session key
self.session.put()
else:
# check the age of the token to determine if a new one
# is required
duration = datetime.timedelta(seconds=self.session_token_ttl)
session_age_limit = datetime.datetime.now() - duration
if self.session.last_activity < session_age_limit:
logging.info("UPDATING SID LA = " + str(self.session.last_activity) + " - TL = " + str(session_age_limit))
self.sid = self.new_sid()
if len(self.session.sid) > 2:
self.session.sid.remove(self.session.sid[0])
self.session.sid.append(self.sid)
do_put = True
else:
self.sid = self.session.sid[-1]
# check if last_activity needs updated
ula = datetime.timedelta(seconds=self.last_activity_update)
if datetime.datetime.now() > self.session.last_activity + ula:
do_put = True
self.output_cookie[cookie_name] = self.sid
self.output_cookie[cookie_name]['path'] = cookie_path
# UNPICKLING CACHE self.cache['sid'] = pickle.dumps(self.sid)
self.cache['sid'] = self.sid
if do_put:
if self.sid != None and self.sid != "":
logging.info("doing put")
self.session.put()
if self.set_cookie_expires:
if not self.output_cookie.has_key(cookie_name + '_data'):
self.output_cookie[cookie_name + '_data'] = ""
self.output_cookie[cookie_name + '_data']['expires'] = \
self.session_expire_time
print self.output_cookie.output()
# fire up a Flash object if integration is enabled
if self.integrate_flash:
import flash
self.flash = flash.Flash(cookie=self.cookie)
# randomly delete old stale sessions in the datastore (see
# CLEAN_CHECK_PERCENT variable)
if random.randint(1, 100) < clean_check_percent:
self._clean_old_sessions()
def new_sid(self):
"""
Create a new session id.
"""
sid = str(self.session.session_key) + "_" +md5.new(repr(time.time()) + \
str(random.random())).hexdigest()
return sid
'''
# removed as model now has get_session classmethod
def _get_session(self):
"""
Get the user's session from the datastore
"""
query = _AppEngineUtilities_Session.all()
query.filter('sid', self.sid)
if self.check_user_agent:
query.filter('ua', os.environ['HTTP_USER_AGENT'])
if self.check_ip:
query.filter('ip', os.environ['REMOTE_ADDR'])
results = query.fetch(1)
if len(results) is 0:
return None
else:
sessionAge = datetime.datetime.now() - results[0].last_activity
if sessionAge.seconds > self.session_expire_time:
results[0].delete()
return None
return results[0]
'''
def _get(self, keyname=None):
"""
Return all of the SessionData object data from the datastore only,
unless keyname is specified, in which case only that instance of
SessionData is returned.
Important: This does not interact with memcache and pulls directly
from the datastore. This also does not get items from the cookie
store.
Args:
keyname: The keyname of the value you are trying to retrieve.
"""
if keyname != None:
return self.session.get_item(keyname)
return self.session.get_items()
"""
OLD
query = _AppEngineUtilities_SessionData.all()
query.filter('session', self.session)
if keyname != None:
query.filter('keyname =', keyname)
results = query.fetch(1000)
if len(results) is 0:
return None
if keyname != None:
return results[0]
return results
"""
def _validate_key(self, keyname):
"""
Validate the keyname, making sure it is set and not a reserved name.
"""
if keyname is None:
raise ValueError('You must pass a keyname for the session' + \
' data content.')
elif keyname in ('sid', 'flash'):
raise ValueError(keyname + ' is a reserved keyname.')
if not isinstance(keyname, (str, unicode)):
return str(keyname)
return keyname
def _put(self, keyname, value):
"""
Insert a keyname/value pair into the datastore for the session.
Args:
keyname: The keyname of the mapping.
value: The value of the mapping.
"""
if self.writer == "datastore":
writer = _DatastoreWriter()
else:
writer = _CookieWriter()
writer.put(keyname, value, self)
def _delete_session(self):
"""
Delete the session and all session data.
"""
if hasattr(self, "session"):
self.session.delete()
self.cookie_vals = {}
self.cache = {}
self.output_cookie[self.cookie_name + '_data'] = \
simplejson.dumps(self.cookie_vals)
print self.output_cookie.output()
"""
OLD
if hasattr(self, "session"):
sessiondata = self._get()
# delete from datastore
if sessiondata is not None:
for sd in sessiondata:
sd.delete()
# delete from memcache
memcache.delete('sid-'+str(self.session.key()))
# delete the session now that all items that reference it are deleted.
self.session.delete()
# unset any cookie values that may exist
self.cookie_vals = {}
self.cache = {}
self.output_cookie[self.cookie_name + '_data'] = \
simplejson.dumps(self.cookie_vals)
print self.output_cookie.output()
"""
# if the event class has been loaded, fire off the sessionDeleted event
if 'AEU_Events' in __main__.__dict__:
__main__.AEU_Events.fire_event('sessionDelete')
def delete(self):
"""
Delete the current session and start a new one.
This is useful for when you need to get rid of all data tied to a
current session, such as when you are logging out a user.
"""
self._delete_session()
@classmethod
def delete_all_sessions(cls):
"""
Deletes all sessions and session data from the datastore and memcache.
NOTE: This is not fully developed. It also will not delete any cookie
data as this does not work for each incoming request. Keep this in mind
if you are using the cookie writer.
"""
all_sessions_deleted = False
all_data_deleted = False
while not all_sessions_deleted:
query = _AppEngineUtilities_Session.all()
results = query.fetch(75)
if len(results) == 0:
all_sessions_deleted = True
else:
for result in results:
result.delete()
def _clean_old_sessions(self):
"""
Delete expired sessions from the datastore.
This is only called for CLEAN_CHECK_PERCENT percent of requests because
it could be rather intensive.
"""
duration = datetime.timedelta(seconds=self.session_expire_time)
session_age = datetime.datetime.now() - duration
query = _AppEngineUtilities_Session.all()
query.filter('last_activity <', session_age)
results = query.fetch(50)
for result in results:
"""
OLD
data_query = _AppEngineUtilities_SessionData.all()
data_query.filter('session', result)
data_results = data_query.fetch(1000)
for data_result in data_results:
data_result.delete()
memcache.delete('sid-'+str(result.key()))
"""
result.delete()
# Implement Python container methods
def __getitem__(self, keyname):
"""
Get item from session data.
keyname: The keyname of the mapping.
"""
# flash messages don't go in the datastore
if self.integrate_flash and (keyname == 'flash'):
return self.flash.msg
if keyname in self.cache:
# UNPICKLING CACHE return pickle.loads(str(self.cache[keyname]))
return self.cache[keyname]
if keyname in self.cookie_vals:
return self.cookie_vals[keyname]
if hasattr(self, "session"):
data = self._get(keyname)
if data:
#UNPICKLING CACHE self.cache[keyname] = data.content
self.cache[keyname] = pickle.loads(data.content)
return pickle.loads(data.content)
else:
raise KeyError(str(keyname))
raise KeyError(str(keyname))
def __setitem__(self, keyname, value):
"""
Set item in session data.
Args:
keyname: The keyname of the mapping.
value: The value of mapping.
"""
if self.integrate_flash and (keyname == 'flash'):
self.flash.msg = value
else:
keyname = self._validate_key(keyname)
self.cache[keyname] = value
# self._set_memcache() # commented out because this is done in the datastore put
return self._put(keyname, value)
def delete_item(self, keyname, throw_exception=False):
"""
Delete item from session data, ignoring exceptions if
necessary.
Args:
keyname: The keyname of the object to delete.
throw_exception: false if exceptions are to be ignored.
Returns:
Nothing.
"""
if throw_exception:
self.__delitem__(keyname)
return None
else:
try:
self.__delitem__(keyname)
except KeyError:
return None
def __delitem__(self, keyname):
"""
Delete item from session data.
Args:
keyname: The keyname of the object to delete.
"""
bad_key = False
sessdata = self._get(keyname = keyname)
if sessdata is None:
bad_key = True
else:
sessdata.delete()
if keyname in self.cookie_vals:
del self.cookie_vals[keyname]
bad_key = False
self.output_cookie[self.cookie_name + '_data'] = \
simplejson.dumps(self.cookie_vals)
print self.output_cookie.output()
if bad_key:
raise KeyError(str(keyname))
if keyname in self.cache:
del self.cache[keyname]
def __len__(self):
"""
Return size of session.
"""
# check memcache first
if hasattr(self, "session"):
results = self._get()
if results is not None:
return len(results) + len(self.cookie_vals)
else:
return 0
return len(self.cookie_vals)
def __contains__(self, keyname):
"""
Check if an item is in the session data.
Args:
keyname: The keyname being searched.
"""
try:
r = self.__getitem__(keyname)
except KeyError:
return False
return True
def __iter__(self):
"""
Iterate over the keys in the session data.
"""
# try memcache first
if hasattr(self, "session"):
for k in self._get():
yield k.keyname
for k in self.cookie_vals:
yield k
def __str__(self):
"""
Return string representation.
"""
#if self._get():
return '{' + ', '.join(['"%s" = "%s"' % (k, self[k]) for k in self]) + '}'
#else:
# return []
'''
OLD
def _set_memcache(self):
"""
Set a memcache object with all the session data. Optionally you can
add a key and value to the memcache for put operations.
"""
# Pull directly from the datastore in order to ensure that the
# information is as up to date as possible.
if self.writer == "datastore":
data = {}
sessiondata = self._get()
if sessiondata is not None:
for sd in sessiondata:
data[sd.keyname] = pickle.loads(sd.content)
memcache.set('sid-'+str(self.session.key()), data, \
self.session_expire_time)
'''
def cycle_key(self):
"""
Changes the session id.
"""
self.sid = self.new_sid()
if len(self.session.sid) > 2:
self.session.sid.remove(self.session.sid[0])
self.session.sid.append(self.sid)
def flush(self):
"""
Deletes the current session, creating a new one.
"""
self._delete_session()
self.__init__()
def no_cache_headers(self):
"""
Adds headers, avoiding any page caching in the browser. Useful for highly
dynamic sites.
"""
print "Expires: Tue, 03 Jul 2001 06:00:00 GMT"
print strftime("Last-Modified: %a, %d %b %y %H:%M:%S %Z")
print "Cache-Control: no-store, no-cache, must-revalidate, max-age=0"
print "Cache-Control: post-check=0, pre-check=0"
print "Pragma: no-cache"
def clear(self):
"""
Remove all items
"""
sessiondata = self._get()
# delete from datastore
if sessiondata is not None:
for sd in sessiondata:
sd.delete()
# delete from memcache
self.cache = {}
self.cookie_vals = {}
self.output_cookie[self.cookie_name + '_data'] = \
simplejson.dumps(self.cookie_vals)
print self.output_cookie.output()
def has_key(self, keyname):
"""
Equivalent to k in a, use that form in new code
"""
return self.__contains__(keyname)
def items(self):
"""
A dict copy of the session's key/value pairs.
"""
op = {}
for k in self:
op[k] = self[k]
return op
def keys(self):
"""
List of keys.
"""
l = []
for k in self:
l.append(k)
return l
def update(self, *dicts):
"""
Updates the session with key/value pairs from the given dicts,
overwriting existing keys; returns None.
"""
for d in dicts:
for k in d:
self._put(k, d[k])
return None
def values(self):
"""
A copy of the list of values.
"""
v = []
for k in self:
v.append(self[k])
return v
def get(self, keyname, default = None):
"""
a[k] if k in a, else x
"""
try:
return self.__getitem__(keyname)
except KeyError:
if default is not None:
return default
return None
def setdefault(self, keyname, default = None):
"""
a[k] if k in a, else x (also setting it)
"""
try:
return self.__getitem__(keyname)
except KeyError:
if default is not None:
self.__setitem__(keyname, default)
return default
return None
@classmethod
def check_token(cls, cookie_name=COOKIE_NAME, delete_invalid=True):
"""
Retrieves the token from a cookie and validates that it is
a valid token for an existing cookie. Cookie validation is based
on the token existing on a session that has not expired.
This is useful for determining if datastore or cookie writer
should be used in hybrid implementations.
Args:
cookie_name: Name of the cookie to check for a token.
delete_invalid: If the token is not valid, delete the session
cookie, to avoid datastore queries on future
requests.
Returns True/False
NOTE: TODO This currently only works when the datastore is working, which of course
is pointless for applications using the django middleware. This needs to be resolved
before merging back into the main project.
"""
string_cookie = os.environ.get('HTTP_COOKIE', '')
cookie = Cookie.SimpleCookie()
cookie.load(string_cookie)
if cookie.has_key(cookie_name):
query = _AppEngineUtilities_Session.all()
query.filter('sid', cookie[cookie_name].value)
results = query.fetch(1)
if len(results) > 0:
return True
else:
if delete_invalid:
output_cookie = Cookie.SimpleCookie()
output_cookie[cookie_name] = cookie[cookie_name]
output_cookie[cookie_name]['expires'] = 0
print output_cookie.output()
return False
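# --- Usage sketch (illustrative addition, not part of the original module) ---
# The Session docstring above describes a dictionary-style API. The function
# below is a minimal, hypothetical example of how a CGI-style request handler
# might exercise it; the keynames are made up, and constructing Session()
# assumes a real App Engine request environment (cookies, os.environ, etc.).
def _example_session_usage():
    session = Session()                                   # load or create the session
    session['visits'] = session.get('visits', 0) + 1      # dict-style read/write
    if 'visits' in session:                               # __contains__ support
        count = session['visits']
    session.delete_item('theme', throw_exception=False)   # ignore missing keys
    return session.keys()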
|
M4rtinK/tsubame
|
refs/heads/master
|
core/bundle/future/moves/tkinter/scrolledtext.py
|
118
|
from __future__ import absolute_import
from future.utils import PY3
if PY3:
from tkinter.scrolledtext import *
else:
try:
from ScrolledText import *
except ImportError:
raise ImportError('The ScrolledText module is missing. Does your Py2 '
'installation include tkinter?')
|
bohlian/frappe
|
refs/heads/develop
|
frappe/data_migration/doctype/data_migration_mapping/data_migration_mapping.py
|
3
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class DataMigrationMapping(Document):
def get_filters(self):
if self.condition:
return frappe.safe_eval(self.condition, dict(frappe=frappe))
def get_fields(self):
fields = []
for f in self.fields:
if not (f.local_fieldname[0] in ('"', "'") or f.local_fieldname.startswith('eval:')):
fields.append(f.local_fieldname)
if frappe.db.has_column(self.local_doctype, self.migration_id_field):
fields.append(self.migration_id_field)
if 'name' not in fields:
fields.append('name')
return fields
def get_mapped_record(self, doc):
mapped = frappe._dict()
key_fieldname = 'remote_fieldname'
value_fieldname = 'local_fieldname'
if self.mapping_type == 'Pull':
key_fieldname, value_fieldname = value_fieldname, key_fieldname
for field_map in self.fields:
if not field_map.is_child_table:
value = get_value_from_fieldname(field_map, value_fieldname, doc)
mapped[field_map.get(key_fieldname)] = value
else:
mapping_name = field_map.child_table_mapping
value = get_mapped_child_records(mapping_name, doc.get(field_map.get(value_fieldname)))
mapped[field_map.get(key_fieldname)] = value
return mapped
def get_mapped_child_records(mapping_name, child_docs):
mapped_child_docs = []
mapping = frappe.get_doc('Data Migration Mapping', mapping_name)
for child_doc in child_docs:
mapped_child_docs.append(mapping.get_mapped_record(child_doc))
return mapped_child_docs
def get_value_from_fieldname(field_map, fieldname_field, doc):
field_name = field_map.get(fieldname_field)
if field_name.startswith('eval:'):
value = frappe.safe_eval(field_name[5:], dict(frappe=frappe))
elif field_name[0] in ('"', "'"):
value = field_name[1:-1]
else:
value = doc.get(field_name)
return value
|
bigswitch/snac-nox-zaku
|
refs/heads/zaku
|
src/nox/lib/packet/t/icmp_parse_test.py
|
10
|
# Copyright 2008 (C) Nicira, Inc.
#
# This file is part of NOX.
#
# NOX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NOX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NOX. If not, see <http://www.gnu.org/licenses/>.
import array
from nox.lib.packet import *
from nox.lib.packet.ethernet import *
from nox.coreapps.testharness.testdefs import *
icmp_echo_str = \
"""\
\x00\x1a\xa0\x8f\x9a\x95\x00\xa0\xcc\x28\xa9\x94\x08\x00\x45\x00\
\x00\x54\x00\x00\x40\x00\x40\x01\xb7\x39\xc0\xa8\x01\x0c\xc0\xa8\
\x01\x13\x08\x00\x2b\xde\x6d\x73\x00\x01\x72\xf9\xb4\x47\x41\x69\
\x0b\x00\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14\x15\
\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20\x21\x22\x23\x24\x25\
\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30\x31\x32\x33\x34\x35\
\x36\x37\
"""
icmp_unreach_str = \
"""\
\x00\xa0\xcc\x28\xa9\x94\x00\x1a\x92\x40\xac\x05\x08\x00\x45\xc0\
\x00\x38\x13\x77\x00\x00\x40\x01\xe3\x23\xc0\xa8\x01\x0e\xc0\xa8\
\x01\x0c\x03\x03\x80\x81\x00\x00\x00\x00\x45\x00\x00\x1c\x8e\x60\
\x00\x00\x40\x11\x69\x06\xc0\xa8\x01\x0c\xc0\xa8\x01\x0e\x07\xd6\
\x14\xca\x00\x08\x5f\xd3\
"""
def testICMPEcho():
eth = ethernet(icmp_echo_str)
icmph = eth.find('icmp')
nox_test_assert(icmph, 'icmp parse')
nox_test_assert(icmph.type == 8)
nox_test_assert(icmph.csum == 0x2bde)
echoh = eth.find('echo')
nox_test_assert(echoh)
nox_test_assert(echoh.id == 0x6d73)
nox_test_assert(echoh.seq == 1)
def testICMPEchoConstruct():
eth = ethernet(icmp_echo_str)
nox_test_assert(eth.tostring() == icmp_echo_str)
def testICMPUnreach():
eth = ethernet(icmp_unreach_str)
icmph = eth.find('icmp')
nox_test_assert(icmph)
unreach = eth.find('unreach')
nox_test_assert(unreach)
iph = unreach.find('ipv4')
nox_test_assert(iph)
udph = unreach.find('udp')
nox_test_assert(udph)
nox_test_assert(icmph.type == 3)
nox_test_assert(icmph.code == 3)
nox_test_assert(icmph.csum == 0x8081)
nox_test_assert(ip_to_str(iph.srcip) == "192.168.1.12")
nox_test_assert(ip_to_str(iph.dstip) == "192.168.1.14")
nox_test_assert(udph.srcport == 2006)
nox_test_assert(udph.dstport == 5322)
def testICMPUnreachConstruct():
eth = ethernet(icmp_unreach_str)
nox_test_assert(eth.tostring() == icmp_unreach_str)
|
mitya57/django
|
refs/heads/master
|
tests/sitemaps_tests/test_http.py
|
22
|
import os
from datetime import date
from unittest import skipUnless
from django.apps import apps
from django.conf import settings
from django.contrib.sitemaps import Sitemap
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.test import modify_settings, override_settings
from django.utils.formats import localize
from django.utils.translation import activate, deactivate
from .base import SitemapTestsBase
from .models import TestModel
class HTTPSitemapTests(SitemapTestsBase):
def test_simple_sitemap_index(self):
"A simple sitemap index can be rendered"
response = self.client.get('/simple/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode(), expected_content)
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(__file__), 'templates')],
}])
def test_simple_sitemap_custom_index(self):
"A simple sitemap index can be rendered with a custom template"
response = self.client.get('/simple/custom-index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode(), expected_content)
def test_simple_sitemap_section(self):
"A simple sitemap section can be rendered"
response = self.client.get('/simple/sitemap-simple.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode(), expected_content)
def test_simple_sitemap(self):
"A simple sitemap can be rendered"
response = self.client.get('/simple/sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode(), expected_content)
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(__file__), 'templates')],
}])
def test_simple_custom_sitemap(self):
"A simple sitemap can be rendered with a custom template"
response = self.client.get('/simple/custom-sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode(), expected_content)
def test_sitemap_last_modified(self):
"Last-Modified header is set correctly"
response = self.client.get('/lastmod/sitemap.xml')
self.assertEqual(response['Last-Modified'], 'Wed, 13 Mar 2013 10:00:00 GMT')
def test_sitemap_last_modified_date(self):
"""
The Last-Modified header should support dates (without time).
"""
response = self.client.get('/lastmod/date-sitemap.xml')
self.assertEqual(response['Last-Modified'], 'Wed, 13 Mar 2013 00:00:00 GMT')
def test_sitemap_last_modified_tz(self):
"""
The Last-Modified header should be converted from timezone aware dates
to GMT.
"""
response = self.client.get('/lastmod/tz-sitemap.xml')
self.assertEqual(response['Last-Modified'], 'Wed, 13 Mar 2013 15:00:00 GMT')
def test_sitemap_last_modified_missing(self):
"Last-Modified header is missing when sitemap has no lastmod"
response = self.client.get('/generic/sitemap.xml')
self.assertFalse(response.has_header('Last-Modified'))
def test_sitemap_last_modified_mixed(self):
"Last-Modified header is omitted when lastmod not on all items"
response = self.client.get('/lastmod-mixed/sitemap.xml')
self.assertFalse(response.has_header('Last-Modified'))
def test_sitemaps_lastmod_mixed_ascending_last_modified_missing(self):
"""
The Last-Modified header is omitted when lastmod isn't found in all
sitemaps. Test sitemaps are sorted by lastmod in ascending order.
"""
response = self.client.get('/lastmod-sitemaps/mixed-ascending.xml')
self.assertFalse(response.has_header('Last-Modified'))
def test_sitemaps_lastmod_mixed_descending_last_modified_missing(self):
"""
The Last-Modified header is omitted when lastmod isn't found in all
sitemaps. Test sitemaps are sorted by lastmod in descending order.
"""
response = self.client.get('/lastmod-sitemaps/mixed-descending.xml')
self.assertFalse(response.has_header('Last-Modified'))
def test_sitemaps_lastmod_ascending(self):
"""
The Last-Modified header is set to the most recent sitemap lastmod.
Test sitemaps are sorted by lastmod in ascending order.
"""
response = self.client.get('/lastmod-sitemaps/ascending.xml')
self.assertEqual(response['Last-Modified'], 'Sat, 20 Apr 2013 05:00:00 GMT')
def test_sitemaps_lastmod_descending(self):
"""
The Last-Modified header is set to the most recent sitemap lastmod.
Test sitemaps are sorted by lastmod in descending order.
"""
response = self.client.get('/lastmod-sitemaps/descending.xml')
self.assertEqual(response['Last-Modified'], 'Sat, 20 Apr 2013 05:00:00 GMT')
@skipUnless(settings.USE_I18N, "Internationalization is not enabled")
@override_settings(USE_L10N=True)
def test_localized_priority(self):
"The priority value should not be localized (Refs #14164)"
activate('fr')
self.assertEqual('0,3', localize(0.3))
# Priorities haven't been rendered in localized format.
response = self.client.get('/simple/sitemap.xml')
self.assertContains(response, '<priority>0.5</priority>')
self.assertContains(response, '<lastmod>%s</lastmod>' % date.today())
deactivate()
@modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
def test_requestsite_sitemap(self):
# Hitting the flatpages sitemap without the sites framework installed
# doesn't raise an exception.
response = self.client.get('/simple/sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>http://testserver/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % date.today()
self.assertXMLEqual(response.content.decode(), expected_content)
@skipUnless(apps.is_installed('django.contrib.sites'),
"django.contrib.sites app not installed.")
def test_sitemap_get_urls_no_site_1(self):
"""
Check we get ImproperlyConfigured if we don't pass a site object to
Sitemap.get_urls and no Site objects exist
"""
Site.objects.all().delete()
with self.assertRaises(ImproperlyConfigured):
Sitemap().get_urls()
@modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
def test_sitemap_get_urls_no_site_2(self):
"""
Check we get ImproperlyConfigured when we don't pass a site object to
Sitemap.get_urls if Site objects exist, but the sites framework is not
actually installed.
"""
with self.assertRaises(ImproperlyConfigured):
Sitemap().get_urls()
def test_sitemap_item(self):
"""
Check to make sure that the raw item is included with each
Sitemap.get_url() url result.
"""
test_sitemap = Sitemap()
test_sitemap.items = TestModel.objects.order_by('pk').all
def is_testmodel(url):
return isinstance(url['item'], TestModel)
item_in_url_info = all(map(is_testmodel, test_sitemap.get_urls()))
self.assertTrue(item_in_url_info)
def test_cached_sitemap_index(self):
"""
A cached sitemap index can be rendered (#2713).
"""
response = self.client.get('/cached/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/cached/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode(), expected_content)
def test_x_robots_sitemap(self):
response = self.client.get('/simple/index.xml')
self.assertEqual(response['X-Robots-Tag'], 'noindex, noodp, noarchive')
response = self.client.get('/simple/sitemap.xml')
self.assertEqual(response['X-Robots-Tag'], 'noindex, noodp, noarchive')
def test_empty_sitemap(self):
response = self.client.get('/empty/sitemap.xml')
self.assertEqual(response.status_code, 200)
@override_settings(LANGUAGES=(('en', 'English'), ('pt', 'Portuguese')))
def test_simple_i18nsitemap_index(self):
"A simple i18n sitemap index can be rendered"
response = self.client.get('/simple/i18n.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>{0}/en/i18n/testmodel/{1}/</loc><changefreq>never</changefreq><priority>0.5</priority></url><url><loc>{0}/pt/i18n/testmodel/{1}/</loc><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""".format(self.base_url, self.i18n_model.pk)
self.assertXMLEqual(response.content.decode(), expected_content)
def test_sitemap_without_entries(self):
response = self.client.get('/sitemap-without-entries/sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
</urlset>"""
self.assertXMLEqual(response.content.decode(), expected_content)
|
ramusus/django-facebook-pages
|
refs/heads/master
|
facebook_pages/migrations/0003_auto__add_field_page_posts_count.py
|
2
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Page.posts_count'
db.add_column('facebook_pages_page', 'posts_count',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Page.posts_count'
db.delete_column('facebook_pages_page', 'posts_count')
models = {
'facebook_pages.page': {
'Meta': {'ordering': "['name']", 'object_name': 'Page'},
'about': ('django.db.models.fields.TextField', [], {}),
'can_post': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'category': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'checkins': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'company_overview': ('django.db.models.fields.TextField', [], {}),
'cover': ('annoying.fields.JSONField', [], {'null': 'True'}),
'graph_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'likes': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '100'}),
'location': ('annoying.fields.JSONField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'picture': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'posts_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'talking_about_count': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['facebook_pages']
|
azverkan/scons
|
refs/heads/master
|
test/Deprecated/SourceSignatures/overrides.py
|
5
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Make sure that SourceSignatures() works when overrides are used on a
Builder call. (Previous implementations used methods that would stay
bound to the underlying construction environment, which in this case
meant ignoring the 'timestamp' setting and still using the underlying
content signature.)
"""
import TestSCons
test = TestSCons.TestSCons(match = TestSCons.match_re_dotall)
expect = TestSCons.re_escape("""
scons: warning: The env.SourceSignatures() method is deprecated;
\tconvert your build to use the env.Decider() method instead.
""") + TestSCons.file_expr
test.write('SConstruct', """\
SetOption('warn', 'deprecated-source-signatures')
DefaultEnvironment().SourceSignatures('MD5')
env = Environment()
env.SourceSignatures('timestamp')
env.Command('foo.out', 'foo.in', Copy('$TARGET', '$SOURCE'), FOO=1)
""")
test.write('foo.in', "foo.in 1\n")
test.run(arguments = 'foo.out', stderr = expect)
test.sleep()
test.write('foo.in', "foo.in 1\n")
test.not_up_to_date(arguments = 'foo.out', stderr = expect)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
apache/incubator-allura
|
refs/heads/master
|
ForgeSVN/forgesvn/tests/__init__.py
|
3
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Make our own SVN tool test decorator
from allura.tests.decorators import with_tool
with_svn = with_tool('test', 'SVN', 'src', 'SVN')
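# Hypothetical usage sketch (illustrative addition, not part of the original
# file): decorators built with with_tool() are applied to test functions or
# methods so the named tool is mounted on the test project when the test
# runs. The function below is made up and deliberately not named "test_*"
# so it is never collected as a real test.
@with_svn
def _example_decorated_check():
    pass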
|
mirjalil/IMDB-reviews
|
refs/heads/master
|
code/classify_NB.exhaustive_GSCV.py
|
1
|
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import scipy
import sklearn
import sys
### Get the parameters from the command-line arguments
assert(len(sys.argv)==7)
param_dict = {
'max_df': float(sys.argv[1]),
'sublinear_tf': sys.argv[2].lower() in ('1', 'true', 'yes'),  # bool() of any non-empty string is always True
'max_features': int(sys.argv[3]),
'ngram_range': (int(sys.argv[4]), int(sys.argv[5])),
'alpha': float(sys.argv[6])
}
for k,v in param_dict.items():
print("Param %-16s: %s"%(k, v))
# ## 1. Read the training and test dataset
df = pd.read_table('data/labeledTrainData.tsv')
print(df.head())
df_test = pd.read_table('data/testData.tsv')
print(df_test.head())
# ### 1.1 Extracting X & y data columns
data_train = df.loc[:, 'review']
y_train = df.loc[:, 'sentiment']
print(data_train.head())
data_test = df_test.loc[:, 'review']
print(data_test.tail())
# ## 2. Text Feature Extraction
import nltk
import string
import re
from nltk.corpus import stopwords
# ### 2.1 Tokenizer Function
# **Transform to lower-case**
# **Remove the punctuations**
# **Remove the stopwords**
# **Tokenize the remaining string**
## For more info, see http://www.cs.duke.edu/courses/spring14/compsci290/assignments/lab02.html
stemmer = nltk.stem.porter.PorterStemmer()
def get_tokens(inp_txt):
## Lower case: ABC -> abc
txt_lower = inp_txt.lower()
## Remove punctuations (!, ', ", ., :, ;, )
#txt_lower_nopunct = txt_lower.translate(string.maketrans("",""), string.punctuation)
#print(txt_lower_nopunct)
## Tokenize:
tokens = nltk.word_tokenize(txt_lower) #_nopunct)
#tokens = nltk.wordpunct_tokenize(txt_lower)
## remove stop-words:
tokens_filtered = [w for w in tokens if not w in stopwords.words('english')]
## stemming:
stems = [stemmer.stem(t) for t in tokens_filtered]
stems_nopunct = [s for s in stems if re.match('^[a-zA-Z]+$', s) is not None]
return (stems_nopunct)
## Note: you need to download the punkt and stopwords packages in nltk:
# import nltk
# nltk.download('punkt'); nltk.download('stopwords')
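## Illustrative example (added here, not part of the original script): a quick
## sanity check of get_tokens(). The exact stems depend on the installed NLTK
## data, so the expected output in the comment is only approximate.
def _tokenizer_example():
    # roughly ['movi', 'amazingli', 'entertain'] after stopword removal,
    # punctuation filtering and Porter stemming
    return get_tokens("These movies were amazingly entertaining!")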
from sklearn import pipeline
from sklearn import metrics
from sklearn import cross_validation
from sklearn.naive_bayes import MultinomialNB
import datetime
import gc # python's garbage collector
# ## 4. Extensive Grid Search
tfidf = sklearn.feature_extraction.text.TfidfVectorizer(
encoding = 'utf-8',
decode_error = 'replace',
strip_accents = 'ascii',
analyzer = 'word',
smooth_idf = True,
max_df = param_dict['max_df'],
sublinear_tf = param_dict['sublinear_tf'],
max_features = param_dict['max_features'],
ngram_range = param_dict['ngram_range'],
tokenizer = get_tokens
)
param_grid = {
'vect__max_df':[0.2, 0.4, 0.5, 0.6, 0.8],
'vect__sublinear_tf':[True, False],
'vect__max_features':[5000, 10000, 20000, 40000, None],
'vect__ngram_range':[(1,1), (1,2), (1,3)],
'clf__alpha':[0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 10.0]
}
clf_pipe = pipeline.Pipeline([
('vect', tfidf),
('clf', MultinomialNB(alpha = param_dict['alpha']))
])
print(clf_pipe.get_params())
auc_scorer = metrics.make_scorer(metrics.roc_auc_score, greater_is_better=True)
#grs = grid_search.GridSearchCV(clf_pipe,
# param_grid,
# n_jobs=-1,
# verbose=1,
# scoring=auc_scorer,
# cv=5
#)
#
#grs.fit(data_train, y_train)
#
#print("Best score: %0.3f" % grs.best_score_)
#print("Best parameters set:")
#best_param = grs.best_estimator_.get_params()
#for param_name in sorted(param_grid.keys()):
# print("\t%s: %r" % (param_name, best_param[param_name]))
cv = cross_validation.StratifiedKFold(y_train, n_folds=5)
auc_res = 0
for i, (train_inx, test_inx) in enumerate(cv):
model = clf_pipe.fit(data_train[train_inx], y_train[train_inx])
pred = model.predict_proba(data_train[test_inx])
fpr, tpr, thresh = metrics.roc_curve(y_train[test_inx], pred[:, 1])
auc_res += metrics.auc(fpr, tpr)
current_time = datetime.datetime.now().time().isoformat()
print("Alpha = %.2f ----> AUC = %.4f (%s)" %(param, auc_res/len(cv), current_time))
|
maxentile/msm-learn
|
refs/heads/master
|
projects/metric-learning/KPCA Inverse Transform.py
|
1
|
# coding: utf-8
# In[1]:
from sklearn.decomposition import KernelPCA
# In[2]:
import matplotlib.pyplot as plt
# In[3]:
get_ipython().magic(u'matplotlib inline')
# In[4]:
from sklearn.datasets import load_digits
X,Y = load_digits().data,load_digits().target
# In[7]:
kpca = KernelPCA(gamma=0.1,kernel='rbf',fit_inverse_transform=True)
X_ = kpca.fit_transform(X)
# In[8]:
plt.scatter(X_[:,0],X_[:,1],c=Y,cmap='rainbow',linewidths=0,s=4)
# In[ ]:
|
lllucius/climacast
|
refs/heads/master
|
requests/packages/chardet/hebrewprober.py
|
2928
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contains special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about
# which one it is is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letter scores it maintains and both
# model probers' scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
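#
# A minimal wiring sketch (illustrative only; the SBCharSetProber class and
# the Win1255 Hebrew language model names are assumed from this package's
# sbcsgroupprober/langhebrewmodel modules and may differ slightly):
#
#   hebrew_prober = HebrewProber()
#   logical_prober = SBCharSetProber(Win1255HebrewModel, False, hebrew_prober)
#   visual_prober = SBCharSetProber(Win1255HebrewModel, True, hebrew_prober)
#   hebrew_prober.set_model_probers(logical_prober, visual_prober)
#
# After data has been fed to the model probers, asking for the charset name
# ultimately calls HebrewProber.get_charset_name(), which answers either
# "windows-1255" or "ISO-8859-8".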
# windows-1255 / ISO-8859-8 code points of interest
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mLogicalProber = None
self._mVisualProber = None
self.reset()
def reset(self):
self._mFinalCharLogicalScore = 0
self._mFinalCharVisualScore = 0
# The two last characters seen in the previous buffer,
# mPrev and mBeforePrev are initialized to space in order to simulate
# a word delimiter at the beginning of the data
self._mPrev = ' '
self._mBeforePrev = ' '
# These probers are owned by the group prober.
def set_model_probers(self, logicalProber, visualProber):
self._mLogicalProber = logicalProber
self._mVisualProber = visualProber
def is_final(self, c):
return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
FINAL_TSADI]
def is_non_final(self, c):
# The normal Tsadi is not a good Non-Final letter due to words like
# 'lechotet' (to chat) containing an apostrophe after the tsadi. This
# apostrophe is converted to a space in FilterWithoutEnglishLetters
# causing the Non-Final tsadi to appear at an end of a word even
# though this is not the case in the original text.
# The letters Pe and Kaf rarely display a related behavior of not being
# a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak'
# for example legally end with a Non-Final Pe or Kaf. However, the
# benefit of these letters as Non-Final letters outweighs the damage
# since these words are quite rare.
return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE]
def feed(self, aBuf):
# Final letter analysis for logical-visual decision.
# Look for evidence that the received buffer is either logical Hebrew
# or visual Hebrew.
# The following cases are checked:
# 1) A word longer than 1 letter, ending with a final letter. This is
# an indication that the text is laid out "naturally" since the
# final letter really appears at the end. +1 for logical score.
# 2) A word longer than 1 letter, ending with a Non-Final letter. In
# normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
# should not end with the Non-Final form of that letter. Exceptions
# to this rule are mentioned above in isNonFinal(). This is an
# indication that the text is laid out backwards. +1 for visual
# score
# 3) A word longer than 1 letter, starting with a final letter. Final
# letters should not appear at the beginning of a word. This is an
# indication that the text is laid out backwards. +1 for visual
# score.
#
# The visual score and logical score are accumulated throughout the
# text and are finally checked against each other in GetCharSetName().
# No checking for final letters in the middle of words is done since
# that case is not an indication for either Logical or Visual text.
#
# We automatically filter out all 7-bit characters (replace them with
# spaces) so the word boundary detection works properly. [MAP]
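        # A tiny worked example (illustrative): in the windows-1255 byte
        # sequence [0xf9, 0xec, 0xe5, 0xed, 0x20] ("shalom" followed by a
        # space), the last letter before the space is FINAL_MEM (0xed), so
        # case (1) applies and the logical score is incremented by one.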
if self.get_state() == eNotMe:
# Both model probers say it's not them. No reason to continue.
return eNotMe
aBuf = self.filter_high_bit_only(aBuf)
for cur in aBuf:
if cur == ' ':
# We stand on a space - a word just ended
if self._mBeforePrev != ' ':
# next-to-last char was not a space so self._mPrev is not a
# 1 letter word
if self.is_final(self._mPrev):
# case (1) [-2:not space][-1:final letter][cur:space]
self._mFinalCharLogicalScore += 1
elif self.is_non_final(self._mPrev):
# case (2) [-2:not space][-1:Non-Final letter][
# cur:space]
self._mFinalCharVisualScore += 1
else:
# Not standing on a space
if ((self._mBeforePrev == ' ') and
(self.is_final(self._mPrev)) and (cur != ' ')):
# case (3) [-2:space][-1:final letter][cur:not space]
self._mFinalCharVisualScore += 1
self._mBeforePrev = self._mPrev
self._mPrev = cur
# Forever detecting, till the end or until both model probers return
# eNotMe (handled above)
return eDetecting
def get_charset_name(self):
# Make the decision: is it Logical or Visual?
# If the final letter score distance is dominant enough, rely on it.
finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
if finalsub >= MIN_FINAL_CHAR_DISTANCE:
return LOGICAL_HEBREW_NAME
if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
return VISUAL_HEBREW_NAME
# It's not dominant enough, try to rely on the model scores instead.
modelsub = (self._mLogicalProber.get_confidence()
- self._mVisualProber.get_confidence())
if modelsub > MIN_MODEL_DISTANCE:
return LOGICAL_HEBREW_NAME
if modelsub < -MIN_MODEL_DISTANCE:
return VISUAL_HEBREW_NAME
# Still no good, back to final letter distance, maybe it'll save the
# day.
if finalsub < 0.0:
return VISUAL_HEBREW_NAME
# (finalsub > 0 - Logical) or (don't know what to do) default to
# Logical.
return LOGICAL_HEBREW_NAME
def get_state(self):
# Remain active as long as any of the model probers are active.
if (self._mLogicalProber.get_state() == eNotMe) and \
(self._mVisualProber.get_state() == eNotMe):
return eNotMe
return eDetecting
|
sharbison3/python-docs-samples
|
refs/heads/master
|
appengine/standard/ndb/modeling/relation_model_models_test.py
|
8
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test classes for code snippet for modeling article."""
from google.appengine.ext import ndb
import relation_model_models as models
def test_relationship(testbed):
# Creates 1 contact and 2 companies
addressbook_key = ndb.Key('AddressBook', 'tmatsuo')
mary = models.Contact(parent=addressbook_key, name='Mary')
mary.put()
google = models.Company(name='Google')
google.put()
candit = models.Company(name='Candit')
candit.put()
# first google hires Mary
models.ContactCompany(parent=addressbook_key,
contact=mary.key,
company=google.key,
title='engineer').put()
# then another company named 'candit' hires Mary too
models.ContactCompany(parent=addressbook_key,
contact=mary.key,
company=candit.key,
title='president').put()
# get the list of companies that Mary belongs to
assert len(mary.companies) == 2
|
haveal/googleads-python-lib
|
refs/heads/master
|
examples/dfp/v201505/creative_template_service/__init__.py
|
618
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
lisa-lab/pylearn2
|
refs/heads/master
|
pylearn2/datasets/tests/test_transformer_dataset.py
|
44
|
"""
Unit tests for ../transformer_dataset.py
"""
import os
import pylearn2
from pylearn2.blocks import Block
from pylearn2.datasets.csv_dataset import CSVDataset
from pylearn2.datasets.transformer_dataset import TransformerDataset
def test_transformer_iterator():
"""
Tests whether TransformerIterator is iterable
"""
test_path = os.path.join(pylearn2.__path__[0],
'datasets', 'tests', 'test.csv')
raw = CSVDataset(path=test_path, expect_headers=False)
block = Block()
dataset = TransformerDataset(raw, block)
iterator = dataset.iterator('shuffled_sequential', 3)
try:
iter(iterator)
except TypeError:
assert False, "TransformerIterator isn't iterable"
|
dc3-plaso/plaso
|
refs/heads/master
|
plaso/cli/helpers/windows_services_analysis.py
|
1
|
# -*- coding: utf-8 -*-
"""The Windows Services analysis plugin CLI arguments helper."""
from plaso.lib import errors
from plaso.cli.helpers import interface
from plaso.cli.helpers import manager
from plaso.analysis import windows_services
class WindowsServicesAnalysisArgumentsHelper(interface.ArgumentsHelper):
"""Windows Services analysis plugin CLI arguments helper."""
NAME = u'windows_services'
CATEGORY = u'analysis'
DESCRIPTION = u'Argument helper for the Windows Services analysis plugin.'
_DEFAULT_OUTPUT = u'text'
@classmethod
def AddArguments(cls, argument_group):
"""Adds command line arguments the helper supports to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group.
"""
argument_group.add_argument(
u'--windows-services-output', dest=u'windows_services_output',
type=str, action=u'store', default=cls._DEFAULT_OUTPUT,
choices=[u'text', u'yaml'], help=(
u'Specify how the results should be displayed. Options are text '
u'and yaml.'))
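  # Illustrative usage sketch (hypothetical command line): once this helper
  # is registered, a tool built on these helpers accepts
  #   --windows-services-output yaml
  # and ParseOptions() below hands the chosen format to the analysis plugin.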
@classmethod
def ParseOptions(cls, options, analysis_plugin):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
analysis_plugin (WindowsServicePlugin): analysis plugin to configure.
Raises:
BadConfigObject: when the output module object is of the wrong type.
"""
if not isinstance(
analysis_plugin, windows_services.WindowsServicesAnalysisPlugin):
raise errors.BadConfigObject((
u'Analysis plugin is not an instance of '
u'WindowsServicesAnalysisPlugin'))
output_format = cls._ParseStringOption(
options, u'windows_services_output', default_value=cls._DEFAULT_OUTPUT)
analysis_plugin.SetOutputFormat(output_format)
manager.ArgumentHelperManager.RegisterHelper(
WindowsServicesAnalysisArgumentsHelper)
|
cloudant/bigcouch
|
refs/heads/master
|
couchjs/scons/scons-local-2.0.1/SCons/Environment.py
|
61
|
"""SCons.Environment
Base class for construction Environments. These are
the primary objects used to communicate dependency and
construction information to the build engine.
Keyword arguments supplied when the construction Environment
is created are construction variables used to initialize the
Environment.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Environment.py 5134 2010/08/16 23:02:40 bdeegan"
import copy
import os
import sys
import re
import shlex
from collections import UserDict
import SCons.Action
import SCons.Builder
from SCons.Debug import logInstanceCreation
import SCons.Defaults
import SCons.Errors
import SCons.Memoize
import SCons.Node
import SCons.Node.Alias
import SCons.Node.FS
import SCons.Node.Python
import SCons.Platform
import SCons.SConf
import SCons.SConsign
import SCons.Subst
import SCons.Tool
import SCons.Util
import SCons.Warnings
class _Null(object):
pass
_null = _Null
_warn_copy_deprecated = True
_warn_source_signatures_deprecated = True
_warn_target_signatures_deprecated = True
CleanTargets = {}
CalculatorArgs = {}
semi_deepcopy = SCons.Util.semi_deepcopy
# Pull UserError into the global name space for the benefit of
# Environment().SourceSignatures(), which has some import statements
# which seem to mess up its ability to reference SCons directly.
UserError = SCons.Errors.UserError
def alias_builder(env, target, source):
pass
AliasBuilder = SCons.Builder.Builder(action = alias_builder,
target_factory = SCons.Node.Alias.default_ans.Alias,
source_factory = SCons.Node.FS.Entry,
multi = 1,
is_explicit = None,
name='AliasBuilder')
def apply_tools(env, tools, toolpath):
# Store the toolpath in the Environment.
if toolpath is not None:
env['toolpath'] = toolpath
if not tools:
return
# Filter out null tools from the list.
for tool in [_f for _f in tools if _f]:
if SCons.Util.is_List(tool) or isinstance(tool, tuple):
toolname = tool[0]
toolargs = tool[1] # should be a dict of kw args
tool = env.Tool(toolname, **toolargs)
else:
env.Tool(tool)
# These names are (or will be) controlled by SCons; users should never
# set or override them. This warning can optionally be turned off,
# but scons will still ignore the illegal variable names even if it's off.
reserved_construction_var_names = [
'CHANGED_SOURCES',
'CHANGED_TARGETS',
'SOURCE',
'SOURCES',
'TARGET',
'TARGETS',
'UNCHANGED_SOURCES',
'UNCHANGED_TARGETS',
]
future_reserved_construction_var_names = [
#'HOST_OS',
#'HOST_ARCH',
#'HOST_CPU',
]
def copy_non_reserved_keywords(dict):
result = semi_deepcopy(dict)
for k in result.keys():
if k in reserved_construction_var_names:
msg = "Ignoring attempt to set reserved variable `$%s'"
SCons.Warnings.warn(SCons.Warnings.ReservedVariableWarning, msg % k)
del result[k]
return result
def _set_reserved(env, key, value):
msg = "Ignoring attempt to set reserved variable `$%s'"
SCons.Warnings.warn(SCons.Warnings.ReservedVariableWarning, msg % key)
def _set_future_reserved(env, key, value):
env._dict[key] = value
msg = "`$%s' will be reserved in a future release and setting it will become ignored"
SCons.Warnings.warn(SCons.Warnings.FutureReservedVariableWarning, msg % key)
def _set_BUILDERS(env, key, value):
try:
bd = env._dict[key]
for k in bd.keys():
del bd[k]
except KeyError:
        # No existing BUILDERS dictionary yet; start from an empty one.
        bd = BuilderDict({}, env)
env._dict[key] = bd
for k, v in value.items():
if not SCons.Builder.is_a_Builder(v):
raise SCons.Errors.UserError('%s is not a Builder.' % repr(v))
bd.update(value)
def _del_SCANNERS(env, key):
del env._dict[key]
env.scanner_map_delete()
def _set_SCANNERS(env, key, value):
env._dict[key] = value
env.scanner_map_delete()
def _delete_duplicates(l, keep_last):
"""Delete duplicates from a sequence, keeping the first or last."""
seen={}
result=[]
if keep_last: # reverse in & out, then keep first
l.reverse()
for i in l:
try:
if i not in seen:
result.append(i)
seen[i]=1
except TypeError:
# probably unhashable. Just keep it.
result.append(i)
if keep_last:
result.reverse()
return result
# The following is partly based on code in a comment added by Peter
# Shannon at the following page (there called the "transplant" class):
#
# ASPN : Python Cookbook : Dynamically added methods to a class
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/81732
#
# We had independently been using the idiom as BuilderWrapper, but
# factoring out the common parts into this base class, and making
# BuilderWrapper a subclass that overrides __call__() to enforce specific
# Builder calling conventions, simplified some of our higher-layer code.
class MethodWrapper(object):
"""
A generic Wrapper class that associates a method (which can
actually be any callable) with an object. As part of creating this
    MethodWrapper object, an attribute with the specified name (by default,
the name of the supplied method) is added to the underlying object.
When that new "method" is called, our __call__() method adds the
object as the first argument, simulating the Python behavior of
supplying "self" on method calls.
We hang on to the name by which the method was added to the underlying
base class so that we can provide a method to "clone" ourselves onto
a new underlying object being copied (without which we wouldn't need
to save that info).
"""
def __init__(self, object, method, name=None):
if name is None:
name = method.__name__
self.object = object
self.method = method
self.name = name
setattr(self.object, name, self)
def __call__(self, *args, **kwargs):
nargs = (self.object,) + args
return self.method(*nargs, **kwargs)
def clone(self, new_object):
"""
Returns an object that re-binds the underlying "method" to
the specified new object.
"""
return self.__class__(new_object, self.method, self.name)
class BuilderWrapper(MethodWrapper):
"""
    A MethodWrapper subclass that associates an environment with
a Builder.
This mainly exists to wrap the __call__() function so that all calls
to Builders can have their argument lists massaged in the same way
(treat a lone argument as the source, treat two arguments as target
then source, make sure both target and source are lists) without
having to have cut-and-paste code to do it.
As a bit of obsessive backwards compatibility, we also intercept
attempts to get or set the "env" or "builder" attributes, which were
the names we used before we put the common functionality into the
MethodWrapper base class. We'll keep this around for a while in case
people shipped Tool modules that reached into the wrapper (like the
    Tool/qt.py module does, or did). There shouldn't be a lot of attribute
fetching or setting on these, so a little extra work shouldn't hurt.
"""
def __call__(self, target=None, source=_null, *args, **kw):
if source is _null:
source = target
target = None
if target is not None and not SCons.Util.is_List(target):
target = [target]
if source is not None and not SCons.Util.is_List(source):
source = [source]
return MethodWrapper.__call__(self, target, source, *args, **kw)
def __repr__(self):
return '<BuilderWrapper %s>' % repr(self.name)
def __str__(self):
return self.__repr__()
def __getattr__(self, name):
if name == 'env':
return self.object
elif name == 'builder':
return self.method
else:
raise AttributeError(name)
def __setattr__(self, name, value):
if name == 'env':
self.object = value
elif name == 'builder':
self.method = value
else:
self.__dict__[name] = value
# This allows a Builder to be executed directly
# through the Environment to which it's attached.
# In practice, we shouldn't need this, because
# builders actually get executed through a Node.
# But we do have a unit test for this, and can't
# yet rule out that it would be useful in the
# future, so leave it for now.
#def execute(self, **kw):
# kw['env'] = self.env
# self.builder.execute(**kw)
class BuilderDict(UserDict):
"""This is a dictionary-like class used by an Environment to hold
the Builders. We need to do this because every time someone changes
the Builders in the Environment's BUILDERS dictionary, we must
update the Environment's attributes."""
def __init__(self, dict, env):
# Set self.env before calling the superclass initialization,
# because it will end up calling our other methods, which will
# need to point the values in this dictionary to self.env.
self.env = env
UserDict.__init__(self, dict)
def __semi_deepcopy__(self):
return self.__class__(self.data, self.env)
def __setitem__(self, item, val):
try:
method = getattr(self.env, item).method
except AttributeError:
pass
else:
self.env.RemoveMethod(method)
UserDict.__setitem__(self, item, val)
BuilderWrapper(self.env, val, item)
def __delitem__(self, item):
UserDict.__delitem__(self, item)
delattr(self.env, item)
def update(self, dict):
for i, v in dict.items():
self.__setitem__(i, v)
_is_valid_var = re.compile(r'[_a-zA-Z]\w*$')
def is_valid_construction_var(varstr):
"""Return if the specified string is a legitimate construction
variable.
"""
return _is_valid_var.match(varstr)
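# Illustrative examples: is_valid_construction_var('CCFLAGS') and
# is_valid_construction_var('_private1') match, while '2BAD' and 'A-B'
# are rejected because they don't fit the [_a-zA-Z]\w* pattern.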
class SubstitutionEnvironment(object):
"""Base class for different flavors of construction environments.
    This class contains a minimal set of methods that handle construction
variable expansion and conversion of strings to Nodes, which may or
may not be actually useful as a stand-alone class. Which methods
ended up in this class is pretty arbitrary right now. They're
basically the ones which we've empirically determined are common to
the different construction environment subclasses, and most of the
others that use or touch the underlying dictionary of construction
variables.
Eventually, this class should contain all the methods that we
determine are necessary for a "minimal" interface to the build engine.
A full "native Python" SCons environment has gotten pretty heavyweight
with all of the methods and Tools and construction variables we've
jammed in there, so it would be nice to have a lighter weight
alternative for interfaces that don't need all of the bells and
whistles. (At some point, we'll also probably rename this class
"Base," since that more reflects what we want this class to become,
but because we've released comments that tell people to subclass
Environment.Base to create their own flavors of construction
environment, we'll save that for a future refactoring when this
class actually becomes useful.)
"""
if SCons.Memoize.use_memoizer:
__metaclass__ = SCons.Memoize.Memoized_Metaclass
def __init__(self, **kw):
"""Initialization of an underlying SubstitutionEnvironment class.
"""
if __debug__: logInstanceCreation(self, 'Environment.SubstitutionEnvironment')
self.fs = SCons.Node.FS.get_default_fs()
self.ans = SCons.Node.Alias.default_ans
self.lookup_list = SCons.Node.arg2nodes_lookups
self._dict = kw.copy()
self._init_special()
self.added_methods = []
#self._memo = {}
def _init_special(self):
"""Initial the dispatch tables for special handling of
special construction variables."""
self._special_del = {}
self._special_del['SCANNERS'] = _del_SCANNERS
self._special_set = {}
for key in reserved_construction_var_names:
self._special_set[key] = _set_reserved
for key in future_reserved_construction_var_names:
self._special_set[key] = _set_future_reserved
self._special_set['BUILDERS'] = _set_BUILDERS
self._special_set['SCANNERS'] = _set_SCANNERS
# Freeze the keys of self._special_set in a list for use by
# methods that need to check. (Empirically, list scanning has
# gotten better than dict.has_key() in Python 2.5.)
self._special_set_keys = list(self._special_set.keys())
def __cmp__(self, other):
return cmp(self._dict, other._dict)
def __delitem__(self, key):
special = self._special_del.get(key)
if special:
special(self, key)
else:
del self._dict[key]
def __getitem__(self, key):
return self._dict[key]
def __setitem__(self, key, value):
# This is heavily used. This implementation is the best we have
# according to the timings in bench/env.__setitem__.py.
#
# The "key in self._special_set_keys" test here seems to perform
# pretty well for the number of keys we have. A hard-coded
# list works a little better in Python 2.5, but that has the
# disadvantage of maybe getting out of sync if we ever add more
# variable names. Using self._special_set.has_key() works a
# little better in Python 2.4, but is worse than this test.
# So right now it seems like a good trade-off, but feel free to
# revisit this with bench/env.__setitem__.py as needed (and
# as newer versions of Python come out).
if key in self._special_set_keys:
self._special_set[key](self, key, value)
else:
# If we already have the entry, then it's obviously a valid
# key and we don't need to check. If we do check, using a
# global, pre-compiled regular expression directly is more
# efficient than calling another function or a method.
if key not in self._dict \
and not _is_valid_var.match(key):
raise SCons.Errors.UserError("Illegal construction variable `%s'" % key)
self._dict[key] = value
def get(self, key, default=None):
"""Emulates the get() method of dictionaries."""
return self._dict.get(key, default)
def has_key(self, key):
return key in self._dict
def __contains__(self, key):
return self._dict.__contains__(key)
def items(self):
return list(self._dict.items())
def arg2nodes(self, args, node_factory=_null, lookup_list=_null, **kw):
if node_factory is _null:
node_factory = self.fs.File
if lookup_list is _null:
lookup_list = self.lookup_list
if not args:
return []
args = SCons.Util.flatten(args)
nodes = []
for v in args:
if SCons.Util.is_String(v):
n = None
for l in lookup_list:
n = l(v)
if n is not None:
break
if n is not None:
if SCons.Util.is_String(n):
# n = self.subst(n, raw=1, **kw)
kw['raw'] = 1
n = self.subst(n, **kw)
if node_factory:
n = node_factory(n)
if SCons.Util.is_List(n):
nodes.extend(n)
else:
nodes.append(n)
elif node_factory:
# v = node_factory(self.subst(v, raw=1, **kw))
kw['raw'] = 1
v = node_factory(self.subst(v, **kw))
if SCons.Util.is_List(v):
nodes.extend(v)
else:
nodes.append(v)
else:
nodes.append(v)
return nodes
def gvars(self):
return self._dict
def lvars(self):
return {}
def subst(self, string, raw=0, target=None, source=None, conv=None, executor=None):
"""Recursively interpolates construction variables from the
Environment into the specified string, returning the expanded
result. Construction variables are specified by a $ prefix
in the string and begin with an initial underscore or
alphabetic character followed by any number of underscores
or alphanumeric characters. The construction variable names
may be surrounded by curly braces to separate the name from
trailing characters.
"""
gvars = self.gvars()
lvars = self.lvars()
lvars['__env__'] = self
if executor:
lvars.update(executor.get_lvars())
return SCons.Subst.scons_subst(string, self, raw, target, source, gvars, lvars, conv)
def subst_kw(self, kw, raw=0, target=None, source=None):
nkw = {}
for k, v in kw.items():
k = self.subst(k, raw, target, source)
if SCons.Util.is_String(v):
v = self.subst(v, raw, target, source)
nkw[k] = v
return nkw
def subst_list(self, string, raw=0, target=None, source=None, conv=None, executor=None):
"""Calls through to SCons.Subst.scons_subst_list(). See
the documentation for that function."""
gvars = self.gvars()
lvars = self.lvars()
lvars['__env__'] = self
if executor:
lvars.update(executor.get_lvars())
return SCons.Subst.scons_subst_list(string, self, raw, target, source, gvars, lvars, conv)
def subst_path(self, path, target=None, source=None):
"""Substitute a path list, turning EntryProxies into Nodes
and leaving Nodes (and other objects) as-is."""
if not SCons.Util.is_List(path):
path = [path]
def s(obj):
"""This is the "string conversion" routine that we have our
substitutions use to return Nodes, not strings. This relies
on the fact that an EntryProxy object has a get() method that
returns the underlying Node that it wraps, which is a bit of
architectural dependence that we might need to break or modify
in the future in response to additional requirements."""
try:
get = obj.get
except AttributeError:
obj = SCons.Util.to_String_for_subst(obj)
else:
obj = get()
return obj
r = []
for p in path:
if SCons.Util.is_String(p):
p = self.subst(p, target=target, source=source, conv=s)
if SCons.Util.is_List(p):
if len(p) == 1:
p = p[0]
else:
# We have an object plus a string, or multiple
# objects that we need to smush together. No choice
# but to make them into a string.
p = ''.join(map(SCons.Util.to_String_for_subst, p))
else:
p = s(p)
r.append(p)
return r
subst_target_source = subst
def backtick(self, command):
import subprocess
# common arguments
kw = { 'stdin' : 'devnull',
'stdout' : subprocess.PIPE,
'stderr' : subprocess.PIPE,
'universal_newlines' : True,
}
# if the command is a list, assume it's been quoted
        # otherwise force a shell
if not SCons.Util.is_List(command): kw['shell'] = True
# run constructed command
p = SCons.Action._subproc(self, command, **kw)
out,err = p.communicate()
status = p.wait()
if err:
sys.stderr.write(unicode(err))
if status:
raise OSError("'%s' exited %d" % (command, status))
return out
def AddMethod(self, function, name=None):
"""
Adds the specified function as a method of this construction
environment with the specified name. If the name is omitted,
the default name is the name of the function itself.
"""
method = MethodWrapper(self, function, name)
self.added_methods.append(method)
def RemoveMethod(self, function):
"""
Removes the specified function's MethodWrapper from the
added_methods list, so we don't re-bind it when making a clone.
"""
self.added_methods = [dm for dm in self.added_methods if not dm.method is function]
def Override(self, overrides):
"""
        Produce a modified environment whose variables are overridden by
        the overrides dictionary. "overrides" is a dictionary that
will override the variables of this environment.
This function is much more efficient than Clone() or creating
a new Environment because it doesn't copy the construction
environment dictionary, it just wraps the underlying construction
environment, and doesn't even create a wrapper object if there
are no overrides.
"""
if not overrides: return self
o = copy_non_reserved_keywords(overrides)
if not o: return self
overrides = {}
merges = None
for key, value in o.items():
if key == 'parse_flags':
merges = value
else:
overrides[key] = SCons.Subst.scons_subst_once(value, self, key)
env = OverrideEnvironment(self, overrides)
if merges: env.MergeFlags(merges)
return env
def ParseFlags(self, *flags):
"""
Parse the set of flags and return a dict with the flags placed
in the appropriate entry. The flags are treated as a typical
set of command-line flags for a GNU-like toolchain and used to
populate the entries in the dict immediately below. If one of
the flag strings begins with a bang (exclamation mark), it is
assumed to be a command and the rest of the string is executed;
the result of that evaluation is then added to the dict.
"""
dict = {
'ASFLAGS' : SCons.Util.CLVar(''),
'CFLAGS' : SCons.Util.CLVar(''),
'CCFLAGS' : SCons.Util.CLVar(''),
'CPPDEFINES' : [],
'CPPFLAGS' : SCons.Util.CLVar(''),
'CPPPATH' : [],
'FRAMEWORKPATH' : SCons.Util.CLVar(''),
'FRAMEWORKS' : SCons.Util.CLVar(''),
'LIBPATH' : [],
'LIBS' : [],
'LINKFLAGS' : SCons.Util.CLVar(''),
'RPATH' : [],
}
def do_parse(arg):
# if arg is a sequence, recurse with each element
if not arg:
return
if not SCons.Util.is_String(arg):
for t in arg: do_parse(t)
return
# if arg is a command, execute it
if arg[0] == '!':
arg = self.backtick(arg[1:])
# utility function to deal with -D option
def append_define(name, dict = dict):
t = name.split('=')
if len(t) == 1:
dict['CPPDEFINES'].append(name)
else:
dict['CPPDEFINES'].append([t[0], '='.join(t[1:])])
# Loop through the flags and add them to the appropriate option.
# This tries to strike a balance between checking for all possible
# flags and keeping the logic to a finite size, so it doesn't
            # check for some that don't occur often. In particular, if the
# flag is not known to occur in a config script and there's a way
# of passing the flag to the right place (by wrapping it in a -W
# flag, for example) we don't check for it. Note that most
# preprocessor options are not handled, since unhandled options
# are placed in CCFLAGS, so unless the preprocessor is invoked
# separately, these flags will still get to the preprocessor.
# Other options not currently handled:
            # -iquote dir (preprocessor search path)
# -u symbol (linker undefined symbol)
# -s (linker strip files)
# -static* (linker static binding)
# -shared* (linker dynamic binding)
# -symbolic (linker global binding)
# -R dir (deprecated linker rpath)
# IBM compilers may also accept -qframeworkdir=foo
params = shlex.split(arg)
append_next_arg_to = None # for multi-word args
for arg in params:
if append_next_arg_to:
if append_next_arg_to == 'CPPDEFINES':
append_define(arg)
elif append_next_arg_to == '-include':
t = ('-include', self.fs.File(arg))
dict['CCFLAGS'].append(t)
elif append_next_arg_to == '-isysroot':
t = ('-isysroot', arg)
dict['CCFLAGS'].append(t)
dict['LINKFLAGS'].append(t)
elif append_next_arg_to == '-arch':
t = ('-arch', arg)
dict['CCFLAGS'].append(t)
dict['LINKFLAGS'].append(t)
else:
dict[append_next_arg_to].append(arg)
append_next_arg_to = None
elif not arg[0] in ['-', '+']:
dict['LIBS'].append(self.fs.File(arg))
elif arg[:2] == '-L':
if arg[2:]:
dict['LIBPATH'].append(arg[2:])
else:
append_next_arg_to = 'LIBPATH'
elif arg[:2] == '-l':
if arg[2:]:
dict['LIBS'].append(arg[2:])
else:
append_next_arg_to = 'LIBS'
elif arg[:2] == '-I':
if arg[2:]:
dict['CPPPATH'].append(arg[2:])
else:
append_next_arg_to = 'CPPPATH'
elif arg[:4] == '-Wa,':
dict['ASFLAGS'].append(arg[4:])
dict['CCFLAGS'].append(arg)
elif arg[:4] == '-Wl,':
if arg[:11] == '-Wl,-rpath=':
dict['RPATH'].append(arg[11:])
elif arg[:7] == '-Wl,-R,':
dict['RPATH'].append(arg[7:])
elif arg[:6] == '-Wl,-R':
dict['RPATH'].append(arg[6:])
else:
dict['LINKFLAGS'].append(arg)
elif arg[:4] == '-Wp,':
dict['CPPFLAGS'].append(arg)
elif arg[:2] == '-D':
if arg[2:]:
append_define(arg[2:])
else:
append_next_arg_to = 'CPPDEFINES'
elif arg == '-framework':
append_next_arg_to = 'FRAMEWORKS'
elif arg[:14] == '-frameworkdir=':
dict['FRAMEWORKPATH'].append(arg[14:])
elif arg[:2] == '-F':
if arg[2:]:
dict['FRAMEWORKPATH'].append(arg[2:])
else:
append_next_arg_to = 'FRAMEWORKPATH'
elif arg == '-mno-cygwin':
dict['CCFLAGS'].append(arg)
dict['LINKFLAGS'].append(arg)
elif arg == '-mwindows':
dict['LINKFLAGS'].append(arg)
elif arg == '-pthread':
dict['CCFLAGS'].append(arg)
dict['LINKFLAGS'].append(arg)
elif arg[:5] == '-std=':
dict['CFLAGS'].append(arg) # C only
elif arg[0] == '+':
dict['CCFLAGS'].append(arg)
dict['LINKFLAGS'].append(arg)
elif arg in ['-include', '-isysroot', '-arch']:
append_next_arg_to = arg
else:
dict['CCFLAGS'].append(arg)
for arg in flags:
do_parse(arg)
return dict
def MergeFlags(self, args, unique=1, dict=None):
"""
Merge the dict in args into the construction variables of this
env, or the passed-in dict. If args is not a dict, it is
converted into a dict using ParseFlags. If unique is not set,
the flags are appended rather than merged.
"""
if dict is None:
dict = self
if not SCons.Util.is_Dict(args):
args = self.ParseFlags(args)
if not unique:
self.Append(**args)
return self
for key, value in args.items():
if not value:
continue
try:
orig = self[key]
except KeyError:
orig = value
else:
if not orig:
orig = value
elif value:
# Add orig and value. The logic here was lifted from
# part of env.Append() (see there for a lot of comments
# about the order in which things are tried) and is
# used mainly to handle coercion of strings to CLVar to
# "do the right thing" given (e.g.) an original CCFLAGS
# string variable like '-pipe -Wall'.
try:
orig = orig + value
except (KeyError, TypeError):
try:
add_to_orig = orig.append
except AttributeError:
value.insert(0, orig)
orig = value
else:
add_to_orig(value)
t = []
if key[-4:] == 'PATH':
                ### keep left-most occurrence
for v in orig:
if v not in t:
t.append(v)
else:
                ### keep right-most occurrence
orig.reverse()
for v in orig:
if v not in t:
t.insert(0, v)
self[key] = t
return self
# def MergeShellPaths(self, args, prepend=1):
# """
# Merge the dict in args into the shell environment in env['ENV'].
# Shell path elements are appended or prepended according to prepend.
# Uses Pre/AppendENVPath, so it always appends or prepends uniquely.
# Example: env.MergeShellPaths({'LIBPATH': '/usr/local/lib'})
# prepends /usr/local/lib to env['ENV']['LIBPATH'].
# """
# for pathname, pathval in args.items():
# if not pathval:
# continue
# if prepend:
# self.PrependENVPath(pathname, pathval)
# else:
# self.AppendENVPath(pathname, pathval)
def default_decide_source(dependency, target, prev_ni):
f = SCons.Defaults.DefaultEnvironment().decide_source
return f(dependency, target, prev_ni)
def default_decide_target(dependency, target, prev_ni):
f = SCons.Defaults.DefaultEnvironment().decide_target
return f(dependency, target, prev_ni)
def default_copy_from_cache(src, dst):
f = SCons.Defaults.DefaultEnvironment().copy_from_cache
return f(src, dst)
class Base(SubstitutionEnvironment):
"""Base class for "real" construction Environments. These are the
primary objects used to communicate dependency and construction
information to the build engine.
Keyword arguments supplied when the construction Environment
is created are construction variables used to initialize the
Environment.
"""
memoizer_counters = []
#######################################################################
# This is THE class for interacting with the SCons build engine,
# and it contains a lot of stuff, so we're going to try to keep this
# a little organized by grouping the methods.
#######################################################################
#######################################################################
# Methods that make an Environment act like a dictionary. These have
# the expected standard names for Python mapping objects. Note that
# we don't actually make an Environment a subclass of UserDict for
# performance reasons. Note also that we only supply methods for
# dictionary functionality that we actually need and use.
#######################################################################
def __init__(self,
platform=None,
tools=None,
toolpath=None,
variables=None,
parse_flags = None,
**kw):
"""
Initialization of a basic SCons construction environment,
including setting up special construction variables like BUILDER,
PLATFORM, etc., and searching for and applying available Tools.
Note that we do *not* call the underlying base class
        (SubstitutionEnvironment) initialization, because we need to
initialize things in a very specific order that doesn't work
with the much simpler base class initialization.
"""
if __debug__: logInstanceCreation(self, 'Environment.Base')
self._memo = {}
self.fs = SCons.Node.FS.get_default_fs()
self.ans = SCons.Node.Alias.default_ans
self.lookup_list = SCons.Node.arg2nodes_lookups
self._dict = semi_deepcopy(SCons.Defaults.ConstructionEnvironment)
self._init_special()
self.added_methods = []
# We don't use AddMethod, or define these as methods in this
# class, because we *don't* want these functions to be bound
# methods. They need to operate independently so that the
# settings will work properly regardless of whether a given
# target ends up being built with a Base environment or an
# OverrideEnvironment or what have you.
self.decide_target = default_decide_target
self.decide_source = default_decide_source
self.copy_from_cache = default_copy_from_cache
self._dict['BUILDERS'] = BuilderDict(self._dict['BUILDERS'], self)
if platform is None:
platform = self._dict.get('PLATFORM', None)
if platform is None:
platform = SCons.Platform.Platform()
if SCons.Util.is_String(platform):
platform = SCons.Platform.Platform(platform)
self._dict['PLATFORM'] = str(platform)
platform(self)
self._dict['HOST_OS'] = self._dict.get('HOST_OS',None)
self._dict['HOST_ARCH'] = self._dict.get('HOST_ARCH',None)
# Now set defaults for TARGET_{OS|ARCH}
self._dict['TARGET_OS'] = self._dict.get('HOST_OS',None)
self._dict['TARGET_ARCH'] = self._dict.get('HOST_ARCH',None)
# Apply the passed-in and customizable variables to the
# environment before calling the tools, because they may use
# some of them during initialization.
if 'options' in kw:
            # Backwards compatibility: they may still be using the
# old "options" keyword.
variables = kw['options']
del kw['options']
self.Replace(**kw)
keys = list(kw.keys())
if variables:
keys = keys + list(variables.keys())
variables.Update(self)
save = {}
for k in keys:
try:
save[k] = self._dict[k]
except KeyError:
# No value may have been set if they tried to pass in a
# reserved variable name like TARGETS.
pass
SCons.Tool.Initializers(self)
if tools is None:
tools = self._dict.get('TOOLS', None)
if tools is None:
tools = ['default']
apply_tools(self, tools, toolpath)
# Now restore the passed-in and customized variables
# to the environment, since the values the user set explicitly
# should override any values set by the tools.
for key, val in save.items():
self._dict[key] = val
# Finally, apply any flags to be merged in
if parse_flags: self.MergeFlags(parse_flags)
#######################################################################
# Utility methods that are primarily for internal use by SCons.
# These begin with lower-case letters.
#######################################################################
def get_builder(self, name):
"""Fetch the builder with the specified name from the environment.
"""
try:
return self._dict['BUILDERS'][name]
except KeyError:
return None
def get_CacheDir(self):
try:
path = self._CacheDir_path
except AttributeError:
path = SCons.Defaults.DefaultEnvironment()._CacheDir_path
try:
if path == self._last_CacheDir_path:
return self._last_CacheDir
except AttributeError:
pass
cd = SCons.CacheDir.CacheDir(path)
self._last_CacheDir_path = path
self._last_CacheDir = cd
return cd
def get_factory(self, factory, default='File'):
"""Return a factory function for creating Nodes for this
construction environment.
"""
name = default
try:
is_node = issubclass(factory, SCons.Node.FS.Base)
except TypeError:
# The specified factory isn't a Node itself--it's
# most likely None, or possibly a callable.
pass
else:
if is_node:
# The specified factory is a Node (sub)class. Try to
# return the FS method that corresponds to the Node's
# name--that is, we return self.fs.Dir if they want a Dir,
# self.fs.File for a File, etc.
try: name = factory.__name__
except AttributeError: pass
else: factory = None
if not factory:
# They passed us None, or we picked up a name from a specified
# class, so return the FS method. (Note that we *don't*
# use our own self.{Dir,File} methods because that would
# cause env.subst() to be called twice on the file name,
# interfering with files that have $$ in them.)
factory = getattr(self.fs, name)
return factory
memoizer_counters.append(SCons.Memoize.CountValue('_gsm'))
def _gsm(self):
try:
return self._memo['_gsm']
except KeyError:
pass
result = {}
try:
scanners = self._dict['SCANNERS']
except KeyError:
pass
else:
# Reverse the scanner list so that, if multiple scanners
# claim they can scan the same suffix, earlier scanners
# in the list will overwrite later scanners, so that
# the result looks like a "first match" to the user.
if not SCons.Util.is_List(scanners):
scanners = [scanners]
else:
scanners = scanners[:] # copy so reverse() doesn't mod original
scanners.reverse()
for scanner in scanners:
for k in scanner.get_skeys(self):
if k and self['PLATFORM'] == 'win32':
k = k.lower()
result[k] = scanner
self._memo['_gsm'] = result
return result
def get_scanner(self, skey):
"""Find the appropriate scanner given a key (usually a file suffix).
"""
if skey and self['PLATFORM'] == 'win32':
skey = skey.lower()
return self._gsm().get(skey)
def scanner_map_delete(self, kw=None):
"""Delete the cached scanner map (if we need to).
"""
try:
del self._memo['_gsm']
except KeyError:
pass
def _update(self, dict):
"""Update an environment's values directly, bypassing the normal
checks that occur when users try to set items.
"""
self._dict.update(dict)
def get_src_sig_type(self):
try:
return self.src_sig_type
except AttributeError:
t = SCons.Defaults.DefaultEnvironment().src_sig_type
self.src_sig_type = t
return t
def get_tgt_sig_type(self):
try:
return self.tgt_sig_type
except AttributeError:
t = SCons.Defaults.DefaultEnvironment().tgt_sig_type
self.tgt_sig_type = t
return t
#######################################################################
# Public methods for manipulating an Environment. These begin with
# upper-case letters. The essential characteristic of methods in
# this section is that they do *not* have corresponding same-named
# global functions. For example, a stand-alone Append() function
# makes no sense, because Append() is all about appending values to
# an Environment's construction variables.
#######################################################################
def Append(self, **kw):
"""Append values to existing construction variables
in an Environment.
"""
kw = copy_non_reserved_keywords(kw)
for key, val in kw.items():
# It would be easier on the eyes to write this using
# "continue" statements whenever we finish processing an item,
# but Python 1.5.2 apparently doesn't let you use "continue"
# within try:-except: blocks, so we have to nest our code.
try:
orig = self._dict[key]
except KeyError:
# No existing variable in the environment, so just set
# it to the new value.
self._dict[key] = val
else:
try:
# Check if the original looks like a dictionary.
# If it is, we can't just try adding the value because
# dictionaries don't have __add__() methods, and
# things like UserList will incorrectly coerce the
# original dict to a list (which we don't want).
update_dict = orig.update
except AttributeError:
try:
# Most straightforward: just try to add them
# together. This will work in most cases, when the
# original and new values are of compatible types.
self._dict[key] = orig + val
except (KeyError, TypeError):
try:
# Check if the original is a list.
add_to_orig = orig.append
except AttributeError:
# The original isn't a list, but the new
# value is (by process of elimination),
# so insert the original in the new value
# (if there's one to insert) and replace
# the variable with it.
if orig:
val.insert(0, orig)
self._dict[key] = val
else:
# The original is a list, so append the new
# value to it (if there's a value to append).
if val:
add_to_orig(val)
else:
# The original looks like a dictionary, so update it
# based on what we think the value looks like.
if SCons.Util.is_List(val):
for v in val:
orig[v] = None
else:
try:
update_dict(val)
except (AttributeError, TypeError, ValueError):
if SCons.Util.is_Dict(val):
for k, v in val.items():
orig[k] = v
else:
orig[val] = None
self.scanner_map_delete(kw)
# allow Dirs and strings beginning with # for top-relative
# Note this uses the current env's fs (in self).
def _canonicalize(self, path):
if not SCons.Util.is_String(path): # typically a Dir
path = str(path)
if path and path[0] == '#':
path = str(self.fs.Dir(path))
return path
def AppendENVPath(self, name, newpath, envname = 'ENV',
sep = os.pathsep, delete_existing=1):
"""Append path elements to the path 'name' in the 'ENV'
dictionary for this environment. Will only add any particular
path once, and will normpath and normcase all paths to help
assure this. This can also handle the case where the env
variable is a list instead of a string.
If delete_existing is 0, a newpath which is already in the path
will not be moved to the end (it will be left where it is).
"""
orig = ''
if envname in self._dict and name in self._dict[envname]:
orig = self._dict[envname][name]
nv = SCons.Util.AppendPath(orig, newpath, sep, delete_existing,
canonicalize=self._canonicalize)
if envname not in self._dict:
self._dict[envname] = {}
self._dict[envname][name] = nv
def AppendUnique(self, delete_existing=0, **kw):
"""Append values to existing construction variables
in an Environment, if they're not already there.
If delete_existing is 1, removes existing values first, so
values move to end.
"""
kw = copy_non_reserved_keywords(kw)
for key, val in kw.items():
if SCons.Util.is_List(val):
val = _delete_duplicates(val, delete_existing)
if key not in self._dict or self._dict[key] in ('', None):
self._dict[key] = val
elif SCons.Util.is_Dict(self._dict[key]) and \
SCons.Util.is_Dict(val):
self._dict[key].update(val)
elif SCons.Util.is_List(val):
dk = self._dict[key]
if not SCons.Util.is_List(dk):
dk = [dk]
if delete_existing:
dk = [x for x in dk if x not in val]
else:
val = [x for x in val if x not in dk]
self._dict[key] = dk + val
else:
dk = self._dict[key]
if SCons.Util.is_List(dk):
# By elimination, val is not a list. Since dk is a
# list, wrap val in a list first.
if delete_existing:
dk = [x for x in dk if x not in val]
self._dict[key] = dk + [val]
else:
if not val in dk:
self._dict[key] = dk + [val]
else:
if delete_existing:
dk = [x for x in dk if x not in val]
self._dict[key] = dk + val
self.scanner_map_delete(kw)
def Clone(self, tools=[], toolpath=None, parse_flags = None, **kw):
"""Return a copy of a construction Environment. The
copy is like a Python "deep copy"--that is, independent
        copies are made recursively of each object--except that
a reference is copied when an object is not deep-copyable
(like a function). There are no references to any mutable
objects in the original Environment.
"""
clone = copy.copy(self)
clone._dict = semi_deepcopy(self._dict)
try:
cbd = clone._dict['BUILDERS']
except KeyError:
pass
else:
clone._dict['BUILDERS'] = BuilderDict(cbd, clone)
# Check the methods added via AddMethod() and re-bind them to
# the cloned environment. Only do this if the attribute hasn't
# been overwritten by the user explicitly and still points to
# the added method.
clone.added_methods = []
for mw in self.added_methods:
if mw == getattr(self, mw.name):
clone.added_methods.append(mw.clone(clone))
clone._memo = {}
# Apply passed-in variables before the tools
# so the tools can use the new variables
kw = copy_non_reserved_keywords(kw)
new = {}
for key, value in kw.items():
new[key] = SCons.Subst.scons_subst_once(value, self, key)
clone.Replace(**new)
apply_tools(clone, tools, toolpath)
# apply them again in case the tools overwrote them
clone.Replace(**new)
# Finally, apply any flags to be merged in
if parse_flags: clone.MergeFlags(parse_flags)
if __debug__: logInstanceCreation(self, 'Environment.EnvironmentClone')
return clone
def Copy(self, *args, **kw):
global _warn_copy_deprecated
if _warn_copy_deprecated:
msg = "The env.Copy() method is deprecated; use the env.Clone() method instead."
SCons.Warnings.warn(SCons.Warnings.DeprecatedCopyWarning, msg)
_warn_copy_deprecated = False
return self.Clone(*args, **kw)
def _changed_build(self, dependency, target, prev_ni):
if dependency.changed_state(target, prev_ni):
return 1
return self.decide_source(dependency, target, prev_ni)
def _changed_content(self, dependency, target, prev_ni):
return dependency.changed_content(target, prev_ni)
def _changed_source(self, dependency, target, prev_ni):
target_env = dependency.get_build_env()
type = target_env.get_tgt_sig_type()
if type == 'source':
return target_env.decide_source(dependency, target, prev_ni)
else:
return target_env.decide_target(dependency, target, prev_ni)
def _changed_timestamp_then_content(self, dependency, target, prev_ni):
return dependency.changed_timestamp_then_content(target, prev_ni)
def _changed_timestamp_newer(self, dependency, target, prev_ni):
return dependency.changed_timestamp_newer(target, prev_ni)
def _changed_timestamp_match(self, dependency, target, prev_ni):
return dependency.changed_timestamp_match(target, prev_ni)
def _copy_from_cache(self, src, dst):
return self.fs.copy(src, dst)
def _copy2_from_cache(self, src, dst):
return self.fs.copy2(src, dst)
def Decider(self, function):
copy_function = self._copy2_from_cache
if function in ('MD5', 'content'):
if not SCons.Util.md5:
raise UserError("MD5 signatures are not available in this version of Python.")
function = self._changed_content
elif function == 'MD5-timestamp':
function = self._changed_timestamp_then_content
elif function in ('timestamp-newer', 'make'):
function = self._changed_timestamp_newer
copy_function = self._copy_from_cache
elif function == 'timestamp-match':
function = self._changed_timestamp_match
elif not callable(function):
raise UserError("Unknown Decider value %s" % repr(function))
# We don't use AddMethod because we don't want to turn the
# function, which only expects three arguments, into a bound
# method, which would add self as an initial, fourth argument.
self.decide_target = function
self.decide_source = function
self.copy_from_cache = copy_function
def Detect(self, progs):
"""Return the first available program in progs.
"""
if not SCons.Util.is_List(progs):
progs = [ progs ]
for prog in progs:
path = self.WhereIs(prog)
if path: return prog
return None
def Dictionary(self, *args):
if not args:
return self._dict
dlist = [self._dict[x] for x in args]
if len(dlist) == 1:
dlist = dlist[0]
return dlist
def Dump(self, key = None):
"""
        Using the standard Python pretty printer, format the contents of the
        scons build environment as a string and return it.
If the key passed in is anything other than None, then that will
be used as an index into the build environment dictionary and
whatever is found there will be fed into the pretty printer. Note
that this key is case sensitive.
"""
import pprint
pp = pprint.PrettyPrinter(indent=2)
if key:
dict = self.Dictionary(key)
else:
dict = self.Dictionary()
return pp.pformat(dict)
def FindIxes(self, paths, prefix, suffix):
"""
Search a list of paths for something that matches the prefix and suffix.
paths - the list of paths or nodes.
prefix - construction variable for the prefix.
suffix - construction variable for the suffix.
"""
suffix = self.subst('$'+suffix)
prefix = self.subst('$'+prefix)
for path in paths:
dir,name = os.path.split(str(path))
if name[:len(prefix)] == prefix and name[-len(suffix):] == suffix:
return path
def ParseConfig(self, command, function=None, unique=1):
"""
Use the specified function to parse the output of the command
in order to modify the current environment. The 'command' can
be a string or a list of strings representing a command and
its arguments. 'Function' is an optional argument that takes
the environment, the output of the command, and the unique flag.
If no function is specified, MergeFlags, which treats the output
as the result of a typical 'X-config' command (i.e. gtk-config),
will merge the output into the appropriate variables.
"""
if function is None:
def parse_conf(env, cmd, unique=unique):
return env.MergeFlags(cmd, unique)
function = parse_conf
if SCons.Util.is_List(command):
command = ' '.join(command)
command = self.subst(command)
return function(self, self.backtick(command))
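# Hedged sketch: the classic use of ParseConfig is to merge compiler and
# linker flags reported by an external *-config or pkg-config tool; the
# package name below is only an example.
#
#   env.ParseConfig('pkg-config --cflags --libs gtk+-2.0')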
def ParseDepends(self, filename, must_exist=None, only_one=0):
"""
Parse a mkdep-style file for explicit dependencies. This is
completely abusable, and should be unnecessary in the "normal"
case of proper SCons configuration, but it may help make
the transition from a Make hierarchy easier for some people
to swallow. It can also be genuinely useful when using a tool
that can write a .d file, but for which writing a scanner would
be too complicated.
"""
filename = self.subst(filename)
try:
fp = open(filename, 'r')
except IOError:
if must_exist:
raise
return
lines = SCons.Util.LogicalLines(fp).readlines()
lines = [l for l in lines if l[0] != '#']
tdlist = []
for line in lines:
try:
target, depends = line.split(':', 1)
except (AttributeError, ValueError):
# Throws AttributeError if line isn't a string. Can throw
# ValueError if line doesn't split into two or more elements.
pass
else:
tdlist.append((target.split(), depends.split()))
if only_one:
targets = []
for td in tdlist:
targets.extend(td[0])
if len(targets) > 1:
raise SCons.Errors.UserError(
"More than one dependency target found in `%s': %s"
% (filename, targets))
for target, depends in tdlist:
self.Depends(target, depends)
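# Illustration only: consuming a compiler-generated .d file so that SCons
# learns the header dependencies recorded in it; 'foo.d' is a hypothetical
# file name.
#
#   env.ParseDepends('foo.d')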
def Platform(self, platform):
platform = self.subst(platform)
return SCons.Platform.Platform(platform)(self)
def Prepend(self, **kw):
"""Prepend values to existing construction variables
in an Environment.
"""
kw = copy_non_reserved_keywords(kw)
for key, val in kw.items():
# It would be easier on the eyes to write this using
# "continue" statements whenever we finish processing an item,
# but Python 1.5.2 apparently doesn't let you use "continue"
# within try:-except: blocks, so we have to nest our code.
try:
orig = self._dict[key]
except KeyError:
# No existing variable in the environment, so just set
# it to the new value.
self._dict[key] = val
else:
try:
# Check if the original looks like a dictionary.
# If it is, we can't just try adding the value because
# dictionaries don't have __add__() methods, and
# things like UserList will incorrectly coerce the
# original dict to a list (which we don't want).
update_dict = orig.update
except AttributeError:
try:
# Most straightforward: just try to add them
# together. This will work in most cases, when the
# original and new values are of compatible types.
self._dict[key] = val + orig
except (KeyError, TypeError):
try:
# Check if the added value is a list.
add_to_val = val.append
except AttributeError:
# The added value isn't a list, but the
# original is (by process of elimination),
# so insert the new value in the original
# (if there's one to insert).
if val:
orig.insert(0, val)
else:
# The added value is a list, so append
# the original to it (if there's a value
# to append).
if orig:
add_to_val(orig)
self._dict[key] = val
else:
# The original looks like a dictionary, so update it
# based on what we think the value looks like.
if SCons.Util.is_List(val):
for v in val:
orig[v] = None
else:
try:
update_dict(val)
except (AttributeError, TypeError, ValueError):
if SCons.Util.is_Dict(val):
for k, v in val.items():
orig[k] = v
else:
orig[val] = None
self.scanner_map_delete(kw)
def PrependENVPath(self, name, newpath, envname = 'ENV', sep = os.pathsep,
delete_existing=1):
"""Prepend path elements to the path 'name' in the 'ENV'
dictionary for this environment. Will only add any particular
path once, and will normpath and normcase all paths to help
assure this. This can also handle the case where the env
variable is a list instead of a string.
If delete_existing is 0, a newpath which is already in the path
will not be moved to the front (it will be left where it is).
"""
orig = ''
if envname in self._dict and name in self._dict[envname]:
orig = self._dict[envname][name]
nv = SCons.Util.PrependPath(orig, newpath, sep, delete_existing,
canonicalize=self._canonicalize)
if envname not in self._dict:
self._dict[envname] = {}
self._dict[envname][name] = nv
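# Hedged example of the behavior described in the docstring above: the new
# element ends up at the front of env['ENV']['PATH'] and duplicates are
# normalized away; '/usr/local/bin' is an arbitrary sample path.
#
#   env.PrependENVPath('PATH', '/usr/local/bin')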
def PrependUnique(self, delete_existing=0, **kw):
"""Prepend values to existing construction variables
in an Environment, if they're not already there.
If delete_existing is 1, removes existing values first, so
values move to front.
"""
kw = copy_non_reserved_keywords(kw)
for key, val in kw.items():
if SCons.Util.is_List(val):
val = _delete_duplicates(val, not delete_existing)
if key not in self._dict or self._dict[key] in ('', None):
self._dict[key] = val
elif SCons.Util.is_Dict(self._dict[key]) and \
SCons.Util.is_Dict(val):
self._dict[key].update(val)
elif SCons.Util.is_List(val):
dk = self._dict[key]
if not SCons.Util.is_List(dk):
dk = [dk]
if delete_existing:
dk = [x for x in dk if x not in val]
else:
val = [x for x in val if x not in dk]
self._dict[key] = val + dk
else:
dk = self._dict[key]
if SCons.Util.is_List(dk):
# By elimination, val is not a list. Since dk is a
# list, wrap val in a list first.
if delete_existing:
dk = [x for x in dk if x not in val]
self._dict[key] = [val] + dk
else:
if not val in dk:
self._dict[key] = [val] + dk
else:
if delete_existing:
dk = [x for x in dk if x not in val]
self._dict[key] = val + dk
self.scanner_map_delete(kw)
def Replace(self, **kw):
"""Replace existing construction variables in an Environment
with new construction variables and/or values.
"""
try:
kwbd = kw['BUILDERS']
except KeyError:
pass
else:
kwbd = semi_deepcopy(kwbd)
del kw['BUILDERS']
self.__setitem__('BUILDERS', kwbd)
kw = copy_non_reserved_keywords(kw)
self._update(semi_deepcopy(kw))
self.scanner_map_delete(kw)
def ReplaceIxes(self, path, old_prefix, old_suffix, new_prefix, new_suffix):
"""
Replace old_prefix with new_prefix and old_suffix with new_suffix.
env - Environment used to interpolate variables.
path - the path that will be modified.
old_prefix - construction variable for the old prefix.
old_suffix - construction variable for the old suffix.
new_prefix - construction variable for the new prefix.
new_suffix - construction variable for the new suffix.
"""
old_prefix = self.subst('$'+old_prefix)
old_suffix = self.subst('$'+old_suffix)
new_prefix = self.subst('$'+new_prefix)
new_suffix = self.subst('$'+new_suffix)
dir,name = os.path.split(str(path))
if name[:len(old_prefix)] == old_prefix:
name = name[len(old_prefix):]
if name[-len(old_suffix):] == old_suffix:
name = name[:-len(old_suffix)]
return os.path.join(dir, new_prefix+name+new_suffix)
def SetDefault(self, **kw):
for k in kw.keys():
if k in self._dict:
del kw[k]
self.Replace(**kw)
def _find_toolpath_dir(self, tp):
return self.fs.Dir(self.subst(tp)).srcnode().abspath
def Tool(self, tool, toolpath=None, **kw):
if SCons.Util.is_String(tool):
tool = self.subst(tool)
if toolpath is None:
toolpath = self.get('toolpath', [])
toolpath = list(map(self._find_toolpath_dir, toolpath))
tool = SCons.Tool.Tool(tool, toolpath, **kw)
tool(self)
def WhereIs(self, prog, path=None, pathext=None, reject=[]):
"""Find prog in the path.
"""
if path is None:
try:
path = self['ENV']['PATH']
except KeyError:
pass
elif SCons.Util.is_String(path):
path = self.subst(path)
if pathext is None:
try:
pathext = self['ENV']['PATHEXT']
except KeyError:
pass
elif SCons.Util.is_String(pathext):
pathext = self.subst(pathext)
prog = self.subst(prog)
path = SCons.Util.WhereIs(prog, path, pathext, reject)
if path: return path
return None
#######################################################################
# Public methods for doing real "SCons stuff" (manipulating
# dependencies, setting attributes on targets, etc.). These begin
# with upper-case letters. The essential characteristic of methods
# in this section is that they all *should* have corresponding
# same-named global functions.
#######################################################################
def Action(self, *args, **kw):
def subst_string(a, self=self):
if SCons.Util.is_String(a):
a = self.subst(a)
return a
nargs = list(map(subst_string, args))
nkw = self.subst_kw(kw)
return SCons.Action.Action(*nargs, **nkw)
def AddPreAction(self, files, action):
nodes = self.arg2nodes(files, self.fs.Entry)
action = SCons.Action.Action(action)
uniq = {}
for executor in [n.get_executor() for n in nodes]:
uniq[executor] = 1
for executor in uniq.keys():
executor.add_pre_action(action)
return nodes
def AddPostAction(self, files, action):
nodes = self.arg2nodes(files, self.fs.Entry)
action = SCons.Action.Action(action)
uniq = {}
for executor in [n.get_executor() for n in nodes]:
uniq[executor] = 1
for executor in uniq.keys():
executor.add_post_action(action)
return nodes
def Alias(self, target, source=[], action=None, **kw):
tlist = self.arg2nodes(target, self.ans.Alias)
if not SCons.Util.is_List(source):
source = [source]
source = [_f for _f in source if _f]
if not action:
if not source:
# There are no source files and no action, so just
# return a target list of classic Alias Nodes, without
# any builder. The externally visible effect is that
# this will make the wrapping Script.BuildTask class
# say that there's "Nothing to be done" for this Alias,
# instead of that it's "up to date."
return tlist
# No action, but there are sources. Re-call all the target
# builders to add the sources to each target.
result = []
for t in tlist:
bld = t.get_builder(AliasBuilder)
result.extend(bld(self, t, source))
return result
nkw = self.subst_kw(kw)
nkw.update({
'action' : SCons.Action.Action(action),
'source_factory' : self.fs.Entry,
'multi' : 1,
'is_explicit' : None,
})
bld = SCons.Builder.Builder(**nkw)
# Apply the Builder separately to each target so that the Aliases
# stay separate. If we did one "normal" Builder call with the
# whole target list, then all of the target Aliases would be
# associated under a single Executor.
result = []
for t in tlist:
# Calling the convert() method will cause a new Executor to be
# created from scratch, so we have to explicitly initialize
# it with the target's existing sources, plus our new ones,
# so nothing gets lost.
b = t.get_builder()
if b is None or b is AliasBuilder:
b = bld
else:
nkw['action'] = b.action + action
b = SCons.Builder.Builder(**nkw)
t.convert()
result.extend(b(self, t, t.sources + source))
return result
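# Sketch of typical Alias() usage (illustrative; 'install' and the node lists
# are hypothetical): a phony alias target whose sources accumulate across
# repeated calls, as handled above.
#
#   env.Alias('install', installed_binaries)
#   env.Alias('install', installed_manpages)   # adds more sources to the alias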
def AlwaysBuild(self, *targets):
tlist = []
for t in targets:
tlist.extend(self.arg2nodes(t, self.fs.Entry))
for t in tlist:
t.set_always_build()
return tlist
def BuildDir(self, *args, **kw):
msg = """BuildDir() and the build_dir keyword have been deprecated;\n\tuse VariantDir() and the variant_dir keyword instead."""
SCons.Warnings.warn(SCons.Warnings.DeprecatedBuildDirWarning, msg)
if 'build_dir' in kw:
kw['variant_dir'] = kw['build_dir']
del kw['build_dir']
return self.VariantDir(*args, **kw)
def Builder(self, **kw):
nkw = self.subst_kw(kw)
return SCons.Builder.Builder(**nkw)
def CacheDir(self, path):
import SCons.CacheDir
if path is not None:
path = self.subst(path)
self._CacheDir_path = path
def Clean(self, targets, files):
global CleanTargets
tlist = self.arg2nodes(targets, self.fs.Entry)
flist = self.arg2nodes(files, self.fs.Entry)
for t in tlist:
try:
CleanTargets[t].extend(flist)
except KeyError:
CleanTargets[t] = flist
def Configure(self, *args, **kw):
nargs = [self]
if args:
nargs = nargs + self.subst_list(args)[0]
nkw = self.subst_kw(kw)
nkw['_depth'] = kw.get('_depth', 0) + 1
try:
nkw['custom_tests'] = self.subst_kw(nkw['custom_tests'])
except KeyError:
pass
return SCons.SConf.SConf(*nargs, **nkw)
def Command(self, target, source, action, **kw):
"""Builds the supplied target files from the supplied
source files using the supplied action. Action may
be any type that the Builder constructor will accept
for an action."""
bkw = {
'action' : action,
'target_factory' : self.fs.Entry,
'source_factory' : self.fs.Entry,
}
try: bkw['source_scanner'] = kw['source_scanner']
except KeyError: pass
else: del kw['source_scanner']
bld = SCons.Builder.Builder(**bkw)
return bld(self, target, source, **kw)
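# Minimal illustrative Command() call (file names are made up): build a target
# by running a shell command, with $TARGET and $SOURCE expanded by SCons.
#
#   env.Command('out.txt', 'in.txt', 'cp $SOURCE $TARGET')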
def Depends(self, target, dependency):
"""Explicity specify that 'target's depend on 'dependency'."""
tlist = self.arg2nodes(target, self.fs.Entry)
dlist = self.arg2nodes(dependency, self.fs.Entry)
for t in tlist:
t.add_dependency(dlist)
return tlist
def Dir(self, name, *args, **kw):
"""
"""
s = self.subst(name)
if SCons.Util.is_Sequence(s):
result=[]
for e in s:
result.append(self.fs.Dir(e, *args, **kw))
return result
return self.fs.Dir(s, *args, **kw)
def NoClean(self, *targets):
"""Tags a target so that it will not be cleaned by -c"""
tlist = []
for t in targets:
tlist.extend(self.arg2nodes(t, self.fs.Entry))
for t in tlist:
t.set_noclean()
return tlist
def NoCache(self, *targets):
"""Tags a target so that it will not be cached"""
tlist = []
for t in targets:
tlist.extend(self.arg2nodes(t, self.fs.Entry))
for t in tlist:
t.set_nocache()
return tlist
def Entry(self, name, *args, **kw):
"""
"""
s = self.subst(name)
if SCons.Util.is_Sequence(s):
result=[]
for e in s:
result.append(self.fs.Entry(e, *args, **kw))
return result
return self.fs.Entry(s, *args, **kw)
def Environment(self, **kw):
return SCons.Environment.Environment(**self.subst_kw(kw))
def Execute(self, action, *args, **kw):
"""Directly execute an action through an Environment
"""
action = self.Action(action, *args, **kw)
result = action([], [], self)
if isinstance(result, SCons.Errors.BuildError):
errstr = result.errstr
if result.filename:
errstr = result.filename + ': ' + errstr
sys.stderr.write("scons: *** %s\n" % errstr)
return result.status
else:
return result
def File(self, name, *args, **kw):
"""
"""
s = self.subst(name)
if SCons.Util.is_Sequence(s):
result=[]
for e in s:
result.append(self.fs.File(e, *args, **kw))
return result
return self.fs.File(s, *args, **kw)
def FindFile(self, file, dirs):
file = self.subst(file)
nodes = self.arg2nodes(dirs, self.fs.Dir)
return SCons.Node.FS.find_file(file, tuple(nodes))
def Flatten(self, sequence):
return SCons.Util.flatten(sequence)
def GetBuildPath(self, files):
result = list(map(str, self.arg2nodes(files, self.fs.Entry)))
if SCons.Util.is_List(files):
return result
else:
return result[0]
def Glob(self, pattern, ondisk=True, source=False, strings=False):
return self.fs.Glob(self.subst(pattern), ondisk, source, strings)
def Ignore(self, target, dependency):
"""Ignore a dependency."""
tlist = self.arg2nodes(target, self.fs.Entry)
dlist = self.arg2nodes(dependency, self.fs.Entry)
for t in tlist:
t.add_ignore(dlist)
return tlist
def Literal(self, string):
return SCons.Subst.Literal(string)
def Local(self, *targets):
ret = []
for targ in targets:
if isinstance(targ, SCons.Node.Node):
targ.set_local()
ret.append(targ)
else:
for t in self.arg2nodes(targ, self.fs.Entry):
t.set_local()
ret.append(t)
return ret
def Precious(self, *targets):
tlist = []
for t in targets:
tlist.extend(self.arg2nodes(t, self.fs.Entry))
for t in tlist:
t.set_precious()
return tlist
def Repository(self, *dirs, **kw):
dirs = self.arg2nodes(list(dirs), self.fs.Dir)
self.fs.Repository(*dirs, **kw)
def Requires(self, target, prerequisite):
"""Specify that 'prerequisite' must be built before 'target',
(but 'target' does not actually depend on 'prerequisite'
and need not be rebuilt if it changes)."""
tlist = self.arg2nodes(target, self.fs.Entry)
plist = self.arg2nodes(prerequisite, self.fs.Entry)
for t in tlist:
t.add_prerequisite(plist)
return tlist
def Scanner(self, *args, **kw):
nargs = []
for arg in args:
if SCons.Util.is_String(arg):
arg = self.subst(arg)
nargs.append(arg)
nkw = self.subst_kw(kw)
return SCons.Scanner.Base(*nargs, **nkw)
def SConsignFile(self, name=".sconsign", dbm_module=None):
if name is not None:
name = self.subst(name)
if not os.path.isabs(name):
name = os.path.join(str(self.fs.SConstruct_dir), name)
if name:
name = os.path.normpath(name)
sconsign_dir = os.path.dirname(name)
if sconsign_dir and not os.path.exists(sconsign_dir):
self.Execute(SCons.Defaults.Mkdir(sconsign_dir))
SCons.SConsign.File(name, dbm_module)
def SideEffect(self, side_effect, target):
"""Tell scons that side_effects are built as side
effects of building targets."""
side_effects = self.arg2nodes(side_effect, self.fs.Entry)
targets = self.arg2nodes(target, self.fs.Entry)
for side_effect in side_effects:
if side_effect.multiple_side_effect_has_builder():
raise SCons.Errors.UserError("Multiple ways to build the same target were specified for: %s" % str(side_effect))
side_effect.add_source(targets)
side_effect.side_effect = 1
self.Precious(side_effect)
for target in targets:
target.side_effects.append(side_effect)
return side_effects
def SourceCode(self, entry, builder):
"""Arrange for a source code builder for (part of) a tree."""
msg = """SourceCode() has been deprecated and there is no replacement.
\tIf you need this function, please contact dev@scons.tigris.org."""
SCons.Warnings.warn(SCons.Warnings.DeprecatedSourceCodeWarning, msg)
entries = self.arg2nodes(entry, self.fs.Entry)
for entry in entries:
entry.set_src_builder(builder)
return entries
def SourceSignatures(self, type):
global _warn_source_signatures_deprecated
if _warn_source_signatures_deprecated:
msg = "The env.SourceSignatures() method is deprecated;\n" + \
"\tconvert your build to use the env.Decider() method instead."
SCons.Warnings.warn(SCons.Warnings.DeprecatedSourceSignaturesWarning, msg)
_warn_source_signatures_deprecated = False
type = self.subst(type)
self.src_sig_type = type
if type == 'MD5':
if not SCons.Util.md5:
raise UserError("MD5 signatures are not available in this version of Python.")
self.decide_source = self._changed_content
elif type == 'timestamp':
self.decide_source = self._changed_timestamp_match
else:
raise UserError("Unknown source signature type '%s'" % type)
def Split(self, arg):
"""This function converts a string or list into a list of strings
or Nodes. This makes things easier for users by allowing files to
be specified as a white-space separated list to be split.
The input rules are:
- A single string containing names separated by spaces. These will be
split apart at the spaces.
- A single Node instance
- A list containing either strings or Node instances. Any strings
in the list are not split at spaces.
In all cases, the function returns a list of Nodes and strings."""
if SCons.Util.is_List(arg):
return list(map(self.subst, arg))
elif SCons.Util.is_String(arg):
return self.subst(arg).split()
else:
return [self.subst(arg)]
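# Hedged examples of the Split() rules listed above (file names are made up):
#
#   env.Split('main.c util.c')      # -> ['main.c', 'util.c']
#   env.Split(['main.c', 'a b.c'])  # -> ['main.c', 'a b.c']  (no re-splitting)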
def TargetSignatures(self, type):
global _warn_target_signatures_deprecated
if _warn_target_signatures_deprecated:
msg = "The env.TargetSignatures() method is deprecated;\n" + \
"\tconvert your build to use the env.Decider() method instead."
SCons.Warnings.warn(SCons.Warnings.DeprecatedTargetSignaturesWarning, msg)
_warn_target_signatures_deprecated = False
type = self.subst(type)
self.tgt_sig_type = type
if type in ('MD5', 'content'):
if not SCons.Util.md5:
raise UserError("MD5 signatures are not available in this version of Python.")
self.decide_target = self._changed_content
elif type == 'timestamp':
self.decide_target = self._changed_timestamp_match
elif type == 'build':
self.decide_target = self._changed_build
elif type == 'source':
self.decide_target = self._changed_source
else:
raise UserError("Unknown target signature type '%s'"%type)
def Value(self, value, built_value=None):
"""
"""
return SCons.Node.Python.Value(value, built_value)
def VariantDir(self, variant_dir, src_dir, duplicate=1):
variant_dir = self.arg2nodes(variant_dir, self.fs.Dir)[0]
src_dir = self.arg2nodes(src_dir, self.fs.Dir)[0]
self.fs.VariantDir(variant_dir, src_dir, duplicate)
def FindSourceFiles(self, node='.'):
""" returns a list of all source files.
"""
node = self.arg2nodes(node, self.fs.Entry)[0]
sources = []
def build_source(ss):
for s in ss:
if isinstance(s, SCons.Node.FS.Dir):
build_source(s.all_children())
elif s.has_builder():
build_source(s.sources)
elif isinstance(s.disambiguate(), SCons.Node.FS.File):
sources.append(s)
build_source(node.all_children())
# THIS CODE APPEARS TO HAVE NO EFFECT
# # get the final srcnode for all nodes, this means stripping any
# # attached build node by calling the srcnode function
# for file in sources:
# srcnode = file.srcnode()
# while srcnode != file.srcnode():
# srcnode = file.srcnode()
# remove duplicates
return list(set(sources))
def FindInstalledFiles(self):
""" returns the list of all targets of the Install and InstallAs Builder.
"""
from SCons.Tool import install
if install._UNIQUE_INSTALLED_FILES is None:
install._UNIQUE_INSTALLED_FILES = SCons.Util.uniquer_hashables(install._INSTALLED_FILES)
return install._UNIQUE_INSTALLED_FILES
class OverrideEnvironment(Base):
"""A proxy that overrides variables in a wrapped construction
environment by returning values from an overrides dictionary in
preference to values from the underlying subject environment.
This is a lightweight (I hope) proxy that passes through most use of
attributes to the underlying Environment.Base class, but has just
enough additional methods defined to act like a real construction
environment with overridden values. It can wrap either a Base
construction environment, or another OverrideEnvironment, which
can in turn nest arbitrary OverrideEnvironments...
Note that we do *not* call the underlying base class
(SubstitutionEnvironment) initialization, because we get most of those
from proxying the attributes of the subject construction environment.
But because we subclass SubstitutionEnvironment, this class also
has inherited arg2nodes() and subst*() methods; those methods can't
be proxied because they need *this* object's methods to fetch the
values from the overrides dictionary.
"""
def __init__(self, subject, overrides={}):
if __debug__: logInstanceCreation(self, 'Environment.OverrideEnvironment')
self.__dict__['__subject'] = subject
self.__dict__['overrides'] = overrides
# Methods that make this class act like a proxy.
def __getattr__(self, name):
return getattr(self.__dict__['__subject'], name)
def __setattr__(self, name, value):
setattr(self.__dict__['__subject'], name, value)
# Methods that make this class act like a dictionary.
def __getitem__(self, key):
try:
return self.__dict__['overrides'][key]
except KeyError:
return self.__dict__['__subject'].__getitem__(key)
def __setitem__(self, key, value):
if not is_valid_construction_var(key):
raise SCons.Errors.UserError("Illegal construction variable `%s'" % key)
self.__dict__['overrides'][key] = value
def __delitem__(self, key):
try:
del self.__dict__['overrides'][key]
except KeyError:
deleted = 0
else:
deleted = 1
try:
result = self.__dict__['__subject'].__delitem__(key)
except KeyError:
if not deleted:
raise
result = None
return result
def get(self, key, default=None):
"""Emulates the get() method of dictionaries."""
try:
return self.__dict__['overrides'][key]
except KeyError:
return self.__dict__['__subject'].get(key, default)
def has_key(self, key):
try:
self.__dict__['overrides'][key]
return 1
except KeyError:
return key in self.__dict__['__subject']
def __contains__(self, key):
if self.__dict__['overrides'].__contains__(key):
return 1
return self.__dict__['__subject'].__contains__(key)
def Dictionary(self):
"""Emulates the items() method of dictionaries."""
d = self.__dict__['__subject'].Dictionary().copy()
d.update(self.__dict__['overrides'])
return d
def items(self):
"""Emulates the items() method of dictionaries."""
return list(self.Dictionary().items())
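# Illustrative sketch of the override lookup described in the class docstring
# (variable names are hypothetical): reads prefer the overrides dict, writes
# go into it, and anything else falls through to the wrapped environment.
#
#   oenv = OverrideEnvironment(env, {'CCFLAGS': '-g'})
#   oenv['CCFLAGS']   # -> '-g' (from overrides)
#   oenv['CC']        # -> env['CC'] (from the subject environment)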
# Overridden private construction environment methods.
def _update(self, dict):
"""Update an environment's values directly, bypassing the normal
checks that occur when users try to set items.
"""
self.__dict__['overrides'].update(dict)
def gvars(self):
return self.__dict__['__subject'].gvars()
def lvars(self):
lvars = self.__dict__['__subject'].lvars()
lvars.update(self.__dict__['overrides'])
return lvars
# Overridden public construction environment methods.
def Replace(self, **kw):
kw = copy_non_reserved_keywords(kw)
self.__dict__['overrides'].update(semi_deepcopy(kw))
# The entry point that will be used by the external world
# to refer to a construction environment. This allows the wrapper
# interface to extend a construction environment for its own purposes
# by subclassing SCons.Environment.Base and then assigning the
# class to SCons.Environment.Environment.
Environment = Base
# An entry point for returning a proxy subclass instance that overrides
# the subst*() methods so they don't actually perform construction
# variable substitution. This is specifically intended to be the shim
# layer in between global function calls (which don't want construction
# variable substitution) and the DefaultEnvironment() (which would
# substitute variables if left to its own devices).
#
# We have to wrap this in a function that allows us to delay definition of
# the class until it's necessary, so that when it subclasses Environment
# it will pick up whatever Environment subclass the wrapper interface
# might have assigned to SCons.Environment.Environment.
def NoSubstitutionProxy(subject):
class _NoSubstitutionProxy(Environment):
def __init__(self, subject):
self.__dict__['__subject'] = subject
def __getattr__(self, name):
return getattr(self.__dict__['__subject'], name)
def __setattr__(self, name, value):
return setattr(self.__dict__['__subject'], name, value)
def raw_to_mode(self, dict):
try:
raw = dict['raw']
except KeyError:
pass
else:
del dict['raw']
dict['mode'] = raw
def subst(self, string, *args, **kwargs):
return string
def subst_kw(self, kw, *args, **kwargs):
return kw
def subst_list(self, string, *args, **kwargs):
nargs = (string, self,) + args
nkw = kwargs.copy()
nkw['gvars'] = {}
self.raw_to_mode(nkw)
return SCons.Subst.scons_subst_list(*nargs, **nkw)
def subst_target_source(self, string, *args, **kwargs):
nargs = (string, self,) + args
nkw = kwargs.copy()
nkw['gvars'] = {}
self.raw_to_mode(nkw)
return SCons.Subst.scons_subst(*nargs, **nkw)
return _NoSubstitutionProxy(subject)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
bcorbet/SickRage
|
refs/heads/master
|
lib/imdb/parser/http/searchPersonParser.py
|
76
|
"""
parser.http.searchPersonParser module (imdb package).
This module provides the HTMLSearchPersonParser class (and the
search_person_parser instance), used to parse the results of a search
for a given person.
E.g., when searching for the name "Mel Gibson", the parsed page would be:
http://akas.imdb.com/find?q=Mel+Gibson&nm=on&mx=20
Copyright 2004-2013 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
from imdb.utils import analyze_name, build_name
from utils import Extractor, Attribute, analyze_imdbid
from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
def _cleanName(n):
"""Clean the name in a title tag."""
if not n:
return u''
n = n.replace('Filmography by type for', '') # FIXME: temporary.
return n
class DOMBasicPersonParser(DOMBasicMovieParser):
"""Simply get the name of a person and the imdbID.
It's used by the DOMHTMLSearchPersonParser class to return a result
for a direct match (when a search on IMDb results in a single
person, the web server sends the person page directly)."""
_titleFunct = lambda self, x: analyze_name(_cleanName(x), canonical=1)
_reAKASp = re.compile(r'(?:aka|birth name) (<em>")(.*?)"(<br>|<\/em>|<\/td>)',
re.I | re.M)
class DOMHTMLSearchPersonParser(DOMHTMLSearchMovieParser):
"""Parse the html page that the IMDb web server shows when the
"new search system" is used, for persons."""
_BaseParser = DOMBasicPersonParser
_notDirectHitTitle = '<title>find - imdb'
_titleBuilder = lambda self, x: build_name(x, canonical=True)
_linkPrefix = '/name/nm'
_attrs = [Attribute(key='data',
multi=True,
path={
'link': "./a[1]/@href",
'name': "./a[1]/text()",
'index': "./text()[1]",
'akas': ".//div[@class='_imdbpyAKA']/text()"
},
postprocess=lambda x: (
analyze_imdbid(x.get('link') or u''),
analyze_name((x.get('name') or u'') + \
(x.get('index') or u''),
canonical=1), x.get('akas')
))]
extractors = [Extractor(label='search',
path="//td[@class='result_text']/a[starts-with(@href, '/name/nm')]/..",
attrs=_attrs)]
def preprocess_string(self, html_string):
if self._notDirectHitTitle in html_string[:10240].lower():
html_string = _reAKASp.sub(
r'\1<div class="_imdbpyAKA">\2::</div>\3',
html_string)
return DOMHTMLSearchMovieParser.preprocess_string(self, html_string)
_OBJECTS = {
'search_person_parser': ((DOMHTMLSearchPersonParser,),
{'kind': 'person', '_basic_parser': DOMBasicPersonParser})
}
|
intermezzo-fr/pipreqs
|
refs/heads/master
|
tests/__init__.py
|
14224
|
# -*- coding: utf-8 -*-
|
ThinkingBridge/platform_external_chromium_org
|
refs/heads/kitkat
|
third_party/protobuf/python/google/protobuf/internal/wire_format.py
|
561
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Constants and static functions to support protocol buffer wire format."""
__author__ = 'robinson@google.com (Will Robinson)'
import struct
from google.protobuf import descriptor
from google.protobuf import message
TAG_TYPE_BITS = 3 # Number of bits used to hold type info in a proto tag.
TAG_TYPE_MASK = (1 << TAG_TYPE_BITS) - 1 # 0x7
# These numbers identify the wire type of a protocol buffer value.
# We use the least-significant TAG_TYPE_BITS bits of the varint-encoded
# tag-and-type to store one of these WIRETYPE_* constants.
# These values must match WireType enum in google/protobuf/wire_format.h.
WIRETYPE_VARINT = 0
WIRETYPE_FIXED64 = 1
WIRETYPE_LENGTH_DELIMITED = 2
WIRETYPE_START_GROUP = 3
WIRETYPE_END_GROUP = 4
WIRETYPE_FIXED32 = 5
_WIRETYPE_MAX = 5
# Bounds for various integer types.
INT32_MAX = int((1 << 31) - 1)
INT32_MIN = int(-(1 << 31))
UINT32_MAX = (1 << 32) - 1
INT64_MAX = (1 << 63) - 1
INT64_MIN = -(1 << 63)
UINT64_MAX = (1 << 64) - 1
# "struct" format strings that will encode/decode the specified formats.
FORMAT_UINT32_LITTLE_ENDIAN = '<I'
FORMAT_UINT64_LITTLE_ENDIAN = '<Q'
FORMAT_FLOAT_LITTLE_ENDIAN = '<f'
FORMAT_DOUBLE_LITTLE_ENDIAN = '<d'
# We'll have to provide alternate implementations of AppendLittleEndian*() on
# any architectures where these checks fail.
if struct.calcsize(FORMAT_UINT32_LITTLE_ENDIAN) != 4:
raise AssertionError('Format "I" is not a 32-bit number.')
if struct.calcsize(FORMAT_UINT64_LITTLE_ENDIAN) != 8:
raise AssertionError('Format "Q" is not a 64-bit number.')
def PackTag(field_number, wire_type):
"""Returns an unsigned 32-bit integer that encodes the field number and
wire type information in standard protocol message wire format.
Args:
field_number: Expected to be an integer in the range [1, 1 << 29)
wire_type: One of the WIRETYPE_* constants.
"""
if not 0 <= wire_type <= _WIRETYPE_MAX:
raise message.EncodeError('Unknown wire type: %d' % wire_type)
return (field_number << TAG_TYPE_BITS) | wire_type
def UnpackTag(tag):
"""The inverse of PackTag(). Given an unsigned 32-bit number,
returns a (field_number, wire_type) tuple.
"""
return (tag >> TAG_TYPE_BITS), (tag & TAG_TYPE_MASK)
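# Worked example (added for illustration) of the tag layout used above: the
# field number occupies the high bits and the wire type the low TAG_TYPE_BITS.
#
#   PackTag(1, WIRETYPE_VARINT)            # -> (1 << 3) | 0 == 8
#   UnpackTag(8)                           # -> (1, WIRETYPE_VARINT)
#   PackTag(2, WIRETYPE_LENGTH_DELIMITED)  # -> (2 << 3) | 2 == 18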
def ZigZagEncode(value):
"""ZigZag Transform: Encodes signed integers so that they can be
effectively used with varint encoding. See wire_format.h for
more details.
"""
if value >= 0:
return value << 1
return (value << 1) ^ (~0)
def ZigZagDecode(value):
"""Inverse of ZigZagEncode()."""
if not value & 0x1:
return value >> 1
return (value >> 1) ^ (~0)
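# Small worked examples of the zig-zag mapping (illustrative only): small
# negative numbers map to small unsigned values, which keeps their varint
# encodings short.
#
#   ZigZagEncode(0)  == 0    ZigZagEncode(-1) == 1
#   ZigZagEncode(1)  == 2    ZigZagEncode(-2) == 3
#   ZigZagDecode(3)  == -2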
# The *ByteSize() functions below return the number of bytes required to
# serialize "field number + type" information and then serialize the value.
def Int32ByteSize(field_number, int32):
return Int64ByteSize(field_number, int32)
def Int32ByteSizeNoTag(int32):
return _VarUInt64ByteSizeNoTag(0xffffffffffffffff & int32)
def Int64ByteSize(field_number, int64):
# Have to convert to uint before calling UInt64ByteSize().
return UInt64ByteSize(field_number, 0xffffffffffffffff & int64)
def UInt32ByteSize(field_number, uint32):
return UInt64ByteSize(field_number, uint32)
def UInt64ByteSize(field_number, uint64):
return TagByteSize(field_number) + _VarUInt64ByteSizeNoTag(uint64)
def SInt32ByteSize(field_number, int32):
return UInt32ByteSize(field_number, ZigZagEncode(int32))
def SInt64ByteSize(field_number, int64):
return UInt64ByteSize(field_number, ZigZagEncode(int64))
def Fixed32ByteSize(field_number, fixed32):
return TagByteSize(field_number) + 4
def Fixed64ByteSize(field_number, fixed64):
return TagByteSize(field_number) + 8
def SFixed32ByteSize(field_number, sfixed32):
return TagByteSize(field_number) + 4
def SFixed64ByteSize(field_number, sfixed64):
return TagByteSize(field_number) + 8
def FloatByteSize(field_number, flt):
return TagByteSize(field_number) + 4
def DoubleByteSize(field_number, double):
return TagByteSize(field_number) + 8
def BoolByteSize(field_number, b):
return TagByteSize(field_number) + 1
def EnumByteSize(field_number, enum):
return UInt32ByteSize(field_number, enum)
def StringByteSize(field_number, string):
return BytesByteSize(field_number, string.encode('utf-8'))
def BytesByteSize(field_number, b):
return (TagByteSize(field_number)
+ _VarUInt64ByteSizeNoTag(len(b))
+ len(b))
def GroupByteSize(field_number, message):
return (2 * TagByteSize(field_number) # START and END group.
+ message.ByteSize())
def MessageByteSize(field_number, message):
return (TagByteSize(field_number)
+ _VarUInt64ByteSizeNoTag(message.ByteSize())
+ message.ByteSize())
def MessageSetItemByteSize(field_number, msg):
# First compute the sizes of the tags.
# There are 2 tags for the beginning and ending of the repeated group, that
# is field number 1, one with field number 2 (type_id) and one with field
# number 3 (message).
total_size = (2 * TagByteSize(1) + TagByteSize(2) + TagByteSize(3))
# Add the number of bytes for type_id.
total_size += _VarUInt64ByteSizeNoTag(field_number)
message_size = msg.ByteSize()
# The number of bytes for encoding the length of the message.
total_size += _VarUInt64ByteSizeNoTag(message_size)
# The size of the message.
total_size += message_size
return total_size
def TagByteSize(field_number):
"""Returns the bytes required to serialize a tag with this field number."""
# Just pass in type 0, since the type won't affect the tag+type size.
return _VarUInt64ByteSizeNoTag(PackTag(field_number, 0))
# Private helper function for the *ByteSize() functions above.
def _VarUInt64ByteSizeNoTag(uint64):
"""Returns the number of bytes required to serialize a single varint
using boundary value comparisons. (unrolled loop optimization -WPierce)
uint64 must be unsigned.
"""
if uint64 <= 0x7f: return 1
if uint64 <= 0x3fff: return 2
if uint64 <= 0x1fffff: return 3
if uint64 <= 0xfffffff: return 4
if uint64 <= 0x7ffffffff: return 5
if uint64 <= 0x3ffffffffff: return 6
if uint64 <= 0x1ffffffffffff: return 7
if uint64 <= 0xffffffffffffff: return 8
if uint64 <= 0x7fffffffffffffff: return 9
if uint64 > UINT64_MAX:
raise message.EncodeError('Value out of range: %d' % uint64)
return 10
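# Boundary examples for the size ladder above (added for clarity):
#
#   _VarUInt64ByteSizeNoTag(0x7f)    == 1   # largest 1-byte varint
#   _VarUInt64ByteSizeNoTag(0x80)    == 2
#   _VarUInt64ByteSizeNoTag(0x3fff)  == 2   # largest 2-byte varint
#   _VarUInt64ByteSizeNoTag(1 << 63) == 10  # values above 2**63 - 1 need 10 bytes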
NON_PACKABLE_TYPES = (
descriptor.FieldDescriptor.TYPE_STRING,
descriptor.FieldDescriptor.TYPE_GROUP,
descriptor.FieldDescriptor.TYPE_MESSAGE,
descriptor.FieldDescriptor.TYPE_BYTES
)
def IsTypePackable(field_type):
"""Return true iff packable = true is valid for fields of this type.
Args:
field_type: a FieldDescriptor::Type value.
Returns:
True iff fields of this type are packable.
"""
return field_type not in NON_PACKABLE_TYPES
|
chrisndodge/edx-platform
|
refs/heads/master
|
pavelib/utils/test/utils.py
|
16
|
"""
Helper functions for test tasks
"""
from paver.easy import sh, task, cmdopts
from pavelib.utils.envs import Env
from pavelib.utils.timer import timed
import os
import re
import subprocess
MONGO_PORT_NUM = int(os.environ.get('EDXAPP_TEST_MONGO_PORT', '27017'))
MONGO_HOST = os.environ.get('EDXAPP_TEST_MONGO_HOST', 'localhost')
MINIMUM_FIREFOX_VERSION = 28.0
__test__ = False # do not collect
@task
@timed
def clean_test_files():
"""
Clean fixture files used by tests and .pyc files
"""
sh("git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads")
# This find command removes all the *.pyc files that aren't in the .git
# directory. See this blog post for more details:
# http://nedbatchelder.com/blog/201505/be_careful_deleting_files_around_git.html
sh(r"find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \;")
sh("rm -rf test_root/log/auto_screenshots/*")
sh("rm -rf /tmp/mako_[cl]ms")
def clean_dir(directory):
"""
Delete all the files from the specified directory.
"""
# We delete the files but preserve the directory structure
# so that coverage.py has a place to put the reports.
sh('find {dir} -type f -delete'.format(dir=directory))
@task
@cmdopts([
('skip-clean', 'C', 'skip cleaning repository before running tests'),
('skip_clean', None, 'deprecated in favor of skip-clean'),
])
@timed
def clean_reports_dir(options):
"""
Clean coverage files, to ensure that we don't use stale data to generate reports.
"""
if getattr(options, 'skip_clean', False):
print '--skip-clean is set, skipping...'
return
# We delete the files but preserve the directory structure
# so that coverage.py has a place to put the reports.
reports_dir = Env.REPORT_DIR.makedirs_p()
clean_dir(reports_dir)
@task
@timed
def clean_mongo():
"""
Clean mongo test databases
"""
sh("mongo {host}:{port} {repo_root}/scripts/delete-mongo-test-dbs.js".format(
host=MONGO_HOST,
port=MONGO_PORT_NUM,
repo_root=Env.REPO_ROOT,
))
def check_firefox_version():
"""
Check that firefox is the correct version.
"""
expected_firefox_ver = "Mozilla Firefox " + str(MINIMUM_FIREFOX_VERSION)
firefox_ver_string = subprocess.check_output("firefox --version", shell=True).strip()
firefox_version_regex = re.compile(r"Mozilla Firefox (\d+.\d+)")
try:
firefox_ver = float(firefox_version_regex.search(firefox_ver_string).group(1))
except AttributeError:
firefox_ver = 0.0
debian_location = 'https://s3.amazonaws.com/vagrant.testeng.edx.org/'
debian_package = 'firefox-mozilla-build_42.0-0ubuntu1_amd64.deb'
debian_path = '{location}{package}'.format(location=debian_location, package=debian_package)
if firefox_ver < MINIMUM_FIREFOX_VERSION:
raise Exception(
'Required firefox version not found.\n'
'Expected: {expected_version}; Actual: {actual_version}.\n\n'
'As the vagrant user in devstack, run the following:\n\n'
'\t$ sudo wget -O /tmp/firefox_42.deb {debian_path}\n'
'\t$ sudo apt-get remove firefox\n\n'
'\t$ sudo gdebi -nq /tmp/firefox_42.deb\n\n'
'Confirm the new version:\n'
'\t$ firefox --version\n'
'\t{expected_version}'.format(
actual_version=firefox_ver,
expected_version=expected_firefox_ver,
debian_path=debian_path
)
)
|
juanyaw/python
|
refs/heads/develop
|
cpython/Lib/encodings/cp875.py
|
272
|
""" Python Character Mapping Codec cp875 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP875.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp875',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x9c' # 0x04 -> CONTROL
'\t' # 0x05 -> HORIZONTAL TABULATION
'\x86' # 0x06 -> CONTROL
'\x7f' # 0x07 -> DELETE
'\x97' # 0x08 -> CONTROL
'\x8d' # 0x09 -> CONTROL
'\x8e' # 0x0A -> CONTROL
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x9d' # 0x14 -> CONTROL
'\x85' # 0x15 -> CONTROL
'\x08' # 0x16 -> BACKSPACE
'\x87' # 0x17 -> CONTROL
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x92' # 0x1A -> CONTROL
'\x8f' # 0x1B -> CONTROL
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
'\x80' # 0x20 -> CONTROL
'\x81' # 0x21 -> CONTROL
'\x82' # 0x22 -> CONTROL
'\x83' # 0x23 -> CONTROL
'\x84' # 0x24 -> CONTROL
'\n' # 0x25 -> LINE FEED
'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
'\x1b' # 0x27 -> ESCAPE
'\x88' # 0x28 -> CONTROL
'\x89' # 0x29 -> CONTROL
'\x8a' # 0x2A -> CONTROL
'\x8b' # 0x2B -> CONTROL
'\x8c' # 0x2C -> CONTROL
'\x05' # 0x2D -> ENQUIRY
'\x06' # 0x2E -> ACKNOWLEDGE
'\x07' # 0x2F -> BELL
'\x90' # 0x30 -> CONTROL
'\x91' # 0x31 -> CONTROL
'\x16' # 0x32 -> SYNCHRONOUS IDLE
'\x93' # 0x33 -> CONTROL
'\x94' # 0x34 -> CONTROL
'\x95' # 0x35 -> CONTROL
'\x96' # 0x36 -> CONTROL
'\x04' # 0x37 -> END OF TRANSMISSION
'\x98' # 0x38 -> CONTROL
'\x99' # 0x39 -> CONTROL
'\x9a' # 0x3A -> CONTROL
'\x9b' # 0x3B -> CONTROL
'\x14' # 0x3C -> DEVICE CONTROL FOUR
'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
'\x9e' # 0x3E -> CONTROL
'\x1a' # 0x3F -> SUBSTITUTE
' ' # 0x40 -> SPACE
'\u0391' # 0x41 -> GREEK CAPITAL LETTER ALPHA
'\u0392' # 0x42 -> GREEK CAPITAL LETTER BETA
'\u0393' # 0x43 -> GREEK CAPITAL LETTER GAMMA
'\u0394' # 0x44 -> GREEK CAPITAL LETTER DELTA
'\u0395' # 0x45 -> GREEK CAPITAL LETTER EPSILON
'\u0396' # 0x46 -> GREEK CAPITAL LETTER ZETA
'\u0397' # 0x47 -> GREEK CAPITAL LETTER ETA
'\u0398' # 0x48 -> GREEK CAPITAL LETTER THETA
'\u0399' # 0x49 -> GREEK CAPITAL LETTER IOTA
'[' # 0x4A -> LEFT SQUARE BRACKET
'.' # 0x4B -> FULL STOP
'<' # 0x4C -> LESS-THAN SIGN
'(' # 0x4D -> LEFT PARENTHESIS
'+' # 0x4E -> PLUS SIGN
'!' # 0x4F -> EXCLAMATION MARK
'&' # 0x50 -> AMPERSAND
'\u039a' # 0x51 -> GREEK CAPITAL LETTER KAPPA
'\u039b' # 0x52 -> GREEK CAPITAL LETTER LAMDA
'\u039c' # 0x53 -> GREEK CAPITAL LETTER MU
'\u039d' # 0x54 -> GREEK CAPITAL LETTER NU
'\u039e' # 0x55 -> GREEK CAPITAL LETTER XI
'\u039f' # 0x56 -> GREEK CAPITAL LETTER OMICRON
'\u03a0' # 0x57 -> GREEK CAPITAL LETTER PI
'\u03a1' # 0x58 -> GREEK CAPITAL LETTER RHO
'\u03a3' # 0x59 -> GREEK CAPITAL LETTER SIGMA
']' # 0x5A -> RIGHT SQUARE BRACKET
'$' # 0x5B -> DOLLAR SIGN
'*' # 0x5C -> ASTERISK
')' # 0x5D -> RIGHT PARENTHESIS
';' # 0x5E -> SEMICOLON
'^' # 0x5F -> CIRCUMFLEX ACCENT
'-' # 0x60 -> HYPHEN-MINUS
'/' # 0x61 -> SOLIDUS
'\u03a4' # 0x62 -> GREEK CAPITAL LETTER TAU
'\u03a5' # 0x63 -> GREEK CAPITAL LETTER UPSILON
'\u03a6' # 0x64 -> GREEK CAPITAL LETTER PHI
'\u03a7' # 0x65 -> GREEK CAPITAL LETTER CHI
'\u03a8' # 0x66 -> GREEK CAPITAL LETTER PSI
'\u03a9' # 0x67 -> GREEK CAPITAL LETTER OMEGA
'\u03aa' # 0x68 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
'\u03ab' # 0x69 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
'|' # 0x6A -> VERTICAL LINE
',' # 0x6B -> COMMA
'%' # 0x6C -> PERCENT SIGN
'_' # 0x6D -> LOW LINE
'>' # 0x6E -> GREATER-THAN SIGN
'?' # 0x6F -> QUESTION MARK
'\xa8' # 0x70 -> DIAERESIS
'\u0386' # 0x71 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
'\u0388' # 0x72 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
'\u0389' # 0x73 -> GREEK CAPITAL LETTER ETA WITH TONOS
'\xa0' # 0x74 -> NO-BREAK SPACE
'\u038a' # 0x75 -> GREEK CAPITAL LETTER IOTA WITH TONOS
'\u038c' # 0x76 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
'\u038e' # 0x77 -> GREEK CAPITAL LETTER UPSILON WITH TONOS
'\u038f' # 0x78 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
'`' # 0x79 -> GRAVE ACCENT
':' # 0x7A -> COLON
'#' # 0x7B -> NUMBER SIGN
'@' # 0x7C -> COMMERCIAL AT
"'" # 0x7D -> APOSTROPHE
'=' # 0x7E -> EQUALS SIGN
'"' # 0x7F -> QUOTATION MARK
'\u0385' # 0x80 -> GREEK DIALYTIKA TONOS
'a' # 0x81 -> LATIN SMALL LETTER A
'b' # 0x82 -> LATIN SMALL LETTER B
'c' # 0x83 -> LATIN SMALL LETTER C
'd' # 0x84 -> LATIN SMALL LETTER D
'e' # 0x85 -> LATIN SMALL LETTER E
'f' # 0x86 -> LATIN SMALL LETTER F
'g' # 0x87 -> LATIN SMALL LETTER G
'h' # 0x88 -> LATIN SMALL LETTER H
'i' # 0x89 -> LATIN SMALL LETTER I
'\u03b1' # 0x8A -> GREEK SMALL LETTER ALPHA
'\u03b2' # 0x8B -> GREEK SMALL LETTER BETA
'\u03b3' # 0x8C -> GREEK SMALL LETTER GAMMA
'\u03b4' # 0x8D -> GREEK SMALL LETTER DELTA
'\u03b5' # 0x8E -> GREEK SMALL LETTER EPSILON
'\u03b6' # 0x8F -> GREEK SMALL LETTER ZETA
'\xb0' # 0x90 -> DEGREE SIGN
'j' # 0x91 -> LATIN SMALL LETTER J
'k' # 0x92 -> LATIN SMALL LETTER K
'l' # 0x93 -> LATIN SMALL LETTER L
'm' # 0x94 -> LATIN SMALL LETTER M
'n' # 0x95 -> LATIN SMALL LETTER N
'o' # 0x96 -> LATIN SMALL LETTER O
'p' # 0x97 -> LATIN SMALL LETTER P
'q' # 0x98 -> LATIN SMALL LETTER Q
'r' # 0x99 -> LATIN SMALL LETTER R
'\u03b7' # 0x9A -> GREEK SMALL LETTER ETA
'\u03b8' # 0x9B -> GREEK SMALL LETTER THETA
'\u03b9' # 0x9C -> GREEK SMALL LETTER IOTA
'\u03ba' # 0x9D -> GREEK SMALL LETTER KAPPA
'\u03bb' # 0x9E -> GREEK SMALL LETTER LAMDA
'\u03bc' # 0x9F -> GREEK SMALL LETTER MU
'\xb4' # 0xA0 -> ACUTE ACCENT
'~' # 0xA1 -> TILDE
's' # 0xA2 -> LATIN SMALL LETTER S
't' # 0xA3 -> LATIN SMALL LETTER T
'u' # 0xA4 -> LATIN SMALL LETTER U
'v' # 0xA5 -> LATIN SMALL LETTER V
'w' # 0xA6 -> LATIN SMALL LETTER W
'x' # 0xA7 -> LATIN SMALL LETTER X
'y' # 0xA8 -> LATIN SMALL LETTER Y
'z' # 0xA9 -> LATIN SMALL LETTER Z
'\u03bd' # 0xAA -> GREEK SMALL LETTER NU
'\u03be' # 0xAB -> GREEK SMALL LETTER XI
'\u03bf' # 0xAC -> GREEK SMALL LETTER OMICRON
'\u03c0' # 0xAD -> GREEK SMALL LETTER PI
'\u03c1' # 0xAE -> GREEK SMALL LETTER RHO
'\u03c3' # 0xAF -> GREEK SMALL LETTER SIGMA
'\xa3' # 0xB0 -> POUND SIGN
'\u03ac' # 0xB1 -> GREEK SMALL LETTER ALPHA WITH TONOS
'\u03ad' # 0xB2 -> GREEK SMALL LETTER EPSILON WITH TONOS
'\u03ae' # 0xB3 -> GREEK SMALL LETTER ETA WITH TONOS
'\u03ca' # 0xB4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
'\u03af' # 0xB5 -> GREEK SMALL LETTER IOTA WITH TONOS
'\u03cc' # 0xB6 -> GREEK SMALL LETTER OMICRON WITH TONOS
'\u03cd' # 0xB7 -> GREEK SMALL LETTER UPSILON WITH TONOS
'\u03cb' # 0xB8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
'\u03ce' # 0xB9 -> GREEK SMALL LETTER OMEGA WITH TONOS
'\u03c2' # 0xBA -> GREEK SMALL LETTER FINAL SIGMA
'\u03c4' # 0xBB -> GREEK SMALL LETTER TAU
'\u03c5' # 0xBC -> GREEK SMALL LETTER UPSILON
'\u03c6' # 0xBD -> GREEK SMALL LETTER PHI
'\u03c7' # 0xBE -> GREEK SMALL LETTER CHI
'\u03c8' # 0xBF -> GREEK SMALL LETTER PSI
'{' # 0xC0 -> LEFT CURLY BRACKET
'A' # 0xC1 -> LATIN CAPITAL LETTER A
'B' # 0xC2 -> LATIN CAPITAL LETTER B
'C' # 0xC3 -> LATIN CAPITAL LETTER C
'D' # 0xC4 -> LATIN CAPITAL LETTER D
'E' # 0xC5 -> LATIN CAPITAL LETTER E
'F' # 0xC6 -> LATIN CAPITAL LETTER F
'G' # 0xC7 -> LATIN CAPITAL LETTER G
'H' # 0xC8 -> LATIN CAPITAL LETTER H
'I' # 0xC9 -> LATIN CAPITAL LETTER I
'\xad' # 0xCA -> SOFT HYPHEN
'\u03c9' # 0xCB -> GREEK SMALL LETTER OMEGA
'\u0390' # 0xCC -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
'\u03b0' # 0xCD -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
'\u2018' # 0xCE -> LEFT SINGLE QUOTATION MARK
'\u2015' # 0xCF -> HORIZONTAL BAR
'}' # 0xD0 -> RIGHT CURLY BRACKET
'J' # 0xD1 -> LATIN CAPITAL LETTER J
'K' # 0xD2 -> LATIN CAPITAL LETTER K
'L' # 0xD3 -> LATIN CAPITAL LETTER L
'M' # 0xD4 -> LATIN CAPITAL LETTER M
'N' # 0xD5 -> LATIN CAPITAL LETTER N
'O' # 0xD6 -> LATIN CAPITAL LETTER O
'P' # 0xD7 -> LATIN CAPITAL LETTER P
'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
'R' # 0xD9 -> LATIN CAPITAL LETTER R
'\xb1' # 0xDA -> PLUS-MINUS SIGN
'\xbd' # 0xDB -> VULGAR FRACTION ONE HALF
'\x1a' # 0xDC -> SUBSTITUTE
'\u0387' # 0xDD -> GREEK ANO TELEIA
'\u2019' # 0xDE -> RIGHT SINGLE QUOTATION MARK
'\xa6' # 0xDF -> BROKEN BAR
'\\' # 0xE0 -> REVERSE SOLIDUS
'\x1a' # 0xE1 -> SUBSTITUTE
'S' # 0xE2 -> LATIN CAPITAL LETTER S
'T' # 0xE3 -> LATIN CAPITAL LETTER T
'U' # 0xE4 -> LATIN CAPITAL LETTER U
'V' # 0xE5 -> LATIN CAPITAL LETTER V
'W' # 0xE6 -> LATIN CAPITAL LETTER W
'X' # 0xE7 -> LATIN CAPITAL LETTER X
'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
'\xb2' # 0xEA -> SUPERSCRIPT TWO
'\xa7' # 0xEB -> SECTION SIGN
'\x1a' # 0xEC -> SUBSTITUTE
'\x1a' # 0xED -> SUBSTITUTE
'\xab' # 0xEE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xEF -> NOT SIGN
'0' # 0xF0 -> DIGIT ZERO
'1' # 0xF1 -> DIGIT ONE
'2' # 0xF2 -> DIGIT TWO
'3' # 0xF3 -> DIGIT THREE
'4' # 0xF4 -> DIGIT FOUR
'5' # 0xF5 -> DIGIT FIVE
'6' # 0xF6 -> DIGIT SIX
'7' # 0xF7 -> DIGIT SEVEN
'8' # 0xF8 -> DIGIT EIGHT
'9' # 0xF9 -> DIGIT NINE
'\xb3' # 0xFA -> SUPERSCRIPT THREE
'\xa9' # 0xFB -> COPYRIGHT SIGN
'\x1a' # 0xFC -> SUBSTITUTE
'\x1a' # 0xFD -> SUBSTITUTE
'\xbb' # 0xFE -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
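# Hedged round-trip sketch (not part of the generated file): once this codec
# is registered by the encodings package, cp875 maps byte values through the
# tables above, e.g. 0xC1 decodes to 'A' and 0x41 to GREEK CAPITAL LETTER ALPHA.
#
#   b'\xc1'.decode('cp875')  # -> 'A'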
|
Genymobile/genymotion-kernel
|
refs/heads/genymotion-goldfish-3.10
|
tools/perf/scripts/python/netdev-times.py
|
11271
|
# Display the per-packet processing flow and the time spent in each step.
# It helps us investigate networking or network device behavior.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint events related to this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value includes time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which includes a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
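# Example of the conversion performed above (illustrative): 1,500,000 ns
# becomes 1.5 ms.
#
#   diff_msec(0, 1500000)  # -> 1.5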
# Display the process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display the processing of received packets and the interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be shown
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
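# A minimal usage sketch (illustrative, not part of the original script).
# trace_begin() above reads its options from sys.argv, so after recording the
# irq/napi/net/skb tracepoints that the handlers above consume, one might run:
#
#   perf script -s netdev-times.py rx dev=eth0 debug
#
# where 'eth0' is only an example device name and rx/tx/dev=/debug correspond
# to the flags parsed in trace_begin().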
|
kingvuplus/bh-s
|
refs/heads/master
|
lib/python/Screens/Scart.py
|
126
|
from Screen import Screen
from MessageBox import MessageBox
from Components.AVSwitch import AVSwitch
from Tools import Notifications
class Scart(Screen):
def __init__(self, session, start_visible=True):
Screen.__init__(self, session)
self.msgBox = None
self.notificationVisible = None
self.avswitch = AVSwitch()
if start_visible:
self.onExecBegin.append(self.showMessageBox)
self.msgVisible = None
else:
self.msgVisible = False
def showMessageBox(self):
if self.msgVisible is None:
self.onExecBegin.remove(self.showMessageBox)
self.msgVisible = False
if not self.msgVisible:
self.msgVisible = True
self.avswitch.setInput("SCART")
if not self.session.in_exec:
self.notificationVisible = True
Notifications.AddNotificationWithCallback(self.MsgBoxClosed, MessageBox, _("If you see this, something is wrong with\nyour scart connection. Press OK to return."), MessageBox.TYPE_ERROR, msgBoxID = "scart_msgbox")
else:
self.msgBox = self.session.openWithCallback(self.MsgBoxClosed, MessageBox, _("If you see this, something is wrong with\nyour scart connection. Press OK to return."), MessageBox.TYPE_ERROR)
def MsgBoxClosed(self, *val):
self.msgBox = None
self.switchToTV()
def switchToTV(self, *val):
if self.msgVisible:
if self.msgBox:
self.msgBox.close() # ... MsgBoxClosed -> switchToTV again..
return
self.avswitch.setInput("ENCODER")
self.msgVisible = False
if self.notificationVisible:
self.avswitch.setInput("ENCODER")
self.notificationVisible = False
for notification in Notifications.current_notifications:
try:
if notification[1].msgBoxID == "scart_msgbox":
notification[1].close()
except:
print "other notification is open. try another one."
|
chiamingyen/pygroup_sqlite
|
refs/heads/master
|
wsgi/static/Brython2.1.4-20140810-083054/Lib/unittest/test/dummy.py
|
1061
|
# Empty module for testing the loading of modules
|
maliciamrg/xbmc-addon-tvtumbler
|
refs/heads/master
|
tvtumbler/housekeeper.py
|
2
|
'''
This file is part of TvTumbler.
@author: Dermot Buckley
@copyright: Copyright (c) 2013, Dermot Buckley
@license: GPL
@contact: info@tvtumbler.com
'''
import time
import xbmc
from . import logger, fastcache, blacklist, epdb, showsettings
from .names import scene
_fastcache_expire_last_run = time.time()
_blacklist_expire_last_run = time.time()
_epdb_last_run = time.time()
_showsettings_purge_last_run = time.time()
def run():
global _fastcache_expire_last_run, _blacklist_expire_last_run, _epdb_last_run, _showsettings_purge_last_run
logger.debug('housekeeper - run')
if xbmc.Player().isPlaying():
logger.debug('XBMC is Playing, skipping housekeeping')
return
from . import main
if xbmc.abortRequested or main.shutdownRequested:
return
if time.time() - _fastcache_expire_last_run > 60 * 60 * 24: # 24 hrs
fastcache.expire_old_records()
_fastcache_expire_last_run = time.time()
if xbmc.abortRequested or main.shutdownRequested:
return
if time.time() - _blacklist_expire_last_run > 60 * 60 * 36: # 36 hrs
blacklist.expire_old_records()
_blacklist_expire_last_run = time.time()
if xbmc.abortRequested or main.shutdownRequested:
return
scene.update_if_needed()
if xbmc.abortRequested or main.shutdownRequested:
return
# We try to refresh the shows every 42 minutes. Only shows actually needing a refresh
# will be refreshed. We limit each run to 10 shows (the 10 oldest).
if time.time() - _epdb_last_run > 60 * 42:
epdb.refresh_needed_shows(show_limit=10)
_epdb_last_run = time.time()
if xbmc.abortRequested or main.shutdownRequested:
return
if time.time() - _showsettings_purge_last_run > 60 * 60 * 13: # 13 hours
showsettings.purge_missing_shows()
_showsettings_purge_last_run = time.time()
logger.debug('housekeeper is finished')
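# Illustrative caller (an assumption, not part of this module): run() keeps its
# own per-task timestamps, so a service loop can invoke it frequently, e.g.
#
#   while not xbmc.abortRequested:
#       run()
#       xbmc.sleep(60 * 1000)  # re-check roughly once a minute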
|
TEAM-Gummy/platform_external_chromium_org
|
refs/heads/kk4.4
|
tools/telemetry/telemetry/exception_formatter.py
|
25
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Print prettier and more detailed exceptions."""
import math
import os
import sys
import traceback
from telemetry.core import util
def InstallUnhandledExceptionFormatter():
sys.excepthook = PrintFormattedException
def PrintFormattedException(exception_class, exception, tb):
"""Prints an Exception in a more useful format than the default.
TODO(tonyg): Consider further enhancements. For instance:
- Report stacks to maintainers like depot_tools does.
- Add a debug flag to automatically start pdb upon exception.
"""
def _GetFinalFrame(frame):
final_frame = None
while frame is not None:
final_frame = frame
frame = frame.tb_next
return final_frame
def _AbbreviateMiddle(target, middle, length):
assert length >= 0, 'Must provide positive length'
assert len(middle) <= length, 'middle must not be greater than length'
if len(target) <= length:
return target
half_length = (length - len(middle)) / 2.
return '%s%s%s' % (target[:int(math.floor(half_length))],
middle,
target[-int(math.ceil(half_length)):])
base_dir = os.path.abspath(util.GetChromiumSrcDir())
formatted_exception = traceback.format_exception(
exception_class, exception, tb)
extracted_tb = traceback.extract_tb(tb)
traceback_header = formatted_exception[0].strip()
exception = ''.join([l[2:] if l[:2] == '  ' else l for l in
traceback.format_exception_only(exception_class,
exception)])
local_variables = [(variable, value) for variable, value in
_GetFinalFrame(tb).tb_frame.f_locals.iteritems()
if variable != 'self']
# Format the traceback.
print >> sys.stderr
print >> sys.stderr, traceback_header
for filename, line, function, text in extracted_tb:
filename = os.path.abspath(filename)
if filename.startswith(base_dir):
filename = filename[len(base_dir)+1:]
print >> sys.stderr, ' %s at %s:%d' % (function, filename, line)
print >> sys.stderr, ' %s' % text
# Format the locals.
if local_variables:
print >> sys.stderr
print >> sys.stderr, 'Locals:'
longest_variable = max([len(v) for v, _ in local_variables])
for variable, value in sorted(local_variables):
value = repr(value)
possibly_truncated_value = _AbbreviateMiddle(value, ' ... ', 1024)
truncation_indication = ''
if len(possibly_truncated_value) != len(value):
truncation_indication = ' (truncated)'
print >> sys.stderr, ' %s: %s%s' % (variable.ljust(longest_variable + 1),
possibly_truncated_value,
truncation_indication)
# Format the exception.
print >> sys.stderr
print >> sys.stderr, exception
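# Minimal usage sketch (the import path is assumed from the file location and
# the ValueError is just an illustration):
#
#   from telemetry import exception_formatter
#   exception_formatter.InstallUnhandledExceptionFormatter()
#   raise ValueError('demo')  # now rendered by PrintFormattedException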
|
strk/QGIS
|
refs/heads/master
|
python/plugins/processing/algs/grass7/ext/r_li_patchnum.py
|
45
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
r_li_patchnum.py
----------------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
from .r_li import checkMovingWindow, configFile
def checkParameterValuesBeforeExecuting(alg, parameters, context):
return checkMovingWindow(alg, parameters, context)
def processCommand(alg, parameters, context, feedback):
configFile(alg, parameters, context, feedback)
|
agentxan/nzbToMedia
|
refs/heads/master
|
libs/requests/packages/urllib3/contrib/ntlmpool.py
|
1009
|
"""
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
(self.num_connections, self.host, self.authurl))
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % reshdr)
log.debug('Response data: %s [...]' % res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % dict(res.getheaders()))
log.debug('Response data: %s [...]' % res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
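# Minimal usage sketch (host, credentials and path below are placeholders, not
# taken from this module):
#
#   pool = NTLMConnectionPool('MYDOMAIN\\alice', 's3cret', authurl='/protected',
#                             host='intranet.example.com', port=443)
#   response = pool.urlopen('GET', '/protected')
#   print(response.status)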
|
codingisacopingstrategy/unicornify
|
refs/heads/master
|
background.py
|
2
|
# Copyright 2010 Benjamin Dumke
#
# This file is part of Unicornify
#
# Unicornify is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Unicornify is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Unicornify; see the file COPYING. If not, see
# <http://www.gnu.org/licenses/>.
from graphics import SquareImage, hls_to_rgb
from core import Data
class BackgroundData(Data):
def __init__(self):
self._data = { "sky_hue": 60,
"sky_sat": 50,
"land_hue": 120,
"land_sat": 50,
"horizon": .6,
"rainbow_foot": .5, # [0,1]-based x-coordinate of the point where the rainbow hits the horizon
"rainbow_dir": -1,
"rainbow_height": 1.5,
"rainbow_band_width": 5,
"land_light": 40,
"cloud_positions": [(.6, .25)],
"cloud_sizes": [(.1, 1.5)], # the first is relative to the image, the second is relative to the first
"cloud_lightnesses": [95],
}
def randomize(self, randomizer):
choice, randint, random = randomizer.choice, randomizer.randint, randomizer.random
self.sky_hue = randint(0, 359)
self.sky_sat = randint(30, 70)
self.land_hue = randint(0, 359)
self.land_sat = randint(20, 60)
self.horizon = .5 + random() * .2
self.rainbow_foot = .2 + random() * .6
self.rainbow_dir = choice((-1, 1))
self.rainbow_height = .5 + random() * 1.5
self.rainbow_band_width = .01 + random()* .02
self.land_light = randint(20, 50)
def randomize2(self, randomizer):
choice, randint, random = randomizer.choice, randomizer.randint, randomizer.random
self.cloud_positions = [(random(), (.3 + random() * .6) * self.horizon) for i in xrange(randint(1, 3))]
self.cloud_sizes = [(random()*.04 + .02, random()*.7 + 1.3) for c in self.cloud_positions]
self.cloud_lightnesses = [randint(75, 90) for c in self.cloud_positions]
def get_background(size, data): # the returned image is 2*size pixels square
im = SquareImage(size * 2, data.sky_col(60), data.sky_col(10))
horizon_pix = int(im.size * data.horizon)
center = (im.size * (data.rainbow_foot + data.rainbow_dir * data.rainbow_height), horizon_pix)
outer_radius = data.rainbow_height * im.size
delta = data.rainbow_band_width * im.size
im.save()
for w in xrange(7):
col = hls_to_rgb(w * 45, 50, 100)
im.circle(center, outer_radius - w * delta, col)
im.circle(center, outer_radius - 7 * delta, im.RESTORE)
land1 = data.land_col(data.land_light)
land2 = data.land_col(data.land_light / 2)
im.hor_gradient(land1, land2, 0, 2 * size - 1, horizon_pix, 2 * size - 1)
for pos, sizes, lightness in zip(data.cloud_positions, data.cloud_sizes, data.cloud_lightnesses):
cloud(im, (im.size * pos[0], im.size * pos[1]), sizes[0] * im.size, sizes[1] * sizes[0] * im.size, data.sky_col(lightness))
return im
def cloud(img, pos, size1, size2, color):
"""sizeX is a radius of one of the circles. size2 should be
between 100 and 200% of size1. pos is bottom center."""
x, y = map(int, pos)
size1, size2 = int(size1), int(size2)
img.circle((x - 2 * size1, y - size1 - 1), size1, color)
img.circle((x + 2 * size1, y - size1 - 1), size1, color)
img.top_half_circle((x, y-size1), size2, color)
for ly in xrange(y - size1, y + 1):
img.hor_line(color, x - 2 * size1, x + 2 * size1, ly)
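# Rough usage sketch (assumptions: the stdlib 'random' module satisfies the
# randomizer interface used above, and sky_col()/land_col() are provided by the
# Data base class imported from core):
#
#   import random
#   data = BackgroundData()
#   data.randomize(random)    # random.choice/randint/random are module functions
#   data.randomize2(random)
#   image = get_background(128, data)  # a 256x256 SquareImage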
|
CubicERP/geraldo
|
refs/heads/master
|
site/newsite/site-geraldo/django/utils/html.py
|
26
|
"""HTML utilities suitable for global use."""
import re
import string
from django.utils.safestring import SafeData, mark_safe
from django.utils.encoding import force_unicode
from django.utils.functional import allow_lazy
from django.utils.http import urlquote
# Configuration for urlize() function.
LEADING_PUNCTUATION = ['(', '<', '&lt;']
TRAILING_PUNCTUATION = ['.', ',', ')', '>', '\n', '&gt;']
# List of possible strings used for bullets in bulleted lists.
DOTS = ['&middot;', '*', '\xe2\x80\xa2', '&#149;', '&bull;', '&#8226;']
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
word_split_re = re.compile(r'(\s+)')
punctuation_re = re.compile('^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % \
('|'.join([re.escape(x) for x in LEADING_PUNCTUATION]),
'|'.join([re.escape(x) for x in TRAILING_PUNCTUATION])))
simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
html_gunk_re = re.compile(r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE)
hard_coded_bullets_re = re.compile(r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join([re.escape(x) for x in DOTS]), re.DOTALL)
trailing_empty_content_re = re.compile(r'(?:<p>(?:&nbsp;|\s|<br \/>)*?</p>\s*)+\Z')
del x # Temporary variable
def escape(html):
"""Returns the given HTML with ampersands, quotes and carets encoded."""
return mark_safe(force_unicode(html).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;'))
escape = allow_lazy(escape, unicode)
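# For instance (illustrative): escape(u'a < b & "c"') returns
# u'a &lt; b &amp; &quot;c&quot;'.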
def conditional_escape(html):
"""
Similar to escape(), except that it doesn't operate on pre-escaped strings.
"""
if isinstance(html, SafeData):
return html
else:
return escape(html)
def linebreaks(value, autoescape=False):
"""Converts newlines into <p> and <br />s."""
value = re.sub(r'\r\n|\r|\n', '\n', force_unicode(value)) # normalize newlines
paras = re.split('\n{2,}', value)
if autoescape:
paras = [u'<p>%s</p>' % escape(p.strip()).replace('\n', '<br />') for p in paras]
else:
paras = [u'<p>%s</p>' % p.strip().replace('\n', '<br />') for p in paras]
return u'\n\n'.join(paras)
linebreaks = allow_lazy(linebreaks, unicode)
def strip_tags(value):
"""Returns the given HTML with all tags stripped."""
return re.sub(r'<[^>]*?>', '', force_unicode(value))
strip_tags = allow_lazy(strip_tags)
def strip_spaces_between_tags(value):
"""Returns the given HTML with spaces between tags removed."""
return re.sub(r'>\s+<', '><', force_unicode(value))
strip_spaces_between_tags = allow_lazy(strip_spaces_between_tags, unicode)
def strip_entities(value):
"""Returns the given HTML with all entities (&something;) stripped."""
return re.sub(r'&(?:\w+|#\d+);', '', force_unicode(value))
strip_entities = allow_lazy(strip_entities, unicode)
def fix_ampersands(value):
"""Returns the given HTML with all unencoded ampersands encoded correctly."""
return unencoded_ampersands_re.sub('&amp;', force_unicode(value))
fix_ampersands = allow_lazy(fix_ampersands, unicode)
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
"""
Converts any URLs in text into clickable links.
Works on http://, https://, www. links and links ending in .org, .net or
.com. Links can have trailing punctuation (periods, commas, close-parens)
and leading punctuation (opening parens) and it'll still do the right
thing.
If trim_url_limit is not None, the URLs in link text longer than this limit
will be truncated to trim_url_limit-3 characters and appended with an ellipsis.
If nofollow is True, the URLs in link text will get a rel="nofollow"
attribute.
If autoescape is True, the link text and URLs will get autoescaped.
"""
trim_url = lambda x, limit=trim_url_limit: limit is not None and (len(x) > limit and ('%s...' % x[:max(0, limit - 3)])) or x
safe_input = isinstance(text, SafeData)
words = word_split_re.split(force_unicode(text))
nofollow_attr = nofollow and ' rel="nofollow"' or ''
for i, word in enumerate(words):
match = None
if '.' in word or '@' in word or ':' in word:
match = punctuation_re.match(word)
if match:
lead, middle, trail = match.groups()
# Make URL we want to point to.
url = None
if middle.startswith('http://') or middle.startswith('https://'):
url = urlquote(middle, safe='/&=:;#?+*')
elif middle.startswith('www.') or ('@' not in middle and \
middle and middle[0] in string.ascii_letters + string.digits and \
(middle.endswith('.org') or middle.endswith('.net') or middle.endswith('.com'))):
url = urlquote('http://%s' % middle, safe='/&=:;#?+*')
elif '@' in middle and not ':' in middle and simple_email_re.match(middle):
url = 'mailto:%s' % middle
nofollow_attr = ''
# Make link.
if url:
trimmed = trim_url(middle)
if autoescape and not safe_input:
lead, trail = escape(lead), escape(trail)
url, trimmed = escape(url), escape(trimmed)
middle = '<a href="%s"%s>%s</a>' % (url, nofollow_attr, trimmed)
words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
else:
if safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
elif safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
return u''.join(words)
urlize = allow_lazy(urlize, unicode)
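# For example (illustrative): urlize(u'see www.example.com', nofollow=True)
# returns u'see <a href="http://www.example.com" rel="nofollow">www.example.com</a>',
# while plain email addresses become mailto: links without the rel attribute.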
def clean_html(text):
"""
Clean the given HTML. Specifically, do the following:
* Convert <b> and <i> to <strong> and <em>.
* Encode all ampersands correctly.
* Remove all "target" attributes from <a> tags.
* Remove extraneous HTML, such as presentational tags that open and
immediately close and <br clear="all">.
* Convert hard-coded bullets into HTML unordered lists.
* Remove stuff like "<p> </p>", but only if it's at the
bottom of the text.
"""
from django.utils.text import normalize_newlines
text = normalize_newlines(force_unicode(text))
text = re.sub(r'<(/?)\s*b\s*>', '<\\1strong>', text)
text = re.sub(r'<(/?)\s*i\s*>', '<\\1em>', text)
text = fix_ampersands(text)
# Remove all target="" attributes from <a> tags.
text = link_target_attribute_re.sub('\\1', text)
# Trim stupid HTML such as <br clear="all">.
text = html_gunk_re.sub('', text)
# Convert hard-coded bullets into HTML unordered lists.
def replace_p_tags(match):
s = match.group().replace('</p>', '</li>')
for d in DOTS:
s = s.replace('<p>%s' % d, '<li>')
return u'<ul>\n%s\n</ul>' % s
text = hard_coded_bullets_re.sub(replace_p_tags, text)
# Remove stuff like "<p> </p>", but only if it's at the bottom
# of the text.
text = trailing_empty_content_re.sub('', text)
return text
clean_html = allow_lazy(clean_html, unicode)
|
arthurchan1111/EventPlanner
|
refs/heads/master
|
node_modules/node-gyp/gyp/pylib/gyp/msvs_emulation.py
|
1407
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module helps emulate Visual Studio 2008 behavior on top of other
build systems, primarily ninja.
"""
import os
import re
import subprocess
import sys
from gyp.common import OrderedSet
import gyp.MSVSUtil
import gyp.MSVSVersion
windows_quoter_regex = re.compile(r'(\\*)"')
def QuoteForRspFile(arg):
"""Quote a command line argument so that it appears as one argument when
processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for
Windows programs)."""
# See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment
# threads. This is actually the quoting rules for CommandLineToArgvW, not
# for the shell, because the shell doesn't do anything in Windows. This
# works more or less because most programs (including the compiler, etc.)
# use that function to handle command line arguments.
# For a literal quote, CommandLineToArgvW requires 2n+1 backslashes
# preceding it, and results in n backslashes + the quote. So we substitute
# in 2* what we match, +1 more, plus the quote.
arg = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)
# %'s also need to be doubled otherwise they're interpreted as batch
# positional arguments. Also make sure to escape the % so that they're
# passed literally through escaping so they can be singled to just the
# original %. Otherwise, trying to pass the literal representation that
# looks like an environment variable to the shell (e.g. %PATH%) would fail.
arg = arg.replace('%', '%%')
# These commands are used in rsp files, so no escaping for the shell (via ^)
# is necessary.
# Finally, wrap the whole thing in quotes so that the above quote rule
# applies and whitespace isn't a word break.
return '"' + arg + '"'
def EncodeRspFileList(args):
"""Process a list of arguments using QuoteCmdExeArgument."""
# Note that the first argument is assumed to be the command. Don't add
# quotes around it because then built-ins like 'echo', etc. won't work.
# Take care to normpath only the path in the case of 'call ../x.bat' because
# otherwise the whole thing is incorrectly interpreted as a path and not
# normalized correctly.
if not args: return ''
if args[0].startswith('call '):
call, program = args[0].split(' ', 1)
program = call + ' ' + os.path.normpath(program)
else:
program = os.path.normpath(args[0])
return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:])
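# Worked examples of the quoting rules described above (illustrative, not part
# of the original source):
#
#   QuoteForRspFile('a b')       -> '"a b"'
#   QuoteForRspFile('say "hi"')  -> '"say \\"hi\\""'   (backslash-escaped quotes)
#   QuoteForRspFile('100%')      -> '"100%%"'
#   EncodeRspFileList(['cl.exe', '/c', 'foo bar.cc']) -> 'cl.exe "/c" "foo bar.cc"'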
def _GenericRetrieve(root, default, path):
"""Given a list of dictionary keys |path| and a tree of dicts |root|, find
value at path, or return |default| if any of the path doesn't exist."""
if not root:
return default
if not path:
return root
return _GenericRetrieve(root.get(path[0]), default, path[1:])
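# Illustration (not from the original source): this walks nested dicts, so
# _GenericRetrieve({'a': {'b': 1}}, 0, ('a', 'b')) returns 1, while a missing
# key anywhere along the path, e.g. _GenericRetrieve({'a': {}}, 0, ('a', 'b')),
# returns the default 0.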
def _AddPrefix(element, prefix):
"""Add |prefix| to |element| or each subelement if element is iterable."""
if element is None:
return element
# Note, not Iterable because we don't want to handle strings like that.
if isinstance(element, list) or isinstance(element, tuple):
return [prefix + e for e in element]
else:
return prefix + element
def _DoRemapping(element, map):
"""If |element| then remap it through |map|. If |element| is iterable then
each item will be remapped. Any elements not found will be removed."""
if map is not None and element is not None:
if not callable(map):
map = map.get # Assume it's a dict, otherwise a callable to do the remap.
if isinstance(element, list) or isinstance(element, tuple):
element = filter(None, [map(elem) for elem in element])
else:
element = map(element)
return element
def _AppendOrReturn(append, element):
"""If |append| is None, simply return |element|. If |append| is not None,
then add |element| to it, adding each item in |element| if it's a list or
tuple."""
if append is not None and element is not None:
if isinstance(element, list) or isinstance(element, tuple):
append.extend(element)
else:
append.append(element)
else:
return element
def _FindDirectXInstallation():
"""Try to find an installation location for the DirectX SDK. Check for the
standard environment variable, and if that doesn't exist, try to find
via the registry. May return None if not found in either location."""
# Return previously calculated value, if there is one
if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
return _FindDirectXInstallation.dxsdk_dir
dxsdk_dir = os.environ.get('DXSDK_DIR')
if not dxsdk_dir:
# Setup params to pass to and attempt to launch reg.exe.
cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in p.communicate()[0].splitlines():
if 'InstallPath' in line:
dxsdk_dir = line.split(' ')[3] + "\\"
# Cache return value
_FindDirectXInstallation.dxsdk_dir = dxsdk_dir
return dxsdk_dir
def GetGlobalVSMacroEnv(vs_version):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents. Returns all variables that are independent of the target."""
env = {}
# '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when
# Visual Studio is actually installed.
if vs_version.Path():
env['$(VSInstallDir)'] = vs_version.Path()
env['$(VCInstallDir)'] = os.path.join(vs_version.Path(), 'VC') + '\\'
# Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be
# set. This happens when the SDK is sync'd via src-internal, rather than
# by typical end-user installation of the SDK. If it's not set, we don't
# want to leave the unexpanded variable in the path, so simply strip it.
dxsdk_dir = _FindDirectXInstallation()
env['$(DXSDK_DIR)'] = dxsdk_dir if dxsdk_dir else ''
# Try to find an installation location for the Windows DDK by checking
# the WDK_DIR environment variable, may be None.
env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '')
return env
def ExtractSharedMSVSSystemIncludes(configs, generator_flags):
"""Finds msvs_system_include_dirs that are common to all targets, removes
them from all targets, and returns an OrderedSet containing them."""
all_system_includes = OrderedSet(
configs[0].get('msvs_system_include_dirs', []))
for config in configs[1:]:
system_includes = config.get('msvs_system_include_dirs', [])
all_system_includes = all_system_includes & OrderedSet(system_includes)
if not all_system_includes:
return None
# Expand macros in all_system_includes.
env = GetGlobalVSMacroEnv(GetVSVersion(generator_flags))
expanded_system_includes = OrderedSet([ExpandMacros(include, env)
for include in all_system_includes])
if any(['$' in include for include in expanded_system_includes]):
# Some path relies on target-specific variables, bail.
return None
# Remove system includes shared by all targets from the targets.
for config in configs:
includes = config.get('msvs_system_include_dirs', [])
if includes: # Don't insert a msvs_system_include_dirs key if not needed.
# This must check the unexpanded includes list:
new_includes = [i for i in includes if i not in all_system_includes]
config['msvs_system_include_dirs'] = new_includes
return expanded_system_includes
class MsvsSettings(object):
"""A class that understands the gyp 'msvs_...' values (especially the
msvs_settings field). They largely correspond to the VS2008 IDE DOM. This
class helps map those settings to command line options."""
def __init__(self, spec, generator_flags):
self.spec = spec
self.vs_version = GetVSVersion(generator_flags)
supported_fields = [
('msvs_configuration_attributes', dict),
('msvs_settings', dict),
('msvs_system_include_dirs', list),
('msvs_disabled_warnings', list),
('msvs_precompiled_header', str),
('msvs_precompiled_source', str),
('msvs_configuration_platform', str),
('msvs_target_platform', str),
]
configs = spec['configurations']
for field, default in supported_fields:
setattr(self, field, {})
for configname, config in configs.iteritems():
getattr(self, field)[configname] = config.get(field, default())
self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])
unsupported_fields = [
'msvs_prebuild',
'msvs_postbuild',
]
unsupported = []
for field in unsupported_fields:
for config in configs.values():
if field in config:
unsupported += ["%s not supported (target %s)." %
(field, spec['target_name'])]
if unsupported:
raise Exception('\n'.join(unsupported))
def GetExtension(self):
"""Returns the extension for the target, with no leading dot.
Uses 'product_extension' if specified, otherwise uses MSVS defaults based on
the target type.
"""
ext = self.spec.get('product_extension', None)
if ext:
return ext
return gyp.MSVSUtil.TARGET_TYPE_EXT.get(self.spec['type'], '')
def GetVSMacroEnv(self, base_to_build=None, config=None):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents."""
target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64'
target_name = self.spec.get('product_prefix', '') + \
self.spec.get('product_name', self.spec['target_name'])
target_dir = base_to_build + '\\' if base_to_build else ''
target_ext = '.' + self.GetExtension()
target_file_name = target_name + target_ext
replacements = {
'$(InputName)': '${root}',
'$(InputPath)': '${source}',
'$(IntDir)': '$!INTERMEDIATE_DIR',
'$(OutDir)\\': target_dir,
'$(PlatformName)': target_platform,
'$(ProjectDir)\\': '',
'$(ProjectName)': self.spec['target_name'],
'$(TargetDir)\\': target_dir,
'$(TargetExt)': target_ext,
'$(TargetFileName)': target_file_name,
'$(TargetName)': target_name,
'$(TargetPath)': os.path.join(target_dir, target_file_name),
}
replacements.update(GetGlobalVSMacroEnv(self.vs_version))
return replacements
def ConvertVSMacros(self, s, base_to_build=None, config=None):
"""Convert from VS macro names to something equivalent."""
env = self.GetVSMacroEnv(base_to_build, config=config)
return ExpandMacros(s, env)
def AdjustLibraries(self, libraries):
"""Strip -l from library if it's specified with that."""
libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
return [lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs]
def _GetAndMunge(self, field, path, default, prefix, append, map):
"""Retrieve a value from |field| at |path| or return |default|. If
|append| is specified, and the item is found, it will be appended to that
object instead of returned. If |map| is specified, results will be
remapped through |map| before being returned or appended."""
result = _GenericRetrieve(field, default, path)
result = _DoRemapping(result, map)
result = _AddPrefix(result, prefix)
return _AppendOrReturn(append, result)
class _GetWrapper(object):
def __init__(self, parent, field, base_path, append=None):
self.parent = parent
self.field = field
self.base_path = [base_path]
self.append = append
def __call__(self, name, map=None, prefix='', default=None):
return self.parent._GetAndMunge(self.field, self.base_path + [name],
default=default, prefix=prefix, append=self.append, map=map)
def GetArch(self, config):
"""Get architecture based on msvs_configuration_platform and
msvs_target_platform. Returns either 'x86' or 'x64'."""
configuration_platform = self.msvs_configuration_platform.get(config, '')
platform = self.msvs_target_platform.get(config, '')
if not platform: # If no specific override, use the configuration's.
platform = configuration_platform
# Map from platform to architecture.
return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86')
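# For example (illustrative): a config whose msvs_configuration_platform is
# 'x64' and that sets no msvs_target_platform yields 'x64' here, while an empty
# or unrecognized platform falls back to 'x86'.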
def _TargetConfig(self, config):
"""Returns the target-specific configuration."""
# There's two levels of architecture/platform specification in VS. The
# first level is globally for the configuration (this is what we consider
# "the" config at the gyp level, which will be something like 'Debug' or
# 'Release_x64'), and a second target-specific configuration, which is an
# override for the global one. |config| is remapped here to take into
# account the local target-specific overrides to the global configuration.
arch = self.GetArch(config)
if arch == 'x64' and not config.endswith('_x64'):
config += '_x64'
if arch == 'x86' and config.endswith('_x64'):
config = config.rsplit('_', 1)[0]
return config
def _Setting(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_settings."""
return self._GetAndMunge(
self.msvs_settings[config], path, default, prefix, append, map)
def _ConfigAttrib(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_configuration_attributes."""
return self._GetAndMunge(
self.msvs_configuration_attributes[config],
path, default, prefix, append, map)
def AdjustIncludeDirs(self, include_dirs, config):
"""Updates include_dirs to expand VS specific paths, and adds the system
include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def AdjustMidlIncludeDirs(self, midl_include_dirs, config):
"""Updates midl_include_dirs to expand VS specific paths, and adds the
system include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = midl_include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCMIDLTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def GetComputedDefines(self, config):
"""Returns the set of defines that are injected to the defines list based
on other VS settings."""
config = self._TargetConfig(config)
defines = []
if self._ConfigAttrib(['CharacterSet'], config) == '1':
defines.extend(('_UNICODE', 'UNICODE'))
if self._ConfigAttrib(['CharacterSet'], config) == '2':
defines.append('_MBCS')
defines.extend(self._Setting(
('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
return defines
def GetCompilerPdbName(self, config, expand_special):
"""Get the pdb file name that should be used for compiler invocations, or
None if there's no explicit name specified."""
config = self._TargetConfig(config)
pdbname = self._Setting(
('VCCLCompilerTool', 'ProgramDataBaseFileName'), config)
if pdbname:
pdbname = expand_special(self.ConvertVSMacros(pdbname))
return pdbname
def GetMapFileName(self, config, expand_special):
"""Gets the explicitly overriden map file name for a target or returns None
if it's not set."""
config = self._TargetConfig(config)
map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config)
if map_file:
map_file = expand_special(self.ConvertVSMacros(map_file, config=config))
return map_file
def GetOutputName(self, config, expand_special):
"""Gets the explicitly overridden output name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
type = self.spec['type']
root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool'
# TODO(scottmg): Handle OutputDirectory without OutputFile.
output_file = self._Setting((root, 'OutputFile'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetPDBName(self, config, expand_special, default):
"""Gets the explicitly overridden pdb name for a target or returns
default if it's not overridden, or if no pdb will be generated."""
config = self._TargetConfig(config)
output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config)
generate_debug_info = self._Setting(
('VCLinkerTool', 'GenerateDebugInformation'), config)
if generate_debug_info == 'true':
if output_file:
return expand_special(self.ConvertVSMacros(output_file, config=config))
else:
return default
else:
return None
def GetNoImportLibrary(self, config):
"""If NoImportLibrary: true, ninja will not expect the output to include
an import library."""
config = self._TargetConfig(config)
noimplib = self._Setting(('NoImportLibrary',), config)
return noimplib == 'true'
def GetAsmflags(self, config):
"""Returns the flags that need to be added to ml invocations."""
config = self._TargetConfig(config)
asmflags = []
safeseh = self._Setting(('MASM', 'UseSafeExceptionHandlers'), config)
if safeseh == 'true':
asmflags.append('/safeseh')
return asmflags
def GetCflags(self, config):
"""Returns the flags that need to be added to .c and .cc compilations."""
config = self._TargetConfig(config)
cflags = []
cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
cl = self._GetWrapper(self, self.msvs_settings[config],
'VCCLCompilerTool', append=cflags)
cl('Optimization',
map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2')
cl('InlineFunctionExpansion', prefix='/Ob')
cl('DisableSpecificWarnings', prefix='/wd')
cl('StringPooling', map={'true': '/GF'})
cl('EnableFiberSafeOptimizations', map={'true': '/GT'})
cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi')
cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
cl('FloatingPointModel',
map={'0': 'precise', '1': 'strict', '2': 'fast'}, prefix='/fp:',
default='0')
cl('CompileAsManaged', map={'false': '', 'true': '/clr'})
cl('WholeProgramOptimization', map={'true': '/GL'})
cl('WarningLevel', prefix='/W')
cl('WarnAsError', map={'true': '/WX'})
cl('CallingConvention',
map={'0': 'd', '1': 'r', '2': 'z', '3': 'v'}, prefix='/G')
cl('DebugInformationFormat',
map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
cl('MinimalRebuild', map={'true': '/Gm'})
cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
cl('RuntimeLibrary',
map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH')
cl('DefaultCharIsUnsigned', map={'true': '/J'})
cl('TreatWChar_tAsBuiltInType',
map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t')
cl('EnablePREfast', map={'true': '/analyze'})
cl('AdditionalOptions', prefix='')
cl('EnableEnhancedInstructionSet',
map={'1': 'SSE', '2': 'SSE2', '3': 'AVX', '4': 'IA32', '5': 'AVX2'},
prefix='/arch:')
cflags.extend(['/FI' + f for f in self._Setting(
('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])])
if self.vs_version.short_name in ('2013', '2013e', '2015'):
# New flag required in 2013 to maintain previous PDB behavior.
cflags.append('/FS')
# ninja handles parallelism by itself, don't have the compiler do it too.
cflags = filter(lambda x: not x.startswith('/MP'), cflags)
return cflags
def _GetPchFlags(self, config, extension):
"""Get the flags to be added to the cflags for precompiled header support.
"""
config = self._TargetConfig(config)
# The PCH is only built once by a particular source file. Usage of PCH must
# only be for the same language (i.e. C vs. C++), so only include the pch
# flags when the language matches.
if self.msvs_precompiled_header[config]:
source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
if _LanguageMatchesForPch(source_ext, extension):
pch = os.path.split(self.msvs_precompiled_header[config])[1]
return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pch + '.pch']
return []
def GetCflagsC(self, config):
"""Returns the flags that need to be added to .c compilations."""
config = self._TargetConfig(config)
return self._GetPchFlags(config, '.c')
def GetCflagsCC(self, config):
"""Returns the flags that need to be added to .cc compilations."""
config = self._TargetConfig(config)
return ['/TP'] + self._GetPchFlags(config, '.cc')
def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
"""Get and normalize the list of paths in AdditionalLibraryDirectories
setting."""
config = self._TargetConfig(config)
libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
config, default=[])
libpaths = [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
for p in libpaths]
return ['/LIBPATH:"' + p + '"' for p in libpaths]
def GetLibFlags(self, config, gyp_to_build_path):
"""Returns the flags that need to be added to lib commands."""
config = self._TargetConfig(config)
libflags = []
lib = self._GetWrapper(self, self.msvs_settings[config],
'VCLibrarianTool', append=libflags)
libflags.extend(self._GetAdditionalLibraryDirectories(
'VCLibrarianTool', config, gyp_to_build_path))
lib('LinkTimeCodeGeneration', map={'true': '/LTCG'})
lib('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
lib('AdditionalOptions')
return libflags
def GetDefFile(self, gyp_to_build_path):
"""Returns the .def file from sources, if any. Otherwise returns None."""
spec = self.spec
if spec['type'] in ('shared_library', 'loadable_module', 'executable'):
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
return gyp_to_build_path(def_files[0])
elif len(def_files) > 1:
raise Exception("Multiple .def files")
return None
def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path):
""".def files get implicitly converted to a ModuleDefinitionFile for the
linker in the VS generator. Emulate that behaviour here."""
def_file = self.GetDefFile(gyp_to_build_path)
if def_file:
ldflags.append('/DEF:"%s"' % def_file)
def GetPGDName(self, config, expand_special):
"""Gets the explicitly overridden pgd name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
output_file = self._Setting(
('VCLinkerTool', 'ProfileGuidedDatabase'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetLdflags(self, config, gyp_to_build_path, expand_special,
manifest_base_name, output_name, is_executable, build_dir):
"""Returns the flags that need to be added to link commands, and the
manifest files."""
config = self._TargetConfig(config)
ldflags = []
ld = self._GetWrapper(self, self.msvs_settings[config],
'VCLinkerTool', append=ldflags)
self._GetDefFileAsLdflags(ldflags, gyp_to_build_path)
ld('GenerateDebugInformation', map={'true': '/DEBUG'})
ld('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
ldflags.extend(self._GetAdditionalLibraryDirectories(
'VCLinkerTool', config, gyp_to_build_path))
ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
ld('TreatLinkerWarningAsErrors', prefix='/WX',
map={'true': '', 'false': ':NO'})
out = self.GetOutputName(config, expand_special)
if out:
ldflags.append('/OUT:' + out)
pdb = self.GetPDBName(config, expand_special, output_name + '.pdb')
if pdb:
ldflags.append('/PDB:' + pdb)
pgd = self.GetPGDName(config, expand_special)
if pgd:
ldflags.append('/PGD:' + pgd)
map_file = self.GetMapFileName(config, expand_special)
ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file
else '/MAP'})
ld('MapExports', map={'true': '/MAPINFO:EXPORTS'})
ld('AdditionalOptions', prefix='')
minimum_required_version = self._Setting(
('VCLinkerTool', 'MinimumRequiredVersion'), config, default='')
if minimum_required_version:
minimum_required_version = ',' + minimum_required_version
ld('SubSystem',
map={'1': 'CONSOLE%s' % minimum_required_version,
'2': 'WINDOWS%s' % minimum_required_version},
prefix='/SUBSYSTEM:')
stack_reserve_size = self._Setting(
('VCLinkerTool', 'StackReserveSize'), config, default='')
if stack_reserve_size:
stack_commit_size = self._Setting(
('VCLinkerTool', 'StackCommitSize'), config, default='')
if stack_commit_size:
stack_commit_size = ',' + stack_commit_size
ldflags.append('/STACK:%s%s' % (stack_reserve_size, stack_commit_size))
ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE')
ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
ld('BaseAddress', prefix='/BASE:')
ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
ld('RandomizedBaseAddress',
map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
ld('DataExecutionPrevention',
map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
ld('ForceSymbolReferences', prefix='/INCLUDE:')
ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
ld('LinkTimeCodeGeneration',
map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE',
'4': ':PGUPDATE'},
prefix='/LTCG')
ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
ld('EntryPointSymbol', prefix='/ENTRY:')
ld('Profile', map={'true': '/PROFILE'})
ld('LargeAddressAware',
map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE')
# TODO(scottmg): This should sort of be somewhere else (not really a flag).
ld('AdditionalDependencies', prefix='')
if self.GetArch(config) == 'x86':
safeseh_default = 'true'
else:
safeseh_default = None
ld('ImageHasSafeExceptionHandlers',
map={'false': ':NO', 'true': ''}, prefix='/SAFESEH',
default=safeseh_default)
# If the base address is not specifically controlled, DYNAMICBASE should
# be on by default.
base_flags = filter(lambda x: 'DYNAMICBASE' in x or x == '/FIXED',
ldflags)
if not base_flags:
ldflags.append('/DYNAMICBASE')
# If the NXCOMPAT flag has not been specified, default to on. Despite the
# documentation that says this only defaults to on when the subsystem is
# Vista or greater (which applies to the linker), the IDE defaults it on
# unless it's explicitly off.
if not filter(lambda x: 'NXCOMPAT' in x, ldflags):
ldflags.append('/NXCOMPAT')
have_def_file = filter(lambda x: x.startswith('/DEF:'), ldflags)
manifest_flags, intermediate_manifest, manifest_files = \
self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path,
is_executable and not have_def_file, build_dir)
ldflags.extend(manifest_flags)
return ldflags, intermediate_manifest, manifest_files
def _GetLdManifestFlags(self, config, name, gyp_to_build_path,
allow_isolation, build_dir):
"""Returns a 3-tuple:
- the set of flags that need to be added to the link to generate
a default manifest
- the intermediate manifest that the linker will generate that should be
used to assert it doesn't add anything to the merged one.
- the list of all the manifest files to be merged by the manifest tool and
included into the link."""
generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'),
config,
default='true')
if generate_manifest != 'true':
# This means not only that the linker should not generate the intermediate
# manifest but also that the manifest tool should do nothing even when
# additional manifests are specified.
return ['/MANIFEST:NO'], [], []
output_name = name + '.intermediate.manifest'
flags = [
'/MANIFEST',
'/ManifestFile:' + output_name,
]
# Instead of using the MANIFESTUAC flags, we generate a .manifest to
# include into the list of manifests. This allows us to avoid the need to
# do two passes during linking. The /MANIFEST flag and /ManifestFile are
# still used, and the intermediate manifest is used to assert that the
# final manifest we get from merging all the additional manifest files
# (plus the one we generate here) isn't modified by merging the
# intermediate into it.
# Always NO, because we generate a manifest file that has what we want.
flags.append('/MANIFESTUAC:NO')
config = self._TargetConfig(config)
enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config,
default='true')
manifest_files = []
generated_manifest_outer = \
"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \
"<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \
"</assembly>"
if enable_uac == 'true':
execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'),
config, default='0')
execution_level_map = {
'0': 'asInvoker',
'1': 'highestAvailable',
'2': 'requireAdministrator'
}
ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config,
default='false')
inner = '''
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level='%s' uiAccess='%s' />
</requestedPrivileges>
</security>
</trustInfo>''' % (execution_level_map[execution_level], ui_access)
else:
inner = ''
generated_manifest_contents = generated_manifest_outer % inner
generated_name = name + '.generated.manifest'
# Need to join with the build_dir here as we're writing it during
# generation time, but we return the un-joined version because the build
# will occur in that directory. We only write the file if the contents
# have changed so that simply regenerating the project files doesn't
# cause a relink.
build_dir_generated_name = os.path.join(build_dir, generated_name)
gyp.common.EnsureDirExists(build_dir_generated_name)
f = gyp.common.WriteOnDiff(build_dir_generated_name)
f.write(generated_manifest_contents)
f.close()
manifest_files = [generated_name]
if allow_isolation:
flags.append('/ALLOWISOLATION')
manifest_files += self._GetAdditionalManifestFiles(config,
gyp_to_build_path)
return flags, output_name, manifest_files
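# Illustrative note (not part of the original file): with the defaults above
# (GenerateManifest 'true', EnableUAC 'true', UACExecutionLevel '0',
# UACUIAccess 'false'), the generated .manifest written to the build directory
# contains roughly:
#   <?xml version='1.0' encoding='UTF-8' standalone='yes'?>
#   <assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>
#     <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
#       <security>
#         <requestedPrivileges>
#           <requestedExecutionLevel level='asInvoker' uiAccess='false' />
#         </requestedPrivileges>
#       </security>
#     </trustInfo>
#   </assembly>
# and the link is invoked with /MANIFEST, /ManifestFile:<name>.intermediate.manifest
# and /MANIFESTUAC:NO.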
def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
"""Gets additional manifest files that are added to the default one
generated by the linker."""
files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
default=[])
if isinstance(files, str):
files = files.split(';')
return [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
for f in files]
def IsUseLibraryDependencyInputs(self, config):
"""Returns whether the target should be linked via Use Library Dependency
Inputs (using component .objs of a given .lib)."""
config = self._TargetConfig(config)
uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
return uldi == 'true'
def IsEmbedManifest(self, config):
"""Returns whether manifest should be linked into binary."""
config = self._TargetConfig(config)
embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config,
default='true')
return embed == 'true'
def IsLinkIncremental(self, config):
"""Returns whether the target should be linked incrementally."""
config = self._TargetConfig(config)
link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config)
return link_inc != '1'
def GetRcflags(self, config, gyp_to_ninja_path):
"""Returns the flags that need to be added to invocations of the resource
compiler."""
config = self._TargetConfig(config)
rcflags = []
rc = self._GetWrapper(self, self.msvs_settings[config],
'VCResourceCompilerTool', append=rcflags)
rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
rcflags.append('/I' + gyp_to_ninja_path('.'))
rc('PreprocessorDefinitions', prefix='/d')
# /l arg must be in hex without leading '0x'
rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
return rcflags
def BuildCygwinBashCommandLine(self, args, path_to_base):
"""Build a command line that runs args via cygwin bash. We assume that all
incoming paths are in Windows normpath'd form, so they need to be
converted to posix style for the part of the command line that's passed to
bash. We also have to do some Visual Studio macro emulation here because
various rules use magic VS names for things. Also note that rules that
contain ninja variables cannot be fixed here (for example ${source}), so
the outer generator needs to make sure that the paths that are written out
are in posix style, if the command line will be used here."""
cygwin_dir = os.path.normpath(
os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
cd = ('cd %s' % path_to_base).replace('\\', '/')
args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
args = ["'%s'" % a.replace("'", "'\\''") for a in args]
bash_cmd = ' '.join(args)
cmd = (
'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
'bash -c "%s ; %s"' % (cd, bash_cmd))
return cmd
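# Illustrative example (not part of the original file), assuming
# msvs_cygwin_dirs[0] is 'third_party/cygwin':
#   BuildCygwinBashCommandLine(['python', 'do_stuff.py'], 'out\\Debug')
# returns (as a single string, wrapped here for readability) approximately:
#   call "out\\Debug\\third_party\\cygwin\\setup_env.bat" && set CYGWIN=nontsec &&
#   bash -c "cd out/Debug ; 'python' 'do_stuff.py'"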
def IsRuleRunUnderCygwin(self, rule):
"""Determine if an action should be run under cygwin. If the variable is
unset, or set to 1 we use cygwin."""
return int(rule.get('msvs_cygwin_shell',
self.spec.get('msvs_cygwin_shell', 1))) != 0
def _HasExplicitRuleForExtension(self, spec, extension):
"""Determine if there's an explicit rule for a particular extension."""
for rule in spec.get('rules', []):
if rule['extension'] == extension:
return True
return False
def _HasExplicitIdlActions(self, spec):
"""Determine if an action should not run midl for .idl files."""
return any([action.get('explicit_idl_action', 0)
for action in spec.get('actions', [])])
def HasExplicitIdlRulesOrActions(self, spec):
"""Determine if there's an explicit rule or action for idl files. When
there isn't we need to generate implicit rules to build MIDL .idl files."""
return (self._HasExplicitRuleForExtension(spec, 'idl') or
self._HasExplicitIdlActions(spec))
def HasExplicitAsmRules(self, spec):
"""Determine if there's an explicit rule for asm files. When there isn't we
need to generate implicit rules to assemble .asm files."""
return self._HasExplicitRuleForExtension(spec, 'asm')
def GetIdlBuildData(self, source, config):
"""Determine the implicit outputs for an idl file. Returns output
directory, outputs, and variables and flags that are required."""
config = self._TargetConfig(config)
midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
def midl(name, default=None):
return self.ConvertVSMacros(midl_get(name, default=default),
config=config)
tlb = midl('TypeLibraryName', default='${root}.tlb')
header = midl('HeaderFileName', default='${root}.h')
dlldata = midl('DLLDataFileName', default='dlldata.c')
iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
proxy = midl('ProxyFileName', default='${root}_p.c')
# Note that .tlb is not included in the outputs as it is not always
# generated depending on the content of the input idl file.
outdir = midl('OutputDirectory', default='')
output = [header, dlldata, iid, proxy]
variables = [('tlb', tlb),
('h', header),
('dlldata', dlldata),
('iid', iid),
('proxy', proxy)]
# TODO(scottmg): Are there configuration settings to set these flags?
target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64'
flags = ['/char', 'signed', '/env', target_platform, '/Oicf']
return outdir, output, variables, flags
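# Illustrative note (not part of the original file): with no VCMIDLTool settings
# overridden, GetIdlBuildData returns outdir='', outputs
# ['${root}.h', 'dlldata.c', '${root}_i.c', '${root}_p.c'] (the .tlb is omitted,
# as noted above), the corresponding (tlb, h, dlldata, iid, proxy) variables, and
# flags ['/char', 'signed', '/env', 'win32', '/Oicf'] when targeting x86
# ('x64' otherwise); ${root} substitution is left to the generator.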
def _LanguageMatchesForPch(source_ext, pch_source_ext):
c_exts = ('.c',)
cc_exts = ('.cc', '.cxx', '.cpp')
return ((source_ext in c_exts and pch_source_ext in c_exts) or
(source_ext in cc_exts and pch_source_ext in cc_exts))
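# For example, _LanguageMatchesForPch('.cc', '.cpp') is True (both C++), while
# _LanguageMatchesForPch('.c', '.cpp') is False, so a C source never depends on
# a C++ precompiled header.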
class PrecompiledHeader(object):
"""Helper to generate dependencies and build rules to handle generation of
precompiled headers. Interface matches the GCH handler in xcode_emulation.py.
"""
def __init__(
self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext):
self.settings = settings
self.config = config
pch_source = self.settings.msvs_precompiled_source[self.config]
self.pch_source = gyp_to_build_path(pch_source)
filename, _ = os.path.splitext(pch_source)
self.output_obj = gyp_to_unique_output(filename + obj_ext).lower()
def _PchHeader(self):
"""Get the header that will appear in an #include line for all source
files."""
return os.path.split(self.settings.msvs_precompiled_header[self.config])[1]
def GetObjDependencies(self, sources, objs, arch):
"""Given a list of sources files and the corresponding object files,
returns a list of the pch files that should be depended upon. The
additional wrapping in the return value is for interface compatibility
with make.py on Mac, and xcode_emulation.py."""
assert arch is None
if not self._PchHeader():
return []
pch_ext = os.path.splitext(self.pch_source)[1]
for source in sources:
if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext):
return [(None, None, self.output_obj)]
return []
def GetPchBuildCommands(self, arch):
"""Not used on Windows as there are no additional build steps required
(instead, existing steps are modified in GetFlagsModifications below)."""
return []
def GetFlagsModifications(self, input, output, implicit, command,
cflags_c, cflags_cc, expand_special):
"""Get the modified cflags and implicit dependencies that should be used
for the pch compilation step."""
if input == self.pch_source:
pch_output = ['/Yc' + self._PchHeader()]
if command == 'cxx':
return ([('cflags_cc', map(expand_special, cflags_cc + pch_output))],
self.output_obj, [])
elif command == 'cc':
return ([('cflags_c', map(expand_special, cflags_c + pch_output))],
self.output_obj, [])
return [], output, implicit
vs_version = None
def GetVSVersion(generator_flags):
global vs_version
if not vs_version:
vs_version = gyp.MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'),
allow_fallback=False)
return vs_version
def _GetVsvarsSetupArgs(generator_flags, arch):
vs = GetVSVersion(generator_flags)
return vs.SetupScript()
def ExpandMacros(string, expansions):
"""Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv
for the canonical way to retrieve a suitable dict."""
if '$' in string:
for old, new in expansions.iteritems():
assert '$(' not in new, new
string = string.replace(old, new)
return string
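# Illustrative usage (not part of the original file):
#   ExpandMacros('$(OutDir)\\foo.dll', {'$(OutDir)': 'out\\Release'})
#   -> 'out\\Release\\foo.dll'
# Expansion is a plain string replace, so unknown macros are left untouched.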
def _ExtractImportantEnvironment(output_of_set):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
for line in output_of_set.splitlines():
for envvar in envvars_to_save:
if re.match(envvar + '=', line.lower()):
var, setting = line.split('=', 1)
if envvar == 'path':
# Our own rules (for running gyp-win-tool) and other actions in
# Chromium rely on python being in the path. Add the path to this
# python here so that if it's not in the path when ninja is run
# later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting
env[var.upper()] = setting
break
for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
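# Illustrative sketch (not part of the original file): given 'set' output such as
#   Path=C:\WINDOWS\system32;C:\tools
#   SystemRoot=C:\WINDOWS
#   TEMP=C:\Users\bot\AppData\Local\Temp
#   TMP=C:\Users\bot\AppData\Local\Temp
# the result maps 'PATH' (with this python's directory prepended), 'SYSTEMROOT',
# 'TEMP' and 'TMP' to their values; variables not listed in envvars_to_save are
# dropped, and a missing SYSTEMROOT/TEMP/TMP raises an Exception.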
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
for key, value in envvar_dict.iteritems():
block += key + '=' + value + nul
block += nul
return block
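# Illustrative usage (not part of the original file):
#   _FormatAsEnvironmentBlock({'TEMP': 'C:\\t', 'TMP': 'C:\\t'})
#   -> 'TEMP=C:\\t\x00TMP=C:\\t\x00\x00'
# (key order follows dict iteration order, which is unspecified on Python 2).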
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip()
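# Illustrative note (not part of the original file): a `where`-style output line
# such as 'LOC:C:\\VS\\VC\\bin\\cl.exe' yields 'C:\\VS\\VC\\bin\\cl.exe'; if no
# LOC: line is present, the function falls through and returns None.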
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags,
system_includes, open_out):
"""It's not sufficient to have the absolute path to the compiler, linker,
etc. on Windows, as those tools rely on .dlls being in the PATH. We also
need to support both x86 and x64 compilers within the same build (to support
msvs_target_platform hackery). Different architectures require a different
compiler binary, and different supporting environment variables (INCLUDE,
LIB, LIBPATH). So, we extract the environment here, wrap all invocations
of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
sets up the environment, and then we do not prefix the compiler with
an absolute path, instead preferring something like "cl.exe" in the rule
which will then run whichever the environment setup has put in the path.
When the following procedure for generating environment files does not
meet your requirements (e.g. for custom toolchains), you can pass
"-G ninja_use_custom_environment_files" to gyp to suppress file
generation and use custom environment files that you prepare yourself."""
archs = ('x86', 'x64')
if generator_flags.get('ninja_use_custom_environment_files', 0):
cl_paths = {}
for arch in archs:
cl_paths[arch] = 'cl.exe'
return cl_paths
vs = GetVSVersion(generator_flags)
cl_paths = {}
for arch in archs:
# Extract environment variables for subprocesses.
args = vs.SetupScript(arch)
args.extend(('&&', 'set'))
popen = subprocess.Popen(
args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
variables, _ = popen.communicate()
env = _ExtractImportantEnvironment(variables)
# Inject system includes from gyp files into INCLUDE.
if system_includes:
system_includes = system_includes | OrderedSet(
env.get('INCLUDE', '').split(';'))
env['INCLUDE'] = ';'.join(system_includes)
env_block = _FormatAsEnvironmentBlock(env)
f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb')
f.write(env_block)
f.close()
# Find cl.exe location for this architecture.
args = vs.SetupScript(arch)
args.extend(('&&',
'for', '%i', 'in', '(cl.exe)', 'do', '@echo', 'LOC:%~$PATH:i'))
popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
output, _ = popen.communicate()
cl_paths[arch] = _ExtractCLPath(output)
return cl_paths
def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja):
"""Emulate behavior of msvs_error_on_missing_sources present in the msvs
generator: Check that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation when building via
VS, and we want this check to match for people/bots that build using ninja,
so they're not surprised when the VS build fails."""
if int(generator_flags.get('msvs_error_on_missing_sources', 0)):
no_specials = filter(lambda x: '$' not in x, sources)
relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials]
missing = filter(lambda x: not os.path.exists(x), relative)
if missing:
# They'll look like out\Release\..\..\stuff\things.cc, so normalize the
# path for a slightly less crazy looking output.
cleaned_up = [os.path.normpath(x) for x in missing]
raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up))
# Sets some values in default_variables, which are required for many
# generators, run on Windows.
def CalculateCommonVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
# Set a variable so conditions can be based on msvs_version.
msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags)
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running thru WOW64).
if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
|
jymannob/CouchPotatoServer
|
refs/heads/develop
|
couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/engadget.py
|
18
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .fivemin import FiveMinIE
from ..utils import (
url_basename,
)
class EngadgetIE(InfoExtractor):
_VALID_URL = r'''(?x)https?://www.engadget.com/
(?:video/5min/(?P<id>\d+)|
[\d/]+/.*?)
'''
_TEST = {
'url': 'http://www.engadget.com/video/5min/518153925/',
'md5': 'c6820d4828a5064447a4d9fc73f312c9',
'info_dict': {
'id': '518153925',
'ext': 'mp4',
'title': 'Samsung Galaxy Tab Pro 8.4 Review',
},
'add_ie': ['FiveMin'],
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
if video_id is not None:
return FiveMinIE._build_result(video_id)
else:
title = url_basename(url)
webpage = self._download_webpage(url, title)
ids = re.findall(r'<iframe[^>]+?playList=(\d+)', webpage)
return {
'_type': 'playlist',
'title': title,
'entries': [FiveMinIE._build_result(id) for id in ids]
}
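# Illustrative note (not part of the original file): _VALID_URL matches both the
# direct 5min video form, e.g.
#   http://www.engadget.com/video/5min/518153925/   (group 'id' == '518153925'),
# and dated article URLs such as
#   http://www.engadget.com/2014/02/20/some-article/ (group 'id' is None),
# in which case _real_extract falls back to the playlist branch and builds one
# FiveMin entry per playList id found in the page's iframes.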
|
adoosii/edx-platform
|
refs/heads/master
|
cms/djangoapps/contentstore/views/tests/test_checklists.py
|
104
|
""" Unit tests for checklist methods in views.py. """
from contentstore.utils import reverse_course_url
from contentstore.views.checklist import expand_checklist_action_url
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.django import modulestore
import json
from contentstore.tests.utils import CourseTestCase
class ChecklistTestCase(CourseTestCase):
""" Test for checklist get and put methods. """
def setUp(self):
""" Creates the test course. """
super(ChecklistTestCase, self).setUp()
self.course = CourseFactory.create(org='mitX', number='333', display_name='Checklists Course')
self.checklists_url = self.get_url()
def get_url(self, checklist_index=None):
url_args = {'checklist_index': checklist_index} if checklist_index else None
return reverse_course_url('checklists_handler', self.course.id, kwargs=url_args)
def get_persisted_checklists(self):
""" Returns the checklists as persisted in the modulestore. """
return modulestore().get_item(self.course.location).checklists
def compare_checklists(self, persisted, request):
"""
Compares the persisted and requested checklists, treating action URL
expansion as an allowed difference, and descends into the individual items.
"""
self.assertEqual(persisted['short_description'], request['short_description'])
expanded_checklist = expand_checklist_action_url(self.course, persisted)
for pers, req in zip(expanded_checklist['items'], request['items']):
self.assertEqual(pers['short_description'], req['short_description'])
self.assertEqual(pers['long_description'], req['long_description'])
self.assertEqual(pers['is_checked'], req['is_checked'])
self.assertEqual(pers['action_url'], req['action_url'])
self.assertEqual(pers['action_text'], req['action_text'])
self.assertEqual(pers['action_external'], req['action_external'])
def test_get_checklists(self):
""" Tests the get checklists method and URL expansion. """
response = self.client.get(self.checklists_url)
self.assertContains(response, "Getting Started With Studio")
# Verify expansion of action URL happened.
self.assertContains(response, 'course_team/mitX/333/Checklists_Course')
# Verify persisted checklist does NOT have expanded URL.
checklist_0 = self.get_persisted_checklists()[0]
self.assertEqual('ManageUsers', get_action_url(checklist_0, 0))
payload = response.content
# Now delete the checklists from the course and verify they get repopulated (for courses
# created before checklists were introduced).
self.course.checklists = None
# Save the changed `checklists` to the underlying KeyValueStore before updating the modulestore
self.course.save()
modulestore().update_item(self.course, self.user.id)
self.assertEqual(self.get_persisted_checklists(), None)
response = self.client.get(self.checklists_url)
self.assertEqual(payload, response.content)
def test_get_checklists_html(self):
""" Tests getting the HTML template for the checklists page). """
response = self.client.get(self.checklists_url, HTTP_ACCEPT='text/html')
self.assertContains(response, "Getting Started With Studio")
# The HTML generated will define the handler URL (for use by the Backbone model).
self.assertContains(response, self.checklists_url)
def test_update_checklists_no_index(self):
""" No checklist index, should return all of them. """
returned_checklists = json.loads(self.client.get(self.checklists_url).content)
# Verify that persisted checklists do not have expanded action URLs.
# compare_checklists will verify that returned_checklists DO have expanded action URLs.
pers = self.get_persisted_checklists()
self.assertEqual('CourseOutline', get_first_item(pers[1]).get('action_url'))
for pay, resp in zip(pers, returned_checklists):
self.compare_checklists(pay, resp)
def test_update_checklists_index_ignored_on_get(self):
""" Checklist index ignored on get. """
update_url = self.get_url(1)
returned_checklists = json.loads(self.client.get(update_url).content)
for pay, resp in zip(self.get_persisted_checklists(), returned_checklists):
self.compare_checklists(pay, resp)
def test_update_checklists_post_no_index(self):
""" No checklist index, will error on post. """
response = self.client.post(self.checklists_url)
self.assertContains(response, 'Could not save checklist', status_code=400)
def test_update_checklists_index_out_of_range(self):
""" Checklist index out of range, will error on post. """
update_url = self.get_url(100)
response = self.client.post(update_url)
self.assertContains(response, 'Could not save checklist', status_code=400)
def test_update_checklists_index(self):
""" Check that an update of a particular checklist works. """
update_url = self.get_url(1)
payload = self.course.checklists[1]
self.assertFalse(get_first_item(payload).get('is_checked'))
self.assertEqual('CourseOutline', get_first_item(payload).get('action_url'))
get_first_item(payload)['is_checked'] = True
returned_checklist = json.loads(self.client.ajax_post(update_url, payload).content)
self.assertTrue(get_first_item(returned_checklist).get('is_checked'))
persisted_checklist = self.get_persisted_checklists()[1]
# Verify that persisted checklist does not have expanded action URLs.
# compare_checklists will verify that returned_checklist DOES have expanded action URLs.
self.assertEqual('CourseOutline', get_first_item(persisted_checklist).get('action_url'))
self.compare_checklists(persisted_checklist, returned_checklist)
def test_update_checklists_delete_unsupported(self):
""" Delete operation is not supported. """
update_url = self.get_url(100)
response = self.client.delete(update_url)
self.assertEqual(response.status_code, 405)
def test_expand_checklist_action_url(self):
"""
Tests the method to expand checklist action url.
"""
def test_expansion(checklist, index, stored, expanded):
"""
Tests that the expected expanded value is returned for the item at the given index.
Also verifies that the original checklist is not modified.
"""
self.assertEqual(get_action_url(checklist, index), stored)
expanded_checklist = expand_checklist_action_url(self.course, checklist)
self.assertEqual(get_action_url(expanded_checklist, index), expanded)
# Verify no side effect in the original list.
self.assertEqual(get_action_url(checklist, index), stored)
test_expansion(self.course.checklists[0], 0, 'ManageUsers', '/course_team/mitX/333/Checklists_Course')
test_expansion(self.course.checklists[1], 1, 'CourseOutline', '/course/mitX/333/Checklists_Course')
test_expansion(self.course.checklists[2], 0, 'http://help.edge.edx.org/', 'http://help.edge.edx.org/')
def get_first_item(checklist):
""" Returns the first item from the checklist. """
return checklist['items'][0]
def get_action_url(checklist, index):
"""
Returns the action_url for the item at the specified index in the given checklist.
"""
return checklist['items'][index]['action_url']
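# Illustrative note (not part of the original file): the helpers above assume the
# persisted checklist structure used throughout these tests, roughly:
#   {'short_description': '...',
#    'items': [{'short_description': '...', 'long_description': '...',
#               'is_checked': False, 'action_url': 'ManageUsers',
#               'action_text': '...', 'action_external': False}, ...]}
# get_first_item returns items[0]; get_action_url returns items[index]['action_url'].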
|
oe-alliance/oe-alliance-enigma2
|
refs/heads/master
|
lib/python/Components/Renderer/PiconBg.py
|
62
|
from Renderer import Renderer
from enigma import ePixmap
from Components.config import config
from Tools.Directories import SCOPE_ACTIVE_SKIN, resolveFilename
class PiconBg(Renderer):
def __init__(self):
Renderer.__init__(self)
self.pngname = ""
GUI_WIDGET = ePixmap
def postWidgetCreate(self, instance):
self.changed((self.CHANGED_DEFAULT,))
def changed(self, what):
if self.instance:
pngname = ""
if what[0] == 1 or what[0] == 3:
pngname = resolveFilename(SCOPE_ACTIVE_SKIN, "piconbg/"+config.usage.show_picon_bkgrn.value + ".png")
if self.pngname != pngname:
if pngname:
self.instance.setScale(1)
self.instance.setPixmapFromFile(pngname)
self.instance.show()
else:
self.instance.hide()
self.pngname = pngname
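# Descriptive note (not part of the original file): self.pngname caches the last
# background path so the pixmap is only reloaded when the configured
# show_picon_bkgrn value resolves to a different file; an empty path hides the
# widget instead.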
|
AMObox/teammaniac
|
refs/heads/master
|
plugin.video.p2psport/default.py
|
6
|
# -*- coding: utf-8 -*-
import sys  # used below for sys.argv
import re
import urllib2
import HTMLParser
import urllib,urlparse
import xbmcgui
import xbmcplugin
import xbmcaddon
import requests
from BeautifulSoup import BeautifulSoup as bs
from utils.webutils import *
from scrapers import *
try:
from addon.common.addon import Addon
from addon.common.net import Net
except:
print 'Failed to import script.module.addon.common'
xbmcgui.Dialog().ok("Import Failure", "Failed to import addon.common", "A component needed by P2P Sport is missing on your system", "Please visit www.tvaddons.ag.com for support")
base_url = sys.argv[0]
addon_handle = int(sys.argv[1])
args = urlparse.parse_qs(sys.argv[2][1:])
params=urlparse.parse_qs(sys.argv[2][1:])
addon = Addon('plugin.video.p2psport', sys.argv)
AddonPath = addon.get_path()
def build_url(query):
return base_url + '?' + urllib.urlencode(query)
mode = args.get('mode', None)
my_addon = xbmcaddon.Addon()
if mode is None:
url = build_url({'mode': 'av'})
li = xbmcgui.ListItem('Arenavision.in',iconImage='http://kodi.altervista.org/wp-content/uploads/2015/07/arenavision.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
url = build_url({'mode': 'roja'})
li = xbmcgui.ListItem('Rojadirecta.me',iconImage='http://www.rojadirecta.me/static/roja.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
url = build_url({'mode': 'ws'})
li = xbmcgui.ListItem('Livefootball.ws',iconImage='http://www.userlogos.org/files/logos/clubber/football_ws___.PNG')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
url = build_url({'mode': 'livefootballvideo.com'})
li = xbmcgui.ListItem('Livefootballvideo.com',iconImage='https://pbs.twimg.com/profile_images/3162217818/2ee4b2f728ef9867d4e1d86e17bb2ef5.jpeg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
url = build_url({'mode': 'livefooty'})
li = xbmcgui.ListItem('Livefootballol.com',iconImage='http://www.livefootballol.com/images/logo.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
# url = build_url({'mode': 'livefootF1'})
# li = xbmcgui.ListItem('Livefootballol.com (F1)',iconImage='http://www.livefootballol.com/images/logo.png')
# xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
# listitem=li, isFolder=True)
url = build_url({'mode': 'phace'})
li = xbmcgui.ListItem('Sport Channels 1',iconImage='http://cdn.streamcentral.netdna-cdn.com/images/software/acestreamlogo.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
url = build_url({'mode': 'serbplus'})
li = xbmcgui.ListItem('Sport Channels 2',iconImage='http://cdn.streamcentral.netdna-cdn.com/images/software/acestreamlogo.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
url = build_url({'mode': 'ttv', 'url':'http://livehdstreams.com/trash/ttv-list/ttv.sport.player.m3u'})
li = xbmcgui.ListItem('Sport Channels 3',iconImage='http://cdn.streamcentral.netdna-cdn.com/images/software/acestreamlogo.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
url = build_url({'mode': '1ttv'})
li = xbmcgui.ListItem('1torrent.tv',iconImage='http://s3.hostingkartinok.com/uploads/images/2013/06/6e4452212490ac0a66e358c97707ef77.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
url = build_url({'mode': 'ttv_sport', 'url':'http://livehdstreams.com/trash/ttv-list/ttv.sport.player.m3u'})
li = xbmcgui.ListItem('Torrent-tv.ru (Sport)',iconImage='http://addons.tvaddons.ag/cache/images/bc591d6d5ec442d4ddb43a347a8be6_icon.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
url = build_url({'mode': 'ttv_all', 'url':'http://livehdstreams.com/trash/ttv-list/ttv.sport.player.m3u'})
li = xbmcgui.ListItem('Torrent-tv.ru',iconImage='http://addons.tvaddons.ag/cache/images/bc591d6d5ec442d4ddb43a347a8be6_icon.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
# url = build_url({'mode': 'soccer188'})
# li = xbmcgui.ListItem('Soccer188',iconImage='')
# xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
# listitem=li, isFolder=True)
# url = build_url({'mode': '247'})
# li = xbmcgui.ListItem('Livesports 24/7',iconImage='http://i.imgur.com/Mv5ySt4.jpg')
# xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
# listitem=li, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
elif mode[0]=='streamhub':
streamhub_cats()
elif mode[0]=='open_streamhub_cat':
url=params['url'][0]
open_streamhub_cat(url)
elif mode[0]=='open_streamhub_event':
url=params['url'][0]
open_streamhub_event(url)
elif mode[0]=='soccer188':
soccer188()
elif mode[0]=='play_sopc':
url=params['url'][0]
name=params['name'][0]
play_sop(url,name)
elif mode[0]=='ttv_sport':
ttv_sport()
elif mode[0]=='serbplus':
serbplus()
elif mode[0]=='play_serb':
url=params['url'][0]
name=params['name'][0]
resolve_roja(url,name)
elif mode[0]=='phace':
phace()
elif mode[0]=='247':
url = build_url({'mode': 'schedule_247'})
li = xbmcgui.ListItem('Event schedule',iconImage='http://i.imgur.com/Mv5ySt4.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
url = build_url({'mode': 'all_247'})
li = xbmcgui.ListItem('All channels',iconImage='http://i.imgur.com/Mv5ySt4.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
elif mode[0]=='open_247_event':
url=params['url'][0]
open_247_event(url)
elif mode[0]=='all_247':
all_live247()
elif mode[0]=='schedule_247':
schedule247()
elif mode[0]=='open_247_stream':
url='http://pilkalive.weebly.com'+params['url'][0]
name=params['name'][0]
play247(url,name)
elif mode[0]=='livefootballvideo.com':
livefoot_com()
elif mode[0]=='ttv_all':
ttv_cats()
elif mode[0]=='open_ttv_cat':
cat=params['cat'][0]
tag=params['channels'][0]
get_ttv_cat(cat,tag)
elif mode[0]=='1ttv':
one_ttv_cats()
elif mode[0]=='open_1ttv_cat':
tag=params['tag'][0]
name=params['name'][0]
open_1ttv_cat(tag,name)
elif mode[0]=='open_1ttv_channel':
url=params['url'][0]
open_1ttv_channel(url)
elif mode[0]=='ws':
livefootballws_events()
elif mode[0]=='roja':
rojadirecta_events()
elif mode[0]=='ttv':
get_ttv()
elif mode[0]=='open_ttv_stream':
url=params['url'][0]
name=params['name'][0]
open_ttv_stream(url,name)
elif mode[0]=='av':
url = build_url({'mode': 'av_schedule'})
li = xbmcgui.ListItem('[COLOR orange]Schedule / Agenda[/COLOR]',iconImage='http://kodi.altervista.org/wp-content/uploads/2015/07/arenavision.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
for i in range(10):
url = build_url({'mode': 'av_ace','url':'av%s'%(str(i+1)), 'name':'Arenavision %s'%(i+1)})
li = xbmcgui.ListItem('Arenavision %s'%(i+1),iconImage='http://kodi.altervista.org/wp-content/uploads/2015/07/arenavision.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
for i in range(11,13):
url = build_url({'mode': 'av_rand','url':'av%s'%(str(i+1)), 'name':'Arenavision %s'%(i)})
li = xbmcgui.ListItem('Arenavision %s'%(i),iconImage='http://kodi.altervista.org/wp-content/uploads/2015/07/arenavision.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
for i in range(13,23):
url = build_url({'mode': 'av_sop','url':'av%s'%(str(i+1)), 'name':'Arenavision %s'%(i)})
li = xbmcgui.ListItem('Arenavision %s'%(i),iconImage='http://kodi.altervista.org/wp-content/uploads/2015/07/arenavision.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
for i in range(23,25):
url = build_url({'mode': 'av_rand','url':'av%s'%(str(i+1)), 'name':'Arenavision %s'%(i)})
li = xbmcgui.ListItem('Arenavision %s'%(i),iconImage='http://kodi.altervista.org/wp-content/uploads/2015/07/arenavision.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
elif mode[0]=='av_ace':
url='http://arenavision.in/'+params['url'][0]
name=params['name'][0]
try:
play_arena(url,name)
except:
play_arena_sop(url,name)
elif mode[0]=='av_sop':
url='http://arenavision.in/'+params['url'][0]
name=params['name'][0]
try:
play_arena_sop(url,name)
except:
play_arena(url,name)
elif mode[0]=='av_rand':
url='http://arenavision.in/'+params['url'][0]
name=params['name'][0]
try:
play_arena(url,name)
except:
play_arena_sop(url,name)
elif mode[0]=='open_roja_stream':
url='http://www.rojadirecta.me/'+params['url'][0]
name=params['name'][0]
resolve_roja(url,name)
elif mode[0]=='av_schedule':
arenavision_schedule()
elif mode[0]=='av_open':
channels=((params['channels'][0]).replace('[','').replace(']','').replace("'",'').replace('u','').replace(' ','')).split(',')
name=params['name'][0]
sources=[]
for i in range(len(channels)):
title='AV%s'%channels[i]
sources+=[title]
dialog = xbmcgui.Dialog()
index = dialog.select('Select a channel:', sources)
if index>-1:
url=sources[index]
url='http://arenavision.in/'+url.lower()
try: play_arena(url,name)
except: play_arena_sop(url,name)
elif mode[0]=='livefooty':
livefootballol()
elif mode[0]=='open_livefoot':
url='http://www.livefootballol.com'+params['url'][0]
name=params['name'][0]
get_livefoot(url,name)
elif mode[0]=='open_livefoot_stream':
url=params['url'][0]
name=params['name'][0]
play_livefoot(url,name)
elif mode[0]=='livefootF1':
livefootF1()
elif mode[0]=='open_livefoot.com_stream':
url=params['url'][0]
name=params['name'][0]
play_livefoot(url,name)
open_com_event(name,url)
elif mode[0]=='open_ws_stream':
url=params['url'][0]
livefootballws_streams(url)
|
aldian/tensorflow
|
refs/heads/master
|
tensorflow/python/feature_column/feature_column.py
|
9
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn abstraction.
FeatureColumns provide a high level abstraction for ingesting and representing
features. FeatureColumns are also the primary way of encoding features for
canned ${tf.estimator.Estimator}s.
When using FeatureColumns with `Estimators`, the type of feature column you
should choose depends on (1) the feature type and (2) the model type.
1. Feature type:
* Continuous features can be represented by `numeric_column`.
* Categorical features can be represented by any `categorical_column_with_*`
column:
- `categorical_column_with_vocabulary_list`
- `categorical_column_with_vocabulary_file`
- `categorical_column_with_hash_bucket`
- `categorical_column_with_identity`
- `weighted_categorical_column`
2. Model type:
* Deep neural network models (`DNNClassifier`, `DNNRegressor`).
Continuous features can be directly fed into deep neural network models.
age_column = numeric_column("age")
To feed sparse features into DNN models, wrap the column with
`embedding_column` or `indicator_column`. `indicator_column` is recommended
for features with only a few possible values. For features with many
possible values, to reduce the size of your model, `embedding_column` is
recommended.
embedded_dept_column = embedding_column(
categorical_column_with_vocabulary_list(
"department", ["math", "philosphy", ...]), dimension=10)
* Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`).
Sparse features can be fed directly into linear models. They behave like an
indicator column but with an efficient implementation.
dept_column = categorical_column_with_vocabulary_list("department",
["math", "philosophy", "english"])
It is recommended that continuous features be bucketized before being
fed into linear models.
bucketized_age_column = bucketized_column(
source_column=age_column,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
Sparse features can be crossed (also known as conjuncted or combined) in
order to form non-linearities, and then fed into linear models.
cross_dept_age_column = crossed_column(
columns=["department", bucketized_age_column],
hash_bucket_size=1000)
Example of building canned `Estimator`s using FeatureColumns:
```python
# Define features and transformations
deep_feature_columns = [age_column, embedded_dept_column]
wide_feature_columns = [dept_column, bucketized_age_column,
cross_dept_age_column]
# Build deep model
estimator = DNNClassifier(
feature_columns=deep_feature_columns,
hidden_units=[500, 250, 50])
estimator.train(...)
# Or build a wide model
estimator = LinearClassifier(
feature_columns=wide_feature_columns)
estimator.train(...)
# Or build a wide and deep model!
estimator = DNNLinearCombinedClassifier(
linear_feature_columns=wide_feature_columns,
dnn_feature_columns=deep_feature_columns,
dnn_hidden_units=[500, 250, 50])
estimator.train(...)
```
FeatureColumns can also be transformed into a generic input layer for
custom models using `input_layer`.
Example of building a model using FeatureColumns; this can be used in a
`model_fn` which is given to the {tf.estimator.Estimator}:
```python
# Building model via layers
deep_feature_columns = [age_column, embedded_dept_column]
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=deep_feature_columns)
first_layer = input_layer(
features=columns_to_tensor,
feature_columns=deep_feature_columns)
second_layer = fully_connected(first_layer, ...)
```
NOTE: Functions prefixed with "_" indicate experimental or private parts of
the API subject to change, and should not be relied upon!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import math
import numpy as np
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.util import nest
def _internal_input_layer(features,
feature_columns,
weight_collections=None,
trainable=True,
cols_to_vars=None,
scope=None):
"""See input_layer. `scope` is a name or variable scope to use."""
feature_columns = _clean_feature_columns(feature_columns)
for column in feature_columns:
if not isinstance(column, _DenseColumn):
raise ValueError(
'Items of feature_columns must be a _DenseColumn. '
'You can wrap a categorical column with an '
'embedding_column or indicator_column. Given: {}'.format(column))
weight_collections = list(weight_collections or [])
if ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections:
weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
if ops.GraphKeys.MODEL_VARIABLES not in weight_collections:
weight_collections.append(ops.GraphKeys.MODEL_VARIABLES)
# A non-None `scope` can allow for variable reuse when, e.g., this function
# is wrapped by a `make_template`.
with variable_scope.variable_scope(
scope, default_name='input_layer', values=features.values()):
builder = _LazyBuilder(features)
output_tensors = []
ordered_columns = []
for column in sorted(feature_columns, key=lambda x: x.name):
ordered_columns.append(column)
with variable_scope.variable_scope(
None, default_name=column._var_scope_name): # pylint: disable=protected-access
tensor = column._get_dense_tensor( # pylint: disable=protected-access
builder,
weight_collections=weight_collections,
trainable=trainable)
num_elements = column._variable_shape.num_elements() # pylint: disable=protected-access
batch_size = array_ops.shape(tensor)[0]
output_tensors.append(
array_ops.reshape(tensor, shape=(batch_size, num_elements)))
if cols_to_vars is not None:
# Retrieve any variables created (some _DenseColumn's don't create
# variables, in which case an empty list is returned).
cols_to_vars[column] = ops.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES,
scope=variable_scope.get_variable_scope().name)
_verify_static_batch_size_equality(output_tensors, ordered_columns)
return array_ops.concat(output_tensors, 1)
def input_layer(features,
feature_columns,
weight_collections=None,
trainable=True,
cols_to_vars=None):
"""Returns a dense `Tensor` as input layer based on given `feature_columns`.
Generally a single example in training data is described with FeatureColumns.
At the first layer of the model, this column oriented data should be converted
to a single `Tensor`.
Example:
```python
price = numeric_column('price')
keywords_embedded = embedding_column(
categorical_column_with_hash_bucket("keywords", 10K), dimension=16)
columns = [price, keywords_embedded, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
for units in [128, 64, 32]:
dense_tensor = tf.layers.dense(dense_tensor, units, tf.nn.relu)
prediction = tf.layers.dense(dense_tensor, 1)
```
Args:
features: A mapping from key to tensors. `_FeatureColumn`s look up via these
keys. For example `numeric_column('price')` will look at 'price' key in
this dict. Values can be a `SparseTensor` or a `Tensor` depending on the
corresponding `_FeatureColumn`.
feature_columns: An iterable containing the FeatureColumns to use as inputs
to your model. All items should be instances of classes derived from
`_DenseColumn` such as `numeric_column`, `embedding_column`,
`bucketized_column`, `indicator_column`. If you have categorical features,
you can wrap them with an `embedding_column` or `indicator_column`.
weight_collections: A list of collection names to which the Variable will be
added. Note that variables will also be added to collections
`tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
cols_to_vars: If not `None`, must be a dictionary that will be filled with a
mapping from `_FeatureColumn` to list of `Variable`s. For example, after
the call, we might have cols_to_vars =
{_EmbeddingColumn(
categorical_column=_HashedCategoricalColumn(
key='sparse_feature', hash_bucket_size=5, dtype=tf.string),
dimension=10): [<tf.Variable 'some_variable:0' shape=(5, 10)>,
<tf.Variable 'some_variable:1' shape=(5, 10)>]}
If a column creates no variables, its value will be an empty list.
Returns:
A `Tensor` which represents input layer of a model. Its shape
is (batch_size, first_layer_dimension) and its dtype is `float32`.
first_layer_dimension is determined based on given `feature_columns`.
Raises:
ValueError: if an item in `feature_columns` is not a `_DenseColumn`.
"""
return _internal_input_layer(features, feature_columns, weight_collections,
trainable, cols_to_vars)
# TODO(akshayka): InputLayer should be a subclass of Layer, and it
# should implement the logic in input_layer using Layer's build-and-call
# paradigm; input_layer should create an instance of InputLayer and
# return the result of invoking its apply method, just as functional layers do.
class InputLayer(object):
"""An object-oriented version of `input_layer` that reuses variables."""
def __init__(self,
feature_columns,
weight_collections=None,
trainable=True,
cols_to_vars=None):
"""See `input_layer`."""
self._feature_columns = feature_columns
self._weight_collections = weight_collections
self._trainable = trainable
self._cols_to_vars = cols_to_vars
self._input_layer_template = template.make_template(
'feature_column_input_layer',
_internal_input_layer,
create_scope_now_=True)
self._scope = self._input_layer_template.variable_scope
def __call__(self, features):
return self._input_layer_template(
features=features,
feature_columns=self._feature_columns,
weight_collections=self._weight_collections,
trainable=self._trainable,
cols_to_vars=None,
scope=self._scope)
@property
def non_trainable_variables(self):
return self._input_layer_template.non_trainable_variables
@property
def non_trainable_weights(self):
return self._input_layer_template.non_trainable_weights
@property
def trainable_variables(self):
return self._input_layer_template.trainable_variables
@property
def trainable_weights(self):
return self._input_layer_template.trainable_weights
@property
def variables(self):
return self._input_layer_template.variables
@property
def weights(self):
return self._input_layer_template.weights
def linear_model(features,
feature_columns,
units=1,
sparse_combiner='sum',
weight_collections=None,
trainable=True,
cols_to_vars=None):
"""Returns a linear prediction `Tensor` based on given `feature_columns`.
This function generates a weighted sum based on output dimension `units`.
Weighted sum refers to logits in classification problems. It refers to the
prediction itself for linear regression problems.
Note on supported columns: `linear_model` treats categorical columns as
`indicator_column`s while `input_layer` explicitly requires wrapping each
of them with an `embedding_column` or an `indicator_column`.
Example:
```python
price = numeric_column('price')
price_buckets = bucketized_column(price, boundaries=[0., 10., 100., 1000.])
keywords = categorical_column_with_hash_bucket("keywords", 10K)
keywords_price = crossed_column('keywords', price_buckets, ...)
columns = [price_buckets, keywords, keywords_price ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
prediction = linear_model(features, columns)
```
Args:
features: A mapping from key to tensors. `_FeatureColumn`s look up via these
keys. For example `numeric_column('price')` will look at 'price' key in
this dict. Values are `Tensor` or `SparseTensor` depending on
corresponding `_FeatureColumn`.
feature_columns: An iterable containing the FeatureColumns to use as inputs
to your model. All items should be instances of classes derived from
`_FeatureColumn`s.
units: An integer, dimensionality of the output space. Default value is 1.
sparse_combiner: A string specifying how to reduce if a sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
the default. "sqrtn" often achieves good accuracy, in particular with
bag-of-words columns. Each sparse column is combined independently.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
weight_collections: A list of collection names to which the Variable will be
added. Note that variables will also be added to collections
`tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
cols_to_vars: If not `None`, must be a dictionary that will be filled with a
mapping from `_FeatureColumn` to associated list of `Variable`s. For
example, after the call, we might have cols_to_vars = {
_NumericColumn(
key='numeric_feature1', shape=(1,)):
[<tf.Variable 'linear_model/price2/weights:0' shape=(1, 1)>],
'bias': [<tf.Variable 'linear_model/bias_weights:0' shape=(1,)>],
_NumericColumn(
key='numeric_feature2', shape=(2,)):
[<tf.Variable 'linear_model/price1/weights:0' shape=(2, 1)>]}
If a column creates no variables, its value will be an empty list. Note
that cols_to_vars will also contain a string key 'bias' that maps to a
list of Variables.
Returns:
A `Tensor` which represents predictions/logits of a linear model. Its shape
is (batch_size, units) and its dtype is `float32`.
Raises:
ValueError: if an item in `feature_columns` is neither a `_DenseColumn`
nor `_CategoricalColumn`.
"""
feature_columns = _clean_feature_columns(feature_columns)
for column in feature_columns:
if not isinstance(column, (_DenseColumn, _CategoricalColumn)):
raise ValueError('Items of feature_columns must be either a _DenseColumn '
'or _CategoricalColumn. Given: {}'.format(column))
weight_collections = list(weight_collections or [])
if ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections:
weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
if ops.GraphKeys.MODEL_VARIABLES not in weight_collections:
weight_collections.append(ops.GraphKeys.MODEL_VARIABLES)
with variable_scope.variable_scope(
None, default_name='linear_model', values=features.values()):
weighted_sums = []
ordered_columns = []
builder = _LazyBuilder(features)
for column in sorted(feature_columns, key=lambda x: x.name):
with variable_scope.variable_scope(
None, default_name=column._var_scope_name): # pylint: disable=protected-access
ordered_columns.append(column)
weighted_sum = _create_weighted_sum(
column=column,
builder=builder,
units=units,
sparse_combiner=sparse_combiner,
weight_collections=weight_collections,
trainable=trainable)
weighted_sums.append(weighted_sum)
if cols_to_vars is not None:
# Retrieve the variables created.
cols_to_vars[column] = ops.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES,
scope=variable_scope.get_variable_scope().name)
_verify_static_batch_size_equality(weighted_sums, ordered_columns)
predictions_no_bias = math_ops.add_n(
weighted_sums, name='weighted_sum_no_bias')
bias = variable_scope.get_variable(
'bias_weights',
shape=[units],
initializer=init_ops.zeros_initializer(),
trainable=trainable,
collections=weight_collections)
predictions = nn_ops.bias_add(
predictions_no_bias, bias, name='weighted_sum')
if cols_to_vars is not None:
# Add the bias to cols_to_vars as well, converting the Variable or
# PartitionedVariable to a list of Variable's.
if isinstance(bias, variables.Variable):
cols_to_vars['bias'] = [bias]
else: # Must be a PartitionedVariable.
cols_to_vars['bias'] = list(bias)
return predictions
def _transform_features(features, feature_columns):
"""Returns transformed features based on features columns passed in.
Please note that you will most likely not need to use this function directly.
Check `input_layer` and `linear_model` first to see whether they
satisfy your use case.
Example:
```python
# Define features and transformations
crosses_a_x_b = crossed_column(
columns=["sparse_feature_a", "sparse_feature_b"], hash_bucket_size=10000)
price_buckets = bucketized_column(
source_column=numeric_column("price"), boundaries=[...])
columns = [crosses_a_x_b, price_buckets]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
transformed = transform_features(features=features, feature_columns=columns)
assertCountEqual(columns, transformed.keys())
```
Args:
features: A mapping from key to tensors. `_FeatureColumn`s look up via these
keys. For example `numeric_column('price')` will look at 'price' key in
this dict. Values can be a `SparseTensor` or a `Tensor` depending on the
corresponding `_FeatureColumn`.
feature_columns: An iterable containing all the `_FeatureColumn`s.
Returns:
A `dict` mapping `_FeatureColumn` to `Tensor` and `SparseTensor` values.
"""
feature_columns = _clean_feature_columns(feature_columns)
outputs = {}
with ops.name_scope(
None, default_name='transform_features', values=features.values()):
builder = _LazyBuilder(features)
for column in sorted(feature_columns, key=lambda x: x.name):
with ops.name_scope(None, default_name=column.name):
outputs[column] = builder.get(column)
return outputs
def make_parse_example_spec(feature_columns):
"""Creates parsing spec dictionary from input feature_columns.
The returned dictionary can be used as arg 'features' in `tf.parse_example`.
Typical usage example:
```python
# Define features and transformations
feature_b = numeric_column(...)
feature_c_bucketized = bucketized_column(numeric_column("feature_c"), ...)
feature_a_x_feature_c = crossed_column(
columns=["feature_a", feature_c_bucketized], ...)
feature_columns = set(
[feature_b, feature_c_bucketized, feature_a_x_feature_c])
features = tf.parse_example(
serialized=serialized_examples,
features=make_parse_example_spec(feature_columns))
```
For the above example, make_parse_example_spec would return the dict:
```python
{
"feature_a": parsing_ops.VarLenFeature(tf.string),
"feature_b": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
"feature_c": parsing_ops.FixedLenFeature([1], dtype=tf.float32)
}
```
Args:
feature_columns: An iterable containing all feature columns. All items
should be instances of classes derived from `_FeatureColumn`.
Returns:
A dict mapping each feature key to a `FixedLenFeature` or `VarLenFeature`
value.
Raises:
ValueError: If any of the given `feature_columns` is not a `_FeatureColumn`
instance.
"""
result = {}
for column in feature_columns:
if not isinstance(column, _FeatureColumn):
raise ValueError(
'All feature_columns must be _FeatureColumn instances. '
'Given: {}'.format(column))
config = column._parse_example_spec # pylint: disable=protected-access
for key, value in six.iteritems(config):
if key in result and value != result[key]:
raise ValueError(
'feature_columns contain different parse_spec for key '
'{}. Given {} and {}'.format(key, value, result[key]))
result.update(config)
return result
def embedding_column(
categorical_column, dimension, combiner='mean', initializer=None,
ckpt_to_load_from=None, tensor_name_in_ckpt=None, max_norm=None,
trainable=True):
"""`_DenseColumn` that converts from sparse, categorical input.
Use this when your inputs are sparse, but you want to convert them to a dense
representation (e.g., to feed to a DNN).
Inputs must be a `_CategoricalColumn` created by any of the
`categorical_column_*` functions. Here is an example of using
`embedding_column` with `DNNClassifier`:
```python
video_id = categorical_column_with_identity(
key='video_id', num_buckets=1000000, default_value=0)
columns = [embedding_column(video_id, 9),...]
estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)
label_column = ...
def input_fn():
features = tf.parse_example(
..., features=make_parse_example_spec(columns + [label_column]))
labels = features.pop(label_column.name)
return features, labels
estimator.train(input_fn=input_fn, steps=100)
```
Here is an example using `embedding_column` with model_fn:
```python
def model_fn(features, ...):
video_id = categorical_column_with_identity(
key='video_id', num_buckets=1000000, default_value=0)
columns = [embedding_column(video_id, 9),...]
dense_tensor = input_layer(features, columns)
# Form DNN layers, calculate loss, and return EstimatorSpec.
...
```
Args:
categorical_column: A `_CategoricalColumn` created by a
`categorical_column_with_*` function. This column produces the sparse IDs
that are inputs to the embedding lookup.
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
'mean' the default. 'sqrtn' often achieves good accuracy, in particular
with bag-of-words columns. Each of these can be thought of as an example-level
normalization on the column. For more information, see
`tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean `0.0` and standard deviation
`1/sqrt(dimension)`.
ckpt_to_load_from: String representing checkpoint name/pattern from which to
restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from
which to restore the column weights. Required if `ckpt_to_load_from` is
not `None`.
max_norm: If not `None`, embedding values are l2-normalized to this value.
trainable: Whether or not the embedding is trainable. Default is True.
Returns:
`_DenseColumn` that converts from sparse input.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
is specified.
ValueError: if `initializer` is specified and is not callable.
RuntimeError: If eager execution is enabled.
"""
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError('Must specify both `ckpt_to_load_from` and '
'`tensor_name_in_ckpt` or none of them.')
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified. '
'Embedding of column_name: {}'.format(
categorical_column.name))
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1 / math.sqrt(dimension))
return _EmbeddingColumn(
categorical_column=categorical_column,
dimension=dimension,
combiner=combiner,
initializer=initializer,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt,
max_norm=max_norm,
trainable=trainable)
def _shared_embedding_columns(
categorical_columns, dimension, combiner='mean', initializer=None,
shared_embedding_collection_name=None, ckpt_to_load_from=None,
tensor_name_in_ckpt=None, max_norm=None, trainable=True):
"""List of `_DenseColumn`s that convert from sparse, categorical input.
  This is similar to `embedding_column`, except that it produces a list of
embedding columns that share the same embedding weights.
Use this when your inputs are sparse and of the same type (e.g. watched and
impression video IDs that share the same vocabulary), and you want to convert
them to a dense representation (e.g., to feed to a DNN).
  Inputs must be a list of `_CategoricalColumn`s created by any of the
  `categorical_column_*` functions. They must all be of the same type and have
the same arguments except `key`. E.g. they can be
categorical_column_with_vocabulary_file with the same vocabulary_file. Some or
all columns could also be weighted_categorical_column.
Here is an example embedding of two features for a DNNClassifier model:
```python
watched_video_id = categorical_column_with_vocabulary_file(
'watched_video_id', video_vocabulary_file, video_vocabulary_size)
impression_video_id = categorical_column_with_vocabulary_file(
'impression_video_id', video_vocabulary_file, video_vocabulary_size)
columns = shared_embedding_columns(
[watched_video_id, impression_video_id], dimension=10)
estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)
label_column = ...
def input_fn():
features = tf.parse_example(
..., features=make_parse_example_spec(columns + [label_column]))
labels = features.pop(label_column.name)
return features, labels
estimator.train(input_fn=input_fn, steps=100)
```
Here is an example using `shared_embedding_columns` with model_fn:
```python
def model_fn(features, ...):
watched_video_id = categorical_column_with_vocabulary_file(
'watched_video_id', video_vocabulary_file, video_vocabulary_size)
impression_video_id = categorical_column_with_vocabulary_file(
'impression_video_id', video_vocabulary_file, video_vocabulary_size)
columns = shared_embedding_columns(
[watched_video_id, impression_video_id], dimension=10)
dense_tensor = input_layer(features, columns)
# Form DNN layers, calculate loss, and return EstimatorSpec.
...
```
Args:
categorical_columns: List of `_CategoricalColumn`s created by a
`categorical_column_with_*` function. These columns produce the sparse IDs
that are inputs to the embedding lookup. All columns must be of the same
type and have the same arguments except `key`. E.g. they can be
categorical_column_with_vocabulary_file with the same vocabulary_file.
Some or all columns could also be weighted_categorical_column.
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
'mean' the default. 'sqrtn' often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column. For more information, see
`tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean `0.0` and standard deviation
`1/sqrt(dimension)`.
shared_embedding_collection_name: Optional name of the collection where
shared embedding weights are added. If not given, a reasonable name will
be chosen based on the names of `categorical_columns`. This is also used
in `variable_scope` when creating shared embedding weights.
ckpt_to_load_from: String representing checkpoint name/pattern from which to
restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from
which to restore the column weights. Required if `ckpt_to_load_from` is
not `None`.
max_norm: If not `None`, embedding values are l2-normalized to this value.
trainable: Whether or not the embedding is trainable. Default is True.
Returns:
    A list of `_DenseColumn`s that convert from sparse input. The order of
results follows the ordering of `categorical_columns`.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if any of the given `categorical_columns` is of different type
or has different arguments than the others.
ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
is specified.
ValueError: if `initializer` is specified and is not callable.
"""
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError('Must specify both `ckpt_to_load_from` and '
'`tensor_name_in_ckpt` or none of them.')
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified.')
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1. / math.sqrt(dimension))
# Sort the columns so the default collection name is deterministic even if the
# user passes columns from an unsorted collection, such as dict.values().
sorted_columns = sorted(categorical_columns, key=lambda x: x.name)
c0 = sorted_columns[0]
if not isinstance(c0, _CategoricalColumn):
raise ValueError(
'All categorical_columns must be subclasses of _CategoricalColumn. '
'Given: {}, of type: {}'.format(c0, type(c0)))
if isinstance(c0, _WeightedCategoricalColumn):
c0 = c0.categorical_column
for c in sorted_columns[1:]:
if isinstance(c, _WeightedCategoricalColumn):
c = c.categorical_column
if not isinstance(c, type(c0)):
raise ValueError(
'To use shared_embedding_column, all categorical_columns must have '
'the same type, or be weighted_categorical_column of the same type. '
'Given column: {} of type: {} does not match given column: {} of '
'type: {}'.format(c0, type(c0), c, type(c)))
if not shared_embedding_collection_name:
shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns)
shared_embedding_collection_name += '_shared_embedding'
result = []
for column in categorical_columns:
result.append(_SharedEmbeddingColumn(
categorical_column=column,
dimension=dimension,
combiner=combiner,
initializer=initializer,
shared_embedding_collection_name=shared_embedding_collection_name,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt,
max_norm=max_norm,
trainable=trainable))
return result
def numeric_column(key,
shape=(1,),
default_value=None,
dtype=dtypes.float32,
normalizer_fn=None):
"""Represents real valued or numerical features.
Example:
```python
price = numeric_column('price')
columns = [price, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
# or
bucketized_price = bucketized_column(price, boundaries=[...])
columns = [bucketized_price, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
    shape: An iterable of integers specifying the shape of the `Tensor`. An
      integer can be given, which means a single-dimension `Tensor` with the
      given width. The `Tensor` representing the column will have the shape of
[batch_size] + `shape`.
default_value: A single value compatible with `dtype` or an iterable of
values compatible with `dtype` which the column takes on during
`tf.Example` parsing if data is missing. A default value of `None` will
cause `tf.parse_example` to fail if an example does not contain this
column. If a single value is provided, the same value will be applied as
the default value for every item. If an iterable of values is provided,
the shape of the `default_value` should be equal to the given `shape`.
dtype: defines the type of values. Default value is `tf.float32`. Must be a
non-quantized, real integer or floating point type.
normalizer_fn: If not `None`, a function that can be used to normalize the
value of the tensor after `default_value` is applied for parsing.
Normalizer function takes the input `Tensor` as its argument, and returns
      the output `Tensor` (e.g. lambda x: (x - 3.0) / 4.2). Please note that
      even though the most common use case of this function is normalization, it
      can be used for any kind of TensorFlow transformation.
Returns:
A `_NumericColumn`.
Raises:
TypeError: if any dimension in shape is not an int
ValueError: if any dimension in shape is not a positive integer
TypeError: if `default_value` is an iterable but not compatible with `shape`
TypeError: if `default_value` is not compatible with `dtype`.
ValueError: if `dtype` is not convertible to `tf.float32`.
"""
shape = _check_shape(shape, key)
if not (dtype.is_integer or dtype.is_floating):
raise ValueError('dtype must be convertible to float. '
'dtype: {}, key: {}'.format(dtype, key))
default_value = _check_default_value(shape, default_value, dtype, key)
if normalizer_fn is not None and not callable(normalizer_fn):
raise TypeError(
'normalizer_fn must be a callable. Given: {}'.format(normalizer_fn))
return _NumericColumn(
key,
shape=shape,
default_value=default_value,
dtype=dtype,
normalizer_fn=normalizer_fn)
def bucketized_column(source_column, boundaries):
"""Represents discretized dense input.
Buckets include the left boundary, and exclude the right boundary. Namely,
`boundaries=[0., 1., 2.]` generates buckets `(-inf, 0.)`, `[0., 1.)`,
`[1., 2.)`, and `[2., +inf)`.
For example, if the inputs are
```python
boundaries = [0, 10, 100]
input tensor = [[-5, 10000]
[150, 10]
[5, 100]]
```
then the output will be
```python
output = [[0, 3]
[3, 2]
[1, 3]]
```
Example:
```python
price = numeric_column('price')
bucketized_price = bucketized_column(price, boundaries=[...])
columns = [bucketized_price, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
# or
columns = [bucketized_price, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
`bucketized_column` can also be crossed with another categorical column using
`crossed_column`:
```python
price = numeric_column('price')
# bucketized_column converts numerical feature to a categorical one.
bucketized_price = bucketized_column(price, boundaries=[...])
# 'keywords' is a string feature.
price_x_keywords = crossed_column([bucketized_price, 'keywords'], 50K)
columns = [price_x_keywords, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Args:
source_column: A one-dimensional dense column which is generated with
`numeric_column`.
boundaries: A sorted list or tuple of floats specifying the boundaries.
Returns:
A `_BucketizedColumn`.
Raises:
ValueError: If `source_column` is not a numeric column, or if it is not
one-dimensional.
ValueError: If `boundaries` is not a sorted list or tuple.
"""
if not isinstance(source_column, _NumericColumn):
raise ValueError(
'source_column must be a column generated with numeric_column(). '
'Given: {}'.format(source_column))
if len(source_column.shape) > 1:
raise ValueError(
'source_column must be one-dimensional column. '
'Given: {}'.format(source_column))
if (not boundaries or
not (isinstance(boundaries, list) or isinstance(boundaries, tuple))):
raise ValueError('boundaries must be a sorted list.')
for i in range(len(boundaries) - 1):
if boundaries[i] >= boundaries[i + 1]:
raise ValueError('boundaries must be a sorted list.')
return _BucketizedColumn(source_column, tuple(boundaries))
def _assert_string_or_int(dtype, prefix):
if (dtype != dtypes.string) and (not dtype.is_integer):
raise ValueError(
'{} dtype must be string or integer. dtype: {}.'.format(prefix, dtype))
def categorical_column_with_hash_bucket(key,
hash_bucket_size,
dtype=dtypes.string):
"""Represents sparse feature where ids are set by hashing.
Use this when your sparse features are in string or integer format, and you
want to distribute your inputs into a finite number of buckets by hashing.
output_id = Hash(input_feature_string) % bucket_size
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
  and `''` for string, which will be ignored by this column.
Example:
```python
keywords = categorical_column_with_hash_bucket("keywords", 10K)
columns = [keywords, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
# or
keywords_embedded = embedding_column(keywords, 16)
columns = [keywords_embedded, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
hash_bucket_size: An int > 1. The number of buckets.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `_HashedCategoricalColumn`.
Raises:
    ValueError: `hash_bucket_size` is less than 1.
ValueError: `dtype` is neither string nor integer.
"""
if hash_bucket_size is None:
raise ValueError('hash_bucket_size must be set. ' 'key: {}'.format(key))
if hash_bucket_size < 1:
raise ValueError('hash_bucket_size must be at least 1. '
'hash_bucket_size: {}, key: {}'.format(
hash_bucket_size, key))
_assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
return _HashedCategoricalColumn(key, hash_bucket_size, dtype)
def categorical_column_with_vocabulary_file(key,
vocabulary_file,
vocabulary_size=None,
num_oov_buckets=0,
default_value=None,
dtype=dtypes.string):
"""A `_CategoricalColumn` with a vocabulary file.
Use this when your inputs are in string or integer format, and you have a
vocabulary file that maps each value to an integer ID. By default,
out-of-vocabulary values are ignored. Use either (but not both) of
`num_oov_buckets` and `default_value` to specify how to include
out-of-vocabulary values.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string. Note that these values are independent of the
`default_value` argument.
Example with `num_oov_buckets`:
File '/us/states.txt' contains 50 lines, each with a 2-character U.S. state
  abbreviation. All inputs with values in that file are assigned an ID 0-49,
  corresponding to their line number. All other values are hashed and assigned an
ID 50-54.
```python
states = categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
num_oov_buckets=5)
columns = [states, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Example with `default_value`:
File '/us/states.txt' contains 51 lines - the first line is 'XX', and the
other 50 each have a 2-character U.S. state abbreviation. Both a literal 'XX'
in input, and other values missing from the file, will be assigned ID 0. All
others are assigned the corresponding line number 1-50.
```python
states = categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=51,
default_value=0)
columns = [states, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
And to make an embedding with either:
```python
columns = [embedding_column(states, 3),...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
vocabulary_file: The vocabulary file name.
    vocabulary_size: Number of the elements in the vocabulary. This must be no
      greater than the length of `vocabulary_file`; if it is less, later values
      in the file are ignored. If `None`, it is set to the length of
      `vocabulary_file`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
the input value. A positive `num_oov_buckets` can not be specified with
`default_value`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `_CategoricalColumn` with a vocabulary file.
Raises:
ValueError: `vocabulary_file` is missing or cannot be opened.
ValueError: `vocabulary_size` is missing or < 1.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: `dtype` is neither string nor integer.
"""
if not vocabulary_file:
raise ValueError('Missing vocabulary_file in {}.'.format(key))
if vocabulary_size is None:
if not gfile.Exists(vocabulary_file):
raise ValueError('vocabulary_file in {} does not exist.'.format(key))
with gfile.GFile(vocabulary_file) as f:
vocabulary_size = sum(1 for _ in f)
logging.info(
'vocabulary_size = %d in %s is inferred from the number of elements '
'in the vocabulary_file %s.', vocabulary_size, key, vocabulary_file)
# `vocabulary_size` isn't required for lookup, but it is for `_num_buckets`.
if vocabulary_size < 1:
raise ValueError('Invalid vocabulary_size in {}.'.format(key))
if num_oov_buckets:
if default_value is not None:
raise ValueError(
'Can\'t specify both num_oov_buckets and default_value in {}.'.format(
key))
if num_oov_buckets < 0:
raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
num_oov_buckets, key))
_assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
return _VocabularyFileCategoricalColumn(
key=key,
vocabulary_file=vocabulary_file,
vocabulary_size=vocabulary_size,
num_oov_buckets=0 if num_oov_buckets is None else num_oov_buckets,
default_value=-1 if default_value is None else default_value,
dtype=dtype)
def categorical_column_with_vocabulary_list(
key, vocabulary_list, dtype=None, default_value=-1, num_oov_buckets=0):
"""A `_CategoricalColumn` with in-memory vocabulary.
Use this when your inputs are in string or integer format, and you have an
in-memory vocabulary mapping each value to an integer ID. By default,
out-of-vocabulary values are ignored. Use either (but not both) of
`num_oov_buckets` and `default_value` to specify how to include
out-of-vocabulary values.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string. Note that these values are independent of the
`default_value` argument.
Example with `num_oov_buckets`:
In the following example, each input in `vocabulary_list` is assigned an ID
0-3 corresponding to its index (e.g., input 'B' produces output 2). All other
inputs are hashed and assigned an ID 4-5.
```python
colors = categorical_column_with_vocabulary_list(
key='colors', vocabulary_list=('R', 'G', 'B', 'Y'),
num_oov_buckets=2)
columns = [colors, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
Example with `default_value`:
In the following example, each input in `vocabulary_list` is assigned an ID
0-4 corresponding to its index (e.g., input 'B' produces output 3). All other
inputs are assigned `default_value` 0.
```python
colors = categorical_column_with_vocabulary_list(
key='colors', vocabulary_list=('X', 'R', 'G', 'B', 'Y'), default_value=0)
columns = [colors, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
And to make an embedding with either:
```python
columns = [embedding_column(colors, 3),...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
vocabulary_list: An ordered iterable defining the vocabulary. Each feature
is mapped to the index of its value (if present) in `vocabulary_list`.
Must be castable to `dtype`.
dtype: The type of features. Only string and integer types are supported.
If `None`, it will be inferred from `vocabulary_list`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a
hash of the input value. A positive `num_oov_buckets` can not be specified
with `default_value`.
Returns:
A `_CategoricalColumn` with in-memory vocabulary.
Raises:
ValueError: if `vocabulary_list` is empty, or contains duplicate keys.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: if `dtype` is not integer or string.
"""
if (vocabulary_list is None) or (len(vocabulary_list) < 1):
raise ValueError(
'vocabulary_list {} must be non-empty, column_name: {}'.format(
vocabulary_list, key))
if len(set(vocabulary_list)) != len(vocabulary_list):
raise ValueError(
'Duplicate keys in vocabulary_list {}, column_name: {}'.format(
vocabulary_list, key))
vocabulary_dtype = dtypes.as_dtype(np.array(vocabulary_list).dtype)
if num_oov_buckets:
if default_value != -1:
raise ValueError(
'Can\'t specify both num_oov_buckets and default_value in {}.'.format(
key))
if num_oov_buckets < 0:
raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
num_oov_buckets, key))
_assert_string_or_int(
vocabulary_dtype, prefix='column_name: {} vocabulary'.format(key))
if dtype is None:
dtype = vocabulary_dtype
elif dtype.is_integer != vocabulary_dtype.is_integer:
raise ValueError(
'dtype {} and vocabulary dtype {} do not match, column_name: {}'.format(
dtype, vocabulary_dtype, key))
_assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
return _VocabularyListCategoricalColumn(
key=key, vocabulary_list=tuple(vocabulary_list), dtype=dtype,
default_value=default_value, num_oov_buckets=num_oov_buckets)
def categorical_column_with_identity(key, num_buckets, default_value=None):
"""A `_CategoricalColumn` that returns identity values.
Use this when your inputs are integers in the range `[0, num_buckets)`, and
you want to use the input value itself as the categorical ID. Values outside
this range will result in `default_value` if specified, otherwise it will
fail.
Typically, this is used for contiguous ranges of integer indexes, but
  it doesn't have to be. This might be inefficient, however, if many IDs
  are unused. Consider `categorical_column_with_hash_bucket` in that case.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string. Note that these values are independent of the
`default_value` argument.
  In the following examples, each input in the range `[0, 1000000)` is assigned
  its own value as the categorical ID. All other inputs are assigned
  `default_value` 0. Note that a
literal 0 in inputs will result in the same default ID.
Linear model:
```python
video_id = categorical_column_with_identity(
key='video_id', num_buckets=1000000, default_value=0)
columns = [video_id, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
Embedding for a DNN model:
```python
columns = [embedding_column(video_id, 9),...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
num_buckets: Range of inputs and outputs is `[0, num_buckets)`.
default_value: If `None`, this column's graph operations will fail for
      out-of-range inputs. Otherwise, this value must be in the range
      `[0, num_buckets)`, and will replace out-of-range inputs.
Returns:
A `_CategoricalColumn` that returns identity values.
Raises:
ValueError: if `num_buckets` is less than one.
ValueError: if `default_value` is not in range `[0, num_buckets)`.
"""
if num_buckets < 1:
raise ValueError(
'num_buckets {} < 1, column_name {}'.format(num_buckets, key))
if (default_value is not None) and (
(default_value < 0) or (default_value >= num_buckets)):
raise ValueError(
'default_value {} not in range [0, {}), column_name {}'.format(
default_value, num_buckets, key))
return _IdentityCategoricalColumn(
key=key, num_buckets=num_buckets, default_value=default_value)
def indicator_column(categorical_column):
"""Represents multi-hot representation of given categorical column.
Used to wrap any `categorical_column_*` (e.g., to feed to DNN). Use
`embedding_column` if the inputs are sparse.
```python
name = indicator_column(categorical_column_with_vocabulary_list(
      'name', ['bob', 'george', 'wanda']))
columns = [name, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
dense_tensor == [[1, 0, 0]] # If "name" bytes_list is ["bob"]
dense_tensor == [[1, 0, 1]] # If "name" bytes_list is ["bob", "wanda"]
dense_tensor == [[2, 0, 0]] # If "name" bytes_list is ["bob", "bob"]
```
Args:
categorical_column: A `_CategoricalColumn` which is created by
`categorical_column_with_*` or `crossed_column` functions.
Returns:
An `_IndicatorColumn`.
"""
return _IndicatorColumn(categorical_column)
def weighted_categorical_column(
categorical_column, weight_feature_key, dtype=dtypes.float32):
"""Applies weight values to a `_CategoricalColumn`.
Use this when each of your sparse inputs has both an ID and a value. For
example, if you're representing text documents as a collection of word
frequencies, you can provide 2 parallel sparse input features ('terms' and
'frequencies' below).
Example:
Input `tf.Example` objects:
```proto
[
features {
feature {
key: "terms"
value {bytes_list {value: "very" value: "model"}}
}
feature {
key: "frequencies"
value {float_list {value: 0.3 value: 0.1}}
}
},
features {
feature {
key: "terms"
value {bytes_list {value: "when" value: "course" value: "human"}}
}
feature {
key: "frequencies"
value {float_list {value: 0.4 value: 0.1 value: 0.2}}
}
}
]
```
```python
categorical_column = categorical_column_with_hash_bucket(
column_name='terms', hash_bucket_size=1000)
weighted_column = weighted_categorical_column(
categorical_column=categorical_column, weight_feature_key='frequencies')
columns = [weighted_column, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
This assumes the input dictionary contains a `SparseTensor` for key
'terms', and a `SparseTensor` for key 'frequencies'. These 2 tensors must have
the same indices and dense shape.
Args:
categorical_column: A `_CategoricalColumn` created by
`categorical_column_with_*` functions.
weight_feature_key: String key for weight values.
dtype: Type of weights, such as `tf.float32`. Only float and integer weights
are supported.
Returns:
A `_CategoricalColumn` composed of two sparse features: one represents id,
the other represents weight (value) of the id feature in that example.
Raises:
ValueError: if `dtype` is not convertible to float.
"""
if (dtype is None) or not (dtype.is_integer or dtype.is_floating):
raise ValueError('dtype {} is not convertible to float.'.format(dtype))
return _WeightedCategoricalColumn(
categorical_column=categorical_column,
weight_feature_key=weight_feature_key,
dtype=dtype)
def crossed_column(keys, hash_bucket_size, hash_key=None):
"""Returns a column for performing crosses of categorical features.
Crossed features will be hashed according to `hash_bucket_size`. Conceptually,
the transformation can be thought of as:
Hash(cartesian product of features) % `hash_bucket_size`
For example, if the input features are:
  * `SparseTensor` referred to by the first key:
```python
shape = [2, 2]
{
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
}
```
  * `SparseTensor` referred to by the second key:
```python
shape = [2, 1]
{
[0, 0]: "d"
[1, 0]: "e"
}
```
  then the crossed feature will look like:
```python
shape = [2, 2]
{
[0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size
[1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size
[1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size
}
```
Here is an example to create a linear model with crosses of string features:
```python
keywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50K)
columns = [keywords_x_doc_terms, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
You could also use vocabulary lookup before crossing:
```python
keywords = categorical_column_with_vocabulary_file(
'keywords', '/path/to/vocabulary/file', vocabulary_size=1K)
keywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50K)
columns = [keywords_x_doc_terms, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
If an input feature is of numeric type, you can use
`categorical_column_with_identity`, or `bucketized_column`, as in the example:
```python
# vertical_id is an integer categorical feature.
vertical_id = categorical_column_with_identity('vertical_id', 10K)
price = numeric_column('price')
# bucketized_column converts numerical feature to a categorical one.
bucketized_price = bucketized_column(price, boundaries=[...])
vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)
columns = [vertical_id_x_price, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
  To use a crossed column in a DNN model, you need to wrap it in an embedding
  column, as in this example:
```python
vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)
vertical_id_x_price_embedded = embedding_column(vertical_id_x_price, 10)
dense_tensor = input_layer(features, [vertical_id_x_price_embedded, ...])
```
Args:
keys: An iterable identifying the features to be crossed. Each element can
be either:
* string: Will use the corresponding feature which must be of string type.
* `_CategoricalColumn`: Will use the transformed tensor produced by this
column. Does not support hashed categorical column.
hash_bucket_size: An int > 1. The number of buckets.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseCrossOp (optional).
Returns:
A `_CrossedColumn`.
Raises:
ValueError: If `len(keys) < 2`.
ValueError: If any of the keys is neither a string nor `_CategoricalColumn`.
ValueError: If any of the keys is `_HashedCategoricalColumn`.
ValueError: If `hash_bucket_size < 1`.
"""
if not hash_bucket_size or hash_bucket_size < 1:
    raise ValueError('hash_bucket_size must be at least 1. '
'hash_bucket_size: {}'.format(hash_bucket_size))
if not keys or len(keys) < 2:
raise ValueError(
'keys must be a list with length > 1. Given: {}'.format(keys))
for key in keys:
if (not isinstance(key, six.string_types) and
not isinstance(key, _CategoricalColumn)):
raise ValueError(
'Unsupported key type. All keys must be either string, or '
'categorical column except _HashedCategoricalColumn. '
'Given: {}'.format(key))
if isinstance(key, _HashedCategoricalColumn):
raise ValueError(
'categorical_column_with_hash_bucket is not supported for crossing. '
'Hashing before crossing will increase probability of collision. '
'Instead, use the feature name as a string. Given: {}'.format(key))
return _CrossedColumn(
keys=tuple(keys), hash_bucket_size=hash_bucket_size,
hash_key=hash_key)
class _FeatureColumn(object):
"""Represents a feature column abstraction.
WARNING: Do not subclass this layer unless you know what you are doing:
the API is subject to future changes.
To distinguish the concept of a feature family and a specific binary feature
within a family, we refer to a feature family like "country" as a feature
column. Following is an example feature in a `tf.Example` format:
{key: "country", value: [ "US" ]}
  In this example the value of the feature is "US" and "country" refers to the
  column of the feature.
  This class is an abstract class. Users should not create instances of it.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def name(self):
"""Returns string. Used for naming and for name_scope."""
pass
@property
def _var_scope_name(self):
"""Returns string. Used for variable_scope. Defaults to self.name."""
return self.name
@abc.abstractmethod
def _transform_feature(self, inputs):
"""Returns intermediate representation (usually a `Tensor`).
Uses `inputs` to create an intermediate representation (usually a `Tensor`)
that other feature columns can use.
Example usage of `inputs`:
Let's say a Feature column depends on raw feature ('raw') and another
`_FeatureColumn` (input_fc). To access corresponding `Tensor`s, inputs will
be used as follows:
```python
raw_tensor = inputs.get('raw')
fc_tensor = inputs.get(input_fc)
```
Args:
inputs: A `_LazyBuilder` object to access inputs.
Returns:
Transformed feature `Tensor`.
"""
pass
@abc.abstractproperty
def _parse_example_spec(self):
"""Returns a `tf.Example` parsing spec as dict.
    It is used to generate the parsing spec for `tf.parse_example`. The returned
    spec is a dict from keys (`string`) to `VarLenFeature`, `FixedLenFeature`, and other
supported objects. Please check documentation of ${tf.parse_example} for all
supported spec objects.
Let's say a Feature column depends on raw feature ('raw') and another
`_FeatureColumn` (input_fc). One possible implementation of
_parse_example_spec is as follows:
```python
spec = {'raw': tf.FixedLenFeature(...)}
spec.update(input_fc._parse_example_spec)
return spec
```
"""
pass
class _DenseColumn(_FeatureColumn):
"""Represents a column which can be represented as `Tensor`.
WARNING: Do not subclass this layer unless you know what you are doing:
the API is subject to future changes.
Some examples of this type are: numeric_column, embedding_column,
indicator_column.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def _variable_shape(self):
"""`TensorShape` of `_get_dense_tensor`, without batch dimension."""
pass
@abc.abstractmethod
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
"""Returns a `Tensor`.
The output of this function will be used by model-builder-functions. For
example the pseudo code of `input_layer` will be like:
```python
def input_layer(features, feature_columns, ...):
outputs = [fc._get_dense_tensor(...) for fc in feature_columns]
return tf.concat(outputs)
```
Args:
inputs: A `_LazyBuilder` object to access inputs.
      weight_collections: List of graph collections to which Variables (if any
        are created) are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see ${tf.Variable}).
Returns:
`Tensor` of shape [batch_size] + `_variable_shape`.
"""
pass
def _create_weighted_sum(
column,
builder,
units,
sparse_combiner,
weight_collections,
trainable):
"""Creates a weighted sum for a dense or sparse column for linear_model."""
if isinstance(column, _CategoricalColumn):
return _create_categorical_column_weighted_sum(
column=column,
builder=builder,
units=units,
sparse_combiner=sparse_combiner,
weight_collections=weight_collections,
trainable=trainable)
else:
return _create_dense_column_weighted_sum(
column=column,
builder=builder,
units=units,
weight_collections=weight_collections,
trainable=trainable)
def _create_dense_column_weighted_sum(
column, builder, units, weight_collections, trainable):
"""Create a weighted sum of a dense column for linear_model."""
tensor = column._get_dense_tensor( # pylint: disable=protected-access
builder,
weight_collections=weight_collections,
trainable=trainable)
num_elements = column._variable_shape.num_elements() # pylint: disable=protected-access
batch_size = array_ops.shape(tensor)[0]
tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements))
weight = variable_scope.get_variable(
name='weights',
shape=[num_elements, units],
initializer=init_ops.zeros_initializer(),
trainable=trainable,
collections=weight_collections)
return math_ops.matmul(tensor, weight, name='weighted_sum')
class _CategoricalColumn(_FeatureColumn):
"""Represents a categorical feature.
WARNING: Do not subclass this layer unless you know what you are doing:
the API is subject to future changes.
  A categorical feature is typically handled with a ${tf.SparseTensor} of IDs.
"""
__metaclass__ = abc.ABCMeta
IdWeightPair = collections.namedtuple( # pylint: disable=invalid-name
'IdWeightPair', ['id_tensor', 'weight_tensor'])
@abc.abstractproperty
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
pass
@abc.abstractmethod
def _get_sparse_tensors(self,
inputs,
weight_collections=None,
trainable=None):
"""Returns an IdWeightPair.
`IdWeightPair` is a pair of `SparseTensor`s which represents ids and
weights.
`IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
`SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
`SparseTensor` of `float` or `None` to indicate all weights should be
taken to be 1. If specified, `weight_tensor` must have exactly the same
    shape and indices as `sp_ids`. The expected `SparseTensor` is the same as
    the parsing output of a `VarLenFeature`, which is a ragged matrix.
Args:
inputs: A `LazyBuilder` as a cache to get input tensors required to
create `IdWeightPair`.
      weight_collections: List of graph collections to which variables (if any
        are created) are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see ${tf.get_variable}).
"""
pass
def _create_categorical_column_weighted_sum(
column, builder, units, sparse_combiner, weight_collections, trainable):
"""Create a weighted sum of a categorical column for linear_model."""
sparse_tensors = column._get_sparse_tensors( # pylint: disable=protected-access
builder,
weight_collections=weight_collections,
trainable=trainable)
id_tensor = sparse_ops.sparse_reshape(sparse_tensors.id_tensor, [
array_ops.shape(sparse_tensors.id_tensor)[0], -1
])
weight_tensor = sparse_tensors.weight_tensor
if weight_tensor is not None:
weight_tensor = sparse_ops.sparse_reshape(
weight_tensor, [array_ops.shape(weight_tensor)[0], -1])
weight = variable_scope.get_variable(
name='weights',
shape=(column._num_buckets, units), # pylint: disable=protected-access
initializer=init_ops.zeros_initializer(),
trainable=trainable,
collections=weight_collections)
return _safe_embedding_lookup_sparse(
weight,
id_tensor,
sparse_weights=weight_tensor,
combiner=sparse_combiner,
name='weighted_sum')
class _LazyBuilder(object):
"""Handles caching of transformations while building the model.
`_FeatureColumn` specifies how to digest an input column to the network. Some
feature columns require data transformations. This class caches those
transformations.
Some features may be used in more than one place. For example, one can use a
  bucketized feature by itself and in a cross with it. In that case we
should create only one bucketization op instead of creating ops for each
feature column separately. To handle re-use of transformed columns,
`_LazyBuilder` caches all previously transformed columns.
Example:
We're trying to use the following `_FeatureColumn`s:
```python
bucketized_age = fc.bucketized_column(fc.numeric_column("age"), ...)
  keywords = fc.categorical_column_with_hash_bucket("keywords", ...)
age_X_keywords = fc.crossed_column([bucketized_age, "keywords"])
... = linear_model(features,
                     [bucketized_age, keywords, age_X_keywords])
```
If we transform each column independently, then we'll get duplication of
  bucketization (one for the cross, one for the bucketized column itself).
The `_LazyBuilder` eliminates this duplication.
"""
def __init__(self, features):
"""Creates a `_LazyBuilder`.
Args:
features: A mapping from feature column to objects that are `Tensor` or
`SparseTensor`, or can be converted to same via
`sparse_tensor.convert_to_tensor_or_sparse_tensor`. A `string` key
signifies a base feature (not-transformed). A `_FeatureColumn` key
means that this `Tensor` is the output of an existing `_FeatureColumn`
which can be reused.
"""
self._features = features.copy()
self._feature_tensors = {}
def get(self, key):
"""Returns a `Tensor` for the given key.
A `str` key is used to access a base feature (not-transformed). When a
`_FeatureColumn` is passed, the transformed feature is returned if it
already exists, otherwise the given `_FeatureColumn` is asked to provide its
transformed output, which is then cached.
Args:
key: a `str` or a `_FeatureColumn`.
Returns:
The transformed `Tensor` corresponding to the `key`.
Raises:
ValueError: if key is not found or a transformed `Tensor` cannot be
computed.
"""
if key in self._feature_tensors:
# FeatureColumn is already transformed or converted.
return self._feature_tensors[key]
if key in self._features:
feature_tensor = self._get_raw_feature_as_tensor(key)
self._feature_tensors[key] = feature_tensor
return feature_tensor
if not isinstance(key, (str, _FeatureColumn)):
raise TypeError('"key" must be either a "str" or "_FeatureColumn". '
'Provided: {}'.format(key))
if not isinstance(key, _FeatureColumn):
raise ValueError('Feature {} is not in features dictionary.'.format(key))
column = key
logging.debug('Transforming feature_column %s.', column)
transformed = column._transform_feature(self) # pylint: disable=protected-access
if transformed is None:
raise ValueError('Column {} is not supported.'.format(column.name))
self._feature_tensors[column] = transformed
return transformed
def _get_raw_feature_as_tensor(self, key):
"""Gets the raw_feature (keyed by `key`) as `tensor`.
    The raw feature is converted to a (dense or sparse) tensor and, if needed,
    its rank is expanded. For both `Tensor` and `SparseTensor`, the rank will be
    expanded (to 2) if the rank is 1. Dynamic rank is also supported. A rank-0
    raw feature is not supported and will raise an error.
Args:
key: A `str` key to access the raw feature.
Returns:
A `Tensor` or `SparseTensor`.
Raises:
ValueError: if the raw feature has rank 0.
"""
raw_feature = self._features[key]
feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
raw_feature)
def expand_dims(input_tensor):
# Input_tensor must have rank 1.
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
return sparse_ops.sparse_reshape(
input_tensor, [array_ops.shape(input_tensor)[0], -1])
else:
return array_ops.expand_dims(input_tensor, -1)
rank = feature_tensor.get_shape().ndims
if rank is not None:
if rank == 0:
raise ValueError(
            'Feature (key: {}) cannot have rank 0. Given: {}'.format(
key, feature_tensor))
return feature_tensor if rank != 1 else expand_dims(feature_tensor)
# Handle dynamic rank.
with ops.control_dependencies([
check_ops.assert_positive(
array_ops.rank(feature_tensor),
message='Feature (key: {}) cannot have rank 0. Given: {}'.format(
key, feature_tensor))]):
return control_flow_ops.cond(
math_ops.equal(1, array_ops.rank(feature_tensor)),
lambda: expand_dims(feature_tensor),
lambda: feature_tensor)
# TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py
def _shape_offsets(shape):
"""Returns moving offset for each dimension given shape."""
offsets = []
for dim in reversed(shape):
if offsets:
offsets.append(dim * offsets[-1])
else:
offsets.append(dim)
offsets.reverse()
return offsets
# TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py
def _to_sparse_input(input_tensor, ignore_value=None):
"""Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells.
If `input_tensor` is already a `SparseTensor`, just return it.
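  For example, a string `Tensor` `[["a", ""], ["", "b"]]` with the default
  `ignore_value` of `''` becomes a `SparseTensor` with
  `indices=[[0, 0], [1, 1]]`, `values=["a", "b"]` and `dense_shape=[2, 2]`.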
Args:
input_tensor: A string or integer `Tensor`.
    ignore_value: Entries in `input_tensor` equal to this value will be
      absent from the resulting `SparseTensor`. If `None`, the default value of
      `input_tensor`'s dtype will be used ('' for `str`, -1 for `int`).
Returns:
A `SparseTensor` with the same shape as `input_tensor`.
Raises:
ValueError: when `input_tensor`'s rank is `None`.
"""
input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
input_tensor)
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
return input_tensor
with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value,)):
if ignore_value is None:
if input_tensor.dtype == dtypes.string:
        # Special case: TF strings are converted to numpy objects by default.
ignore_value = ''
elif input_tensor.dtype.is_integer:
ignore_value = -1 # -1 has a special meaning of missing feature
else:
# NOTE: `as_numpy_dtype` is a property, so with the parentheses this is
# constructing a new numpy object of the given type, which yields the
# default value for that type.
ignore_value = input_tensor.dtype.as_numpy_dtype()
ignore_value = math_ops.cast(
ignore_value, input_tensor.dtype, name='ignore_value')
indices = array_ops.where(
math_ops.not_equal(input_tensor, ignore_value), name='indices')
return sparse_tensor_lib.SparseTensor(
indices=indices,
values=array_ops.gather_nd(input_tensor, indices, name='values'),
dense_shape=array_ops.shape(
input_tensor, out_type=dtypes.int64, name='dense_shape'))
def _clean_feature_columns(feature_columns):
"""Verifies and normalizes `feature_columns` input."""
if isinstance(feature_columns, _FeatureColumn):
feature_columns = [feature_columns]
if isinstance(feature_columns, collections.Iterator):
feature_columns = list(feature_columns)
if isinstance(feature_columns, dict):
raise ValueError('Expected feature_columns to be iterable, found dict.')
for column in feature_columns:
if not isinstance(column, _FeatureColumn):
raise ValueError('Items of feature_columns must be a _FeatureColumn. '
'Given (type {}): {}.'.format(type(column), column))
if not feature_columns:
raise ValueError('feature_columns must not be empty.')
name_to_column = dict()
for column in feature_columns:
if column.name in name_to_column:
raise ValueError('Duplicate feature column name found for columns: {} '
                       'and {}. This usually means that these columns refer to '
                       'the same base feature. Either one must be discarded or a '
                       'duplicated but renamed item must be inserted in '
                       'the features dict.'.format(column,
name_to_column[column.name]))
name_to_column[column.name] = column
return feature_columns
class _NumericColumn(_DenseColumn,
collections.namedtuple('_NumericColumn', [
'key', 'shape', 'default_value', 'dtype',
'normalizer_fn'
])):
"""see `numeric_column`."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {
self.key:
parsing_ops.FixedLenFeature(self.shape, self.dtype,
self.default_value)
}
def _transform_feature(self, inputs):
input_tensor = inputs.get(self.key)
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
raise ValueError(
'The corresponding Tensor of numerical column must be a Tensor. '
'SparseTensor is not supported. key: {}'.format(self.key))
if self.normalizer_fn is not None:
input_tensor = self.normalizer_fn(input_tensor)
return math_ops.to_float(input_tensor)
@property
def _variable_shape(self):
return tensor_shape.TensorShape(self.shape)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
"""Returns dense `Tensor` representing numeric feature.
Args:
inputs: A `_LazyBuilder` object to access inputs.
weight_collections: Unused `weight_collections` since no variables are
created in this function.
trainable: Unused `trainable` bool since no variables are created in
this function.
Returns:
Dense `Tensor` created within `_transform_feature`.
"""
# Do nothing with weight_collections and trainable since no variables are
# created in this function.
del weight_collections
del trainable
# Feature has been already transformed. Return the intermediate
# representation created by _transform_feature.
return inputs.get(self)
class _BucketizedColumn(_DenseColumn, _CategoricalColumn,
collections.namedtuple('_BucketizedColumn', [
'source_column', 'boundaries'])):
"""See `bucketized_column`."""
@property
def name(self):
return '{}_bucketized'.format(self.source_column.name)
@property
def _parse_example_spec(self):
return self.source_column._parse_example_spec # pylint: disable=protected-access
def _transform_feature(self, inputs):
source_tensor = inputs.get(self.source_column)
return math_ops._bucketize( # pylint: disable=protected-access
source_tensor,
boundaries=self.boundaries)
@property
def _variable_shape(self):
return tensor_shape.TensorShape(
tuple(self.source_column.shape) + (len(self.boundaries) + 1,))
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
input_tensor = inputs.get(self)
return array_ops.one_hot(
indices=math_ops.to_int64(input_tensor),
depth=len(self.boundaries) + 1,
on_value=1.,
off_value=0.)
@property
def _num_buckets(self):
# By construction, source_column is always one-dimensional.
return (len(self.boundaries) + 1) * self.source_column.shape[0]
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
input_tensor = inputs.get(self)
batch_size = array_ops.shape(input_tensor)[0]
# By construction, source_column is always one-dimensional.
source_dimension = self.source_column.shape[0]
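    # For example, with batch_size=2 and source_dimension=3, i1 enumerates the
    # batch row of every value, [0, 0, 0, 1, 1, 1], and i2 enumerates the source
    # dimension of every value, [0, 1, 2, 0, 1, 2].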
i1 = array_ops.reshape(
array_ops.tile(
array_ops.expand_dims(math_ops.range(0, batch_size), 1),
[1, source_dimension]),
(-1,))
i2 = array_ops.tile(math_ops.range(0, source_dimension), [batch_size])
    # Flatten the bucket indices and make them unique across dimensions.
# E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
bucket_indices = (
array_ops.reshape(input_tensor, (-1,)) +
(len(self.boundaries) + 1) * i2)
indices = math_ops.to_int64(array_ops.transpose(array_ops.stack((i1, i2))))
dense_shape = math_ops.to_int64(array_ops.stack(
[batch_size, source_dimension]))
sparse_tensor = sparse_tensor_lib.SparseTensor(
indices=indices,
values=bucket_indices,
dense_shape=dense_shape)
return _CategoricalColumn.IdWeightPair(sparse_tensor, None)
class _EmbeddingColumn(
_DenseColumn,
collections.namedtuple('_EmbeddingColumn', (
'categorical_column', 'dimension', 'combiner', 'initializer',
'ckpt_to_load_from', 'tensor_name_in_ckpt', 'max_norm', 'trainable'
))):
"""See `embedding_column`."""
@property
def name(self):
if not hasattr(self, '_name'):
self._name = '{}_embedding'.format(self.categorical_column.name)
return self._name
@property
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
def _transform_feature(self, inputs):
return inputs.get(self.categorical_column)
@property
def _variable_shape(self):
if not hasattr(self, '_shape'):
self._shape = tensor_shape.vector(self.dimension)
return self._shape
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
# Get sparse IDs and weights.
sparse_tensors = self.categorical_column._get_sparse_tensors( # pylint: disable=protected-access
inputs, weight_collections=weight_collections, trainable=trainable)
sparse_ids = sparse_tensors.id_tensor
sparse_weights = sparse_tensors.weight_tensor
embedding_shape = (self.categorical_column._num_buckets, self.dimension) # pylint: disable=protected-access
embedding_weights = variable_scope.get_variable(
name='embedding_weights',
shape=embedding_shape,
dtype=dtypes.float32,
initializer=self.initializer,
trainable=self.trainable and trainable,
collections=weight_collections)
if self.ckpt_to_load_from is not None:
to_restore = embedding_weights
if isinstance(to_restore, variables.PartitionedVariable):
to_restore = to_restore._get_variable_list() # pylint: disable=protected-access
checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {
self.tensor_name_in_ckpt: to_restore
})
# Return embedding lookup result.
return _safe_embedding_lookup_sparse(
embedding_weights=embedding_weights,
sparse_ids=sparse_ids,
sparse_weights=sparse_weights,
combiner=self.combiner,
name='%s_weights' % self.name,
max_norm=self.max_norm)
class _SharedEmbeddingColumn(
_DenseColumn,
collections.namedtuple('_SharedEmbeddingColumn', (
'categorical_column', 'dimension', 'combiner', 'initializer',
'shared_embedding_collection_name', 'ckpt_to_load_from',
'tensor_name_in_ckpt', 'max_norm', 'trainable'
))):
"""See `embedding_column`."""
@property
def name(self):
if not hasattr(self, '_name'):
self._name = '{}_shared_embedding'.format(self.categorical_column.name)
return self._name
@property
def _var_scope_name(self):
return self.shared_embedding_collection_name
@property
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
def _transform_feature(self, inputs):
return inputs.get(self.categorical_column)
@property
def _variable_shape(self):
if not hasattr(self, '_shape'):
self._shape = tensor_shape.vector(self.dimension)
return self._shape
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
# This method is called from a variable_scope with name _var_scope_name,
# which is shared among all shared embeddings. Open a name_scope here, so
# that the ops for different columns have distinct names.
with ops.name_scope(None, default_name=self.name):
# Get sparse IDs and weights.
sparse_tensors = self.categorical_column._get_sparse_tensors( # pylint: disable=protected-access
inputs, weight_collections=weight_collections, trainable=trainable)
sparse_ids = sparse_tensors.id_tensor
sparse_weights = sparse_tensors.weight_tensor
embedding_shape = (self.categorical_column._num_buckets, self.dimension) # pylint: disable=protected-access
shared_embedding_collection = ops.get_collection(
self.shared_embedding_collection_name)
if shared_embedding_collection:
if len(shared_embedding_collection) > 1:
raise ValueError(
'Collection {} can only contain one variable. '
'Suggested fix A: Choose a unique name for this collection. '
'Suggested fix B: Do not add any variables to this collection. '
'The feature_column library already adds a variable under the '
'hood.'.format(shared_embedding_collection))
embedding_weights = shared_embedding_collection[0]
if embedding_weights.get_shape() != embedding_shape:
raise ValueError(
'Shared embedding collection {} contains variable {} of '
'unexpected shape {}. Expected shape is {}. '
'Suggested fix A: Choose a unique name for this collection. '
'Suggested fix B: Do not add any variables to this collection. '
'The feature_column library already adds a variable under the '
'hood.'.format(
self.shared_embedding_collection_name, embedding_weights.name,
embedding_weights.get_shape(), embedding_shape))
else:
embedding_weights = variable_scope.get_variable(
name='embedding_weights',
shape=embedding_shape,
dtype=dtypes.float32,
initializer=self.initializer,
trainable=self.trainable and trainable,
collections=weight_collections)
ops.add_to_collection(
self.shared_embedding_collection_name, embedding_weights)
if self.ckpt_to_load_from is not None:
to_restore = embedding_weights
if isinstance(to_restore, variables.PartitionedVariable):
to_restore = to_restore._get_variable_list() # pylint: disable=protected-access
checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {
self.tensor_name_in_ckpt: to_restore
})
# Return embedding lookup result.
return _safe_embedding_lookup_sparse(
embedding_weights=embedding_weights,
sparse_ids=sparse_ids,
sparse_weights=sparse_weights,
combiner=self.combiner,
name='%s_weights' % self.name,
max_norm=self.max_norm)
def _create_tuple(shape, value):
"""Returns a tuple with given shape and filled with value."""
if shape:
return tuple([_create_tuple(shape[1:], value) for _ in range(shape[0])])
return value
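# For example, _create_tuple((2, 3), 0) returns ((0, 0, 0), (0, 0, 0)): a
# nested tuple mirroring the requested shape, filled with the given value.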
def _as_tuple(value):
if not nest.is_sequence(value):
return value
return tuple([_as_tuple(v) for v in value])
def _check_shape(shape, key):
"""Returns shape if it's valid, raises error otherwise."""
assert shape is not None
if not nest.is_sequence(shape):
shape = [shape]
shape = tuple(shape)
for dimension in shape:
if not isinstance(dimension, int):
raise TypeError('shape dimensions must be integer. '
'shape: {}, key: {}'.format(shape, key))
if dimension < 1:
raise ValueError('shape dimensions must be greater than 0. '
'shape: {}, key: {}'.format(shape, key))
return shape
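# For example, _check_shape(3, 'price') normalizes the scalar to (3,), while
# _check_shape((2, 0), 'price') raises ValueError because every dimension must
# be a positive integer.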
def _is_shape_and_default_value_compatible(default_value, shape):
"""Verifies compatibility of shape and default_value."""
# Invalid condition:
# * if default_value is not a scalar and shape is empty
# * or if default_value is an iterable and shape is not empty
if nest.is_sequence(default_value) != bool(shape):
return False
if not shape:
return True
if len(default_value) != shape[0]:
return False
for i in range(shape[0]):
if not _is_shape_and_default_value_compatible(default_value[i], shape[1:]):
return False
return True
def _check_default_value(shape, default_value, dtype, key):
"""Returns default value as tuple if it's valid, otherwise raises errors.
This function verifies that `default_value` is compatible with both `shape`
and `dtype`. If it is not compatible, it raises an error. If it is compatible,
it casts default_value to a tuple and returns it. `key` is used only
  for error messages.
Args:
shape: An iterable of integers specifies the shape of the `Tensor`.
default_value: If a single value is provided, the same value will be applied
as the default value for every item. If an iterable of values is
provided, the shape of the `default_value` should be equal to the given
`shape`.
dtype: defines the type of values. Default value is `tf.float32`. Must be a
non-quantized, real integer or floating point type.
key: Column name, used only for error messages.
Returns:
A tuple which will be used as default value.
Raises:
TypeError: if `default_value` is an iterable but not compatible with `shape`
TypeError: if `default_value` is not compatible with `dtype`.
ValueError: if `dtype` is not convertible to `tf.float32`.
"""
if default_value is None:
return None
if isinstance(default_value, int):
return _create_tuple(shape, default_value)
if isinstance(default_value, float) and dtype.is_floating:
return _create_tuple(shape, default_value)
if callable(getattr(default_value, 'tolist', None)): # Handles numpy arrays
default_value = default_value.tolist()
if nest.is_sequence(default_value):
if not _is_shape_and_default_value_compatible(default_value, shape):
raise ValueError(
'The shape of default_value must be equal to given shape. '
'default_value: {}, shape: {}, key: {}'.format(
default_value, shape, key))
# Check if the values in the list are all integers or are convertible to
# floats.
is_list_all_int = all(
isinstance(v, int) for v in nest.flatten(default_value))
is_list_has_float = any(
isinstance(v, float) for v in nest.flatten(default_value))
if is_list_all_int:
return _as_tuple(default_value)
if is_list_has_float and dtype.is_floating:
return _as_tuple(default_value)
raise TypeError('default_value must be compatible with dtype. '
'default_value: {}, dtype: {}, key: {}'.format(
default_value, dtype, key))
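# For example, _check_default_value((2, 3), 1, dtypes.int64, 'price') expands
# the scalar to ((1, 1, 1), (1, 1, 1)), while
# _check_default_value((2,), [1, 2], dtypes.int64, 'price') validates the list
# against the shape and returns it as the tuple (1, 2).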
class _HashedCategoricalColumn(
_CategoricalColumn,
collections.namedtuple('_HashedCategoricalColumn',
['key', 'hash_bucket_size', 'dtype'])):
"""see `categorical_column_with_hash_bucket`."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input(inputs.get(self.key))
if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
raise ValueError('SparseColumn input must be a SparseTensor.')
_assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
if self.dtype == dtypes.string:
sparse_values = input_tensor.values
else:
sparse_values = string_ops.as_string(input_tensor.values)
sparse_id_values = string_ops.string_to_hash_bucket_fast(
sparse_values, self.hash_bucket_size, name='lookup')
return sparse_tensor_lib.SparseTensor(
input_tensor.indices, sparse_id_values, input_tensor.dense_shape)
@property
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.hash_bucket_size
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
class _VocabularyFileCategoricalColumn(
_CategoricalColumn,
collections.namedtuple('_VocabularyFileCategoricalColumn', (
'key', 'vocabulary_file', 'vocabulary_size', 'num_oov_buckets', 'dtype',
'default_value'
))):
"""See `categorical_column_with_vocabulary_file`."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input(inputs.get(self.key))
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
_assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
key_dtype = self.dtype
if input_tensor.dtype.is_integer:
# `index_table_from_file` requires 64-bit integer keys.
key_dtype = dtypes.int64
input_tensor = math_ops.to_int64(input_tensor)
return lookup_ops.index_table_from_file(
vocabulary_file=self.vocabulary_file,
num_oov_buckets=self.num_oov_buckets,
vocab_size=self.vocabulary_size,
default_value=self.default_value,
key_dtype=key_dtype,
name='{}_lookup'.format(self.key)).lookup(input_tensor)
@property
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.vocabulary_size + self.num_oov_buckets
def _get_sparse_tensors(
self, inputs, weight_collections=None, trainable=None):
return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
class _VocabularyListCategoricalColumn(
_CategoricalColumn,
collections.namedtuple('_VocabularyListCategoricalColumn', (
'key', 'vocabulary_list', 'dtype', 'default_value', 'num_oov_buckets'
))):
"""See `categorical_column_with_vocabulary_list`."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input(inputs.get(self.key))
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
_assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
key_dtype = self.dtype
if input_tensor.dtype.is_integer:
# `index_table_from_tensor` requires 64-bit integer keys.
key_dtype = dtypes.int64
input_tensor = math_ops.to_int64(input_tensor)
return lookup_ops.index_table_from_tensor(
vocabulary_list=tuple(self.vocabulary_list),
default_value=self.default_value,
num_oov_buckets=self.num_oov_buckets,
dtype=key_dtype,
name='{}_lookup'.format(self.key)).lookup(input_tensor)
@property
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return len(self.vocabulary_list) + self.num_oov_buckets
def _get_sparse_tensors(
self, inputs, weight_collections=None, trainable=None):
return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
class _IdentityCategoricalColumn(
_CategoricalColumn,
collections.namedtuple('_IdentityCategoricalColumn', (
'key', 'num_buckets', 'default_value'
))):
"""See `categorical_column_with_identity`."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {self.key: parsing_ops.VarLenFeature(dtypes.int64)}
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input(inputs.get(self.key))
if not input_tensor.dtype.is_integer:
raise ValueError(
'Invalid input, not integer. key: {} dtype: {}'.format(
self.key, input_tensor.dtype))
values = math_ops.to_int64(input_tensor.values, name='values')
num_buckets = math_ops.to_int64(self.num_buckets, name='num_buckets')
zero = math_ops.to_int64(0, name='zero')
if self.default_value is None:
# Fail if values are out-of-range.
assert_less = check_ops.assert_less(
values, num_buckets, data=(values, num_buckets),
name='assert_less_than_num_buckets')
assert_greater = check_ops.assert_greater_equal(
values, zero, data=(values,),
name='assert_greater_or_equal_0')
with ops.control_dependencies((assert_less, assert_greater)):
values = array_ops.identity(values)
else:
# Assign default for out-of-range values.
values = array_ops.where(
math_ops.logical_or(
values < zero, values >= num_buckets, name='out_of_range'),
array_ops.fill(
dims=array_ops.shape(values),
value=math_ops.to_int64(self.default_value),
name='default_values'),
values)
return sparse_tensor_lib.SparseTensor(
indices=input_tensor.indices,
values=values,
dense_shape=input_tensor.dense_shape)
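  # For example, with num_buckets=4 and default_value=3, input values
  # [0, 7, 2] are transformed to ids [0, 3, 2]: in-range values pass through
  # and the out-of-range 7 is replaced by the default. With default_value
  # unset, the same input would fail the range assertions at run time.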
@property
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.num_buckets
def _get_sparse_tensors(
self, inputs, weight_collections=None, trainable=None):
return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
class _WeightedCategoricalColumn(
_CategoricalColumn,
collections.namedtuple('_WeightedCategoricalColumn', (
'categorical_column', 'weight_feature_key', 'dtype'
))):
"""See `weighted_categorical_column`."""
@property
def name(self):
return '{}_weighted_by_{}'.format(
self.categorical_column.name, self.weight_feature_key)
@property
def _parse_example_spec(self):
config = self.categorical_column._parse_example_spec # pylint: disable=protected-access
if self.weight_feature_key in config:
raise ValueError('Parse config {} already exists for {}.'.format(
config[self.weight_feature_key], self.weight_feature_key))
config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)
return config
@property
def _num_buckets(self):
return self.categorical_column._num_buckets # pylint: disable=protected-access
def _transform_feature(self, inputs):
weight_tensor = inputs.get(self.weight_feature_key)
if weight_tensor is None:
raise ValueError('Missing weights {}.'.format(self.weight_feature_key))
weight_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
weight_tensor)
if self.dtype != weight_tensor.dtype.base_dtype:
raise ValueError('Bad dtype, expected {}, but got {}.'.format(
self.dtype, weight_tensor.dtype))
if not isinstance(weight_tensor, sparse_tensor_lib.SparseTensor):
# The weight tensor can be a regular Tensor. In this case, sparsify it.
weight_tensor = _to_sparse_input(weight_tensor, ignore_value=0.0)
if not weight_tensor.dtype.is_floating:
weight_tensor = math_ops.to_float(weight_tensor)
return (inputs.get(self.categorical_column), weight_tensor)
def _get_sparse_tensors(
self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
tensors = inputs.get(self)
return _CategoricalColumn.IdWeightPair(tensors[0], tensors[1])
class _CrossedColumn(
_CategoricalColumn,
collections.namedtuple('_CrossedColumn',
['keys', 'hash_bucket_size', 'hash_key'])):
"""See `crossed_column`."""
@property
def name(self):
feature_names = []
for key in _collect_leaf_level_keys(self):
if isinstance(key, _FeatureColumn):
feature_names.append(key.name)
else: # key must be a string
feature_names.append(key)
return '_X_'.join(sorted(feature_names))
@property
def _parse_example_spec(self):
config = {}
for key in self.keys:
if isinstance(key, _FeatureColumn):
config.update(key._parse_example_spec) # pylint: disable=protected-access
else: # key must be a string
config.update({key: parsing_ops.VarLenFeature(dtypes.string)})
return config
def _transform_feature(self, inputs):
feature_tensors = []
for key in _collect_leaf_level_keys(self):
if isinstance(key, six.string_types):
feature_tensors.append(inputs.get(key))
elif isinstance(key, _CategoricalColumn):
ids_and_weights = key._get_sparse_tensors(inputs) # pylint: disable=protected-access
if ids_and_weights.weight_tensor is not None:
raise ValueError(
'crossed_column does not support weight_tensor, but the given '
'column populates weight_tensor. '
'Given column: {}'.format(key.name))
feature_tensors.append(ids_and_weights.id_tensor)
else:
raise ValueError('Unsupported column type. Given: {}'.format(key))
return sparse_ops._sparse_cross_hashed( # pylint: disable=protected-access
inputs=feature_tensors,
num_buckets=self.hash_bucket_size,
hash_key=self.hash_key)
@property
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.hash_bucket_size
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
def _collect_leaf_level_keys(cross):
"""Collects base keys by expanding all nested crosses.
Args:
cross: A `_CrossedColumn`.
Returns:
A list of strings or `_CategoricalColumn` instances.
"""
leaf_level_keys = []
for k in cross.keys:
if isinstance(k, _CrossedColumn):
leaf_level_keys.extend(_collect_leaf_level_keys(k))
else:
leaf_level_keys.append(k)
return leaf_level_keys
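# For example, crossing ('a', crossed_column(['b', 'c'], ...)) collects the
# leaf keys ['a', 'b', 'c'], so nested crosses are flattened before hashing.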
# TODO(zakaria): Move this to embedding_ops and make it public.
def _safe_embedding_lookup_sparse(embedding_weights,
sparse_ids,
sparse_weights=None,
combiner='mean',
default_id=None,
name=None,
partition_strategy='div',
max_norm=None):
"""Lookup embedding results, accounting for invalid IDs and empty features.
The partitioned embedding in `embedding_weights` must all be the same shape
except for the first dimension. The first dimension is allowed to vary as the
vocabulary size is not necessarily a multiple of `P`. `embedding_weights`
may be a `PartitionedVariable` as returned by using `tf.get_variable()` with a
partitioner.
Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs
with non-positive weight. For an entry with no features, the embedding vector
for `default_id` is returned, or the 0-vector if `default_id` is not supplied.
The ids and weights may be multi-dimensional. Embeddings are always aggregated
along the last dimension.
Args:
embedding_weights: A list of `P` float `Tensor`s or values representing
partitioned embedding `Tensor`s. Alternatively, a `PartitionedVariable`
created by partitioning along dimension 0. The total unpartitioned
shape should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the
vocab size and `e_1, ..., e_m` are the embedding dimensions.
sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
ids. `d_0` is typically batch size.
sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
float weights corresponding to `sparse_ids`, or `None` if all weights
      are assumed to be 1.0.
combiner: A string specifying how to combine embedding results for each
entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
the default.
default_id: The id to use for an entry with no features.
name: A name for this operation (optional).
partition_strategy: A string specifying the partitioning strategy.
Currently `"div"` and `"mod"` are supported. Default is `"div"`.
max_norm: If not `None`, all embeddings are l2-normalized to max_norm before
combining.
Returns:
Dense `Tensor` of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.
Raises:
ValueError: if `embedding_weights` is empty.
"""
if embedding_weights is None:
raise ValueError('Missing embedding_weights %s.' % embedding_weights)
if isinstance(embedding_weights, variables.PartitionedVariable):
embedding_weights = list(embedding_weights) # get underlying Variables.
if not isinstance(embedding_weights, list):
embedding_weights = [embedding_weights]
if len(embedding_weights) < 1:
raise ValueError('Missing embedding_weights %s.' % embedding_weights)
dtype = sparse_weights.dtype if sparse_weights is not None else None
embedding_weights = [
ops.convert_to_tensor(w, dtype=dtype) for w in embedding_weights
]
with ops.name_scope(name, 'embedding_lookup',
embedding_weights + [sparse_ids,
sparse_weights]) as scope:
# Reshape higher-rank sparse ids and weights to linear segment ids.
original_shape = sparse_ids.dense_shape
original_rank_dim = sparse_ids.dense_shape.get_shape()[0]
original_rank = (
array_ops.size(original_shape)
if original_rank_dim.value is None
else original_rank_dim.value)
sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [
math_ops.reduce_prod(
array_ops.slice(original_shape, [0], [original_rank - 1])),
array_ops.gather(original_shape, original_rank - 1)])
if sparse_weights is not None:
sparse_weights = sparse_tensor_lib.SparseTensor(
sparse_ids.indices,
sparse_weights.values, sparse_ids.dense_shape)
# Prune invalid ids and weights.
sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights)
# Fill in dummy values for empty features, if necessary.
sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(sparse_ids,
default_id or
0)
if sparse_weights is not None:
sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0)
result = embedding_ops.embedding_lookup_sparse(
embedding_weights,
sparse_ids,
sparse_weights,
combiner=combiner,
partition_strategy=partition_strategy,
name=None if default_id is None else scope,
max_norm=max_norm)
if default_id is None:
# Broadcast is_row_empty to the same shape as embedding_lookup_result,
# for use in Select.
is_row_empty = array_ops.tile(
array_ops.reshape(is_row_empty, [-1, 1]),
array_ops.stack([1, array_ops.shape(result)[1]]))
result = array_ops.where(is_row_empty,
array_ops.zeros_like(result),
result,
name=scope)
# Reshape back from linear ids back into higher-dimensional dense result.
final_result = array_ops.reshape(
result,
array_ops.concat([
array_ops.slice(
math_ops.cast(original_shape, dtypes.int32), [0],
[original_rank - 1]),
array_ops.slice(array_ops.shape(result), [1], [-1])
], 0))
final_result.set_shape(tensor_shape.unknown_shape(
(original_rank_dim - 1).value).concatenate(result.get_shape()[1:]))
return final_result
def _prune_invalid_ids(sparse_ids, sparse_weights):
"""Prune invalid IDs (< 0) from the input ids and weights."""
is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
if sparse_weights is not None:
is_id_valid = math_ops.logical_and(
is_id_valid, math_ops.greater(sparse_weights.values, 0))
sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
if sparse_weights is not None:
sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
return sparse_ids, sparse_weights
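# For example, ids [-1, 3, 7] with weights [0.5, 0.0, 2.0] are pruned to the
# single pair (7, 2.0): the negative id is invalid, and the id whose weight is
# not strictly positive is dropped as well.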
class _IndicatorColumn(_DenseColumn,
collections.namedtuple('_IndicatorColumn',
['categorical_column'])):
"""Represents a one-hot column for use in deep networks.
Args:
categorical_column: A `_CategoricalColumn` which is created by
`categorical_column_with_*` function.
"""
@property
def name(self):
return '{}_indicator'.format(self.categorical_column.name)
def _transform_feature(self, inputs):
"""Returns dense `Tensor` representing feature.
Args:
inputs: A `_LazyBuilder` object to access inputs.
Returns:
Transformed feature `Tensor`.
Raises:
ValueError: if input rank is not known at graph building time.
"""
id_weight_pair = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
id_tensor = id_weight_pair.id_tensor
weight_tensor = id_weight_pair.weight_tensor
# If the underlying column is weighted, return the input as a dense tensor.
if weight_tensor is not None:
weighted_column = sparse_ops.sparse_merge(
sp_ids=id_tensor,
sp_values=weight_tensor,
vocab_size=int(self._variable_shape[-1]))
# Remove (?, -1) index
weighted_column = sparse_ops.sparse_slice(weighted_column, [0, 0],
weighted_column.dense_shape)
return sparse_ops.sparse_tensor_to_dense(weighted_column)
dense_id_tensor = sparse_ops.sparse_tensor_to_dense(
id_tensor, default_value=-1)
# One hot must be float for tf.concat reasons since all other inputs to
# input_layer are float32.
one_hot_id_tensor = array_ops.one_hot(
dense_id_tensor,
depth=self._variable_shape[-1],
on_value=1.0,
off_value=0.0)
# Reduce to get a multi-hot per example.
return math_ops.reduce_sum(one_hot_id_tensor, axis=[-2])
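  # For example, with 4 buckets an example holding ids [1, 3] becomes the
  # multi-hot vector [0., 1., 0., 1.]; a weighted underlying column instead
  # yields its dense weights via the sparse_merge branch above.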
@property
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
@property
def _variable_shape(self):
"""Returns a `TensorShape` representing the shape of the dense `Tensor`."""
return tensor_shape.TensorShape([1, self.categorical_column._num_buckets]) # pylint: disable=protected-access
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
"""Returns dense `Tensor` representing feature.
Args:
inputs: A `_LazyBuilder` object to access inputs.
weight_collections: Unused `weight_collections` since no variables are
created in this function.
trainable: Unused `trainable` bool since no variables are created in
this function.
Returns:
Dense `Tensor` created within `_transform_feature`.
"""
# Do nothing with weight_collections and trainable since no variables are
# created in this function.
del weight_collections
del trainable
# Feature has been already transformed. Return the intermediate
# representation created by _transform_feature.
return inputs.get(self)
def _verify_static_batch_size_equality(tensors, columns):
  # batch_size is a tf.Dimension object.
  expected_batch_size = None
  for i in range(0, len(tensors)):
    if tensors[i].shape[0].value is not None:
      if expected_batch_size is None:
        batch_size_column_index = i
        expected_batch_size = tensors[i].shape[0]
      elif not expected_batch_size.is_compatible_with(tensors[i].shape[0]):
        raise ValueError(
            'Batch size (first dimension) of each feature must be the same. '
            'Batch size of columns ({}, {}): ({}, {})'.format(
                columns[batch_size_column_index].name, columns[i].name,
                expected_batch_size, tensors[i].shape[0]))
|
aerickson/ansible
|
refs/heads/devel
|
lib/ansible/plugins/action/assemble.py
|
4
|
# (c) 2013-2016, Michael DeHaan <michael.dehaan@gmail.com>
# Stephen Fromm <sfromm@gmail.com>
# Brian Coca <briancoca+dev@gmail.com>
# Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import codecs
import os
import os.path
import re
import tempfile
from ansible.constants import mk_boolean as boolean
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native, to_text
from ansible.plugins.action import ActionBase
from ansible.utils.hashing import checksum_s
class ActionModule(ActionBase):
TRANSFERS_FILES = True
def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False, decrypt=True):
''' assemble a file from a directory of fragments '''
tmpfd, temp_path = tempfile.mkstemp()
tmp = os.fdopen(tmpfd, 'wb')
delimit_me = False
add_newline = False
for f in (to_text(p, errors='surrogate_or_strict') for p in sorted(os.listdir(src_path))):
if compiled_regexp and not compiled_regexp.search(f):
continue
fragment = u"%s/%s" % (src_path, f)
if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
continue
fragment_content = open(self._loader.get_real_file(fragment, decrypt=decrypt), 'rb').read()
# always put a newline between fragments if the previous fragment didn't end with a newline.
if add_newline:
tmp.write(b'\n')
# delimiters should only appear between fragments
if delimit_me:
if delimiter:
# un-escape anything like newlines
delimiter = codecs.escape_decode(delimiter)[0]
tmp.write(delimiter)
# always make sure there's a newline after the
# delimiter, so lines don't run together
if delimiter[-1] != b'\n':
tmp.write(b'\n')
tmp.write(fragment_content)
delimit_me = True
if fragment_content.endswith(b'\n'):
add_newline = False
else:
add_newline = True
tmp.close()
return temp_path
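    # For example, fragments '01-base' and '02-extra' under src_path are
    # concatenated in sorted order; with delimiter='# ---' the assembled temp
    # file contains the first fragment, a '# ---' line, then the second
    # fragment (the fragment names here are only illustrative).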
def run(self, tmp=None, task_vars=None):
self._supports_check_mode = False
result = super(ActionModule, self).run(tmp, task_vars)
if result.get('skipped', False) or result.get('failed', False):
return result
if task_vars is None:
task_vars = dict()
src = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
delimiter = self._task.args.get('delimiter', None)
remote_src = self._task.args.get('remote_src', 'yes')
regexp = self._task.args.get('regexp', None)
follow = self._task.args.get('follow', False)
ignore_hidden = self._task.args.get('ignore_hidden', False)
decrypt = self._task.args.get('decrypt', True)
if src is None or dest is None:
result['failed'] = True
result['msg'] = "src and dest are required"
return result
if boolean(remote_src):
result.update(self._execute_module(tmp=tmp, task_vars=task_vars))
return result
else:
try:
src = self._find_needle('files', src)
except AnsibleError as e:
result['failed'] = True
result['msg'] = to_native(e)
return result
if not tmp:
tmp = self._make_tmp_path()
if not os.path.isdir(src):
result['failed'] = True
result['msg'] = u"Source (%s) is not a directory" % src
return result
_re = None
if regexp is not None:
_re = re.compile(regexp)
# Does all work assembling the file
path = self._assemble_from_fragments(src, delimiter, _re, ignore_hidden, decrypt)
path_checksum = checksum_s(path)
dest = self._remote_expand_user(dest)
dest_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=follow, tmp=tmp)
diff = {}
# setup args for running modules
new_module_args = self._task.args.copy()
# clean assemble specific options
for opt in ['remote_src', 'regexp', 'delimiter', 'ignore_hidden', 'decrypt']:
if opt in new_module_args:
del new_module_args[opt]
new_module_args.update(
dict(
dest=dest,
original_basename=os.path.basename(src),
)
)
if path_checksum != dest_stat['checksum']:
if self._play_context.diff:
diff = self._get_diff_data(dest, path, task_vars)
remote_path = self._connection._shell.join_path(tmp, 'src')
xfered = self._transfer_file(path, remote_path)
# fix file permissions when the copy is done as a different user
self._fixup_perms2((tmp, remote_path))
            new_module_args.update(dict(src=xfered))
res = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=False)
if diff:
res['diff'] = diff
result.update(res)
else:
result.update(self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=False))
self._remove_tmp_path(tmp)
return result
|
leeseuljeong/leeseulstack_neutron
|
refs/heads/master
|
neutron/extensions/lbaas_agentscheduler.py
|
24
|
# Copyright (c) 2013 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron.api import extensions
from neutron.api.v2 import base
from neutron.api.v2 import resource
from neutron.common import constants
from neutron.extensions import agent
from neutron.extensions import loadbalancer
from neutron import manager
from neutron.plugins.common import constants as plugin_const
from neutron import policy
from neutron import wsgi
LOADBALANCER_POOL = 'loadbalancer-pool'
LOADBALANCER_POOLS = LOADBALANCER_POOL + 's'
LOADBALANCER_AGENT = 'loadbalancer-agent'
class PoolSchedulerController(wsgi.Controller):
def index(self, request, **kwargs):
lbaas_plugin = manager.NeutronManager.get_service_plugins().get(
plugin_const.LOADBALANCER)
if not lbaas_plugin:
return {'pools': []}
policy.enforce(request.context,
"get_%s" % LOADBALANCER_POOLS,
{},
plugin=lbaas_plugin)
return lbaas_plugin.list_pools_on_lbaas_agent(
request.context, kwargs['agent_id'])
class LbaasAgentHostingPoolController(wsgi.Controller):
def index(self, request, **kwargs):
lbaas_plugin = manager.NeutronManager.get_service_plugins().get(
plugin_const.LOADBALANCER)
if not lbaas_plugin:
return
policy.enforce(request.context,
"get_%s" % LOADBALANCER_AGENT,
{},
plugin=lbaas_plugin)
return lbaas_plugin.get_lbaas_agent_hosting_pool(
request.context, kwargs['pool_id'])
class Lbaas_agentscheduler(extensions.ExtensionDescriptor):
"""Extension class supporting LBaaS agent scheduler.
"""
@classmethod
def get_name(cls):
return "Loadbalancer Agent Scheduler"
@classmethod
def get_alias(cls):
return constants.LBAAS_AGENT_SCHEDULER_EXT_ALIAS
@classmethod
def get_description(cls):
return "Schedule pools among lbaas agents"
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/lbaas_agent_scheduler/api/v1.0"
@classmethod
def get_updated(cls):
return "2013-02-07T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
exts = []
parent = dict(member_name="agent",
collection_name="agents")
controller = resource.Resource(PoolSchedulerController(),
base.FAULT_MAP)
exts.append(extensions.ResourceExtension(
LOADBALANCER_POOLS, controller, parent))
parent = dict(member_name="pool",
collection_name="pools")
controller = resource.Resource(LbaasAgentHostingPoolController(),
base.FAULT_MAP)
exts.append(extensions.ResourceExtension(
LOADBALANCER_AGENT, controller, parent,
path_prefix=plugin_const.
COMMON_PREFIXES[plugin_const.LOADBALANCER]))
return exts
def get_extended_resources(self, version):
return {}
class NoEligibleLbaasAgent(loadbalancer.NoEligibleBackend):
message = _("No eligible loadbalancer agent found "
"for pool %(pool_id)s.")
class NoActiveLbaasAgent(agent.AgentNotFound):
message = _("No active loadbalancer agent found "
"for pool %(pool_id)s.")
class LbaasAgentSchedulerPluginBase(object):
"""REST API to operate the lbaas agent scheduler.
    All methods must be called in an admin context.
"""
@abc.abstractmethod
def list_pools_on_lbaas_agent(self, context, id):
pass
@abc.abstractmethod
def get_lbaas_agent_hosting_pool(self, context, pool_id):
pass
|
LLNL/spack
|
refs/heads/develop
|
var/spack/repos/builtin/packages/py-pydatalog/package.py
|
5
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPydatalog(PythonPackage):
"""pyDatalog adds logic programming to Python."""
homepage = 'https://pypi.python.org/pypi/pyDatalog/'
url = 'https://pypi.io/packages/source/p/pyDatalog/pyDatalog-0.17.1.zip'
version('0.17.1', sha256='b3d9cff0b9431e0fd0b2d5eefe4414c3d3c20bd18fdd7d1b42b2f01f25bac808')
|
florentx/OpenUpgrade
|
refs/heads/8.0
|
addons/base_gengo/__init__.py
|
377
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Openerp sa (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import res_company
import ir_translation
import wizard
import controller
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
reddraggone9/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/cmt.py
|
31
|
from __future__ import unicode_literals
from .mtv import MTVIE
class CMTIE(MTVIE):
IE_NAME = 'cmt.com'
_VALID_URL = r'https?://www\.cmt\.com/(?:videos|shows)/(?:[^/]+/)*(?P<videoid>\d+)'
_FEED_URL = 'http://www.cmt.com/sitewide/apps/player/embed/rss/'
_TESTS = [{
'url': 'http://www.cmt.com/videos/garth-brooks/989124/the-call-featuring-trisha-yearwood.jhtml#artist=30061',
'md5': 'e6b7ef3c4c45bbfae88061799bbba6c2',
'info_dict': {
'id': '989124',
'ext': 'mp4',
'title': 'Garth Brooks - "The Call (featuring Trisha Yearwood)"',
'description': 'Blame It All On My Roots',
},
}, {
'url': 'http://www.cmt.com/shows/party-down-south/party-down-south-ep-407-gone-girl/1738172/playlist/#id=1738172',
'only_matching': True,
}]
|
xiaocong/github-timeline
|
refs/heads/master
|
ghdata/worker.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from celery import Celery
from celery.utils.log import get_task_logger
from . import celeryconfig
logger = get_task_logger(__name__)
worker = Celery('ghdata.worker')
# Optional configuration, see the application user guide.
worker.config_from_object(celeryconfig)
if __name__ == '__main__':
worker.start()
|
laonawuli/addrest
|
refs/heads/master
|
web2py/handlers/wsgihandler.py
|
25
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
This is a WSGI handler for Apache
Requires apache+mod_wsgi.
In httpd.conf put something like:
LoadModule wsgi_module modules/mod_wsgi.so
WSGIScriptAlias / /path/to/wsgihandler.py
"""
# change these parameters as required
LOGGING = False
SOFTCRON = False
import sys
import os
path = os.path.dirname(os.path.abspath(__file__))
os.chdir(path)
if not os.path.isdir('applications'):
raise RuntimeError('Running from the wrong folder')
sys.path = [path] + [p for p in sys.path if not p == path]
sys.stdout = sys.stderr
import gluon.main
if LOGGING:
application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase,
logfilename='httpserver.log',
profiler_dir=None)
else:
application = gluon.main.wsgibase
if SOFTCRON:
from gluon.settings import global_settings
global_settings.web2py_crontype = 'soft'
|
Sidney84/pa-chromium
|
refs/heads/master
|
chrome/common/extensions/docs/server2/api_data_source.py
|
3
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import logging
import os
from collections import defaultdict, Mapping
import third_party.json_schema_compiler.json_parse as json_parse
import third_party.json_schema_compiler.model as model
import third_party.json_schema_compiler.idl_schema as idl_schema
import third_party.json_schema_compiler.idl_parser as idl_parser
def _RemoveNoDocs(item):
if json_parse.IsDict(item):
if item.get('nodoc', False):
return True
for key, value in item.items():
if _RemoveNoDocs(value):
del item[key]
elif type(item) == list:
to_remove = []
for i in item:
if _RemoveNoDocs(i):
to_remove.append(i)
for i in to_remove:
item.remove(i)
return False
def _DetectInlineableTypes(schema):
"""Look for documents that are only referenced once and mark them as inline.
Actual inlining is done by _InlineDocs.
"""
if not schema.get('types'):
return
ignore = frozenset(('value', 'choices'))
refcounts = defaultdict(int)
# Use an explicit stack instead of recursion.
stack = [schema]
while stack:
node = stack.pop()
if isinstance(node, list):
stack.extend(node)
elif isinstance(node, Mapping):
if '$ref' in node:
refcounts[node['$ref']] += 1
stack.extend(v for k, v in node.iteritems() if k not in ignore)
for type_ in schema['types']:
if not 'noinline_doc' in type_:
if refcounts[type_['id']] == 1:
type_['inline_doc'] = True
def _InlineDocs(schema):
"""Replace '$ref's that refer to inline_docs with the json for those docs.
"""
types = schema.get('types')
if types is None:
return
inline_docs = {}
types_without_inline_doc = []
# Gather the types with inline_doc.
for type_ in types:
if type_.get('inline_doc'):
inline_docs[type_['id']] = type_
for k in ('description', 'id', 'inline_doc'):
type_.pop(k, None)
else:
types_without_inline_doc.append(type_)
schema['types'] = types_without_inline_doc
def apply_inline(node):
if isinstance(node, list):
for i in node:
apply_inline(i)
elif isinstance(node, Mapping):
ref = node.get('$ref')
if ref and ref in inline_docs:
node.update(inline_docs[ref])
del node['$ref']
for k, v in node.iteritems():
apply_inline(v)
apply_inline(schema)
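# For example, a node such as {'$ref': 'Rect'} is replaced in place by the
# contents of the 'Rect' type when 'Rect' was marked with inline_doc, and the
# inlined type is removed from schema['types'] ('Rect' is an illustrative id).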
def _CreateId(node, prefix):
if node.parent is not None and not isinstance(node.parent, model.Namespace):
return '-'.join([prefix, node.parent.simple_name, node.simple_name])
return '-'.join([prefix, node.simple_name])
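# For example, a top-level function named 'create' gets the id
# 'method-create', while the same function nested under a type 'Window' gets
# 'method-Window-create' ('Window' and 'create' are illustrative names).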
def _FormatValue(value):
"""Inserts commas every three digits for integer values. It is magic.
"""
s = str(value)
return ','.join([s[max(0, i - 3):i] for i in range(len(s), 0, -3)][::-1])
class _JSCModel(object):
"""Uses a Model from the JSON Schema Compiler and generates a dict that
a Handlebar template can use for a data source.
"""
def __init__(self, json, ref_resolver, disable_refs, idl=False):
self._ref_resolver = ref_resolver
self._disable_refs = disable_refs
clean_json = copy.deepcopy(json)
if _RemoveNoDocs(clean_json):
self._namespace = None
else:
if idl:
_DetectInlineableTypes(clean_json)
_InlineDocs(clean_json)
self._namespace = model.Namespace(clean_json, clean_json['namespace'])
def _FormatDescription(self, description):
if self._disable_refs:
return description
return self._ref_resolver.ResolveAllLinks(description,
namespace=self._namespace.name)
def _GetLink(self, link):
if self._disable_refs:
type_name = link.split('.', 1)[-1]
return { 'href': '#type-%s' % type_name, 'text': link, 'name': link }
return self._ref_resolver.SafeGetLink(link, namespace=self._namespace.name)
def ToDict(self):
if self._namespace is None:
return {}
return {
'name': self._namespace.name,
'types': self._GenerateTypes(self._namespace.types.values()),
'functions': self._GenerateFunctions(self._namespace.functions),
'events': self._GenerateEvents(self._namespace.events),
'properties': self._GenerateProperties(self._namespace.properties)
}
def _GenerateTypes(self, types):
return [self._GenerateType(t) for t in types]
def _GenerateType(self, type_):
type_dict = {
'name': type_.simple_name,
'description': self._FormatDescription(type_.description),
'properties': self._GenerateProperties(type_.properties),
'functions': self._GenerateFunctions(type_.functions),
'events': self._GenerateEvents(type_.events),
'id': _CreateId(type_, 'type')
}
self._RenderTypeInformation(type_, type_dict)
return type_dict
def _GenerateFunctions(self, functions):
return [self._GenerateFunction(f) for f in functions.values()]
def _GenerateFunction(self, function):
function_dict = {
'name': function.simple_name,
'description': self._FormatDescription(function.description),
'callback': self._GenerateCallback(function.callback),
'parameters': [],
'returns': None,
'id': _CreateId(function, 'method')
}
if (function.parent is not None and
not isinstance(function.parent, model.Namespace)):
function_dict['parent_name'] = function.parent.simple_name
if function.returns:
function_dict['returns'] = self._GenerateType(function.returns)
for param in function.params:
function_dict['parameters'].append(self._GenerateProperty(param))
if function.callback is not None:
# Show the callback as an extra parameter.
function_dict['parameters'].append(
self._GenerateCallbackProperty(function.callback))
if len(function_dict['parameters']) > 0:
function_dict['parameters'][-1]['last'] = True
return function_dict
def _GenerateEvents(self, events):
return [self._GenerateEvent(e) for e in events.values()]
def _GenerateEvent(self, event):
event_dict = {
'name': event.simple_name,
'description': self._FormatDescription(event.description),
'parameters': [self._GenerateProperty(p) for p in event.params],
'callback': self._GenerateCallback(event.callback),
'filters': [self._GenerateProperty(f) for f in event.filters],
'conditions': [self._GetLink(condition)
for condition in event.conditions],
'actions': [self._GetLink(action) for action in event.actions],
'supportsRules': event.supports_rules,
'id': _CreateId(event, 'event')
}
if (event.parent is not None and
not isinstance(event.parent, model.Namespace)):
event_dict['parent_name'] = event.parent.simple_name
if event.callback is not None:
# Show the callback as an extra parameter.
event_dict['parameters'].append(
self._GenerateCallbackProperty(event.callback))
if len(event_dict['parameters']) > 0:
event_dict['parameters'][-1]['last'] = True
return event_dict
def _GenerateCallback(self, callback):
if not callback:
return None
callback_dict = {
'name': callback.simple_name,
'simple_type': {'simple_type': 'function'},
'optional': callback.optional,
'parameters': []
}
for param in callback.params:
callback_dict['parameters'].append(self._GenerateProperty(param))
if (len(callback_dict['parameters']) > 0):
callback_dict['parameters'][-1]['last'] = True
return callback_dict
def _GenerateProperties(self, properties):
return [self._GenerateProperty(v) for v in properties.values()]
def _GenerateProperty(self, property_):
if not hasattr(property_, 'type_'):
for d in dir(property_):
if not d.startswith('_'):
print ('%s -> %s' % (d, getattr(property_, d)))
type_ = property_.type_
# Make sure we generate property info for arrays, too.
# TODO(kalman): what about choices?
if type_.property_type == model.PropertyType.ARRAY:
properties = type_.item_type.properties
else:
properties = type_.properties
property_dict = {
'name': property_.simple_name,
'optional': property_.optional,
'description': self._FormatDescription(property_.description),
'properties': self._GenerateProperties(type_.properties),
'functions': self._GenerateFunctions(type_.functions),
'parameters': [],
'returns': None,
'id': _CreateId(property_, 'property')
}
if type_.property_type == model.PropertyType.FUNCTION:
function = type_.function
for param in function.params:
property_dict['parameters'].append(self._GenerateProperty(param))
if function.returns:
property_dict['returns'] = self._GenerateType(function.returns)
if (property_.parent is not None and
not isinstance(property_.parent, model.Namespace)):
property_dict['parent_name'] = property_.parent.simple_name
value = property_.value
if value is not None:
if isinstance(value, int):
property_dict['value'] = _FormatValue(value)
else:
property_dict['value'] = value
else:
self._RenderTypeInformation(type_, property_dict)
return property_dict
def _GenerateCallbackProperty(self, callback):
property_dict = {
'name': callback.simple_name,
'description': self._FormatDescription(callback.description),
'optional': callback.optional,
'id': _CreateId(callback, 'property'),
'simple_type': 'function',
}
if (callback.parent is not None and
not isinstance(callback.parent, model.Namespace)):
property_dict['parent_name'] = callback.parent.simple_name
return property_dict
def _RenderTypeInformation(self, type_, dst_dict):
dst_dict['is_object'] = type_.property_type == model.PropertyType.OBJECT
if type_.property_type == model.PropertyType.CHOICES:
dst_dict['choices'] = self._GenerateTypes(type_.choices)
# We keep track of which == last for knowing when to add "or" between
# choices in templates.
if len(dst_dict['choices']) > 0:
dst_dict['choices'][-1]['last'] = True
elif type_.property_type == model.PropertyType.REF:
dst_dict['link'] = self._GetLink(type_.ref_type)
elif type_.property_type == model.PropertyType.ARRAY:
dst_dict['array'] = self._GenerateType(type_.item_type)
elif type_.property_type == model.PropertyType.ENUM:
dst_dict['enum_values'] = []
for enum_value in type_.enum_values:
dst_dict['enum_values'].append({'name': enum_value})
if len(dst_dict['enum_values']) > 0:
dst_dict['enum_values'][-1]['last'] = True
elif type_.instance_of is not None:
dst_dict['simple_type'] = type_.instance_of.lower()
else:
dst_dict['simple_type'] = type_.property_type.name.lower()
class _LazySamplesGetter(object):
"""This class is needed so that an extensions API page does not have to fetch
the apps samples page and vice versa.
"""
def __init__(self, api_name, samples):
self._api_name = api_name
self._samples = samples
def get(self, key):
return self._samples.FilterSamples(key, self._api_name)
class APIDataSource(object):
"""This class fetches and loads JSON APIs from the FileSystem passed in with
|compiled_fs_factory|, so the APIs can be plugged into templates.
"""
class Factory(object):
def __init__(self, compiled_fs_factory, base_path):
def create_compiled_fs(fn, category):
return compiled_fs_factory.Create(fn, APIDataSource, category=category)
self._permissions_cache = create_compiled_fs(self._LoadPermissions,
'permissions')
self._json_cache = create_compiled_fs(
lambda api_name, api: self._LoadJsonAPI(api, False),
'json')
self._idl_cache = create_compiled_fs(
lambda api_name, api: self._LoadIdlAPI(api, False),
'idl')
# These caches are used if an APIDataSource does not want to resolve the
# $refs in an API. This is needed to prevent infinite recursion in
# ReferenceResolver.
self._json_cache_no_refs = create_compiled_fs(
lambda api_name, api: self._LoadJsonAPI(api, True),
'json-no-refs')
self._idl_cache_no_refs = create_compiled_fs(
lambda api_name, api: self._LoadIdlAPI(api, True),
'idl-no-refs')
self._idl_names_cache = create_compiled_fs(self._GetIDLNames, 'idl-names')
self._names_cache = create_compiled_fs(self._GetAllNames, 'names')
self._base_path = base_path
# These must be set later via the SetFooDataSourceFactory methods.
self._ref_resolver_factory = None
self._samples_data_source_factory = None
def SetSamplesDataSourceFactory(self, samples_data_source_factory):
self._samples_data_source_factory = samples_data_source_factory
def SetReferenceResolverFactory(self, ref_resolver_factory):
self._ref_resolver_factory = ref_resolver_factory
def Create(self, request, disable_refs=False):
"""Create an APIDataSource. |disable_refs| specifies whether $ref's in
APIs being processed by the |ToDict| method of _JSCModel follows $ref's
in the API. This prevents endless recursion in ReferenceResolver.
"""
if self._samples_data_source_factory is None:
# Only error if there is a request, which means this APIDataSource is
# actually being used to render a page.
if request is not None:
logging.error('SamplesDataSource.Factory was never set in '
'APIDataSource.Factory.')
samples = None
else:
samples = self._samples_data_source_factory.Create(request)
if not disable_refs and self._ref_resolver_factory is None:
logging.error('ReferenceResolver.Factory was never set in '
'APIDataSource.Factory.')
return APIDataSource(self._permissions_cache,
self._json_cache,
self._idl_cache,
self._json_cache_no_refs,
self._idl_cache_no_refs,
self._names_cache,
self._idl_names_cache,
self._base_path,
samples,
disable_refs)
def _LoadPermissions(self, file_name, json_str):
return json_parse.Parse(json_str)
def _LoadJsonAPI(self, api, disable_refs):
return _JSCModel(
json_parse.Parse(api)[0],
self._ref_resolver_factory.Create() if not disable_refs else None,
disable_refs).ToDict()
def _LoadIdlAPI(self, api, disable_refs):
idl = idl_parser.IDLParser().ParseData(api)
return _JSCModel(
idl_schema.IDLSchema(idl).process()[0],
self._ref_resolver_factory.Create() if not disable_refs else None,
disable_refs,
idl=True).ToDict()
def _GetIDLNames(self, base_dir, apis):
return self._GetExtNames(apis, ['idl'])
def _GetAllNames(self, base_dir, apis):
return self._GetExtNames(apis, ['json', 'idl'])
def _GetExtNames(self, apis, exts):
return [model.UnixName(os.path.splitext(api)[0]) for api in apis
if os.path.splitext(api)[1][1:] in exts]
def __init__(self,
permissions_cache,
json_cache,
idl_cache,
json_cache_no_refs,
idl_cache_no_refs,
names_cache,
idl_names_cache,
base_path,
samples,
disable_refs):
self._base_path = base_path
self._permissions_cache = permissions_cache
self._json_cache = json_cache
self._idl_cache = idl_cache
self._json_cache_no_refs = json_cache_no_refs
self._idl_cache_no_refs = idl_cache_no_refs
self._names_cache = names_cache
self._idl_names_cache = idl_names_cache
self._samples = samples
self._disable_refs = disable_refs
def _GetFeatureFile(self, filename):
perms = self._permissions_cache.GetFromFile('%s/%s' %
(self._base_path, filename))
return dict((model.UnixName(k), v) for k, v in perms.iteritems())
def _GetFeatureData(self, path):
# Remove 'experimental_' from path name to match the keys in
# _permissions_features.json.
path = model.UnixName(path.replace('experimental_', ''))
for filename in ['_permission_features.json', '_manifest_features.json']:
feature_data = self._GetFeatureFile(filename).get(path, None)
if feature_data is not None:
break
# There are specific cases in which the feature is actually a list of
# features where only one needs to match; but currently these are only
# used to whitelist features for specific extension IDs. Filter those out.
if isinstance(feature_data, list):
feature_list = feature_data
feature_data = None
for single_feature in feature_list:
if 'whitelist' in single_feature:
continue
if feature_data is not None:
# Note: if you are seeing the exception below, add more heuristics as
# required to form a single feature.
raise ValueError('Multiple potential features match %s. I can\'t '
'decide which one to use. Please help!' % path)
feature_data = single_feature
if feature_data and feature_data['channel'] in ('trunk', 'dev', 'beta'):
feature_data[feature_data['channel']] = True
return feature_data
def _GenerateHandlebarContext(self, handlebar_dict, path):
handlebar_dict['permissions'] = self._GetFeatureData(path)
handlebar_dict['samples'] = _LazySamplesGetter(path, self._samples)
return handlebar_dict
def _GetAsSubdirectory(self, name):
if name.startswith('experimental_'):
parts = name[len('experimental_'):].split('_', 1)
parts[1] = 'experimental_%s' % parts[1]
return '/'.join(parts)
return name.replace('_', '/', 1)
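  # For example, 'app_window' maps to 'app/window', and
  # 'experimental_devtools_network' maps to 'devtools/experimental_network',
  # keeping the 'experimental_' prefix on the leaf name.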
def get(self, key):
if key.endswith('.html') or key.endswith('.json') or key.endswith('.idl'):
path, ext = os.path.splitext(key)
else:
path = key
unix_name = model.UnixName(path)
idl_names = self._idl_names_cache.GetFromFileListing(self._base_path)
names = self._names_cache.GetFromFileListing(self._base_path)
if unix_name not in names and self._GetAsSubdirectory(unix_name) in names:
unix_name = self._GetAsSubdirectory(unix_name)
if self._disable_refs:
cache, ext = (
(self._idl_cache_no_refs, '.idl') if (unix_name in idl_names) else
(self._json_cache_no_refs, '.json'))
else:
cache, ext = ((self._idl_cache, '.idl') if (unix_name in idl_names) else
(self._json_cache, '.json'))
return self._GenerateHandlebarContext(
cache.GetFromFile('%s/%s%s' % (self._base_path, unix_name, ext)),
path)
|
dudepare/django
|
refs/heads/master
|
django/core/management/commands/dumpdata.py
|
305
|
from collections import OrderedDict
from django.apps import apps
from django.core import serializers
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, router
class Command(BaseCommand):
help = ("Output the contents of the database as a fixture of the given "
"format (using each model's default manager unless --all is "
"specified).")
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label[.ModelName]', nargs='*',
help='Restricts dumped data to the specified app_label or app_label.ModelName.')
parser.add_argument('--format', default='json', dest='format',
help='Specifies the output serialization format for fixtures.')
parser.add_argument('--indent', default=None, dest='indent', type=int,
help='Specifies the indent level to use when pretty-printing output.')
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS,
help='Nominates a specific database to dump fixtures from. '
'Defaults to the "default" database.')
parser.add_argument('-e', '--exclude', dest='exclude', action='append', default=[],
help='An app_label or app_label.ModelName to exclude '
'(use multiple --exclude to exclude multiple apps/models).')
parser.add_argument('--natural-foreign', action='store_true', dest='use_natural_foreign_keys', default=False,
help='Use natural foreign keys if they are available.')
parser.add_argument('--natural-primary', action='store_true', dest='use_natural_primary_keys', default=False,
help='Use natural primary keys if they are available.')
parser.add_argument('-a', '--all', action='store_true', dest='use_base_manager', default=False,
help="Use Django's base manager to dump all models stored in the database, "
"including those that would otherwise be filtered or modified by a custom manager.")
parser.add_argument('--pks', dest='primary_keys',
help="Only dump objects with given primary keys. "
"Accepts a comma separated list of keys. "
"This option will only work when you specify one model.")
parser.add_argument('-o', '--output', default=None, dest='output',
help='Specifies file to which the output is written.')
def handle(self, *app_labels, **options):
format = options.get('format')
indent = options.get('indent')
using = options.get('database')
excludes = options.get('exclude')
output = options.get('output')
show_traceback = options.get('traceback')
use_natural_foreign_keys = options.get('use_natural_foreign_keys')
use_natural_primary_keys = options.get('use_natural_primary_keys')
use_base_manager = options.get('use_base_manager')
pks = options.get('primary_keys')
if pks:
primary_keys = pks.split(',')
else:
primary_keys = []
excluded_apps = set()
excluded_models = set()
for exclude in excludes:
if '.' in exclude:
try:
model = apps.get_model(exclude)
except LookupError:
raise CommandError('Unknown model in excludes: %s' % exclude)
excluded_models.add(model)
else:
try:
app_config = apps.get_app_config(exclude)
except LookupError as e:
raise CommandError(str(e))
excluded_apps.add(app_config)
if len(app_labels) == 0:
if primary_keys:
raise CommandError("You can only use --pks option with one model")
app_list = OrderedDict((app_config, None)
for app_config in apps.get_app_configs()
if app_config.models_module is not None and app_config not in excluded_apps)
else:
if len(app_labels) > 1 and primary_keys:
raise CommandError("You can only use --pks option with one model")
app_list = OrderedDict()
for label in app_labels:
try:
app_label, model_label = label.split('.')
try:
app_config = apps.get_app_config(app_label)
except LookupError as e:
raise CommandError(str(e))
if app_config.models_module is None or app_config in excluded_apps:
continue
try:
model = app_config.get_model(model_label)
except LookupError:
raise CommandError("Unknown model: %s.%s" % (app_label, model_label))
app_list_value = app_list.setdefault(app_config, [])
                    # We may have previously seen an "all-models" request for
                    # this app (no model qualifier was given). In this case
                    # there is no need to add specific models to the list.
if app_list_value is not None:
if model not in app_list_value:
app_list_value.append(model)
except ValueError:
if primary_keys:
raise CommandError("You can only use --pks option with one model")
# This is just an app - no model qualifier
app_label = label
try:
app_config = apps.get_app_config(app_label)
except LookupError as e:
raise CommandError(str(e))
if app_config.models_module is None or app_config in excluded_apps:
continue
app_list[app_config] = None
# Check that the serialization format exists; this is a shortcut to
# avoid collating all the objects and _then_ failing.
if format not in serializers.get_public_serializer_formats():
try:
serializers.get_serializer(format)
except serializers.SerializerDoesNotExist:
pass
raise CommandError("Unknown serialization format: %s" % format)
def get_objects(count_only=False):
"""
Collate the objects to be serialized. If count_only is True, just
count the number of objects to be serialized.
"""
for model in serializers.sort_dependencies(app_list.items()):
if model in excluded_models:
continue
if not model._meta.proxy and router.allow_migrate_model(using, model):
if use_base_manager:
objects = model._base_manager
else:
objects = model._default_manager
queryset = objects.using(using).order_by(model._meta.pk.name)
if primary_keys:
queryset = queryset.filter(pk__in=primary_keys)
if count_only:
yield queryset.order_by().count()
else:
for obj in queryset.iterator():
yield obj
try:
self.stdout.ending = None
progress_output = None
object_count = 0
# If dumpdata is outputting to stdout, there is no way to display progress
if (output and self.stdout.isatty() and options['verbosity'] > 0):
progress_output = self.stdout
object_count = sum(get_objects(count_only=True))
stream = open(output, 'w') if output else None
try:
serializers.serialize(format, get_objects(), indent=indent,
use_natural_foreign_keys=use_natural_foreign_keys,
use_natural_primary_keys=use_natural_primary_keys,
stream=stream or self.stdout, progress_output=progress_output,
object_count=object_count)
finally:
if stream:
stream.close()
except Exception as e:
if show_traceback:
raise
raise CommandError("Unable to serialize database: %s" % e)
|
kosz85/django
|
refs/heads/master
|
tests/timezones/admin.py
|
146
|
from django.contrib import admin
from .models import Event, Timestamp
class EventAdmin(admin.ModelAdmin):
list_display = ('dt',)
class TimestampAdmin(admin.ModelAdmin):
readonly_fields = ('created', 'updated')
site = admin.AdminSite(name='admin_tz')
site.register(Event, EventAdmin)
site.register(Timestamp, TimestampAdmin)
|
pvdheijden/OpenCaster
|
refs/heads/master
|
libs/dvbobjects/dvbobjects/DSMCC/__init__.py
|
15
|
#! /usr/bin/env python
# This file is part of the dvbobjects library.
#
# Copyright 2000-2001, GMD, Sankt Augustin
# -- German National Research Center for Information Technology
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
ibuildthecloud/dstack
|
refs/heads/master
|
code/agent/src/agents/pyagent/dstack/plugins/core/event_router.py
|
1
|
from dstack.type_manager import get_type_list
from dstack.type_manager import PRE_REQUEST_HANDLER, STORAGE_DRIVER
from dstack.type_manager import COMPUTE_DRIVER, POST_REQUEST_HANDLER
class Router:
def __init__(self):
pass
def route(self, req):
for handler in _handlers(req):
resp = handler.execute(req)
if resp is not None:
return resp
def _handlers(req):
for pre in get_type_list(PRE_REQUEST_HANDLER):
yield pre
drivers = []
if req.name.startswith("storage."):
drivers = get_type_list(STORAGE_DRIVER)
if req.name.startswith("compute."):
drivers = get_type_list(COMPUTE_DRIVER)
for driver in drivers:
if driver.supports(req):
yield driver
for post in get_type_list(POST_REQUEST_HANDLER):
yield post
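# Minimal sketch (illustrative only, not part of the original module) of how a
# storage driver would be selected by _handlers(): requests are routed purely
# on the "storage."/"compute." prefix of req.name, and route() returns the
# first non-None response. The Request and EchoStorageDriver names below are
# hypothetical.
#
# class Request:
#     def __init__(self, name):
#         self.name = name
#
# class EchoStorageDriver:
#     def supports(self, req):
#         return req.name.startswith("storage.")
#
#     def execute(self, req):
#         return {"handled": req.name}
#
# # Once such a driver is registered under STORAGE_DRIVER in type_manager,
# # Router().route(Request("storage.volume.activate")) would reach it between
# # the PRE_REQUEST_HANDLER and POST_REQUEST_HANDLER lists.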
|
sagarduwal/programming
|
refs/heads/master
|
hash_tables/min-hashing/main.py
|
2
|
def shingles_from_document(k, doc):
    """Return the set of k-character shingles (k-grams) in the document."""
    shingles = set()
    word = doc.read(k)
    if len(word) < k:
        # Document shorter than k characters: no complete shingle exists.
        return shingles
    shingles.add(word)  # include the initial k-gram
    while True:
        ch = doc.read(1)
        if not ch:
            break
        # Slide the window forward by one character.
        word = word[1:] + ch
        shingles.add(word)
    return shingles
if __name__ == "__main__":
    with open("file.txt", "r") as f:
        print(shingles_from_document(2, f))
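# Worked example (illustrative): shingling the text "abcd" with k=2 yields the
# 2-grams {"ab", "bc", "cd"}. io.StringIO exposes the same read() interface as
# the file object used above.
#
#   import io
#   shingles_from_document(2, io.StringIO("abcd"))  # -> {"ab", "bc", "cd"}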
|
kumar303/zamboni
|
refs/heads/master
|
mkt/webpay/models.py
|
19
|
import os
from django.conf import settings
from django.db import models
from lib.utils import static_url
from mkt.site.helpers import absolutify
from mkt.site.models import ModelBase
class ProductIcon(ModelBase):
ext_url = models.CharField(max_length=255, db_index=True, unique=True)
# Height/width of square icon as declared in JWT.
ext_size = models.IntegerField(db_index=True)
# Height/width of local icon after cache.
size = models.IntegerField(db_index=True)
# Image format as told by PIL.
format = models.CharField(max_length=4)
def storage_path(self):
return os.path.join(settings.PRODUCT_ICON_PATH, self._base_path())
def url(self):
return absolutify(os.path.join(static_url('PRODUCT_ICON_URL'),
self._base_path()))
def _base_path(self):
ext = self.format.lower()
if ext == 'jpeg':
# The CDN only allows this extension.
ext = 'jpg'
# This creates an intermediate directory to avoid too-many-links
# errors on Linux, etc
return '%s/%s.%s' % (self.pk / 1000, self.pk, ext)
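    # Worked example (illustrative, not in the original file): with pk=12345
    # and format='JPEG', _base_path() returns '12/12345.jpg'; the pk / 1000
    # prefix spreads icons across subdirectories.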
class Meta:
db_table = 'payment_assets'
|
ghchinoy/tensorflow
|
refs/heads/master
|
tensorflow/python/tpu/async_checkpoint.py
|
12
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Hook for asynchronous checkpointing.
This hook dispatches checkpoint writing operations in a separate thread to
allow execution to continue on the main thread.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import time
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import training_util
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.training.summary_io import SummaryWriterCache
class AsyncCheckpointSaverHook(basic_session_run_hooks.CheckpointSaverHook):
"""Saves checkpoints every N steps or seconds."""
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None,
listeners=None):
"""Initializes a `CheckpointSaverHook`.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
listeners: List of `CheckpointSaverListener` subclass instances. Used for
callbacks that run immediately before or after this hook saves the
checkpoint.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
ValueError: At most one of `saver` or `scaffold` should be set.
"""
logging.info("Create AsyncCheckpointSaverHook.")
if saver is not None and scaffold is not None:
raise ValueError("You cannot provide both saver and scaffold.")
self._saver = saver
self._save_thread = None
self._write_graph_thread = None
self._checkpoint_dir = checkpoint_dir
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._timer = basic_session_run_hooks.SecondOrStepTimer(
every_secs=save_secs, every_steps=save_steps)
self._listeners = listeners or []
self._steps_per_run = 1
self._summary_writer = None
self._global_step_tensor = None
self._last_checkpoint_step = None
def _set_steps_per_run(self, steps_per_run):
self._steps_per_run = steps_per_run
def begin(self):
self._summary_writer = SummaryWriterCache.get(self._checkpoint_dir)
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use CheckpointSaverHook.")
for l in self._listeners:
l.begin()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
    # We write the graph and saver_def here rather than in begin(), because
    # other hooks may still change the graph and add variables in begin().
    # The graph is finalized after all begin() calls.
def _write_graph_fn(self):
training_util.write_graph(
ops.get_default_graph().as_graph_def(add_shapes=True),
self._checkpoint_dir, "graph.pbtxt")
self._write_graph_thread = threading.Thread(target=_write_graph_fn,
args=[self])
self._write_graph_thread.start()
saver_def = self._get_saver().saver_def if self._get_saver() else None
graph = ops.get_default_graph()
meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)
self._summary_writer.add_graph(graph)
self._summary_writer.add_meta_graph(meta_graph_def)
# The checkpoint saved here is the state at step "global_step".
self._save(session, global_step)
self._timer.update_last_triggered_step(global_step)
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_context.session.run(self._global_step_tensor)
if self._timer.should_trigger_for_step(global_step):
self._timer.update_last_triggered_step(global_step)
logging.info("Triggering checkpoint. %s", global_step)
if self._save(run_context.session, global_step):
run_context.request_stop()
def end(self, session):
if self._save_thread:
logging.info("Waiting for any pending checkpoints to finish.")
self._save_thread.join()
if self._write_graph_thread:
logging.info("Waiting for any pending write_graph to finish.")
self._write_graph_thread.join()
last_step = session.run(self._global_step_tensor)
if self._last_checkpoint_step != last_step:
self._save(session, last_step, asynchronous=False)
for l in self._listeners:
l.end(session, last_step)
def _save(self, session, step, asynchronous=True):
"""Saves the latest checkpoint, returns should_stop."""
def _save_fn():
"""Run the saver process."""
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
start_time = time.time()
for l in self._listeners:
l.before_save(session, step)
self._get_saver().save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
for l in self._listeners:
l.after_save(session, step)
end_time = time.time()
logging.info("Checkpoint actual writing time: (%.3f sec)",
end_time - start_time)
logging.info("Checkpoint finished for %d into %s.", step, self._save_path)
if not asynchronous:
self._last_checkpoint_step = step
_save_fn()
return
if self._save_thread is not None:
self._save_thread.join(timeout=0.1)
if self._save_thread.is_alive():
logging.info("Saver thread still in progress, skipping checkpoint.")
return
self._last_checkpoint_step = step
self._save_thread = threading.Thread(target=_save_fn)
self._save_thread.start()
def _get_saver(self):
if self._saver is not None:
return self._saver
elif self._scaffold is not None:
return self._scaffold.saver
# Get saver from the SAVERS collection if present.
collection_key = ops.GraphKeys.SAVERS
savers = ops.get_collection(collection_key)
if not savers:
raise RuntimeError(
"No items in collection {}. Please add a saver to the collection "
"or provide a saver or scaffold.".format(collection_key))
elif len(savers) > 1:
raise RuntimeError(
"More than one item in collection {}. "
"Please indicate which one to use by passing it to the constructor."
.format(collection_key))
self._saver = savers[0]
return savers[0]
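# Sketch of typical usage (an assumption for illustration; not part of this
# module). The hook plugs into a TF1-style training loop exactly like the
# synchronous CheckpointSaverHook it extends; train_op is a placeholder.
#
#   hooks = [AsyncCheckpointSaverHook("/tmp/model_dir", save_steps=1000)]
#   with tf.compat.v1.train.MonitoredTrainingSession(hooks=hooks) as sess:
#       while not sess.should_stop():
#           sess.run(train_op)
#
# Checkpoint writes then happen on a background thread while training
# continues on the main thread, as described in the module docstring.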
|
oavasquez/ingeneria_software
|
refs/heads/master
|
web-project/bower_components/bootstrap-datepicker/docs/conf.py
|
171
|
# -*- coding: utf-8 -*-
#
# bootstrap-datepicker documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 2 14:45:57 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = ''
# The full version, including alpha/beta/rc tags.
#release = ''
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
primary_domain = 'js'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bootstrap-datepicker'
copyright = u'2016, eternicode'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'javascript'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['_themes',]
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bootstrap-datepickerdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'bootstrap-datepicker.tex', u'bootstrap-datepicker Documentation',
u'eternicode', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bootstrap-datepicker', u'bootstrap-datepicker Documentation',
[u'eternicode'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bootstrap-datepicker', u'bootstrap-datepicker Documentation',
u'eternicode', 'bootstrap-datepicker', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
dapeng0802/django-blog-zinnia
|
refs/heads/develop
|
zinnia/urls/capabilities.py
|
8
|
"""Urls for the zinnia capabilities"""
from django.conf.urls import url
from zinnia.views.capabilities import RsdXml
from zinnia.views.capabilities import HumansTxt
from zinnia.views.capabilities import OpenSearchXml
from zinnia.views.capabilities import WLWManifestXml
urlpatterns = [
url(r'^rsd.xml$', RsdXml.as_view(),
name='rsd'),
url(r'^humans.txt$', HumansTxt.as_view(),
name='humans'),
url(r'^opensearch.xml$', OpenSearchXml.as_view(),
name='opensearch'),
url(r'^wlwmanifest.xml$', WLWManifestXml.as_view(),
name='wlwmanifest'),
]
|
tymofij/adofex
|
refs/heads/master
|
transifex/resources/urls/ajax.py
|
1
|
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import *
from transifex.resources.urls import RESOURCE_URL_PARTIAL, RESOURCE_LANG_URL_PARTIAL
from transifex.resources.views import resource_actions, update_translation, \
lock_and_get_translation_file, resource_pseudo_translation_actions
urlpatterns = patterns('',
url(RESOURCE_URL_PARTIAL + r'l/(?P<target_lang_code>[\-_@\w\.]+)/actions/$',
resource_actions, name='resource_actions'),
url(RESOURCE_LANG_URL_PARTIAL + r'update/$',
update_translation, name='update_translation'),
url(RESOURCE_URL_PARTIAL + r'add_translation/$',
update_translation, name='add_translation'),
url(RESOURCE_LANG_URL_PARTIAL+'download/lock/$',
lock_and_get_translation_file, name='lock_and_download_for_translation'),
url(RESOURCE_URL_PARTIAL + r'pseudo_translation_actions/$',
resource_pseudo_translation_actions, name='pseudo_translation_actions'),
)
|
mikalstill/nova
|
refs/heads/master
|
nova/tests/unit/virt/test_configdrive.py
|
24
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import objects
from nova import test
from nova.virt import configdrive
class ConfigDriveTestCase(test.NoDBTestCase):
def test_instance_force(self):
self.flags(force_config_drive=False)
instance = objects.Instance(
config_drive="yes",
system_metadata={
"image_img_config_drive": "mandatory",
}
)
self.assertTrue(configdrive.required_by(instance))
def test_image_meta_force(self):
self.flags(force_config_drive=False)
instance = objects.Instance(
config_drive=None,
system_metadata={
"image_img_config_drive": "mandatory",
}
)
self.assertTrue(configdrive.required_by(instance))
def test_config_flag_force(self):
self.flags(force_config_drive=True)
instance = objects.Instance(
config_drive=None,
system_metadata={
"image_img_config_drive": "optional",
}
)
self.assertTrue(configdrive.required_by(instance))
def test_no_config_drive(self):
self.flags(force_config_drive=False)
instance = objects.Instance(
config_drive=None,
system_metadata={
"image_img_config_drive": "optional",
}
)
self.assertFalse(configdrive.required_by(instance))
|
zak-k/cartopy
|
refs/heads/master
|
lib/cartopy/tests/crs/test_rotated_geodetic.py
|
3
|
# (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
Tests for the rotated pole (rotated geodetic) coordinate system.
"""
from __future__ import (absolute_import, division, print_function)
import unittest
from numpy.testing import assert_almost_equal
from nose.tools import assert_equal
import cartopy.crs as ccrs
class TestRotatedPole(unittest.TestCase):
def check_proj4_params(self, crs, expected):
pro4_params = sorted(crs.proj4_init.split(' +'))
assert_equal(expected, pro4_params)
def test_default(self):
geos = ccrs.RotatedPole(60, 50, 80)
expected = ['+ellps=WGS84', 'lon_0=240', 'no_defs', 'o_lat_p=50',
'o_lon_p=80', 'o_proj=latlon', 'proj=ob_tran',
'to_meter=0.0174532925199433']
self.check_proj4_params(geos, expected)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
|
40223139/LEGOg7-39
|
refs/heads/master
|
static/Brython3.1.3-20150514-095342/Lib/_sre.py
|
622
|
# NOT_RPYTHON
"""
A pure Python reimplementation of the _sre module from CPython 2.4
Copyright 2005 Nik Haldimann, licensed under the MIT license
This code is based on material licensed under CNRI's Python 1.6 license and
copyrighted by: Copyright (c) 1997-2001 by Secret Labs AB
"""
MAXREPEAT = 2147483648
#import array
import operator, sys
from sre_constants import ATCODES, OPCODES, CHCODES
from sre_constants import SRE_INFO_PREFIX, SRE_INFO_LITERAL
from sre_constants import SRE_FLAG_UNICODE, SRE_FLAG_LOCALE
import sys
# Identifying as _sre from Python 2.3 or 2.4
#if sys.version_info[:2] >= (2, 4):
MAGIC = 20031017
#else:
# MAGIC = 20030419
# In _sre.c this is bytesize of the code word type of the C implementation.
# There it's 2 for normal Python builds and more for wide unicode builds (large
# enough to hold a 32-bit UCS-4 encoded character). Since here in pure Python
# we only see re bytecodes as Python longs, we shouldn't have to care about the
# codesize. But sre_compile will compile some stuff differently depending on the
# codesize (e.g., charsets).
# starting with python 3.3 CODESIZE is 4
#if sys.maxunicode == 65535:
# CODESIZE = 2
#else:
CODESIZE = 4
copyright = "_sre.py 2.4c Copyright 2005 by Nik Haldimann"
def getcodesize():
return CODESIZE
def compile(pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]):
"""Compiles (or rather just converts) a pattern descriptor to a SRE_Pattern
object. Actual compilation to opcodes happens in sre_compile."""
return SRE_Pattern(pattern, flags, code, groups, groupindex, indexgroup)
def getlower(char_ord, flags):
if (char_ord < 128) or (flags & SRE_FLAG_UNICODE) \
or (flags & SRE_FLAG_LOCALE and char_ord < 256):
#return ord(unichr(char_ord).lower())
return ord(chr(char_ord).lower())
else:
return char_ord
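# Example (illustrative): getlower(ord('A'), 0) == ord('a'); code points >= 128
# are returned unchanged unless SRE_FLAG_UNICODE or SRE_FLAG_LOCALE is set.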
class SRE_Pattern:
def __init__(self, pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]):
self.pattern = pattern
self.flags = flags
self.groups = groups
self.groupindex = groupindex # Maps group names to group indices
self._indexgroup = indexgroup # Maps indices to group names
self._code = code
def match(self, string, pos=0, endpos=sys.maxsize):
"""If zero or more characters at the beginning of string match this
regular expression, return a corresponding MatchObject instance. Return
None if the string does not match the pattern."""
state = _State(string, pos, endpos, self.flags)
if state.match(self._code):
return SRE_Match(self, state)
return None
def search(self, string, pos=0, endpos=sys.maxsize):
"""Scan through string looking for a location where this regular
expression produces a match, and return a corresponding MatchObject
instance. Return None if no position in the string matches the
pattern."""
state = _State(string, pos, endpos, self.flags)
if state.search(self._code):
return SRE_Match(self, state)
else:
return None
def findall(self, string, pos=0, endpos=sys.maxsize):
"""Return a list of all non-overlapping matches of pattern in string."""
matchlist = []
state = _State(string, pos, endpos, self.flags)
while state.start <= state.end:
state.reset()
state.string_position = state.start
if not state.search(self._code):
break
match = SRE_Match(self, state)
if self.groups == 0 or self.groups == 1:
item = match.group(self.groups)
else:
item = match.groups("")
matchlist.append(item)
if state.string_position == state.start:
state.start += 1
else:
state.start = state.string_position
return matchlist
def _subx(self, template, string, count=0, subn=False):
filter = template
if not callable(template) and "\\" in template:
# handle non-literal strings ; hand it over to the template compiler
#import sre #sre was renamed to re
#fix me brython
#print("possible issue at _sre.py line 116")
import re as sre
filter = sre._subx(self, template)
state = _State(string, 0, sys.maxsize, self.flags)
sublist = []
n = last_pos = 0
while not count or n < count:
state.reset()
state.string_position = state.start
if not state.search(self._code):
break
if last_pos < state.start:
sublist.append(string[last_pos:state.start])
if not (last_pos == state.start and
last_pos == state.string_position and n > 0):
# the above ignores empty matches on latest position
if callable(filter):
sublist.append(filter(SRE_Match(self, state)))
else:
sublist.append(filter)
last_pos = state.string_position
n += 1
if state.string_position == state.start:
state.start += 1
else:
state.start = state.string_position
if last_pos < state.end:
sublist.append(string[last_pos:state.end])
item = "".join(sublist)
if subn:
return item, n
else:
return item
def sub(self, repl, string, count=0):
"""Return the string obtained by replacing the leftmost non-overlapping
occurrences of pattern in string by the replacement repl."""
return self._subx(repl, string, count, False)
def subn(self, repl, string, count=0):
"""Return the tuple (new_string, number_of_subs_made) found by replacing
the leftmost non-overlapping occurrences of pattern with the replacement
repl."""
return self._subx(repl, string, count, True)
def split(self, string, maxsplit=0):
"""Split string by the occurrences of pattern."""
splitlist = []
state = _State(string, 0, sys.maxsize, self.flags)
n = 0
last = state.start
while not maxsplit or n < maxsplit:
state.reset()
state.string_position = state.start
if not state.search(self._code):
break
if state.start == state.string_position: # zero-width match
if last == state.end: # or end of string
break
state.start += 1
continue
splitlist.append(string[last:state.start])
# add groups (if any)
if self.groups:
match = SRE_Match(self, state)
splitlist.extend(list(match.groups(None)))
n += 1
last = state.start = state.string_position
splitlist.append(string[last:state.end])
return splitlist
def finditer(self, string, pos=0, endpos=sys.maxsize):
"""Return a list of all non-overlapping matches of pattern in string."""
#scanner = self.scanner(string, pos, endpos)
_list=[]
_m=self.scanner(string, pos, endpos)
_re=SRE_Scanner(self, string, pos, endpos)
_m=_re.search()
while _m:
_list.append(_m)
_m=_re.search()
return _list
#return iter(scanner.search, None)
def scanner(self, string, start=0, end=sys.maxsize):
return SRE_Scanner(self, string, start, end)
def __copy__(self):
raise TypeError("cannot copy this pattern object")
def __deepcopy__(self):
raise TypeError("cannot copy this pattern object")
class SRE_Scanner:
"""Undocumented scanner interface of sre."""
def __init__(self, pattern, string, start, end):
self.pattern = pattern
self._state = _State(string, start, end, self.pattern.flags)
def _match_search(self, matcher):
state = self._state
state.reset()
state.string_position = state.start
match = None
if matcher(self.pattern._code):
match = SRE_Match(self.pattern, state)
if match is None or state.string_position == state.start:
state.start += 1
else:
state.start = state.string_position
return match
def match(self):
return self._match_search(self._state.match)
def search(self):
return self._match_search(self._state.search)
class SRE_Match:
def __init__(self, pattern, state):
self.re = pattern
self.string = state.string
self.pos = state.pos
self.endpos = state.end
self.lastindex = state.lastindex
if self.lastindex < 0:
self.lastindex = None
self.regs = self._create_regs(state)
#statement below is not valid under python3 ( 0 <= None)
#if pattern._indexgroup and 0 <= self.lastindex < len(pattern._indexgroup):
if self.lastindex is not None and pattern._indexgroup and 0 <= self.lastindex < len(pattern._indexgroup):
# The above upper-bound check should not be necessary, as the re
# compiler is supposed to always provide an _indexgroup list long
# enough. But the re.Scanner class seems to screw up something
# there, test_scanner in test_re won't work without upper-bound
# checking. XXX investigate this and report bug to CPython.
self.lastgroup = pattern._indexgroup[self.lastindex]
else:
self.lastgroup = None
def _create_regs(self, state):
"""Creates a tuple of index pairs representing matched groups."""
regs = [(state.start, state.string_position)]
for group in range(self.re.groups):
mark_index = 2 * group
if mark_index + 1 < len(state.marks) \
and state.marks[mark_index] is not None \
and state.marks[mark_index + 1] is not None:
regs.append((state.marks[mark_index], state.marks[mark_index + 1]))
else:
regs.append((-1, -1))
return tuple(regs)
def _get_index(self, group):
if isinstance(group, int):
if group >= 0 and group <= self.re.groups:
return group
else:
if group in self.re.groupindex:
return self.re.groupindex[group]
raise IndexError("no such group")
def _get_slice(self, group, default):
group_indices = self.regs[group]
if group_indices[0] >= 0:
return self.string[group_indices[0]:group_indices[1]]
else:
return default
def start(self, group=0):
"""Returns the indices of the start of the substring matched by group;
group defaults to zero (meaning the whole matched substring). Returns -1
if group exists but did not contribute to the match."""
return self.regs[self._get_index(group)][0]
def end(self, group=0):
"""Returns the indices of the end of the substring matched by group;
group defaults to zero (meaning the whole matched substring). Returns -1
if group exists but did not contribute to the match."""
return self.regs[self._get_index(group)][1]
def span(self, group=0):
"""Returns the 2-tuple (m.start(group), m.end(group))."""
return self.start(group), self.end(group)
def expand(self, template):
"""Return the string obtained by doing backslash substitution and
resolving group references on template."""
import sre
return sre._expand(self.re, self, template)
def groups(self, default=None):
"""Returns a tuple containing all the subgroups of the match. The
default argument is used for groups that did not participate in the
match (defaults to None)."""
groups = []
for indices in self.regs[1:]:
if indices[0] >= 0:
groups.append(self.string[indices[0]:indices[1]])
else:
groups.append(default)
return tuple(groups)
def groupdict(self, default=None):
"""Return a dictionary containing all the named subgroups of the match.
The default argument is used for groups that did not participate in the
match (defaults to None)."""
groupdict = {}
for key, value in self.re.groupindex.items():
groupdict[key] = self._get_slice(value, default)
return groupdict
def group(self, *args):
"""Returns one or more subgroups of the match. Each argument is either a
group index or a group name."""
if len(args) == 0:
args = (0,)
grouplist = []
for group in args:
grouplist.append(self._get_slice(self._get_index(group), None))
if len(grouplist) == 1:
return grouplist[0]
else:
return tuple(grouplist)
def __copy__():
raise TypeError("cannot copy this pattern object")
def __deepcopy__():
raise TypeError("cannot copy this pattern object")
class _State:
def __init__(self, string, start, end, flags):
self.string = string
if start < 0:
start = 0
if end > len(string):
end = len(string)
self.start = start
self.string_position = self.start
self.end = end
self.pos = start
self.flags = flags
self.reset()
def reset(self):
self.marks = []
self.lastindex = -1
self.marks_stack = []
self.context_stack = []
self.repeat = None
def match(self, pattern_codes):
# Optimization: Check string length. pattern_codes[3] contains the
# minimum length for a string to possibly match.
# brython.. the optimization doesn't work
#if pattern_codes[0] == OPCODES["info"] and pattern_codes[3]:
# if self.end - self.string_position < pattern_codes[3]:
# #_log("reject (got %d chars, need %d)"
# # % (self.end - self.string_position, pattern_codes[3]))
# return False
dispatcher = _OpcodeDispatcher()
self.context_stack.append(_MatchContext(self, pattern_codes))
has_matched = None
while len(self.context_stack) > 0:
context = self.context_stack[-1]
has_matched = dispatcher.match(context)
if has_matched is not None: # don't pop if context isn't done
self.context_stack.pop()
return has_matched
def search(self, pattern_codes):
flags = 0
if pattern_codes[0] == OPCODES["info"]:
# optimization info block
# <INFO> <1=skip> <2=flags> <3=min> <4=max> <5=prefix info>
if pattern_codes[2] & SRE_INFO_PREFIX and pattern_codes[5] > 1:
return self.fast_search(pattern_codes)
flags = pattern_codes[2]
pattern_codes = pattern_codes[pattern_codes[1] + 1:]
string_position = self.start
if pattern_codes[0] == OPCODES["literal"]:
# Special case: Pattern starts with a literal character. This is
# used for short prefixes
character = pattern_codes[1]
while True:
while string_position < self.end \
and ord(self.string[string_position]) != character:
string_position += 1
if string_position >= self.end:
return False
self.start = string_position
string_position += 1
self.string_position = string_position
if flags & SRE_INFO_LITERAL:
return True
if self.match(pattern_codes[2:]):
return True
return False
# General case
while string_position <= self.end:
self.reset()
self.start = self.string_position = string_position
if self.match(pattern_codes):
return True
string_position += 1
return False
def fast_search(self, pattern_codes):
"""Skips forward in a string as fast as possible using information from
an optimization info block."""
# pattern starts with a known prefix
# <5=length> <6=skip> <7=prefix data> <overlap data>
flags = pattern_codes[2]
prefix_len = pattern_codes[5]
prefix_skip = pattern_codes[6] # don't really know what this is good for
prefix = pattern_codes[7:7 + prefix_len]
overlap = pattern_codes[7 + prefix_len - 1:pattern_codes[1] + 1]
pattern_codes = pattern_codes[pattern_codes[1] + 1:]
i = 0
string_position = self.string_position
while string_position < self.end:
while True:
if ord(self.string[string_position]) != prefix[i]:
if i == 0:
break
else:
i = overlap[i]
else:
i += 1
if i == prefix_len:
# found a potential match
self.start = string_position + 1 - prefix_len
self.string_position = string_position + 1 \
- prefix_len + prefix_skip
if flags & SRE_INFO_LITERAL:
return True # matched all of pure literal pattern
if self.match(pattern_codes[2 * prefix_skip:]):
return True
i = overlap[i]
break
string_position += 1
return False
def set_mark(self, mark_nr, position):
if mark_nr & 1:
# This id marks the end of a group.
            # fix Python 3 division incompatibility
#self.lastindex = mark_nr / 2 + 1
self.lastindex = mark_nr // 2 + 1
if mark_nr >= len(self.marks):
self.marks.extend([None] * (mark_nr - len(self.marks) + 1))
self.marks[mark_nr] = position
def get_marks(self, group_index):
marks_index = 2 * group_index
if len(self.marks) > marks_index + 1:
return self.marks[marks_index], self.marks[marks_index + 1]
else:
return None, None
def marks_push(self):
self.marks_stack.append((self.marks[:], self.lastindex))
def marks_pop(self):
self.marks, self.lastindex = self.marks_stack.pop()
def marks_pop_keep(self):
self.marks, self.lastindex = self.marks_stack[-1]
def marks_pop_discard(self):
self.marks_stack.pop()
def lower(self, char_ord):
return getlower(char_ord, self.flags)
class _MatchContext:
def __init__(self, state, pattern_codes):
self.state = state
self.pattern_codes = pattern_codes
self.string_position = state.string_position
self.code_position = 0
self.has_matched = None
def push_new_context(self, pattern_offset):
"""Creates a new child context of this context and pushes it on the
stack. pattern_offset is the offset off the current code position to
start interpreting from."""
child_context = _MatchContext(self.state,
self.pattern_codes[self.code_position + pattern_offset:])
#print("_sre.py:517:pushing new context") #, child_context.has_matched)
#print(self.state.string_position)
#print(self.pattern_codes[self.code_position + pattern_offset:])
#print(pattern_offset)
self.state.context_stack.append(child_context)
return child_context
def peek_char(self, peek=0):
return self.state.string[self.string_position + peek]
def skip_char(self, skip_count):
self.string_position += skip_count
def remaining_chars(self):
return self.state.end - self.string_position
def peek_code(self, peek=0):
return self.pattern_codes[self.code_position + peek]
def skip_code(self, skip_count):
self.code_position += skip_count
def remaining_codes(self):
return len(self.pattern_codes) - self.code_position
def at_beginning(self):
return self.string_position == 0
def at_end(self):
return self.string_position == self.state.end
def at_linebreak(self):
return not self.at_end() and _is_linebreak(self.peek_char())
def at_boundary(self, word_checker):
if self.at_beginning() and self.at_end():
return False
that = not self.at_beginning() and word_checker(self.peek_char(-1))
this = not self.at_end() and word_checker(self.peek_char())
return this != that
class _RepeatContext(_MatchContext):
def __init__(self, context):
_MatchContext.__init__(self, context.state,
context.pattern_codes[context.code_position:])
self.count = -1
#print('569:repeat', context.state.repeat)
self.previous = context.state.repeat
self.last_position = None
class _Dispatcher:
DISPATCH_TABLE = None
def dispatch(self, code, context):
method = self.DISPATCH_TABLE.get(code, self.__class__.unknown)
return method(self, context)
def unknown(self, code, ctx):
raise NotImplementedError()
def build_dispatch_table(cls, code_dict, method_prefix):
if cls.DISPATCH_TABLE is not None:
return
table = {}
for key, value in code_dict.items():
if hasattr(cls, "%s%s" % (method_prefix, key)):
table[value] = getattr(cls, "%s%s" % (method_prefix, key))
cls.DISPATCH_TABLE = table
build_dispatch_table = classmethod(build_dispatch_table)
class _OpcodeDispatcher(_Dispatcher):
def __init__(self):
self.executing_contexts = {}
self.at_dispatcher = _AtcodeDispatcher()
self.ch_dispatcher = _ChcodeDispatcher()
self.set_dispatcher = _CharsetDispatcher()
def match(self, context):
"""Returns True if the current context matches, False if it doesn't and
None if matching is not finished, ie must be resumed after child
contexts have been matched."""
while context.remaining_codes() > 0 and context.has_matched is None:
opcode = context.peek_code()
if not self.dispatch(opcode, context):
return None
if context.has_matched is None:
context.has_matched = False
return context.has_matched
def dispatch(self, opcode, context):
"""Dispatches a context on a given opcode. Returns True if the context
is done matching, False if it must be resumed when next encountered."""
#if self.executing_contexts.has_key(id(context)):
if id(context) in self.executing_contexts:
generator = self.executing_contexts[id(context)]
del self.executing_contexts[id(context)]
has_finished = next(generator)
else:
method = self.DISPATCH_TABLE.get(opcode, _OpcodeDispatcher.unknown)
has_finished = method(self, context)
if hasattr(has_finished, "__next__"): # avoid using the types module
generator = has_finished
has_finished = next(generator)
if not has_finished:
self.executing_contexts[id(context)] = generator
return has_finished
def op_success(self, ctx):
# end of pattern
#self._log(ctx, "SUCCESS")
ctx.state.string_position = ctx.string_position
ctx.has_matched = True
return True
def op_failure(self, ctx):
# immediate failure
#self._log(ctx, "FAILURE")
ctx.has_matched = False
return True
def general_op_literal(self, ctx, compare, decorate=lambda x: x):
#print(ctx.peek_char())
if ctx.at_end() or not compare(decorate(ord(ctx.peek_char())),
decorate(ctx.peek_code(1))):
ctx.has_matched = False
ctx.skip_code(2)
ctx.skip_char(1)
def op_literal(self, ctx):
# match literal string
# <LITERAL> <code>
#self._log(ctx, "LITERAL", ctx.peek_code(1))
self.general_op_literal(ctx, operator.eq)
return True
def op_not_literal(self, ctx):
# match anything that is not the given literal character
# <NOT_LITERAL> <code>
#self._log(ctx, "NOT_LITERAL", ctx.peek_code(1))
self.general_op_literal(ctx, operator.ne)
return True
def op_literal_ignore(self, ctx):
# match literal regardless of case
# <LITERAL_IGNORE> <code>
#self._log(ctx, "LITERAL_IGNORE", ctx.peek_code(1))
self.general_op_literal(ctx, operator.eq, ctx.state.lower)
return True
def op_not_literal_ignore(self, ctx):
# match literal regardless of case
# <LITERAL_IGNORE> <code>
#self._log(ctx, "LITERAL_IGNORE", ctx.peek_code(1))
self.general_op_literal(ctx, operator.ne, ctx.state.lower)
return True
def op_at(self, ctx):
# match at given position
# <AT> <code>
#self._log(ctx, "AT", ctx.peek_code(1))
if not self.at_dispatcher.dispatch(ctx.peek_code(1), ctx):
ctx.has_matched = False
#print('_sre.py:line693, update context.has_matched variable')
return True
ctx.skip_code(2)
return True
def op_category(self, ctx):
# match at given category
# <CATEGORY> <code>
#self._log(ctx, "CATEGORY", ctx.peek_code(1))
if ctx.at_end() or not self.ch_dispatcher.dispatch(ctx.peek_code(1), ctx):
ctx.has_matched = False
#print('_sre.py:line703, update context.has_matched variable')
return True
ctx.skip_code(2)
ctx.skip_char(1)
return True
def op_any(self, ctx):
# match anything (except a newline)
# <ANY>
#self._log(ctx, "ANY")
if ctx.at_end() or ctx.at_linebreak():
ctx.has_matched = False
#print('_sre.py:line714, update context.has_matched variable')
return True
ctx.skip_code(1)
ctx.skip_char(1)
return True
def op_any_all(self, ctx):
# match anything
# <ANY_ALL>
#self._log(ctx, "ANY_ALL")
if ctx.at_end():
ctx.has_matched = False
#print('_sre.py:line725, update context.has_matched variable')
return True
ctx.skip_code(1)
ctx.skip_char(1)
return True
def general_op_in(self, ctx, decorate=lambda x: x):
#self._log(ctx, "OP_IN")
#print('general_op_in')
if ctx.at_end():
ctx.has_matched = False
#print('_sre.py:line734, update context.has_matched variable')
return
skip = ctx.peek_code(1)
ctx.skip_code(2) # set op pointer to the set code
#print(ctx.peek_char(), ord(ctx.peek_char()),
# decorate(ord(ctx.peek_char())))
if not self.check_charset(ctx, decorate(ord(ctx.peek_char()))):
#print('_sre.py:line738, update context.has_matched variable')
ctx.has_matched = False
return
ctx.skip_code(skip - 1)
ctx.skip_char(1)
#print('end:general_op_in')
def op_in(self, ctx):
# match set member (or non_member)
# <IN> <skip> <set>
#self._log(ctx, "OP_IN")
self.general_op_in(ctx)
return True
def op_in_ignore(self, ctx):
# match set member (or non_member), disregarding case of current char
# <IN_IGNORE> <skip> <set>
#self._log(ctx, "OP_IN_IGNORE")
self.general_op_in(ctx, ctx.state.lower)
return True
def op_jump(self, ctx):
# jump forward
# <JUMP> <offset>
#self._log(ctx, "JUMP", ctx.peek_code(1))
ctx.skip_code(ctx.peek_code(1) + 1)
return True
# skip info
# <INFO> <skip>
op_info = op_jump
def op_mark(self, ctx):
# set mark
# <MARK> <gid>
#self._log(ctx, "OP_MARK", ctx.peek_code(1))
ctx.state.set_mark(ctx.peek_code(1), ctx.string_position)
ctx.skip_code(2)
return True
def op_branch(self, ctx):
# alternation
# <BRANCH> <0=skip> code <JUMP> ... <NULL>
#self._log(ctx, "BRANCH")
ctx.state.marks_push()
ctx.skip_code(1)
current_branch_length = ctx.peek_code(0)
while current_branch_length:
            # The following tries to shortcut branches starting with an
            # (unmatched) literal. _sre.c also shortcuts charsets here.
if not (ctx.peek_code(1) == OPCODES["literal"] and \
(ctx.at_end() or ctx.peek_code(2) != ord(ctx.peek_char()))):
ctx.state.string_position = ctx.string_position
child_context = ctx.push_new_context(1)
#print("_sre.py:803:op_branch")
yield False
if child_context.has_matched:
ctx.has_matched = True
yield True
ctx.state.marks_pop_keep()
ctx.skip_code(current_branch_length)
current_branch_length = ctx.peek_code(0)
ctx.state.marks_pop_discard()
ctx.has_matched = False
#print('_sre.py:line805, update context.has_matched variable')
yield True
def op_repeat_one(self, ctx):
# match repeated sequence (maximizing).
# this operator only works if the repeated item is exactly one character
# wide, and we're not already collecting backtracking points.
# <REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail
mincount = ctx.peek_code(2)
maxcount = ctx.peek_code(3)
#print("repeat one", mincount, maxcount)
#self._log(ctx, "REPEAT_ONE", mincount, maxcount)
if ctx.remaining_chars() < mincount:
ctx.has_matched = False
yield True
ctx.state.string_position = ctx.string_position
count = self.count_repetitions(ctx, maxcount)
ctx.skip_char(count)
if count < mincount:
ctx.has_matched = False
yield True
if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["success"]:
# tail is empty. we're finished
ctx.state.string_position = ctx.string_position
ctx.has_matched = True
yield True
ctx.state.marks_push()
if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["literal"]:
# Special case: Tail starts with a literal. Skip positions where
# the rest of the pattern cannot possibly match.
char = ctx.peek_code(ctx.peek_code(1) + 2)
while True:
while count >= mincount and \
(ctx.at_end() or ord(ctx.peek_char()) != char):
ctx.skip_char(-1)
count -= 1
if count < mincount:
break
ctx.state.string_position = ctx.string_position
child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
#print("_sre.py:856:push_new_context")
yield False
if child_context.has_matched:
ctx.has_matched = True
yield True
ctx.skip_char(-1)
count -= 1
ctx.state.marks_pop_keep()
else:
# General case: backtracking
while count >= mincount:
ctx.state.string_position = ctx.string_position
child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
yield False
if child_context.has_matched:
ctx.has_matched = True
yield True
ctx.skip_char(-1)
count -= 1
ctx.state.marks_pop_keep()
ctx.state.marks_pop_discard()
ctx.has_matched = False
#ctx.has_matched = True # <== this should be True (so match object gets returned to program)
yield True
def op_min_repeat_one(self, ctx):
# match repeated sequence (minimizing)
# <MIN_REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail
mincount = ctx.peek_code(2)
maxcount = ctx.peek_code(3)
#self._log(ctx, "MIN_REPEAT_ONE", mincount, maxcount)
if ctx.remaining_chars() < mincount:
ctx.has_matched = False
yield True
ctx.state.string_position = ctx.string_position
if mincount == 0:
count = 0
else:
count = self.count_repetitions(ctx, mincount)
if count < mincount:
ctx.has_matched = False
#print('_sre.py:line891, update context.has_matched variable')
yield True
ctx.skip_char(count)
if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["success"]:
# tail is empty. we're finished
ctx.state.string_position = ctx.string_position
ctx.has_matched = True
yield True
ctx.state.marks_push()
while maxcount == MAXREPEAT or count <= maxcount:
ctx.state.string_position = ctx.string_position
child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
#print('_sre.py:916:push new context')
yield False
if child_context.has_matched:
ctx.has_matched = True
yield True
ctx.state.string_position = ctx.string_position
if self.count_repetitions(ctx, 1) == 0:
break
ctx.skip_char(1)
count += 1
ctx.state.marks_pop_keep()
ctx.state.marks_pop_discard()
ctx.has_matched = False
yield True
def op_repeat(self, ctx):
# create repeat context. all the hard work is done by the UNTIL
# operator (MAX_UNTIL, MIN_UNTIL)
# <REPEAT> <skip> <1=min> <2=max> item <UNTIL> tail
#self._log(ctx, "REPEAT", ctx.peek_code(2), ctx.peek_code(3))
#if ctx.state.repeat is None:
# print("951:ctx.state.repeat is None")
# #ctx.state.repeat=_RepeatContext(ctx)
repeat = _RepeatContext(ctx)
ctx.state.repeat = repeat
ctx.state.string_position = ctx.string_position
child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
#print("_sre.py:941:push new context", id(child_context))
#print(child_context.state.repeat)
#print(ctx.state.repeat)
# are these two yields causing the issue?
yield False
ctx.state.repeat = repeat.previous
ctx.has_matched = child_context.has_matched
yield True
def op_max_until(self, ctx):
# maximizing repeat
# <REPEAT> <skip> <1=min> <2=max> item <MAX_UNTIL> tail
repeat = ctx.state.repeat
#print("op_max_until") #, id(ctx.state.repeat))
if repeat is None:
#print(id(ctx), id(ctx.state))
raise RuntimeError("Internal re error: MAX_UNTIL without REPEAT.")
mincount = repeat.peek_code(2)
maxcount = repeat.peek_code(3)
ctx.state.string_position = ctx.string_position
count = repeat.count + 1
#self._log(ctx, "MAX_UNTIL", count)
if count < mincount:
# not enough matches
repeat.count = count
child_context = repeat.push_new_context(4)
yield False
ctx.has_matched = child_context.has_matched
if not ctx.has_matched:
repeat.count = count - 1
ctx.state.string_position = ctx.string_position
yield True
if (count < maxcount or maxcount == MAXREPEAT) \
and ctx.state.string_position != repeat.last_position:
# we may have enough matches, if we can match another item, do so
repeat.count = count
ctx.state.marks_push()
save_last_position = repeat.last_position # zero-width match protection
repeat.last_position = ctx.state.string_position
child_context = repeat.push_new_context(4)
yield False
repeat.last_position = save_last_position
if child_context.has_matched:
ctx.state.marks_pop_discard()
ctx.has_matched = True
yield True
ctx.state.marks_pop()
repeat.count = count - 1
ctx.state.string_position = ctx.string_position
# cannot match more repeated items here. make sure the tail matches
ctx.state.repeat = repeat.previous
child_context = ctx.push_new_context(1)
#print("_sre.py:987:op_max_until")
yield False
ctx.has_matched = child_context.has_matched
if not ctx.has_matched:
ctx.state.repeat = repeat
ctx.state.string_position = ctx.string_position
yield True
def op_min_until(self, ctx):
# minimizing repeat
# <REPEAT> <skip> <1=min> <2=max> item <MIN_UNTIL> tail
repeat = ctx.state.repeat
if repeat is None:
raise RuntimeError("Internal re error: MIN_UNTIL without REPEAT.")
mincount = repeat.peek_code(2)
maxcount = repeat.peek_code(3)
ctx.state.string_position = ctx.string_position
count = repeat.count + 1
#self._log(ctx, "MIN_UNTIL", count)
if count < mincount:
# not enough matches
repeat.count = count
child_context = repeat.push_new_context(4)
yield False
ctx.has_matched = child_context.has_matched
if not ctx.has_matched:
repeat.count = count - 1
ctx.state.string_position = ctx.string_position
yield True
# see if the tail matches
ctx.state.marks_push()
ctx.state.repeat = repeat.previous
child_context = ctx.push_new_context(1)
#print('_sre.py:1022:push new context')
yield False
if child_context.has_matched:
ctx.has_matched = True
yield True
ctx.state.repeat = repeat
ctx.state.string_position = ctx.string_position
ctx.state.marks_pop()
# match more until tail matches
if count >= maxcount and maxcount != MAXREPEAT:
ctx.has_matched = False
#print('_sre.py:line1022, update context.has_matched variable')
yield True
repeat.count = count
child_context = repeat.push_new_context(4)
yield False
ctx.has_matched = child_context.has_matched
if not ctx.has_matched:
repeat.count = count - 1
ctx.state.string_position = ctx.string_position
yield True
def general_op_groupref(self, ctx, decorate=lambda x: x):
group_start, group_end = ctx.state.get_marks(ctx.peek_code(1))
if group_start is None or group_end is None or group_end < group_start:
ctx.has_matched = False
return True
while group_start < group_end:
if ctx.at_end() or decorate(ord(ctx.peek_char())) \
!= decorate(ord(ctx.state.string[group_start])):
ctx.has_matched = False
#print('_sre.py:line1042, update context.has_matched variable')
return True
group_start += 1
ctx.skip_char(1)
ctx.skip_code(2)
return True
def op_groupref(self, ctx):
# match backreference
# <GROUPREF> <zero-based group index>
#self._log(ctx, "GROUPREF", ctx.peek_code(1))
return self.general_op_groupref(ctx)
def op_groupref_ignore(self, ctx):
# match backreference case-insensitive
# <GROUPREF_IGNORE> <zero-based group index>
#self._log(ctx, "GROUPREF_IGNORE", ctx.peek_code(1))
return self.general_op_groupref(ctx, ctx.state.lower)
def op_groupref_exists(self, ctx):
# <GROUPREF_EXISTS> <group> <skip> codeyes <JUMP> codeno ...
#self._log(ctx, "GROUPREF_EXISTS", ctx.peek_code(1))
group_start, group_end = ctx.state.get_marks(ctx.peek_code(1))
if group_start is None or group_end is None or group_end < group_start:
ctx.skip_code(ctx.peek_code(2) + 1)
else:
ctx.skip_code(3)
return True
def op_assert(self, ctx):
# assert subpattern
# <ASSERT> <skip> <back> <pattern>
#self._log(ctx, "ASSERT", ctx.peek_code(2))
ctx.state.string_position = ctx.string_position - ctx.peek_code(2)
if ctx.state.string_position < 0:
ctx.has_matched = False
yield True
child_context = ctx.push_new_context(3)
yield False
if child_context.has_matched:
ctx.skip_code(ctx.peek_code(1) + 1)
else:
ctx.has_matched = False
yield True
def op_assert_not(self, ctx):
# assert not subpattern
# <ASSERT_NOT> <skip> <back> <pattern>
#self._log(ctx, "ASSERT_NOT", ctx.peek_code(2))
ctx.state.string_position = ctx.string_position - ctx.peek_code(2)
if ctx.state.string_position >= 0:
child_context = ctx.push_new_context(3)
yield False
if child_context.has_matched:
ctx.has_matched = False
yield True
ctx.skip_code(ctx.peek_code(1) + 1)
yield True
def unknown(self, ctx):
#self._log(ctx, "UNKNOWN", ctx.peek_code())
raise RuntimeError("Internal re error. Unknown opcode: %s" % ctx.peek_code())
def check_charset(self, ctx, char):
"""Checks whether a character matches set of arbitrary length. Assumes
the code pointer is at the first member of the set."""
self.set_dispatcher.reset(char)
save_position = ctx.code_position
result = None
while result is None:
result = self.set_dispatcher.dispatch(ctx.peek_code(), ctx)
ctx.code_position = save_position
#print("_sre.py:1123:check_charset", result)
return result
def count_repetitions(self, ctx, maxcount):
"""Returns the number of repetitions of a single item, starting from the
current string position. The code pointer is expected to point to a
        REPEAT_ONE operation (with the repeated item 4 code positions ahead)."""
count = 0
real_maxcount = ctx.state.end - ctx.string_position
if maxcount < real_maxcount and maxcount != MAXREPEAT:
real_maxcount = maxcount
# XXX could special case every single character pattern here, as in C.
        # This is a general solution, a bit hackish, but works and should be
# efficient.
code_position = ctx.code_position
string_position = ctx.string_position
ctx.skip_code(4)
reset_position = ctx.code_position
while count < real_maxcount:
# this works because the single character pattern is followed by
# a success opcode
ctx.code_position = reset_position
self.dispatch(ctx.peek_code(), ctx)
#print("count_repetitions", ctx.has_matched, count)
if ctx.has_matched is False: # could be None as well
break
count += 1
ctx.has_matched = None
ctx.code_position = code_position
ctx.string_position = string_position
return count
def _log(self, context, opname, *args):
arg_string = ("%s " * len(args)) % args
_log("|%s|%s|%s %s" % (context.pattern_codes,
context.string_position, opname, arg_string))
_OpcodeDispatcher.build_dispatch_table(OPCODES, "op_")
class _CharsetDispatcher(_Dispatcher):
def __init__(self):
self.ch_dispatcher = _ChcodeDispatcher()
def reset(self, char):
self.char = char
self.ok = True
def set_failure(self, ctx):
return not self.ok
def set_literal(self, ctx):
# <LITERAL> <code>
if ctx.peek_code(1) == self.char:
return self.ok
else:
ctx.skip_code(2)
def set_category(self, ctx):
# <CATEGORY> <code>
if self.ch_dispatcher.dispatch(ctx.peek_code(1), ctx):
return self.ok
else:
ctx.skip_code(2)
def set_charset(self, ctx):
# <CHARSET> <bitmap> (16 bits per code word)
char_code = self.char
ctx.skip_code(1) # point to beginning of bitmap
if CODESIZE == 2:
if char_code < 256 and ctx.peek_code(char_code >> 4) \
& (1 << (char_code & 15)):
return self.ok
ctx.skip_code(16) # skip bitmap
else:
if char_code < 256 and ctx.peek_code(char_code >> 5) \
& (1 << (char_code & 31)):
return self.ok
ctx.skip_code(8) # skip bitmap
def set_range(self, ctx):
# <RANGE> <lower> <upper>
if ctx.peek_code(1) <= self.char <= ctx.peek_code(2):
return self.ok
ctx.skip_code(3)
def set_negate(self, ctx):
self.ok = not self.ok
ctx.skip_code(1)
#fixme brython. array module doesn't exist
def set_bigcharset(self, ctx):
        raise NotImplementedError("_sre.py: set_bigcharset, array not implemented")
# <BIGCHARSET> <blockcount> <256 blockindices> <blocks>
char_code = self.char
count = ctx.peek_code(1)
ctx.skip_code(2)
if char_code < 65536:
block_index = char_code >> 8
# NB: there are CODESIZE block indices per bytecode
a = array.array("B")
a.fromstring(array.array(CODESIZE == 2 and "H" or "I",
[ctx.peek_code(block_index // CODESIZE)]).tostring())
block = a[block_index % CODESIZE]
ctx.skip_code(256 // CODESIZE) # skip block indices
block_value = ctx.peek_code(block * (32 // CODESIZE)
+ ((char_code & 255) >> (CODESIZE == 2 and 4 or 5)))
if block_value & (1 << (char_code & ((8 * CODESIZE) - 1))):
return self.ok
else:
ctx.skip_code(256 // CODESIZE) # skip block indices
ctx.skip_code(count * (32 // CODESIZE)) # skip blocks
def unknown(self, ctx):
return False
_CharsetDispatcher.build_dispatch_table(OPCODES, "set_")
class _AtcodeDispatcher(_Dispatcher):
def at_beginning(self, ctx):
return ctx.at_beginning()
at_beginning_string = at_beginning
def at_beginning_line(self, ctx):
return ctx.at_beginning() or _is_linebreak(ctx.peek_char(-1))
def at_end(self, ctx):
return (ctx.remaining_chars() == 1 and ctx.at_linebreak()) or ctx.at_end()
def at_end_line(self, ctx):
return ctx.at_linebreak() or ctx.at_end()
def at_end_string(self, ctx):
return ctx.at_end()
def at_boundary(self, ctx):
return ctx.at_boundary(_is_word)
def at_non_boundary(self, ctx):
return not ctx.at_boundary(_is_word)
def at_loc_boundary(self, ctx):
return ctx.at_boundary(_is_loc_word)
def at_loc_non_boundary(self, ctx):
return not ctx.at_boundary(_is_loc_word)
def at_uni_boundary(self, ctx):
return ctx.at_boundary(_is_uni_word)
def at_uni_non_boundary(self, ctx):
return not ctx.at_boundary(_is_uni_word)
def unknown(self, ctx):
return False
_AtcodeDispatcher.build_dispatch_table(ATCODES, "")
class _ChcodeDispatcher(_Dispatcher):
def category_digit(self, ctx):
return _is_digit(ctx.peek_char())
def category_not_digit(self, ctx):
return not _is_digit(ctx.peek_char())
def category_space(self, ctx):
return _is_space(ctx.peek_char())
def category_not_space(self, ctx):
return not _is_space(ctx.peek_char())
def category_word(self, ctx):
return _is_word(ctx.peek_char())
def category_not_word(self, ctx):
return not _is_word(ctx.peek_char())
def category_linebreak(self, ctx):
return _is_linebreak(ctx.peek_char())
def category_not_linebreak(self, ctx):
return not _is_linebreak(ctx.peek_char())
def category_loc_word(self, ctx):
return _is_loc_word(ctx.peek_char())
def category_loc_not_word(self, ctx):
return not _is_loc_word(ctx.peek_char())
def category_uni_digit(self, ctx):
return ctx.peek_char().isdigit()
def category_uni_not_digit(self, ctx):
return not ctx.peek_char().isdigit()
def category_uni_space(self, ctx):
return ctx.peek_char().isspace()
def category_uni_not_space(self, ctx):
return not ctx.peek_char().isspace()
def category_uni_word(self, ctx):
return _is_uni_word(ctx.peek_char())
def category_uni_not_word(self, ctx):
return not _is_uni_word(ctx.peek_char())
def category_uni_linebreak(self, ctx):
return ord(ctx.peek_char()) in _uni_linebreaks
def category_uni_not_linebreak(self, ctx):
return ord(ctx.peek_char()) not in _uni_linebreaks
def unknown(self, ctx):
return False
_ChcodeDispatcher.build_dispatch_table(CHCODES, "")
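# Flag bits per ASCII code point, read by the _is_* helpers below:
# 1 = digit, 2 = space, 16 = word character; the remaining bits mirror the
# character-info table of the C _sre implementation.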
_ascii_char_info = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 6, 2,
2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 0, 0, 0, 0, 0, 0, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0,
0, 0, 16, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0, 0, 0, 0 ]
def _is_digit(char):
code = ord(char)
return code < 128 and _ascii_char_info[code] & 1
def _is_space(char):
code = ord(char)
return code < 128 and _ascii_char_info[code] & 2
def _is_word(char):
# NB: non-ASCII chars aren't words according to _sre.c
code = ord(char)
return code < 128 and _ascii_char_info[code] & 16
def _is_loc_word(char):
return (not (ord(char) & ~255) and char.isalnum()) or char == '_'
def _is_uni_word(char):
# not valid in python 3
#return unichr(ord(char)).isalnum() or char == '_'
return chr(ord(char)).isalnum() or char == '_'
def _is_linebreak(char):
return char == "\n"
# Static list of all unicode codepoints reported by Py_UNICODE_ISLINEBREAK.
_uni_linebreaks = [10, 13, 28, 29, 30, 133, 8232, 8233]
def _log(message):
if 0:
print(message)
|
WangDequan/pyvision
|
refs/heads/master
|
experiments/occlusionreport.py
|
4
|
import pickle
from pylab import *
loaded = pickle.load(open("occlusion.pkl"))
labeledframes = [0]
ranges = {
"Visible": ([(270, 330), (469, 500)], "black"),
"Partial Occlusion": ([(330, 375), (455, 469)], "green"),
"Severe Occlusion": ([(390, 420)], "blue"),
"Total Occlusion": ([(375, 390), (420, 455)], "red"),
}
for label, data in ranges.items():
for start, stop in data[0]:
use = dict(x for x in loaded.items() if start <= x[0] <= stop)
use = use.items()
use.sort()
keys = [x[0] for x in use]
values = [x[1] for x in use]
plot(keys, values, color = data[1], linewidth=4, label = label)
label = "_nolegend_"
diamondx, diamondy = max(((x[1], x[0]) for x in loaded.items()))
plot(diamondy, diamondx, "k*", label = "Requested Frame", markersize = 10)
xlabel("Frame")
ylabel("Expected Label Change")
#legend(loc = "upper left", numpoints=1)
ylim(0, 40)
show()
|
h2oai/h2o-dev
|
refs/heads/master
|
h2o-py/tests/testdir_algos/deeplearning/pyunit_weights_and_distributions_deeplearning.py
|
6
|
from builtins import range
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
def weights_and_distributions():
htable = h2o.upload_file(pyunit_utils.locate("smalldata/gbm_test/moppe.csv"))
htable["premiekl"] = htable["premiekl"].asfactor()
htable["moptva"] = htable["moptva"].asfactor()
htable["zon"] = htable["zon"]
# gamma
dl = H2ODeepLearningEstimator(distribution="gamma")
dl.train(x=list(range(3)),y="medskad",training_frame=htable, weights_column="antskad")
predictions = dl.predict(htable)
# gaussian
dl = H2ODeepLearningEstimator(distribution="gaussian")
dl.train(x=list(range(3)),y="medskad",training_frame=htable, weights_column="antskad")
predictions = dl.predict(htable)
# poisson
dl = H2ODeepLearningEstimator(distribution="poisson")
dl.train(x=list(range(3)),y="medskad",training_frame=htable, weights_column="antskad")
predictions = dl.predict(htable)
# tweedie
dl = H2ODeepLearningEstimator(distribution="tweedie")
dl.train(x=list(range(3)),y="medskad",training_frame=htable, weights_column="antskad")
predictions = dl.predict(htable)
if __name__ == "__main__":
pyunit_utils.standalone_test(weights_and_distributions)
else:
weights_and_distributions()
|
arlowhite/kivy
|
refs/heads/master
|
kivy/uix/settings.py
|
6
|
'''
Settings
========
.. versionadded:: 1.0.7
This module provides a complete and extensible framework for adding a
Settings interface to your application. By default, the interface uses
a :class:`SettingsWithSpinner`, which consists of a
:class:`~kivy.uix.spinner.Spinner` (top) to switch between individual
settings panels (bottom). See :ref:`differentlayouts` for some
alternatives.
.. image:: images/settingswithspinner_kivy.jpg
:align: center
A :class:`SettingsPanel` represents a group of configurable options. The
:attr:`SettingsPanel.title` property is used by :class:`Settings` when a panel
is added: it determines the name of the sidebar button. SettingsPanel controls
a :class:`~kivy.config.ConfigParser` instance.
The panel can be automatically constructed from a JSON definition file: you
describe the settings you want and corresponding sections/keys in the
ConfigParser instance... and you're done!
Settings are also integrated into the :class:`~kivy.app.App` class. Use
:meth:`Settings.add_kivy_panel` to configure the Kivy core settings in a panel.
.. _settings_json:
Create a panel from JSON
------------------------
To create a panel from a JSON-file, you need two things:
* a :class:`~kivy.config.ConfigParser` instance with default values
* a JSON file
.. warning::
The :class:`kivy.config.ConfigParser` is required. You cannot use the
default ConfigParser from Python libraries.
You must create and handle the :class:`~kivy.config.ConfigParser`
object. SettingsPanel will read the values from the associated
ConfigParser instance. Make sure you have set default values (using
:attr:`~kivy.config.ConfigParser.setdefaults`) for all the sections/keys
in your JSON file!
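For instance, defaults matching the JSON example below could be provided
with something along these lines (the section and key names are only
illustrative)::
    from kivy.config import ConfigParser
    config = ConfigParser()
    config.setdefaults('graphics', {'fullscreen': '1'})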
The JSON file contains structured information to describe the available
settings. Here is an example::
[
{
"type": "title",
"title": "Windows"
},
{
"type": "bool",
"title": "Fullscreen",
"desc": "Set the window in windowed or fullscreen",
"section": "graphics",
"key": "fullscreen",
"true": "auto"
}
]
Each element in the root list represents a setting that the user can configure.
Only the "type" key is mandatory: an instance of the associated class will be
created and used for the setting - other keys are assigned to corresponding
properties of that class.
============== =================================================
Type Associated class
-------------- -------------------------------------------------
title :class:`SettingTitle`
bool :class:`SettingBoolean`
numeric :class:`SettingNumeric`
options :class:`SettingOptions`
string :class:`SettingString`
path :class:`SettingPath`
============== =================================================
.. versionadded:: 1.1.0
Added :attr:`SettingPath` type
In the JSON example above, the first element is of type "title". It will create
a new instance of :class:`SettingTitle` and apply the rest of the key-value
pairs to the properties of that class, i.e. "title": "Windows" sets the
:attr:`~SettingsPanel.title` property of the panel to "Windows".
To load the JSON example to a :class:`Settings` instance, use the
:meth:`Settings.add_json_panel` method. It will automatically instantiate a
:class:`SettingsPanel` and add it to :class:`Settings`::
from kivy.config import ConfigParser
config = ConfigParser()
config.read('myconfig.ini')
s = Settings()
s.add_json_panel('My custom panel', config, 'settings_custom.json')
s.add_json_panel('Another panel', config, 'settings_test2.json')
# then use the s as a widget...
.. _differentlayouts:
Different panel layouts
-----------------------
A kivy :class:`~kivy.app.App` can automatically create and display a
:class:`Settings` instance. See the :attr:`~kivy.app.App.settings_cls`
documentation for details on how to choose which settings class to
display.
Several pre-built settings widgets are available. All except
:class:`SettingsWithNoMenu` include close buttons triggering the
on_close event.
- :class:`Settings`: Displays settings with a sidebar at the left to
switch between json panels.
- :class:`SettingsWithSidebar`: A trivial subclass of
:class:`Settings`.
- :class:`SettingsWithSpinner`: Displays settings with a spinner at
the top, which can be used to switch between json panels. Uses
:class:`InterfaceWithSpinner` as the
:attr:`~Settings.interface_cls`. This is the default behavior from
Kivy 1.8.0.
- :class:`SettingsWithTabbedPanel`: Displays json panels as individual
tabs in a :class:`~kivy.uix.tabbedpanel.TabbedPanel`. Uses
:class:`InterfaceWithTabbedPanel` as the :attr:`~Settings.interface_cls`.
- :class:`SettingsWithNoMenu`: Displays a single json panel, with no
way to switch to other panels and no close button. This makes it
impossible for the user to exit unless
:meth:`~kivy.app.App.close_settings` is overridden with a different
close trigger! Uses :class:`InterfaceWithNoMenu` as the
:attr:`~Settings.interface_cls`.
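For example, an application can pick one of these classes up front; a
minimal sketch::
    from kivy.app import App
    from kivy.uix.settings import SettingsWithTabbedPanel
    class MyApp(App):
        settings_cls = SettingsWithTabbedPanel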
You can construct your own settings panels with any layout you choose
by setting :attr:`Settings.interface_cls`. This should be a widget
that displays a json settings panel with some way to switch between
panels. An instance will be automatically created by :class:`Settings`.
Interface widgets may be anything you like, but *must* have a method
add_panel that receives newly created json settings panels for the
interface to display. See the documentation for
:class:`InterfaceWithSidebar` for more information. They may
optionally dispatch an on_close event, for instance if a close button
is clicked. This event is used by :class:`Settings` to trigger its own
on_close event.
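A minimal sketch of such an interface (the class name is purely
illustrative) might look like::
    from kivy.uix.boxlayout import BoxLayout
    class MyInterface(BoxLayout):
        __events__ = ('on_close', )
        def add_panel(self, panel, name, uid):
            # display the panel; a real interface would also provide a way
            # to switch between several panels
            self.add_widget(panel)
        def on_close(self, *args):
            pass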
For a complete, working example, please see
:file:`kivy/examples/settings/main.py`.
'''
__all__ = ('Settings', 'SettingsPanel', 'SettingItem', 'SettingString',
'SettingPath', 'SettingBoolean', 'SettingNumeric', 'SettingOptions',
'SettingTitle', 'SettingsWithSidebar', 'SettingsWithSpinner',
'SettingsWithTabbedPanel', 'SettingsWithNoMenu',
'InterfaceWithSidebar', 'ContentPanel')
import json
import os
from kivy.factory import Factory
from kivy.metrics import dp
from kivy.config import ConfigParser
from kivy.animation import Animation
from kivy.compat import string_types, text_type
from kivy.core.window import Window
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.tabbedpanel import TabbedPanelHeader
from kivy.uix.button import Button
from kivy.uix.filechooser import FileChooserListView
from kivy.uix.scrollview import ScrollView
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.textinput import TextInput
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.widget import Widget
from kivy.properties import ObjectProperty, StringProperty, ListProperty, \
BooleanProperty, NumericProperty, DictProperty
class SettingSpacer(Widget):
# Internal class, not documented.
pass
class SettingItem(FloatLayout):
'''Base class for individual settings (within a panel). This class cannot
be used directly; it is used for implementing the other setting classes.
It builds a row with a title/description (left) and a setting control
(right).
Look at :class:`SettingBoolean`, :class:`SettingNumeric` and
:class:`SettingOptions` for usage examples.
:Events:
`on_release`
Fired when the item is touched and then released.
'''
title = StringProperty('<No title set>')
'''Title of the setting, defaults to '<No title set>'.
:attr:`title` is a :class:`~kivy.properties.StringProperty` and defaults to
'<No title set>'.
'''
desc = StringProperty(None, allownone=True)
'''Description of the setting, rendered on the line below the title.
:attr:`desc` is a :class:`~kivy.properties.StringProperty` and defaults to
None.
'''
disabled = BooleanProperty(False)
'''Indicate if this setting is disabled. If True, all touches on the
setting item will be discarded.
:attr:`disabled` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
section = StringProperty(None)
'''Section of the token inside the :class:`~kivy.config.ConfigParser`
instance.
:attr:`section` is a :class:`~kivy.properties.StringProperty` and defaults
to None.
'''
key = StringProperty(None)
'''Key of the token inside the :attr:`section` in the
:class:`~kivy.config.ConfigParser` instance.
:attr:`key` is a :class:`~kivy.properties.StringProperty` and defaults to
None.
'''
value = ObjectProperty(None)
'''Value of the token according to the :class:`~kivy.config.ConfigParser`
instance. Any change to this value will trigger a
:meth:`Settings.on_config_change` event.
:attr:`value` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
panel = ObjectProperty(None)
'''(internal) Reference to the SettingsPanel for this setting. You don't
need to use it.
:attr:`panel` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
content = ObjectProperty(None)
'''(internal) Reference to the widget that contains the real setting.
As soon as the content object is set, any further call to add_widget will
call the content.add_widget. This is automatically set.
:attr:`content` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
selected_alpha = NumericProperty(0)
'''(internal) Float value from 0 to 1, used to animate the background when
the user touches the item.
:attr:`selected_alpha` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
'''
__events__ = ('on_release', )
def __init__(self, **kwargs):
super(SettingItem, self).__init__(**kwargs)
self.value = self.panel.get_value(self.section, self.key)
def add_widget(self, *largs):
if self.content is None:
return super(SettingItem, self).add_widget(*largs)
return self.content.add_widget(*largs)
def on_touch_down(self, touch):
if not self.collide_point(*touch.pos):
return
if self.disabled:
return
touch.grab(self)
self.selected_alpha = 1
return super(SettingItem, self).on_touch_down(touch)
def on_touch_up(self, touch):
if touch.grab_current is self:
touch.ungrab(self)
self.dispatch('on_release')
Animation(selected_alpha=0, d=.25, t='out_quad').start(self)
return True
return super(SettingItem, self).on_touch_up(touch)
def on_release(self):
pass
def on_value(self, instance, value):
if not self.section or not self.key:
return
# get current value in config
panel = self.panel
if not isinstance(value, string_types):
value = str(value)
panel.set_value(self.section, self.key, value)
class SettingBoolean(SettingItem):
'''Implementation of a boolean setting on top of a :class:`SettingItem`. It
is visualized with a :class:`~kivy.uix.switch.Switch` widget. By default,
0 and 1 are used for values: you can change them by setting :attr:`values`.
'''
values = ListProperty(['0', '1'])
'''Values used to represent the state of the setting. If you want to use
"yes" and "no" in your ConfigParser instance::
SettingBoolean(..., values=['no', 'yes'])
.. warning::
        You need a minimum of two values; index 0 will be used as False
        and index 1 as True.
:attr:`values` is a :class:`~kivy.properties.ListProperty` and defaults to
['0', '1']
'''
class SettingString(SettingItem):
'''Implementation of a string setting on top of a :class:`SettingItem`.
It is visualized with a :class:`~kivy.uix.label.Label` widget that, when
clicked, will open a :class:`~kivy.uix.popup.Popup` with a
    :class:`~kivy.uix.textinput.TextInput` so the user can enter a custom
value.
'''
popup = ObjectProperty(None, allownone=True)
'''(internal) Used to store the current popup when it's shown.
:attr:`popup` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
textinput = ObjectProperty(None)
'''(internal) Used to store the current textinput from the popup and
to listen for changes.
:attr:`textinput` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
def on_panel(self, instance, value):
if value is None:
return
self.fbind('on_release', self._create_popup)
def _dismiss(self, *largs):
if self.textinput:
self.textinput.focus = False
if self.popup:
self.popup.dismiss()
self.popup = None
def _validate(self, instance):
self._dismiss()
value = self.textinput.text.strip()
self.value = value
def _create_popup(self, instance):
# create popup layout
content = BoxLayout(orientation='vertical', spacing='5dp')
popup_width = min(0.95 * Window.width, dp(500))
self.popup = popup = Popup(
title=self.title, content=content, size_hint=(None, None),
size=(popup_width, '250dp'))
        # create the textinput used for the string value
self.textinput = textinput = TextInput(
text=self.value, font_size='24sp', multiline=False,
size_hint_y=None, height='42sp')
textinput.bind(on_text_validate=self._validate)
self.textinput = textinput
        # construct the content; plain widgets are used as spacers
content.add_widget(Widget())
content.add_widget(textinput)
content.add_widget(Widget())
content.add_widget(SettingSpacer())
        # 2 buttons are created to accept or cancel the current value
btnlayout = BoxLayout(size_hint_y=None, height='50dp', spacing='5dp')
btn = Button(text='Ok')
btn.bind(on_release=self._validate)
btnlayout.add_widget(btn)
btn = Button(text='Cancel')
btn.bind(on_release=self._dismiss)
btnlayout.add_widget(btn)
content.add_widget(btnlayout)
# all done, open the popup !
popup.open()
class SettingPath(SettingItem):
'''Implementation of a Path setting on top of a :class:`SettingItem`.
It is visualized with a :class:`~kivy.uix.label.Label` widget that, when
clicked, will open a :class:`~kivy.uix.popup.Popup` with a
:class:`~kivy.uix.filechooser.FileChooserListView` so the user can enter
a custom value.
.. versionadded:: 1.1.0
'''
popup = ObjectProperty(None, allownone=True)
'''(internal) Used to store the current popup when it is shown.
:attr:`popup` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
textinput = ObjectProperty(None)
'''(internal) Used to store the current textinput from the popup and
to listen for changes.
:attr:`textinput` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
def on_panel(self, instance, value):
if value is None:
return
self.fbind('on_release', self._create_popup)
def _dismiss(self, *largs):
if self.textinput:
self.textinput.focus = False
if self.popup:
self.popup.dismiss()
self.popup = None
def _validate(self, instance):
self._dismiss()
value = self.textinput.selection
if not value:
return
self.value = os.path.realpath(value[0])
def _create_popup(self, instance):
# create popup layout
content = BoxLayout(orientation='vertical', spacing=5)
popup_width = min(0.95 * Window.width, dp(500))
self.popup = popup = Popup(
title=self.title, content=content, size_hint=(None, 0.9),
width=popup_width)
# create the filechooser
self.textinput = textinput = FileChooserListView(
path=self.value, size_hint=(1, 1), dirselect=True)
textinput.bind(on_path=self._validate)
self.textinput = textinput
# construct the content
content.add_widget(textinput)
content.add_widget(SettingSpacer())
        # 2 buttons are created to accept or cancel the current value
btnlayout = BoxLayout(size_hint_y=None, height='50dp', spacing='5dp')
btn = Button(text='Ok')
btn.bind(on_release=self._validate)
btnlayout.add_widget(btn)
btn = Button(text='Cancel')
btn.bind(on_release=self._dismiss)
btnlayout.add_widget(btn)
content.add_widget(btnlayout)
# all done, open the popup !
popup.open()
class SettingNumeric(SettingString):
'''Implementation of a numeric setting on top of a :class:`SettingString`.
It is visualized with a :class:`~kivy.uix.label.Label` widget that, when
clicked, will open a :class:`~kivy.uix.popup.Popup` with a
    :class:`~kivy.uix.textinput.TextInput` so the user can enter a custom
value.
'''
def _validate(self, instance):
# we know the type just by checking if there is a '.' in the original
# value
is_float = '.' in str(self.value)
self._dismiss()
try:
if is_float:
self.value = text_type(float(self.textinput.text))
else:
self.value = text_type(int(self.textinput.text))
except ValueError:
return
class SettingOptions(SettingItem):
'''Implementation of an option list on top of a :class:`SettingItem`.
It is visualized with a :class:`~kivy.uix.label.Label` widget that, when
clicked, will open a :class:`~kivy.uix.popup.Popup` with a
list of options from which the user can select.
'''
options = ListProperty([])
    '''List of all available options. This must be a list of "string" items.
Otherwise, it will crash. :)
:attr:`options` is a :class:`~kivy.properties.ListProperty` and defaults
to [].
'''
popup = ObjectProperty(None, allownone=True)
'''(internal) Used to store the current popup when it is shown.
:attr:`popup` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
def on_panel(self, instance, value):
if value is None:
return
self.fbind('on_release', self._create_popup)
def _set_option(self, instance):
self.value = instance.text
self.popup.dismiss()
def _create_popup(self, instance):
# create the popup
content = BoxLayout(orientation='vertical', spacing='5dp')
popup_width = min(0.95 * Window.width, dp(500))
self.popup = popup = Popup(
content=content, title=self.title, size_hint=(None, None),
size=(popup_width, '400dp'))
popup.height = len(self.options) * dp(55) + dp(150)
# add all the options
content.add_widget(Widget(size_hint_y=None, height=1))
uid = str(self.uid)
for option in self.options:
state = 'down' if option == self.value else 'normal'
btn = ToggleButton(text=option, state=state, group=uid)
btn.bind(on_release=self._set_option)
content.add_widget(btn)
        # finally, add a cancel button to return to the previous panel
content.add_widget(SettingSpacer())
btn = Button(text='Cancel', size_hint_y=None, height=dp(50))
btn.bind(on_release=popup.dismiss)
content.add_widget(btn)
# and open the popup !
popup.open()
class SettingTitle(Label):
'''A simple title label, used to organize the settings in sections.
'''
title = Label.text
panel = ObjectProperty(None)
class SettingsPanel(GridLayout):
    '''This class is used to construct settings panels, for use with a
:class:`Settings` instance or subclass.
'''
title = StringProperty('Default title')
'''Title of the panel. The title will be reused by the :class:`Settings` in
the sidebar.
'''
config = ObjectProperty(None, allownone=True)
'''A :class:`kivy.config.ConfigParser` instance. See module documentation
for more information.
'''
settings = ObjectProperty(None)
'''A :class:`Settings` instance that will be used to fire the
`on_config_change` event.
'''
def __init__(self, **kwargs):
if 'cols' not in kwargs:
self.cols = 1
super(SettingsPanel, self).__init__(**kwargs)
def on_config(self, instance, value):
if value is None:
return
if not isinstance(value, ConfigParser):
            raise Exception('Invalid config object, you must use a '
                            'kivy.config.ConfigParser, not another one!')
def get_value(self, section, key):
'''Return the value of the section/key from the :attr:`config`
ConfigParser instance. This function is used by :class:`SettingItem` to
get the value for a given section/key.
If you don't want to use a ConfigParser instance, you might want to
override this function.
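        A hypothetical override backed by a plain mapping could look like::
            def get_value(self, section, key):
                # my_values is a hypothetical dict kept by the subclass
                return self.my_values.get((section, key))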
'''
config = self.config
if not config:
return
return config.get(section, key)
def set_value(self, section, key, value):
current = self.get_value(section, key)
if current == value:
return
config = self.config
if config:
config.set(section, key, value)
config.write()
settings = self.settings
if settings:
settings.dispatch('on_config_change',
config, section, key, value)
class InterfaceWithSidebar(BoxLayout):
'''The default Settings interface class. It displays a sidebar menu
with names of available settings panels, which may be used to switch
which one is currently displayed.
See :meth:`~InterfaceWithSidebar.add_panel` for information on the
method you must implement if creating your own interface.
This class also dispatches an event 'on_close', which is triggered
when the sidebar menu's close button is released. If creating your
own interface widget, it should also dispatch such an event which
will automatically be caught by :class:`Settings` and used to
trigger its own 'on_close' event.
'''
menu = ObjectProperty()
'''(internal) A reference to the sidebar menu widget.
:attr:`menu` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
content = ObjectProperty()
'''(internal) A reference to the panel display widget (a
:class:`ContentPanel`).
:attr:`content` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
__events__ = ('on_close', )
def __init__(self, *args, **kwargs):
super(InterfaceWithSidebar, self).__init__(*args, **kwargs)
self.menu.close_button.bind(
on_release=lambda j: self.dispatch('on_close'))
def add_panel(self, panel, name, uid):
'''This method is used by Settings to add new panels for possible
display. Any replacement for ContentPanel *must* implement
this method.
:param panel: A :class:`SettingsPanel`. It should be stored
and the interface should provide a way to switch
between panels.
:param name: The name of the panel as a string. It
may be used to represent the panel but isn't necessarily
unique.
:param uid: A unique int identifying the panel. It should be
used to identify and switch between panels.
'''
self.menu.add_item(name, uid)
self.content.add_panel(panel, name, uid)
def on_close(self, *args):
pass
class InterfaceWithSpinner(BoxLayout):
'''A settings interface that displays a spinner at the top for
switching between panels.
The workings of this class are considered internal and are not
documented. See :meth:`InterfaceWithSidebar` for
information on implementing your own interface class.
'''
__events__ = ('on_close', )
menu = ObjectProperty()
    '''(internal) A reference to the spinner menu widget.
    :attr:`menu` is an :class:`~kivy.properties.ObjectProperty` and
    defaults to None.
'''
content = ObjectProperty()
'''(internal) A reference to the panel display widget (a
:class:`ContentPanel`).
    :attr:`content` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
def __init__(self, *args, **kwargs):
super(InterfaceWithSpinner, self).__init__(*args, **kwargs)
self.menu.close_button.bind(
on_release=lambda j: self.dispatch('on_close'))
def add_panel(self, panel, name, uid):
'''This method is used by Settings to add new panels for possible
display. Any replacement for ContentPanel *must* implement
this method.
:param panel: A :class:`SettingsPanel`. It should be stored
and the interface should provide a way to switch
between panels.
:param name: The name of the panel as a string. It
may be used to represent the panel but may not
be unique.
:param uid: A unique int identifying the panel. It should be
used to identify and switch between panels.
'''
self.content.add_panel(panel, name, uid)
self.menu.add_item(name, uid)
def on_close(self, *args):
pass
class ContentPanel(ScrollView):
'''A class for displaying settings panels. It displays a single
settings panel at a time, taking up the full size and shape of the
ContentPanel. It is used by :class:`InterfaceWithSidebar` and
:class:`InterfaceWithSpinner` to display settings.
'''
panels = DictProperty({})
    '''(internal) Stores a dictionary mapping panel uids to their settings panels.
:attr:`panels` is a :class:`~kivy.properties.DictProperty` and
defaults to {}.
'''
container = ObjectProperty()
'''(internal) A reference to the GridLayout that contains the
settings panel.
:attr:`container` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
current_panel = ObjectProperty(None)
'''(internal) A reference to the current settings panel.
:attr:`current_panel` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
current_uid = NumericProperty(0)
'''(internal) A reference to the uid of the current settings panel.
:attr:`current_uid` is a
:class:`~kivy.properties.NumericProperty` and defaults to 0.
'''
def add_panel(self, panel, name, uid):
'''This method is used by Settings to add new panels for possible
display. Any replacement for ContentPanel *must* implement
this method.
:param panel: A :class:`SettingsPanel`. It should be stored
and displayed when requested.
:param name: The name of the panel as a string. It
may be used to represent the panel.
:param uid: A unique int identifying the panel. It should be
stored and used to identify panels when switching.
'''
self.panels[uid] = panel
if not self.current_uid:
self.current_uid = uid
def on_current_uid(self, *args):
'''The uid of the currently displayed panel. Changing this will
automatically change the displayed panel.
:param uid: A panel uid. It should be used to retrieve and
display a settings panel that has previously been
added with :meth:`add_panel`.
'''
uid = self.current_uid
if uid in self.panels:
if self.current_panel is not None:
self.remove_widget(self.current_panel)
new_panel = self.panels[uid]
self.add_widget(new_panel)
self.current_panel = new_panel
return True
return False # New uid doesn't exist
def add_widget(self, widget):
if self.container is None:
super(ContentPanel, self).add_widget(widget)
else:
self.container.add_widget(widget)
def remove_widget(self, widget):
self.container.remove_widget(widget)
class Settings(BoxLayout):
'''Settings UI. Check module documentation for more information on how
to use this class.
:Events:
`on_config_change`: ConfigParser instance, section, key, value
Fired when the section's key-value pair of a ConfigParser changes.
    .. warning::
value will be str/unicode type, regardless of the setting
type (numeric, boolean, etc)
`on_close`
Fired by the default panel when the Close button is pressed.
'''
interface = ObjectProperty(None)
'''(internal) Reference to the widget that will contain, organise and
display the panel configuration panel widgets.
:attr:`interface` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
interface_cls = ObjectProperty(InterfaceWithSidebar)
'''The widget class that will be used to display the graphical
interface for the settings panel. By default, it displays one Settings
panel at a time with a sidebar to switch between them.
:attr:`interface_cls` is an
:class:`~kivy.properties.ObjectProperty` and defaults to
:class:`InterfaceWithSidebar`.
.. versionchanged:: 1.8.0
If you set a string, the :class:`~kivy.factory.Factory` will be used to
resolve the class.
'''
__events__ = ('on_close', 'on_config_change')
def __init__(self, *args, **kargs):
self._types = {}
super(Settings, self).__init__(*args, **kargs)
self.add_interface()
self.register_type('string', SettingString)
self.register_type('bool', SettingBoolean)
self.register_type('numeric', SettingNumeric)
self.register_type('options', SettingOptions)
self.register_type('title', SettingTitle)
self.register_type('path', SettingPath)
def on_touch_down(self, touch):
if self.collide_point(*touch.pos):
super(Settings, self).on_touch_down(touch)
return True
def register_type(self, tp, cls):
'''Register a new type that can be used in the JSON definition.
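        For example, a custom setting widget (the class name here is
        hypothetical) could be registered before building panels::
            settings = Settings()
            settings.register_type('color', SettingColorPicker)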
'''
self._types[tp] = cls
def on_close(self, *args):
pass
def add_interface(self):
'''(Internal) creates an instance of :attr:`Settings.interface_cls`,
and sets it to :attr:`~Settings.interface`. When json panels are
created, they will be added to this interface which will display them
to the user.
'''
cls = self.interface_cls
if isinstance(cls, string_types):
cls = Factory.get(cls)
interface = cls()
self.interface = interface
self.add_widget(interface)
self.interface.bind(on_close=lambda j: self.dispatch('on_close'))
def on_config_change(self, config, section, key, value):
pass
def add_json_panel(self, title, config, filename=None, data=None):
'''Create and add a new :class:`SettingsPanel` using the configuration
`config` with the JSON definition `filename`.
Check the :ref:`settings_json` section in the documentation for more
information about JSON format and the usage of this function.
'''
panel = self.create_json_panel(title, config, filename, data)
uid = panel.uid
if self.interface is not None:
self.interface.add_panel(panel, title, uid)
def create_json_panel(self, title, config, filename=None, data=None):
'''Create new :class:`SettingsPanel`.
.. versionadded:: 1.5.0
Check the documentation of :meth:`add_json_panel` for more information.
'''
if filename is None and data is None:
raise Exception('You must specify either the filename or data')
if filename is not None:
with open(filename, 'r') as fd:
data = json.loads(fd.read())
else:
data = json.loads(data)
if type(data) != list:
            raise ValueError('The JSON data must be a list of settings')
panel = SettingsPanel(title=title, settings=self, config=config)
for setting in data:
# determine the type and the class to use
            if 'type' not in setting:
                raise ValueError('One setting is missing the "type" element')
ttype = setting['type']
cls = self._types.get(ttype)
if cls is None:
raise ValueError(
'No class registered to handle the <%s> type' %
setting['type'])
            # create an instance of the class, without the type attribute
del setting['type']
str_settings = {}
for key, item in setting.items():
str_settings[str(key)] = item
instance = cls(panel=panel, **str_settings)
# instance created, add to the panel
panel.add_widget(instance)
return panel
def add_kivy_panel(self):
'''Add a panel for configuring Kivy. This panel acts directly on the
kivy configuration. Feel free to include or exclude it in your
configuration.
See :meth:`~kivy.app.App.use_kivy_settings` for information on
enabling/disabling the automatic kivy panel.
'''
from kivy import kivy_data_dir
from kivy.config import Config
from os.path import join
self.add_json_panel('Kivy', Config,
join(kivy_data_dir, 'settings_kivy.json'))
class SettingsWithSidebar(Settings):
'''A settings widget that displays settings panels with a sidebar to
switch between them. This is the default behaviour of
:class:`Settings`, and this widget is a trivial wrapper subclass.
'''
class SettingsWithSpinner(Settings):
'''A settings widget that displays one settings panel at a time with a
spinner at the top to switch between them.
'''
def __init__(self, *args, **kwargs):
self.interface_cls = InterfaceWithSpinner
super(SettingsWithSpinner, self).__init__(*args, **kwargs)
class SettingsWithTabbedPanel(Settings):
'''A settings widget that displays settings panels as pages in a
:class:`~kivy.uix.tabbedpanel.TabbedPanel`.
'''
__events__ = ('on_close', )
def __init__(self, *args, **kwargs):
self.interface_cls = InterfaceWithTabbedPanel
super(SettingsWithTabbedPanel, self).__init__(*args, **kwargs)
def on_close(self, *args):
pass
class SettingsWithNoMenu(Settings):
'''A settings widget that displays a single settings panel with *no*
Close button. It will not accept more than one Settings panel. It
is intended for use in programs with few enough settings that a
full panel switcher is not useful.
.. warning::
This Settings panel does *not* provide a Close
button, and so it is impossible to leave the settings screen
unless you also add other behaviour or override
:meth:`~kivy.app.App.display_settings` and
:meth:`~kivy.app.App.close_settings`.
'''
def __init__(self, *args, **kwargs):
self.interface_cls = InterfaceWithNoMenu
super(SettingsWithNoMenu, self).__init__(*args, **kwargs)
class InterfaceWithNoMenu(ContentPanel):
'''The interface widget used by :class:`SettingsWithNoMenu`. It
stores and displays a single settings panel.
This widget is considered internal and is not documented. See the
:class:`ContentPanel` for information on defining your own content
widget.
'''
def add_widget(self, widget):
if self.container is not None and len(self.container.children) > 0:
raise Exception(
'ContentNoMenu cannot accept more than one settings panel')
super(InterfaceWithNoMenu, self).add_widget(widget)
class InterfaceWithTabbedPanel(FloatLayout):
'''The content widget used by :class:`SettingsWithTabbedPanel`. It
stores and displays Settings panels in tabs of a TabbedPanel.
This widget is considered internal and is not documented. See
:class:`InterfaceWithSidebar` for information on defining your own
interface widget.
'''
tabbedpanel = ObjectProperty()
close_button = ObjectProperty()
__events__ = ('on_close', )
def __init__(self, *args, **kwargs):
super(InterfaceWithTabbedPanel, self).__init__(*args, **kwargs)
self.close_button.bind(on_release=lambda j: self.dispatch('on_close'))
def add_panel(self, panel, name, uid):
scrollview = ScrollView()
scrollview.add_widget(panel)
if not self.tabbedpanel.default_tab_content:
self.tabbedpanel.default_tab_text = name
self.tabbedpanel.default_tab_content = scrollview
else:
panelitem = TabbedPanelHeader(text=name, content=scrollview)
self.tabbedpanel.add_widget(panelitem)
def on_close(self, *args):
pass
class MenuSpinner(BoxLayout):
    '''The menu class used by :class:`SettingsWithSpinner`. It provides a
    spinner that lets the user switch between settings panels.
This widget is considered internal and is not documented. See
:class:`MenuSidebar` for information on menus and creating your own menu
class.
'''
selected_uid = NumericProperty(0)
spinner = ObjectProperty()
panel_names = DictProperty({})
spinner_text = StringProperty()
close_button = ObjectProperty()
def add_item(self, name, uid):
values = self.spinner.values
if name in values:
i = 2
while name + ' {}'.format(i) in values:
i += 1
name = name + ' {}'.format(i)
self.panel_names[name] = uid
self.spinner.values.append(name)
if not self.spinner.text:
self.spinner.text = name
def on_spinner_text(self, *args):
text = self.spinner_text
self.selected_uid = self.panel_names[text]
class MenuSidebar(FloatLayout):
'''The menu used by :class:`InterfaceWithSidebar`. It provides a
sidebar with an entry for each settings panel, which the user may
click to select.
'''
selected_uid = NumericProperty(0)
'''The uid of the currently selected panel. This may be used to switch
between displayed panels, e.g. by binding it to the
:attr:`~ContentPanel.current_uid` of a :class:`ContentPanel`.
:attr:`selected_uid` is a
    :class:`~kivy.properties.NumericProperty` and defaults to 0.
'''
buttons_layout = ObjectProperty(None)
'''(internal) Reference to the GridLayout that contains individual
settings panel menu buttons.
:attr:`buttons_layout` is an
:class:`~kivy.properties.ObjectProperty` and defaults to None.
'''
close_button = ObjectProperty(None)
'''(internal) Reference to the widget's Close button.
    :attr:`close_button` is an
:class:`~kivy.properties.ObjectProperty` and defaults to None.
'''
def add_item(self, name, uid):
'''This method is used to add new panels to the menu.
:param name: The name (a string) of the panel. It should be
used to represent the panel in the menu.
        :param uid: The uid (an int) of the panel. It should be used
internally to represent the panel and used to set
self.selected_uid when the panel is changed.
'''
label = SettingSidebarLabel(text=name, uid=uid, menu=self)
if len(self.buttons_layout.children) == 0:
label.selected = True
if self.buttons_layout is not None:
self.buttons_layout.add_widget(label)
def on_selected_uid(self, *args):
'''(internal) unselects any currently selected menu buttons, unless
they represent the current panel.
'''
for button in self.buttons_layout.children:
if button.uid != self.selected_uid:
button.selected = False
class SettingSidebarLabel(Label):
# Internal class, not documented.
selected = BooleanProperty(False)
uid = NumericProperty(0)
menu = ObjectProperty(None)
def on_touch_down(self, touch):
if not self.collide_point(*touch.pos):
return
self.selected = True
self.menu.selected_uid = self.uid
if __name__ == '__main__':
from kivy.app import App
class SettingsApp(App):
def build(self):
s = Settings()
s.add_kivy_panel()
s.bind(on_close=self.stop)
return s
SettingsApp().run()
|
barryrobison/arsenalsuite
|
refs/heads/master
|
cpp/lib/PyQt4/examples/widgets/tetrix.py
|
17
|
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2010 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
import random
from PyQt4 import QtCore, QtGui
NoShape, ZShape, SShape, LineShape, TShape, SquareShape, LShape, MirroredLShape = range(8)
class TetrixWindow(QtGui.QWidget):
def __init__(self):
super(TetrixWindow, self).__init__()
self.board = TetrixBoard()
nextPieceLabel = QtGui.QLabel()
nextPieceLabel.setFrameStyle(QtGui.QFrame.Box | QtGui.QFrame.Raised)
nextPieceLabel.setAlignment(QtCore.Qt.AlignCenter)
self.board.setNextPieceLabel(nextPieceLabel)
scoreLcd = QtGui.QLCDNumber(5)
scoreLcd.setSegmentStyle(QtGui.QLCDNumber.Filled)
levelLcd = QtGui.QLCDNumber(2)
levelLcd.setSegmentStyle(QtGui.QLCDNumber.Filled)
linesLcd = QtGui.QLCDNumber(5)
linesLcd.setSegmentStyle(QtGui.QLCDNumber.Filled)
startButton = QtGui.QPushButton("&Start")
startButton.setFocusPolicy(QtCore.Qt.NoFocus)
quitButton = QtGui.QPushButton("&Quit")
quitButton.setFocusPolicy(QtCore.Qt.NoFocus)
pauseButton = QtGui.QPushButton("&Pause")
pauseButton.setFocusPolicy(QtCore.Qt.NoFocus)
startButton.clicked.connect(self.board.start)
pauseButton.clicked.connect(self.board.pause)
quitButton.clicked.connect(QtGui.qApp.quit)
self.board.scoreChanged.connect(scoreLcd.display)
self.board.levelChanged.connect(levelLcd.display)
self.board.linesRemovedChanged.connect(linesLcd.display)
layout = QtGui.QGridLayout()
layout.addWidget(self.createLabel("NEXT"), 0, 0)
layout.addWidget(nextPieceLabel, 1, 0)
layout.addWidget(self.createLabel("LEVEL"), 2, 0)
layout.addWidget(levelLcd, 3, 0)
layout.addWidget(startButton, 4, 0)
layout.addWidget(self.board, 0, 1, 6, 1)
layout.addWidget(self.createLabel("SCORE"), 0, 2)
layout.addWidget(scoreLcd, 1, 2)
layout.addWidget(self.createLabel("LINES REMOVED"), 2, 2)
layout.addWidget(linesLcd, 3, 2)
layout.addWidget(quitButton, 4, 2)
layout.addWidget(pauseButton, 5, 2)
self.setLayout(layout)
self.setWindowTitle("Tetrix")
self.resize(550, 370)
def createLabel(self, text):
lbl = QtGui.QLabel(text)
lbl.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignBottom)
return lbl
class TetrixBoard(QtGui.QFrame):
BoardWidth = 10
BoardHeight = 22
scoreChanged = QtCore.pyqtSignal(int)
levelChanged = QtCore.pyqtSignal(int)
linesRemovedChanged = QtCore.pyqtSignal(int)
def __init__(self, parent=None):
super(TetrixBoard, self).__init__(parent)
self.timer = QtCore.QBasicTimer()
self.nextPieceLabel = None
self.isWaitingAfterLine = False
self.curPiece = TetrixPiece()
self.nextPiece = TetrixPiece()
self.curX = 0
self.curY = 0
self.numLinesRemoved = 0
self.numPiecesDropped = 0
self.score = 0
self.level = 0
self.board = None
self.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Sunken)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
self.isStarted = False
self.isPaused = False
self.clearBoard()
self.nextPiece.setRandomShape()
def shapeAt(self, x, y):
return self.board[(y * TetrixBoard.BoardWidth) + x]
def setShapeAt(self, x, y, shape):
self.board[(y * TetrixBoard.BoardWidth) + x] = shape
def timeoutTime(self):
return 1000 / (1 + self.level)
def squareWidth(self):
return self.contentsRect().width() / TetrixBoard.BoardWidth
def squareHeight(self):
return self.contentsRect().height() / TetrixBoard.BoardHeight
def setNextPieceLabel(self, label):
self.nextPieceLabel = label
def sizeHint(self):
return QtCore.QSize(TetrixBoard.BoardWidth * 15 + self.frameWidth() * 2,
TetrixBoard.BoardHeight * 15 + self.frameWidth() * 2)
def minimumSizeHint(self):
return QtCore.QSize(TetrixBoard.BoardWidth * 5 + self.frameWidth() * 2,
TetrixBoard.BoardHeight * 5 + self.frameWidth() * 2)
def start(self):
if self.isPaused:
return
self.isStarted = True
self.isWaitingAfterLine = False
self.numLinesRemoved = 0
self.numPiecesDropped = 0
self.score = 0
self.level = 1
self.clearBoard()
self.linesRemovedChanged.emit(self.numLinesRemoved)
self.scoreChanged.emit(self.score)
self.levelChanged.emit(self.level)
self.newPiece()
self.timer.start(self.timeoutTime(), self)
def pause(self):
if not self.isStarted:
return
self.isPaused = not self.isPaused
if self.isPaused:
self.timer.stop()
else:
self.timer.start(self.timeoutTime(), self)
self.update()
def paintEvent(self, event):
super(TetrixBoard, self).paintEvent(event)
painter = QtGui.QPainter(self)
rect = self.contentsRect()
if self.isPaused:
painter.drawText(rect, QtCore.Qt.AlignCenter, "Pause")
return
boardTop = rect.bottom() - TetrixBoard.BoardHeight * self.squareHeight()
for i in range(TetrixBoard.BoardHeight):
for j in range(TetrixBoard.BoardWidth):
shape = self.shapeAt(j, TetrixBoard.BoardHeight - i - 1)
if shape != NoShape:
self.drawSquare(painter,
rect.left() + j * self.squareWidth(),
boardTop + i * self.squareHeight(), shape)
if self.curPiece.shape() != NoShape:
for i in range(4):
x = self.curX + self.curPiece.x(i)
y = self.curY - self.curPiece.y(i)
self.drawSquare(painter, rect.left() + x * self.squareWidth(),
boardTop + (TetrixBoard.BoardHeight - y - 1) * self.squareHeight(),
self.curPiece.shape())
def keyPressEvent(self, event):
if not self.isStarted or self.isPaused or self.curPiece.shape() == NoShape:
super(TetrixBoard, self).keyPressEvent(event)
return
key = event.key()
if key == QtCore.Qt.Key_Left:
self.tryMove(self.curPiece, self.curX - 1, self.curY)
elif key == QtCore.Qt.Key_Right:
self.tryMove(self.curPiece, self.curX + 1, self.curY)
elif key == QtCore.Qt.Key_Down:
self.tryMove(self.curPiece.rotatedRight(), self.curX, self.curY)
elif key == QtCore.Qt.Key_Up:
self.tryMove(self.curPiece.rotatedLeft(), self.curX, self.curY)
elif key == QtCore.Qt.Key_Space:
self.dropDown()
elif key == QtCore.Qt.Key_D:
self.oneLineDown()
else:
super(TetrixBoard, self).keyPressEvent(event)
def timerEvent(self, event):
if event.timerId() == self.timer.timerId():
if self.isWaitingAfterLine:
self.isWaitingAfterLine = False
self.newPiece()
self.timer.start(self.timeoutTime(), self)
else:
self.oneLineDown()
else:
super(TetrixBoard, self).timerEvent(event)
def clearBoard(self):
self.board = [NoShape for i in range(TetrixBoard.BoardHeight * TetrixBoard.BoardWidth)]
def dropDown(self):
dropHeight = 0
newY = self.curY
while newY > 0:
if not self.tryMove(self.curPiece, self.curX, newY - 1):
break
newY -= 1
dropHeight += 1
self.pieceDropped(dropHeight)
def oneLineDown(self):
if not self.tryMove(self.curPiece, self.curX, self.curY - 1):
self.pieceDropped(0)
def pieceDropped(self, dropHeight):
for i in range(4):
x = self.curX + self.curPiece.x(i)
y = self.curY - self.curPiece.y(i)
self.setShapeAt(x, y, self.curPiece.shape())
self.numPiecesDropped += 1
if self.numPiecesDropped % 25 == 0:
self.level += 1
self.timer.start(self.timeoutTime(), self)
self.levelChanged.emit(self.level)
self.score += dropHeight + 7
self.scoreChanged.emit(self.score)
self.removeFullLines()
if not self.isWaitingAfterLine:
self.newPiece()
def removeFullLines(self):
numFullLines = 0
for i in range(TetrixBoard.BoardHeight - 1, -1, -1):
lineIsFull = True
for j in range(TetrixBoard.BoardWidth):
if self.shapeAt(j, i) == NoShape:
lineIsFull = False
break
if lineIsFull:
numFullLines += 1
                for k in range(i, TetrixBoard.BoardHeight - 1):
for j in range(TetrixBoard.BoardWidth):
self.setShapeAt(j, k, self.shapeAt(j, k + 1))
for j in range(TetrixBoard.BoardWidth):
self.setShapeAt(j, TetrixBoard.BoardHeight - 1, NoShape)
if numFullLines > 0:
self.numLinesRemoved += numFullLines
self.score += 10 * numFullLines
self.linesRemovedChanged.emit(self.numLinesRemoved)
self.scoreChanged.emit(self.score)
self.timer.start(500, self)
self.isWaitingAfterLine = True
self.curPiece.setShape(NoShape)
self.update()
def newPiece(self):
self.curPiece = self.nextPiece
self.nextPiece.setRandomShape()
self.showNextPiece()
self.curX = TetrixBoard.BoardWidth // 2 + 1
self.curY = TetrixBoard.BoardHeight - 1 + self.curPiece.minY()
if not self.tryMove(self.curPiece, self.curX, self.curY):
self.curPiece.setShape(NoShape)
self.timer.stop()
self.isStarted = False
def showNextPiece(self):
if self.nextPieceLabel is None:
return
dx = self.nextPiece.maxX() - self.nextPiece.minX() + 1
dy = self.nextPiece.maxY() - self.nextPiece.minY() + 1
pixmap = QtGui.QPixmap(dx * self.squareWidth(), dy * self.squareHeight())
painter = QtGui.QPainter(pixmap)
painter.fillRect(pixmap.rect(), self.nextPieceLabel.palette().background())
for i in range(4):
x = self.nextPiece.x(i) - self.nextPiece.minX()
y = self.nextPiece.y(i) - self.nextPiece.minY()
self.drawSquare(painter, x * self.squareWidth(),
y * self.squareHeight(), self.nextPiece.shape())
self.nextPieceLabel.setPixmap(pixmap)
def tryMove(self, newPiece, newX, newY):
for i in range(4):
x = newX + newPiece.x(i)
y = newY - newPiece.y(i)
if x < 0 or x >= TetrixBoard.BoardWidth or y < 0 or y >= TetrixBoard.BoardHeight:
return False
if self.shapeAt(x, y) != NoShape:
return False
self.curPiece = newPiece
self.curX = newX
self.curY = newY
self.update()
return True
def drawSquare(self, painter, x, y, shape):
colorTable = [0x000000, 0xCC6666, 0x66CC66, 0x6666CC,
0xCCCC66, 0xCC66CC, 0x66CCCC, 0xDAAA00]
color = QtGui.QColor(colorTable[shape])
painter.fillRect(x + 1, y + 1, self.squareWidth() - 2,
self.squareHeight() - 2, color)
painter.setPen(color.light())
painter.drawLine(x, y + self.squareHeight() - 1, x, y)
painter.drawLine(x, y, x + self.squareWidth() - 1, y)
painter.setPen(color.dark())
painter.drawLine(x + 1, y + self.squareHeight() - 1,
x + self.squareWidth() - 1, y + self.squareHeight() - 1)
painter.drawLine(x + self.squareWidth() - 1,
y + self.squareHeight() - 1, x + self.squareWidth() - 1, y + 1)
class TetrixPiece(object):
coordsTable = (
((0, 0), (0, 0), (0, 0), (0, 0)),
((0, -1), (0, 0), (-1, 0), (-1, 1)),
((0, -1), (0, 0), (1, 0), (1, 1)),
((0, -1), (0, 0), (0, 1), (0, 2)),
((-1, 0), (0, 0), (1, 0), (0, 1)),
((0, 0), (1, 0), (0, 1), (1, 1)),
((-1, -1), (0, -1), (0, 0), (0, 1)),
((1, -1), (0, -1), (0, 0), (0, 1))
)
def __init__(self):
self.coords = [[0,0] for _ in range(4)]
self.pieceShape = NoShape
self.setShape(NoShape)
def shape(self):
return self.pieceShape
def setShape(self, shape):
table = TetrixPiece.coordsTable[shape]
for i in range(4):
for j in range(2):
self.coords[i][j] = table[i][j]
self.pieceShape = shape
def setRandomShape(self):
self.setShape(random.randint(1, 7))
def x(self, index):
return self.coords[index][0]
def y(self, index):
return self.coords[index][1]
def setX(self, index, x):
self.coords[index][0] = x
def setY(self, index, y):
self.coords[index][1] = y
def minX(self):
m = self.coords[0][0]
for i in range(4):
m = min(m, self.coords[i][0])
return m
def maxX(self):
m = self.coords[0][0]
for i in range(4):
m = max(m, self.coords[i][0])
return m
def minY(self):
m = self.coords[0][1]
for i in range(4):
m = min(m, self.coords[i][1])
return m
def maxY(self):
m = self.coords[0][1]
for i in range(4):
m = max(m, self.coords[i][1])
return m
def rotatedLeft(self):
if self.pieceShape == SquareShape:
return self
result = TetrixPiece()
result.pieceShape = self.pieceShape
for i in range(4):
result.setX(i, self.y(i))
result.setY(i, -self.x(i))
return result
def rotatedRight(self):
if self.pieceShape == SquareShape:
return self
result = TetrixPiece()
result.pieceShape = self.pieceShape
for i in range(4):
result.setX(i, -self.y(i))
result.setY(i, self.x(i))
return result
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
window = TetrixWindow()
window.show()
random.seed(None)
sys.exit(app.exec_())
|
minhtuancn/odoo
|
refs/heads/8.0
|
addons/payment_ogone/tests/test_ogone.py
|
430
|
# -*- coding: utf-8 -*-
from lxml import objectify
import time
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment.tests.common import PaymentAcquirerCommon
from openerp.addons.payment_ogone.controllers.main import OgoneController
from openerp.tools import mute_logger
class OgonePayment(PaymentAcquirerCommon):
def setUp(self):
super(OgonePayment, self).setUp()
cr, uid = self.cr, self.uid
self.base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url')
# get the ogone account
model, self.ogone_id = self.registry('ir.model.data').get_object_reference(cr, uid, 'payment_ogone', 'payment_acquirer_ogone')
def test_10_ogone_form_render(self):
cr, uid, context = self.cr, self.uid, {}
# be sure not to do a stupid thing
ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None)
self.assertEqual(ogone.environment, 'test', 'test without test environment')
# ----------------------------------------
# Test: button direct rendering + shasign
# ----------------------------------------
form_values = {
'PSPID': 'dummy',
'ORDERID': 'test_ref0',
'AMOUNT': '1',
'CURRENCY': 'EUR',
'LANGUAGE': 'en_US',
'CN': 'Norbert Buyer',
'EMAIL': 'norbert.buyer@example.com',
'OWNERZIP': '1000',
'OWNERADDRESS': 'Huge Street 2/543',
'OWNERCTY': 'Belgium',
'OWNERTOWN': 'Sin City',
'OWNERTELNO': '0032 12 34 56 78',
'SHASIGN': '815f67b8ff70d234ffcf437c13a9fa7f807044cc',
'ACCEPTURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._accept_url),
'DECLINEURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._decline_url),
'EXCEPTIONURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._exception_url),
'CANCELURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._cancel_url),
}
# render the button
res = self.payment_acquirer.render(
cr, uid, self.ogone_id,
'test_ref0', 0.01, self.currency_euro_id,
partner_id=None,
partner_values=self.buyer_values,
context=context)
# check form result
tree = objectify.fromstring(res)
self.assertEqual(tree.get('action'), 'https://secure.ogone.com/ncol/test/orderstandard.asp', 'ogone: wrong form POST url')
for form_input in tree.input:
if form_input.get('name') in ['submit']:
continue
self.assertEqual(
form_input.get('value'),
form_values[form_input.get('name')],
'ogone: wrong value for input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
)
# ----------------------------------------
# Test2: button using tx + validation
# ----------------------------------------
# create a new draft tx
tx_id = self.payment_transaction.create(
cr, uid, {
'amount': 0.01,
'acquirer_id': self.ogone_id,
'currency_id': self.currency_euro_id,
'reference': 'test_ref0',
'partner_id': self.buyer_id,
}, context=context
)
# render the button
res = self.payment_acquirer.render(
cr, uid, self.ogone_id,
'should_be_erased', 0.01, self.currency_euro,
tx_id=tx_id,
partner_id=None,
partner_values=self.buyer_values,
context=context)
# check form result
tree = objectify.fromstring(res)
self.assertEqual(tree.get('action'), 'https://secure.ogone.com/ncol/test/orderstandard.asp', 'ogone: wrong form POST url')
for form_input in tree.input:
if form_input.get('name') in ['submit']:
continue
self.assertEqual(
form_input.get('value'),
form_values[form_input.get('name')],
'ogone: wrong value for form input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
)
@mute_logger('openerp.addons.payment_ogone.models.ogone', 'ValidationError')
def test_20_ogone_form_management(self):
cr, uid, context = self.cr, self.uid, {}
# be sure not to do a stupid thing
ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None)
self.assertEqual(ogone.environment, 'test', 'test without test environment')
# typical data posted by ogone after client has successfully paid
ogone_post_data = {
'orderID': u'test_ref_2',
'STATUS': u'9',
'CARDNO': u'XXXXXXXXXXXX0002',
'PAYID': u'25381582',
'CN': u'Norbert Buyer',
'NCERROR': u'0',
'TRXDATE': u'11/15/13',
'IP': u'85.201.233.72',
'BRAND': u'VISA',
'ACCEPTANCE': u'test123',
'currency': u'EUR',
'amount': u'1.95',
'SHASIGN': u'7B7B0ED9CBC4A85543A9073374589033A62A05A5',
'ED': u'0315',
'PM': u'CreditCard'
}
# should raise error about unknown tx
with self.assertRaises(ValidationError):
self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
# create tx
tx_id = self.payment_transaction.create(
cr, uid, {
'amount': 1.95,
'acquirer_id': self.ogone_id,
'currency_id': self.currency_euro_id,
'reference': 'test_ref_2',
'partner_name': 'Norbert Buyer',
'partner_country_id': self.country_france_id,
}, context=context
)
# validate it
self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
# check state
tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
self.assertEqual(tx.state, 'done', 'ogone: validation did not put tx into done state')
self.assertEqual(tx.ogone_payid, ogone_post_data.get('PAYID'), 'ogone: validation did not update tx payid')
# reset tx
tx.write({'state': 'draft', 'date_validate': False, 'ogone_payid': False})
# now ogone post is ok: try to modify the SHASIGN
ogone_post_data['SHASIGN'] = 'a4c16bae286317b82edb49188d3399249a784691'
with self.assertRaises(ValidationError):
self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
# simulate an error
ogone_post_data['STATUS'] = 2
ogone_post_data['SHASIGN'] = 'a4c16bae286317b82edb49188d3399249a784691'
self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
# check state
tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
self.assertEqual(tx.state, 'error', 'ogone: erroneous validation did not put tx into error state')
def test_30_ogone_s2s(self):
test_ref = 'test_ref_%.15f' % time.time()
cr, uid, context = self.cr, self.uid, {}
# be sure not to do a stupid thing
ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None)
self.assertEqual(ogone.environment, 'test', 'test without test environment')
# create a new draft tx
tx_id = self.payment_transaction.create(
cr, uid, {
'amount': 0.01,
'acquirer_id': self.ogone_id,
'currency_id': self.currency_euro_id,
'reference': test_ref,
'partner_id': self.buyer_id,
'type': 'server2server',
}, context=context
)
# create an alias
res = self.payment_transaction.ogone_s2s_create_alias(
cr, uid, tx_id, {
'expiry_date_mm': '01',
'expiry_date_yy': '2015',
'holder_name': 'Norbert Poilu',
'number': '4000000000000002',
'brand': 'VISA',
}, context=context)
# check an alias is set, containing at least OPENERP
tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
self.assertIn('OPENERP', tx.partner_reference, 'ogone: wrong partner reference after creating an alias')
res = self.payment_transaction.ogone_s2s_execute(cr, uid, tx_id, {}, context=context)
# print res
# {
# 'orderID': u'reference',
# 'STATUS': u'9',
# 'CARDNO': u'XXXXXXXXXXXX0002',
# 'PAYID': u'24998692',
# 'CN': u'Norbert Poilu',
# 'NCERROR': u'0',
# 'TRXDATE': u'11/05/13',
# 'IP': u'85.201.233.72',
# 'BRAND': u'VISA',
# 'ACCEPTANCE': u'test123',
# 'currency': u'EUR',
# 'amount': u'1.95',
# 'SHASIGN': u'EFDC56879EF7DE72CCF4B397076B5C9A844CB0FA',
# 'ED': u'0314',
# 'PM': u'CreditCard'
# }
|
LukeHoersten/ansible
|
refs/heads/devel
|
lib/ansible/plugins/shell/__init__.py
|
7690
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
notriddle/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/third_party/pytest/testing/logging/test_fixture.py
|
30
|
# -*- coding: utf-8 -*-
import logging
import pytest
logger = logging.getLogger(__name__)
sublogger = logging.getLogger(__name__ + ".baz")
def test_fixture_help(testdir):
result = testdir.runpytest("--fixtures")
result.stdout.fnmatch_lines(["*caplog*"])
def test_change_level(caplog):
caplog.set_level(logging.INFO)
logger.debug("handler DEBUG level")
logger.info("handler INFO level")
caplog.set_level(logging.CRITICAL, logger=sublogger.name)
sublogger.warning("logger WARNING level")
sublogger.critical("logger CRITICAL level")
assert "DEBUG" not in caplog.text
assert "INFO" in caplog.text
assert "WARNING" not in caplog.text
assert "CRITICAL" in caplog.text
def test_change_level_undo(testdir):
"""Ensure that 'set_level' is undone after the end of the test"""
testdir.makepyfile(
"""
import logging
def test1(caplog):
caplog.set_level(logging.INFO)
# using + operator here so fnmatch_lines doesn't match the code in the traceback
logging.info('log from ' + 'test1')
assert 0
def test2(caplog):
# using + operator here so fnmatch_lines doesn't match the code in the traceback
logging.info('log from ' + 'test2')
assert 0
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*log from test1*", "*2 failed in *"])
assert "log from test2" not in result.stdout.str()
def test_with_statement(caplog):
with caplog.at_level(logging.INFO):
logger.debug("handler DEBUG level")
logger.info("handler INFO level")
with caplog.at_level(logging.CRITICAL, logger=sublogger.name):
sublogger.warning("logger WARNING level")
sublogger.critical("logger CRITICAL level")
assert "DEBUG" not in caplog.text
assert "INFO" in caplog.text
assert "WARNING" not in caplog.text
assert "CRITICAL" in caplog.text
def test_log_access(caplog):
caplog.set_level(logging.INFO)
logger.info("boo %s", "arg")
assert caplog.records[0].levelname == "INFO"
assert caplog.records[0].msg == "boo %s"
assert "boo arg" in caplog.text
def test_record_tuples(caplog):
caplog.set_level(logging.INFO)
logger.info("boo %s", "arg")
assert caplog.record_tuples == [(__name__, logging.INFO, "boo arg")]
def test_unicode(caplog):
caplog.set_level(logging.INFO)
logger.info(u"bū")
assert caplog.records[0].levelname == "INFO"
assert caplog.records[0].msg == u"bū"
assert u"bū" in caplog.text
def test_clear(caplog):
caplog.set_level(logging.INFO)
logger.info(u"bū")
assert len(caplog.records)
assert caplog.text
caplog.clear()
assert not len(caplog.records)
assert not caplog.text
@pytest.fixture
def logging_during_setup_and_teardown(caplog):
caplog.set_level("INFO")
logger.info("a_setup_log")
yield
logger.info("a_teardown_log")
assert [x.message for x in caplog.get_records("teardown")] == ["a_teardown_log"]
def test_caplog_captures_for_all_stages(caplog, logging_during_setup_and_teardown):
assert not caplog.records
assert not caplog.get_records("call")
logger.info("a_call_log")
assert [x.message for x in caplog.get_records("call")] == ["a_call_log"]
assert [x.message for x in caplog.get_records("setup")] == ["a_setup_log"]
# This reaches into private API, don't use this type of thing in real tests!
assert set(caplog._item.catch_log_handlers.keys()) == {"setup", "call"}
|
arthurdejong/python-stdnum
|
refs/heads/master
|
stdnum/br/cnpj.py
|
1
|
# cnpj.py - functions for handling CNPJ numbers
# coding: utf-8
#
# Copyright (C) 2015 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""CNPJ (Cadastro Nacional da Pessoa Jurídica, Brazilian company identifier).
Numbers from the national register of legal entities have 14 digits. The
first 8 digits identify the company, the following 4 digits identify a
business unit and the last 2 digits are check digits.
>>> validate('16.727.230/0001-97')
'16727230000197'
>>> validate('16.727.230.0001-98')
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> validate('16.727.230/0001=97') # invalid delimiter
Traceback (most recent call last):
...
InvalidFormat: ...
>>> format('16727230000197')
'16.727.230/0001-97'
"""
from stdnum.exceptions import *
from stdnum.util import clean, isdigits
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
return clean(number, ' -./').strip()
def calc_check_digits(number):
"""Calculate the check digits for the number."""
d1 = (11 - sum(((3 - i) % 8 + 2) * int(n)
for i, n in enumerate(number[:12]))) % 11 % 10
d2 = (11 - sum(((4 - i) % 8 + 2) * int(n)
for i, n in enumerate(number[:12])) -
2 * d1) % 11 % 10
return '%d%d' % (d1, d2)
def validate(number):
"""Check if the number is a valid CNPJ. This checks the length and
whether the check digits are correct."""
number = compact(number)
if not isdigits(number) or int(number) <= 0:
raise InvalidFormat()
if len(number) != 14:
raise InvalidLength()
if calc_check_digits(number) != number[-2:]:
raise InvalidChecksum()
return number
def is_valid(number):
"""Check if the number is a valid CNPJ."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the number to the standard presentation format."""
number = compact(number)
return (number[0:2] + '.' + number[2:5] + '.' + number[5:8] + '/' +
number[8:12] + '-' + number[12:])
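# A minimal usage sketch (not part of the original module), reusing the example
# number from the module docstring; is_valid() and calc_check_digits() are the
# functions defined above.
#
# >>> is_valid('16.727.230/0001-97')
# True
# >>> calc_check_digits('167272300001')  # first 12 digits of the number above
# '97'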
|
mezz64/home-assistant
|
refs/heads/dev
|
tests/components/bond/test_init.py
|
7
|
"""Tests for the Bond module."""
from aiohttp import ClientConnectionError
from homeassistant.components.bond.const import DOMAIN
from homeassistant.config_entries import (
ENTRY_STATE_LOADED,
ENTRY_STATE_NOT_LOADED,
ENTRY_STATE_SETUP_RETRY,
)
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_HOST
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr
from homeassistant.setup import async_setup_component
from .common import patch_bond_version, patch_setup_entry, setup_bond_entity
from tests.common import MockConfigEntry
async def test_async_setup_no_domain_config(hass: HomeAssistant):
"""Test setup without configuration is noop."""
result = await async_setup_component(hass, DOMAIN, {})
assert result is True
async def test_async_setup_raises_entry_not_ready(hass: HomeAssistant):
"""Test that it throws ConfigEntryNotReady when exception occurs during setup."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
config_entry.add_to_hass(hass)
with patch_bond_version(side_effect=ClientConnectionError()):
await hass.config_entries.async_setup(config_entry.entry_id)
assert config_entry.state == ENTRY_STATE_SETUP_RETRY
async def test_async_setup_entry_sets_up_hub_and_supported_domains(hass: HomeAssistant):
"""Test that configuring entry sets up cover domain."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
with patch_bond_version(
return_value={
"bondid": "test-bond-id",
"target": "test-model",
"fw_ver": "test-version",
}
):
with patch_setup_entry(
"cover"
) as mock_cover_async_setup_entry, patch_setup_entry(
"fan"
) as mock_fan_async_setup_entry, patch_setup_entry(
"light"
) as mock_light_async_setup_entry, patch_setup_entry(
"switch"
) as mock_switch_async_setup_entry:
result = await setup_bond_entity(hass, config_entry, patch_device_ids=True)
assert result is True
await hass.async_block_till_done()
assert config_entry.entry_id in hass.data[DOMAIN]
assert config_entry.state == ENTRY_STATE_LOADED
assert config_entry.unique_id == "test-bond-id"
# verify hub device is registered correctly
device_registry = await dr.async_get_registry(hass)
hub = device_registry.async_get_device(
identifiers={(DOMAIN, "test-bond-id")}, connections=set()
)
assert hub.name == "test-bond-id"
assert hub.manufacturer == "Olibra"
assert hub.model == "test-model"
assert hub.sw_version == "test-version"
# verify supported domains are setup
assert len(mock_cover_async_setup_entry.mock_calls) == 1
assert len(mock_fan_async_setup_entry.mock_calls) == 1
assert len(mock_light_async_setup_entry.mock_calls) == 1
assert len(mock_switch_async_setup_entry.mock_calls) == 1
async def test_unload_config_entry(hass: HomeAssistant):
"""Test that configuration entry supports unloading."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
result = await setup_bond_entity(
hass,
config_entry,
patch_version=True,
patch_device_ids=True,
patch_platforms=True,
)
assert result is True
await hass.async_block_till_done()
await hass.config_entries.async_unload(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.entry_id not in hass.data[DOMAIN]
assert config_entry.state == ENTRY_STATE_NOT_LOADED
|
finfish/scrapy
|
refs/heads/master
|
extras/qps-bench-server.py
|
178
|
#!/usr/bin/env python
from __future__ import print_function
from time import time
from collections import deque
from twisted.web.server import Site, NOT_DONE_YET
from twisted.web.resource import Resource
from twisted.internet import reactor
class Root(Resource):
def __init__(self):
Resource.__init__(self)
self.concurrent = 0
self.tail = deque(maxlen=100)
self._reset_stats()
def _reset_stats(self):
self.tail.clear()
self.start = self.lastmark = self.lasttime = time()
def getChild(self, name, request):
return self
def render(self, request):
now = time()
delta = now - self.lasttime
# reset stats on high iter-request times caused by client restarts
if delta > 3: # seconds
self._reset_stats()
return ''
self.tail.appendleft(delta)
self.lasttime = now
self.concurrent += 1
if now - self.lastmark >= 3:
self.lastmark = now
qps = len(self.tail) / sum(self.tail)
print('samplesize={0} concurrent={1} qps={2:0.2f}'.format(len(self.tail), self.concurrent, qps))
if 'latency' in request.args:
latency = float(request.args['latency'][0])
reactor.callLater(latency, self._finish, request)
return NOT_DONE_YET
self.concurrent -= 1
return ''
def _finish(self, request):
self.concurrent -= 1
if not request.finished and not request._disconnected:
request.finish()
root = Root()
factory = Site(root)
reactor.listenTCP(8880, factory)
reactor.run()
|
Grirrane/odoo
|
refs/heads/master
|
openerp/addons/base/module/wizard/base_module_upgrade.py
|
7
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.exceptions import UserError
class base_module_upgrade(osv.osv_memory):
""" Module Upgrade """
_name = "base.module.upgrade"
_description = "Module Upgrade"
_columns = {
'module_info': fields.text('Modules to Update',readonly=True),
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
res = super(base_module_upgrade, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
if view_type != 'form':
return res
context = {} if context is None else context
record_id = context and context.get('active_id', False) or False
active_model = context.get('active_model')
if (not record_id) or (not active_model):
return res
ids = self.get_module_list(cr, uid, context=context)
if not ids:
res['arch'] = '''<form string="Upgrade Completed" version="7.0">
<separator string="Upgrade Completed" colspan="4"/>
<footer>
<button name="config" string="Start Configuration" type="object" class="oe_highlight"/> or
<button special="cancel" string="Close" class="oe_link"/>
</footer>
</form>'''
return res
def get_module_list(self, cr, uid, context=None):
mod_obj = self.pool.get('ir.module.module')
ids = mod_obj.search(cr, uid, [
('state', 'in', ['to upgrade', 'to remove', 'to install'])])
return ids
def default_get(self, cr, uid, fields, context=None):
mod_obj = self.pool.get('ir.module.module')
ids = self.get_module_list(cr, uid, context=context)
res = mod_obj.read(cr, uid, ids, ['name','state'], context)
return {'module_info': '\n'.join(map(lambda x: x['name']+' : '+x['state'], res))}
def upgrade_module_cancel(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.module.module')
to_installed_ids = mod_obj.search(cr, uid, [
('state', 'in', ['to upgrade', 'to remove'])])
if to_installed_ids:
mod_obj.write(cr, uid, to_installed_ids, {'state': 'installed'}, context=context)
to_uninstalled_ids = mod_obj.search(cr, uid, [
('state', '=', 'to install')])
if to_uninstalled_ids:
mod_obj.write(cr, uid, to_uninstalled_ids, {'state': 'uninstalled'}, context=context)
return {'type': 'ir.actions.act_window_close'}
def upgrade_module(self, cr, uid, ids, context=None):
ir_module = self.pool.get('ir.module.module')
# install/upgrade: double-check preconditions
ids = ir_module.search(cr, uid, [('state', 'in', ['to upgrade', 'to install'])])
if ids:
cr.execute("""SELECT d.name FROM ir_module_module m
JOIN ir_module_module_dependency d ON (m.id = d.module_id)
LEFT JOIN ir_module_module m2 ON (d.name = m2.name)
WHERE m.id in %s and (m2.state IS NULL or m2.state IN %s)""",
(tuple(ids), ('uninstalled',)))
unmet_packages = [x[0] for x in cr.fetchall()]
if unmet_packages:
raise UserError(_('Following modules are not installed or unknown: %s') % ('\n\n' + '\n'.join(unmet_packages)))
ir_module.download(cr, uid, ids, context=context)
cr.commit() # save before re-creating cursor below
openerp.api.Environment.reset()
openerp.modules.registry.RegistryManager.new(cr.dbname, update_module=True)
return {'type': 'ir.actions.act_window_close'}
def config(self, cr, uid, ids, context=None):
return self.pool.get('res.config').next(cr, uid, [], context=context)
|
GunoH/intellij-community
|
refs/heads/master
|
python/testData/intentions/PyAnnotateVariableTypeIntentionTest/typeCommentLocalForTargetWithExistingComment_after.py
|
19
|
def func():
for var in 'foo': # type: [str] # comment
var
|
anbangr/trusted-nova
|
refs/heads/master
|
nova/rpc/__init__.py
|
5
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova.openstack.common import cfg
from nova import utils
rpc_backend_opt = cfg.StrOpt('rpc_backend',
default='nova.rpc.impl_kombu',
help="The messaging module to use, defaults to kombu.")
FLAGS = flags.FLAGS
FLAGS.register_opt(rpc_backend_opt)
def create_connection(new=True):
"""Create a connection to the message bus used for rpc.
For some example usage of creating a connection and some consumers on that
connection, see nova.service.
:param new: Whether or not to create a new connection. A new connection
will be created by default. If new is False, the
implementation is free to return an existing connection from a
pool.
:returns: An instance of nova.rpc.common.Connection
"""
return _get_impl().create_connection(new=new)
def call(context, topic, msg, timeout=None):
"""Invoke a remote method that returns something.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
nova.rpc.common.Connection.create_consumer() and only applies
when the consumer was created with fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
:returns: A dict from the remote method.
:raises: nova.rpc.common.Timeout if a complete response is not received
before the timeout is reached.
"""
return _get_impl().call(context, topic, msg, timeout)
def cast(context, topic, msg):
"""Invoke a remote method that does not return anything.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
nova.rpc.common.Connection.create_consumer() and only applies
when the consumer was created with fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().cast(context, topic, msg)
def fanout_cast(context, topic, msg):
"""Broadcast a remote method invocation with no return.
This method will get invoked on all consumers that were set up with this
topic name and fanout=True.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
nova.rpc.common.Connection.create_consumer() and only applies
when the consumer was created with fanout=True.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().fanout_cast(context, topic, msg)
def multicall(context, topic, msg, timeout=None):
"""Invoke a remote method and get back an iterator.
In this case, the remote method will be returning multiple values in
separate messages, so the return values can be processed as they come in via
an iterator.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
nova.rpc.common.Connection.create_consumer() and only applies
when the consumer was created with fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
:returns: An iterator. The iterator will yield a tuple (N, X) where N is
an index that starts at 0 and increases by one for each value
returned and X is the Nth value that was returned by the remote
method.
:raises: nova.rpc.common.Timeout if a complete response is not received
before the timeout is reached.
"""
return _get_impl().multicall(context, topic, msg, timeout)
def notify(context, topic, msg):
"""Send notification event.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the notification to.
:param msg: This is a dict of content of event.
:returns: None
"""
return _get_impl().notify(context, topic, msg)
def cleanup():
"""Clean up resoruces in use by implementation.
Clean up any resources that have been allocated by the RPC implementation.
This is typically open connections to a messaging service. This function
would get called before an application using this API exits to allow
connections to get torn down cleanly.
:returns: None
"""
return _get_impl().cleanup()
def cast_to_server(context, server_params, topic, msg):
"""Invoke a remote method that does not return anything.
:param context: Information that identifies the user that has made this
request.
:param server_params: Connection information
:param topic: The topic to send the notification to.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().cast_to_server(context, server_params, topic, msg)
def fanout_cast_to_server(context, server_params, topic, msg):
"""Broadcast to a remote method invocation with no return.
:param context: Information that identifies the user that has made this
request.
:param server_params: Connection information
:param topic: The topic to send the notification to.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().fanout_cast_to_server(context, server_params, topic,
msg)
_RPCIMPL = None
def _get_impl():
"""Delay import of rpc_backend until FLAGS are loaded."""
global _RPCIMPL
if _RPCIMPL is None:
_RPCIMPL = utils.import_object(FLAGS.rpc_backend)
return _RPCIMPL
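# A minimal usage sketch (not part of the original module): the msg dict follows
# the {"method": ..., "args": ...} form documented in the docstrings above. The
# topic name 'compute' and the method names are illustrative assumptions only.
#
# def example_usage(context):
#     # blocking call: waits for (and returns) the remote method's result
#     result = call(context, 'compute', {'method': 'get_status', 'args': {}})
#     # fire-and-forget cast: no return value is expected
#     cast(context, 'compute', {'method': 'refresh_cache', 'args': {}})
#     return result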
|
antonve/s4-project-mooc
|
refs/heads/master
|
common/test/acceptance/pages/common/logout.py
|
162
|
"""
Logout Page.
"""
from bok_choy.page_object import PageObject
from . import BASE_URL
class LogoutPage(PageObject):
"""
Logout page to logout current logged in user.
"""
url = BASE_URL + "/logout"
def is_browser_on_page(self):
return self.q(css='.cta-login').present
|
ghtmtt/QGIS
|
refs/heads/master
|
tests/src/python/test_qgssvgcache.py
|
31
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsSvgCache.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '(C) 2018 by Nyall Dawson'
__date__ = '29/03/2018'
__copyright__ = 'Copyright 2018, The QGIS Project'
import qgis # NOQA
import os
import socketserver
import threading
import http.server
import time
from qgis.PyQt.QtCore import QDir, QCoreApplication
from qgis.PyQt.QtGui import QColor, QImage, QPainter
from qgis.core import (QgsSvgCache, QgsRenderChecker, QgsApplication, QgsMultiRenderChecker)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
start_app()
TEST_DATA_DIR = unitTestDataPath()
class SlowHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
def do_GET(self):
time.sleep(1)
return http.server.SimpleHTTPRequestHandler.do_GET(self)
class TestQgsSvgCache(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Bring up a simple HTTP server, for remote SVG tests
os.chdir(unitTestDataPath() + '')
handler = SlowHTTPRequestHandler
cls.httpd = socketserver.TCPServer(('localhost', 0), handler)
cls.port = cls.httpd.server_address[1]
cls.httpd_thread = threading.Thread(target=cls.httpd.serve_forever)
cls.httpd_thread.setDaemon(True)
cls.httpd_thread.start()
def setUp(self):
self.report = "<h1>Python QgsSvgCache Tests</h1>\n"
self.fetched = True
QgsApplication.svgCache().remoteSvgFetched.connect(self.svgFetched)
def tearDown(self):
report_file_path = "%s/qgistest.html" % QDir.tempPath()
with open(report_file_path, 'a') as report_file:
report_file.write(self.report)
def svgFetched(self):
self.fetched = True
def waitForFetch(self):
self.fetched = False
while not self.fetched:
QCoreApplication.processEvents()
def testRemoteSVG(self):
"""Test fetching remote svg."""
url = 'http://localhost:{}/qgis_local_server/sample_svg.svg'.format(str(TestQgsSvgCache.port))
image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
strokeWidth=0.1, widthScaleFactor=1)
# first should be waiting image
self.assertTrue(self.imageCheck('Remote SVG', 'waiting_svg', image))
self.waitForFetch()
# second should be correct image
image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
strokeWidth=0.1, widthScaleFactor=1)
self.assertTrue(self.imageCheck('Remote SVG', 'remote_svg', image))
for i in range(1000):
QCoreApplication.processEvents()
def testRemoteSvgAsText(self):
"""Test fetching remote svg with text mime format - e.g. github raw svgs"""
url = 'http://localhost:{}/qgis_local_server/svg_as_text.txt'.format(str(TestQgsSvgCache.port))
image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
strokeWidth=0.1, widthScaleFactor=1)
# first should be waiting image
self.assertTrue(self.imageCheck('Remote SVG as Text', 'waiting_svg', image))
self.waitForFetch()
# second should be correct image
image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
strokeWidth=0.1, widthScaleFactor=1)
self.assertTrue(self.imageCheck('Remote SVG as Text', 'remote_svg', image))
for i in range(1000):
QCoreApplication.processEvents()
def testRemoteSvgBadMime(self):
"""Test fetching remote svg with bad mime type"""
url = 'http://localhost:{}/qgis_local_server/logo.png'.format(str(TestQgsSvgCache.port))
image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
strokeWidth=0.1, widthScaleFactor=1)
# first should be waiting image
self.assertTrue(self.imageCheck('Remote SVG bad MIME type', 'waiting_svg', image))
# second should be correct image
self.waitForFetch()
image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
strokeWidth=0.1, widthScaleFactor=1)
self.assertTrue(self.imageCheck('Remote SVG bad MIME type', 'bad_svg', image))
for i in range(1000):
QCoreApplication.processEvents()
def testRemoteSvgMissing(self):
"""Test fetching remote svg with bad url"""
url = 'http://localhost:{}/qgis_local_server/xxx.svg'.format(str(TestQgsSvgCache.port)) # oooo naughty
image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
strokeWidth=0.1, widthScaleFactor=1)
self.assertTrue(self.imageCheck('Remote SVG missing', 'waiting_svg', image))
for i in range(1000):
QCoreApplication.processEvents()
def testRemoteSVGBlocking(self):
"""Test fetching remote svg."""
# remote not yet requested so not in cache
url = 'http://localhost:{}/qgis_local_server/QGIS_logo_2017.svg'.format(str(TestQgsSvgCache.port))
image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
strokeWidth=0.1, widthScaleFactor=1, blocking=1)
# first should be correct image
self.assertTrue(self.imageCheck('Remote SVG sync', 'remote_svg_blocking', image))
# remote probably in cache
url = 'http://localhost:{}/qgis_local_server/sample_svg.svg'.format(str(TestQgsSvgCache.port))
image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
strokeWidth=0.1, widthScaleFactor=1, blocking=1)
self.assertTrue(self.imageCheck('Remote SVG', 'remote_svg', image))
# missing
url = 'http://localhost:{}/qgis_local_server/xxx.svg'.format(str(TestQgsSvgCache.port)) # oooo naughty
image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
strokeWidth=0.1, widthScaleFactor=1, blocking=1)
self.assertTrue(self.imageCheck('Remote SVG missing', 'waiting_svg', image))
for i in range(1000):
QCoreApplication.processEvents()
def imageCheck(self, name, reference_image, image):
self.report += "<h2>Render {}</h2>\n".format(name)
temp_dir = QDir.tempPath() + '/'
file_name = temp_dir + 'svg_' + name + ".png"
output_image = QImage(image.size(), QImage.Format_RGB32)
QgsMultiRenderChecker.drawBackground(output_image)
painter = QPainter(output_image)
painter.drawImage(0, 0, image)
painter.end()
output_image.save(file_name, "PNG")
checker = QgsRenderChecker()
checker.setControlPathPrefix("svg_cache")
checker.setControlName("expected_" + reference_image)
checker.setRenderedImage(file_name)
checker.setColorTolerance(2)
result = checker.compareImages(name, 20)
self.report += checker.report()
print((self.report))
return result
if __name__ == '__main__':
unittest.main()
|
rtucker-mozilla/inventory
|
refs/heads/master
|
mozdns/srv/models.py
|
2
|
from django.db import models
from mozdns.domain.models import Domain
from mozdns.models import MozdnsRecord
from mozdns.validation import validate_srv_label, validate_srv_port
from mozdns.validation import validate_srv_priority, validate_srv_weight
from mozdns.validation import validate_srv_name
from mozdns.validation import validate_srv_target
import reversion
from gettext import gettext as _
class SRV(MozdnsRecord):
"""
>>> SRV(label=label, domain=domain, target=target, port=port,
... priority=priority, weight=weight, ttl=ttl)
"""
id = models.AutoField(primary_key=True)
label = models.CharField(max_length=63, blank=True, null=True,
validators=[validate_srv_label],
help_text="Short name of the fqdn")
domain = models.ForeignKey(Domain, null=False, help_text="FQDN of the "
"domain after the short hostname. "
"(Ex: <i>Vlan</i>.<i>DC</i>.mozilla.com)")
fqdn = models.CharField(max_length=255, blank=True, null=True,
validators=[validate_srv_name])
# fqdn = label + domain.name <--- see set_fqdn
target = models.CharField(max_length=100,
validators=[validate_srv_target], blank=True,
null=True)
port = models.PositiveIntegerField(null=False,
validators=[validate_srv_port])
priority = models.PositiveIntegerField(null=False,
validators=[validate_srv_priority])
weight = models.PositiveIntegerField(null=False,
validators=[validate_srv_weight])
template = _("{bind_name:$lhs_just} {ttl_} {rdclass:$rdclass_just} "
"{rdtype:$rdtype_just} {priority:$prio_just} "
"{weight:$extra_just} {port:$extra_just} "
"{target:$extra_just}.")
search_fields = ("fqdn", "target")
def details(self):
return (
("FQDN", self.fqdn),
("Record Type", "SRV"),
("Targer", self.target),
("Port", self.port),
("Priority", self.priority),
("Weight", self.weight),
)
class Meta:
db_table = "srv"
unique_together = ("label", "domain", "target", "port")
@classmethod
def get_api_fields(cls):
return super(SRV, cls).get_api_fields() + [
'port', 'weight', 'priority', 'target']
@property
def rdtype(self):
return 'SRV'
reversion.register(SRV)
|
devoid/nova
|
refs/heads/sheepdog-nova-support
|
nova/tests/integrated/v3/test_pci.py
|
2
|
# Copyright 2013 Intel.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova.openstack.common import jsonutils
from nova.tests.integrated.v3 import api_sample_base
from nova.tests.integrated.v3 import test_servers
class ExtendedServerPciSampleJsonTest(test_servers.ServersSampleBase):
extension_name = "os-pci"
def test_show(self):
uuid = self._post_server()
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('server-get-resp', subs, response, 200)
def test_detail(self):
self._post_server()
response = self._do_get('servers/detail')
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('servers-detail-resp', subs, response, 200)
class ExtendedHyervisorPciSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
extra_extensions_to_load = ['os-hypervisors']
extension_name = 'os-pci'
def setUp(self):
super(ExtendedHyervisorPciSampleJsonTest, self).setUp()
self.fake_compute_node = {"cpu_info": "?",
"current_workload": 0,
"disk_available_least": 0,
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
"hypervisor_version": 1,
"id": 1,
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"service": {"host": '043b3cacf6f34c90a724'
'5151fc8ebcda'},
"vcpus": 1,
"vcpus_used": 0,
"service_id": 2,
"pci_stats": [
{"count": 5,
"vendor_id": "8086",
"product_id": "1520",
"keya": "valuea",
"extra_info": {
"phys_function": '[["0x0000", '
'"0x04", "0x00",'
' "0x1"]]',
"key1": "value1"}}]}
def test_pci_show(self):
def fake_compute_node_get(context, id):
self.fake_compute_node['pci_stats'] = jsonutils.dumps(
self.fake_compute_node['pci_stats'])
return self.fake_compute_node
self.stubs.Set(db, 'compute_node_get', fake_compute_node_get)
hypervisor_id = 1
response = self._do_get('os-hypervisors/%s' % hypervisor_id)
subs = {
'hypervisor_id': hypervisor_id,
}
subs.update(self._get_regexes())
self._verify_response('hypervisors-pci-show-resp',
subs, response, 200)
def test_pci_detail(self):
def fake_compute_node_get_all(context):
self.fake_compute_node['pci_stats'] = jsonutils.dumps(
self.fake_compute_node['pci_stats'])
return [self.fake_compute_node]
self.stubs.Set(db, 'compute_node_get_all', fake_compute_node_get_all)
hypervisor_id = 1
subs = {
'hypervisor_id': hypervisor_id
}
response = self._do_get('os-hypervisors/detail')
subs.update(self._get_regexes())
self._verify_response('hypervisors-pci-detail-resp',
subs, response, 200)
|
victorbriz/rethinkdb
|
refs/heads/next
|
external/v8_3.30.33.16/build/gyp/test/mac/gyptest-postbuild-copy-bundle.py
|
172
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that a postbuild copying a dependent framework into an app bundle is
rerun if the resources in the framework change.
"""
import TestGyp
import os.path
import sys
if sys.platform == 'darwin':
# TODO(thakis): Make this pass with the make generator, http://crbug.com/95529
test = TestGyp.TestGyp(formats=['ninja', 'xcode'])
CHDIR = 'postbuild-copy-bundle'
test.run_gyp('test.gyp', chdir=CHDIR)
app_bundle_dir = test.built_file_path('Test app.app', chdir=CHDIR)
bundled_framework_dir = os.path.join(
app_bundle_dir, 'Contents', 'My Framework.framework', 'Resources')
final_plist_path = os.path.join(bundled_framework_dir, 'Info.plist')
final_resource_path = os.path.join(bundled_framework_dir, 'resource_file.sb')
final_copies_path = os.path.join(
app_bundle_dir, 'Contents', 'My Framework.framework', 'Versions', 'A',
'Libraries', 'copied.txt')
# Check that the dependency was built and copied into the app bundle:
test.build('test.gyp', 'test_app', chdir=CHDIR)
test.must_exist(final_resource_path)
test.must_match(final_resource_path,
'This is included in the framework bundle.\n')
test.must_exist(final_plist_path)
test.must_contain(final_plist_path, '''\
\t<key>RandomKey</key>
\t<string>RandomValue</string>''')
# Touch the dependency's bundle resource, and check that the modification
# makes it all the way into the app bundle:
test.sleep()
test.write('postbuild-copy-bundle/resource_file.sb', 'New text\n')
test.build('test.gyp', 'test_app', chdir=CHDIR)
test.must_exist(final_resource_path)
test.must_match(final_resource_path, 'New text\n')
# Check the same for the plist file.
test.sleep()
contents = test.read('postbuild-copy-bundle/Framework-Info.plist')
contents = contents.replace('RandomValue', 'NewRandomValue')
test.write('postbuild-copy-bundle/Framework-Info.plist', contents)
test.build('test.gyp', 'test_app', chdir=CHDIR)
test.must_exist(final_plist_path)
test.must_contain(final_plist_path, '''\
\t<key>RandomKey</key>
\t<string>NewRandomValue</string>''')
# Check the same for the copies section, test for http://crbug.com/157077
test.sleep()
contents = test.read('postbuild-copy-bundle/copied.txt')
contents = contents.replace('old', 'new')
test.write('postbuild-copy-bundle/copied.txt', contents)
test.build('test.gyp', 'test_app', chdir=CHDIR)
test.must_exist(final_copies_path)
test.must_contain(final_copies_path, 'new copied file')
test.pass_test()
|
giruenf/GRIPy
|
refs/heads/master
|
algo/KusterToksoz.py
|
1
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 19 14:25:07 2013
@author: fsantos2
"""
import numpy as np
__all__ = ['T', 'F', 'Kd', 'Gd']
def T(K, G, a): # Tiijj/3
R = 1.0 / (K / G + 4.0 / 3.0)
C0 = 1.0 - a * a
t = a * np.arccos(a) / C0 / np.sqrt(C0) - a * a / (C0)
f = a * a * (3.0 * t - 2.0) / C0
C1 = (R - 1.0) * (f + t)
C2 = (R - 1.0) * (f - t)
F1 = 1.0 - 4.0 * R / 3.0 + R * t + 1.5 * C1
F2 = R * (t * t * (4.0 * R - 3.0) + 2.0 * C2)
return F1 / F2
def F(K, G, a): # (Tijij - Tiijj/3)/5
R = 1.0 / (K / G + 4.0 / 3.0)
C0 = 1.0 - a * a
t = a * np.arccos(a) / C0 / np.sqrt(C0) - a * a / (C0)
f = a * a * (3.0 * t - 2.0) / C0
C1 = (R - 1.0) * (f + t)
C2 = (R - 1.0) * (f - t)
F2 = R * (t * t * (4.0 * R - 3.0) + 2.0 * C2)
F3 = 0.5 * t - C1
F4 = 1.0 - t + 0.25 * C2
F5 = 4.0 * R / 3.0 - t - C1
F6 = t + C1
F7 = 2.0 + t * (2.0 * R - 3.0) + 0.75 * C2
F8 = (1.0 - t) * (2.0 * R - 1.0) - 0.5 * C1
F9 = t - C2
return (2.0 / F3 + 1.0 / F4 + (F4 * F5 + F6 * F7 - F8 * F9) / F2 / F4) / 5.0
def T2(K, G, a, Ki=0.0, Gi=0.0):
A = Gi / G - 1.0
B = (Ki / K - Gi / G) / 3.0
R = 1.0 / (K / G + 4.0 / 3.0)
C1 = 1.0 - a * a
t = a * np.arccos(a) / C1 / np.sqrt(C1) - a * a / (C1)
f = a * a * (3.0 * t - 2.0) / C1
F1 = 1.0 + A * (1.5 * (f + t) - R * (1.5 * f + 2.5 * t - 4.0 / 3.0))
F2 = 1.0 + A * (1.0 + 1.5 * (f + t) - R * (1.5 * f + 2.5 * t)) + B * (3.0 - 4.0 * R) \
+ 0.5 * A * (A + 3.0 * B) * (3.0 - 4.0 * R) * (f + t - R * (f - t + 2.0 * t * t))
return F1 / F2
def F2(K, G, a, Ki=0.0, Gi=0.0):
A = Gi / G - 1.0
B = (Ki / K - Gi / G) / 3.0
R = 1.0 / (K / G + 4.0 / 3.0)
C1 = 1.0 - a * a
t = a * np.arccos(a) / C1 / np.sqrt(C1) - a * a / (C1)
f = a * a * (3.0 * t - 2.0) / C1
F2 = 1.0 + A * (1.0 + 1.5 * (f + t) - R * (1.5 * f + 2.5 * t)) + B * (3.0 - 4.0 * R) \
+ 0.5 * A * (A + 3.0 * B) * (3.0 - 4.0 * R) * (f + t - R * (f - t + 2.0 * t * t))
F3 = 1.0 + A * (1.0 - (f + 1.5 * t) + R * (f + t))
F4 = 1.0 + 0.25 * A * (f + 3.0 * t - R * (f - t))
F5 = A * (-f + R * (f + t - 4.0 / 3.0)) + B * t * (3.0 - 4.0 * R)
F6 = 1.0 + A * (1.0 + f - R * (f + t)) + B * (1.0 - t) * (3.0 - 4.0 * R)
F7 = 2.0 + 0.25 * A * (3.0 * f + 9.0 * t - R * (3.0 * f + 5.0 * t)) + B * t * (3.0 - 4.0 * R)
F8 = A * (1.0 - 2.0 * R + 0.5 * f * (R - 1.0) + 0.5 * t * (5.0 * R - 3.0)) \
+ B * (1.0 - t) * (3.0 - 4 * R)
F9 = A * ((R - 1.0) * f - R * t) + B * t * (3.0 - 4.0 * R)
return (2.0 / F3 + 1.0 / F4 + (F4 * F5 + F6 * F7 - F8 * F9) / F2 / F4) / 5.0
def Kd(Km, Gm, a, phi):
p = phi * T(Km, Gm, a)
C1 = 3.0 * Km + 4.0 * Gm
return Km * (C1 - 4.0 * Gm * p) / (C1 + 3.0 * Km * p)
def Gd(Km, Gm, a, phi):
q = phi * F(Km, Gm, a)
C1 = 3.0 * Km + 4.0 * Gm
return Gm * (1.0 - C1 * q / (C1 + 1.2 * (Km + 2.0 * Gm) * q))
# def Kd2(Km, Gm, a, phi):
# p2=phi*T2(Km, Gm, a)
# C1 =
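# A minimal usage sketch (not part of the original module): Kd()/Gd() above give
# the Kuster-Toksoz dry-rock bulk and shear moduli for a matrix with moduli
# Km, Gm containing inclusions of aspect ratio a at porosity phi. The numbers
# below are illustrative assumptions, not reference values.
#
# if __name__ == '__main__':
#     Km, Gm = 37.0, 44.0   # matrix moduli, e.g. in GPa (assumed)
#     a, phi = 0.1, 0.05    # inclusion aspect ratio and porosity (assumed)
#     print(Kd(Km, Gm, a, phi), Gd(Km, Gm, a, phi))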
|
rmfitzpatrick/pytest
|
refs/heads/master
|
_pytest/debugging.py
|
15
|
""" interactive debugging with PDB, the Python Debugger. """
from __future__ import absolute_import, division, print_function
import pdb
import sys
def pytest_addoption(parser):
group = parser.getgroup("general")
group._addoption(
'--pdb', dest="usepdb", action="store_true",
help="start the interactive Python debugger on errors.")
group._addoption(
'--pdbcls', dest="usepdb_cls", metavar="modulename:classname",
help="start a custom interactive Python debugger on errors. "
"For example: --pdbcls=IPython.terminal.debugger:TerminalPdb")
def pytest_configure(config):
if config.getvalue("usepdb_cls"):
modname, classname = config.getvalue("usepdb_cls").split(":")
__import__(modname)
pdb_cls = getattr(sys.modules[modname], classname)
else:
pdb_cls = pdb.Pdb
if config.getvalue("usepdb"):
config.pluginmanager.register(PdbInvoke(), 'pdbinvoke')
old = (pdb.set_trace, pytestPDB._pluginmanager)
def fin():
pdb.set_trace, pytestPDB._pluginmanager = old
pytestPDB._config = None
pytestPDB._pdb_cls = pdb.Pdb
pdb.set_trace = pytestPDB.set_trace
pytestPDB._pluginmanager = config.pluginmanager
pytestPDB._config = config
pytestPDB._pdb_cls = pdb_cls
config._cleanup.append(fin)
class pytestPDB:
""" Pseudo PDB that defers to the real pdb. """
_pluginmanager = None
_config = None
_pdb_cls = pdb.Pdb
@classmethod
def set_trace(cls):
""" invoke PDB set_trace debugging, dropping any IO capturing. """
import _pytest.config
frame = sys._getframe().f_back
if cls._pluginmanager is not None:
capman = cls._pluginmanager.getplugin("capturemanager")
if capman:
capman.suspend_global_capture(in_=True)
tw = _pytest.config.create_terminal_writer(cls._config)
tw.line()
tw.sep(">", "PDB set_trace (IO-capturing turned off)")
cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config)
cls._pdb_cls().set_trace(frame)
class PdbInvoke:
def pytest_exception_interact(self, node, call, report):
capman = node.config.pluginmanager.getplugin("capturemanager")
if capman:
out, err = capman.suspend_global_capture(in_=True)
sys.stdout.write(out)
sys.stdout.write(err)
_enter_pdb(node, call.excinfo, report)
def pytest_internalerror(self, excrepr, excinfo):
for line in str(excrepr).split("\n"):
sys.stderr.write("INTERNALERROR> %s\n" % line)
sys.stderr.flush()
tb = _postmortem_traceback(excinfo)
post_mortem(tb)
def _enter_pdb(node, excinfo, rep):
# XXX we re-use the TerminalReporter's terminalwriter
# because this seems to avoid some encoding related troubles
# for not completely clear reasons.
tw = node.config.pluginmanager.getplugin("terminalreporter")._tw
tw.line()
tw.sep(">", "traceback")
rep.toterminal(tw)
tw.sep(">", "entering PDB")
tb = _postmortem_traceback(excinfo)
post_mortem(tb)
rep._pdbshown = True
return rep
def _postmortem_traceback(excinfo):
# A doctest.UnexpectedException is not useful for post_mortem.
# Use the underlying exception instead:
from doctest import UnexpectedException
if isinstance(excinfo.value, UnexpectedException):
return excinfo.value.exc_info[2]
else:
return excinfo._excinfo[2]
def _find_last_non_hidden_frame(stack):
i = max(0, len(stack) - 1)
while i and stack[i][0].f_locals.get("__tracebackhide__", False):
i -= 1
return i
def post_mortem(t):
class Pdb(pytestPDB._pdb_cls):
def get_stack(self, f, t):
stack, i = pdb.Pdb.get_stack(self, f, t)
if f is None:
i = _find_last_non_hidden_frame(stack)
return stack, i
p = Pdb()
p.reset()
p.interaction(None, t)
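# A minimal usage sketch (not part of the original module): the options registered
# in pytest_addoption() above are passed on the command line, e.g.
#
#   pytest --pdb
#   pytest --pdbcls=IPython.terminal.debugger:TerminalPdb
#
# pytest_configure() also rebinds pdb.set_trace to pytestPDB.set_trace, so a plain
# pdb.set_trace() inside a test suspends IO capturing before the prompt appears.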
|
gperdomor/dendrite
|
refs/heads/master
|
vendor/src/github.com/apache/thrift/lib/py/src/transport/__init__.py
|
348
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
__all__ = ['TTransport', 'TSocket', 'THttpClient', 'TZlibTransport']
|
flashycud/timestack
|
refs/heads/master
|
django/contrib/comments/signals.py
|
425
|
"""
Signals relating to comments.
"""
from django.dispatch import Signal
# Sent just before a comment will be posted (after it's been approved and
# moderated); this can be used to modify the comment (in place) with posting
# details or other such actions. If any receiver returns False, the comment will
# be discarded and a 403 (not allowed) response returned. This signal is sent at
# more or less the same time (just before, actually) as the Comment object's
# pre-save signal, except that the HTTP request is sent along with this signal.
comment_will_be_posted = Signal(providing_args=["comment", "request"])
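# An illustrative receiver sketch (not part of this module; names below are
# made up). Connecting it would let a project veto comments before saving:
#
#   def block_spam(sender, comment, request, **kwargs):
#       if "spam" in comment.comment.lower():
#           return False  # comment is discarded and a 403 response returned
#
#   comment_will_be_posted.connect(block_spam)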
# Sent just after a comment was posted. See above for how this differs
# from the Comment object's post-save signal.
comment_was_posted = Signal(providing_args=["comment", "request"])
# Sent after a comment was "flagged" in some way. Check the flag to see if this
# was a user requesting removal of a comment, a moderator approving/removing a
# comment, or some other custom user flag.
comment_was_flagged = Signal(providing_args=["comment", "flag", "created", "request"])
|
adedayo/intellij-community
|
refs/heads/master
|
python/lib/Lib/modjy/modjy_impl.py
|
109
|
###
#
# Copyright Alan Kennedy.
#
# You may contact the copyright holder at this uri:
#
# http://www.xhaus.com/contact/modjy
#
# The licence under which this code is released is the Apache License v2.0.
#
# The terms and conditions of this license are listed in a file contained
# in the distribution that also contained this file, under the name
# LICENSE.txt.
#
# You may also read a copy of the license at the following web address.
#
# http://modjy.xhaus.com/LICENSE.txt
#
###
import types
import sys
from modjy_exceptions import *
class modjy_impl:
def deal_with_app_return(self, environ, start_response_callable, app_return):
self.log.debug("Processing app return type: %s" % str(type(app_return)))
if isinstance(app_return, types.StringTypes):
raise ReturnNotIterable("Application returned object that was not an iterable: %s" % str(type(app_return)))
if type(app_return) is types.FileType:
pass # TBD: What to do here? can't call fileno()
if hasattr(app_return, '__len__') and callable(app_return.__len__):
expected_pieces = app_return.__len__()
else:
expected_pieces = -1
try:
try:
ix = 0
for next_piece in app_return:
if not isinstance(next_piece, types.StringTypes):
raise NonStringOutput("Application returned iterable containing non-strings: %s" % str(type(next_piece)))
if ix == 0:
                        # The application may defer calling start_response until its iterable yields the first piece; by now it must have been called
if not start_response_callable.called:
raise StartResponseNotCalled("Start_response callable was never called.")
if not start_response_callable.content_length \
and expected_pieces == 1 \
and start_response_callable.write_callable.num_writes == 0:
# Take the length of the first piece
start_response_callable.set_content_length(len(next_piece))
start_response_callable.write_callable(next_piece)
ix += 1
if ix == expected_pieces:
break
if expected_pieces != -1 and ix != expected_pieces:
raise WrongLength("Iterator len() was wrong. Expected %d pieces: got %d" % (expected_pieces, ix) )
except AttributeError, ax:
if str(ax) == "__getitem__":
raise ReturnNotIterable("Application returned object that was not an iterable: %s" % str(type(app_return)))
else:
raise ax
except TypeError, tx:
raise ReturnNotIterable("Application returned object that was not an iterable: %s" % str(type(app_return)))
except ModjyException, mx:
raise mx
except Exception, x:
raise ApplicationException(x)
finally:
if hasattr(app_return, 'close') and callable(app_return.close):
app_return.close()
def init_impl(self):
self.do_j_env_params()
def add_packages(self, package_list):
packages = [p.strip() for p in package_list.split(';')]
for p in packages:
self.log.info("Adding java package %s to jython" % p)
sys.add_package(p)
def add_classdirs(self, classdir_list):
classdirs = [cd.strip() for cd in classdir_list.split(';')]
for cd in classdirs:
self.log.info("Adding directory %s to jython class file search path" % cd)
sys.add_classdir(cd)
def add_extdirs(self, extdir_list):
extdirs = [ed.strip() for ed in extdir_list.split(';')]
for ed in extdirs:
self.log.info("Adding directory %s for .jars and .zips search path" % ed)
sys.add_extdir(self.expand_relative_path(ed))
def do_j_env_params(self):
if self.params['packages']:
self.add_packages(self.params['packages'])
if self.params['classdirs']:
self.add_classdirs(self.params['classdirs'])
if self.params['extdirs']:
self.add_extdirs(self.params['extdirs'])
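# Illustrative only (not part of modjy): the three parameters handled above are
# semicolon-separated lists, e.g.
#
#   params = {'packages': 'java.util;java.net',
#             'classdirs': '/opt/app/classes',
#             'extdirs': 'lib'}
#
# would register the named Java packages and directories with Jython at startup.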
|
pbhide/net-next-rocker
|
refs/heads/master
|
tools/perf/scripts/python/export-to-postgresql.py
|
617
|
# export-to-postgresql.py: export perf data to a postgresql database
# Copyright (c) 2014, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
import os
import sys
import struct
import datetime
from PySide.QtSql import *
# Need to access PostgreSQL C library directly to use COPY FROM STDIN
from ctypes import *
libpq = CDLL("libpq.so.5")
PQconnectdb = libpq.PQconnectdb
PQconnectdb.restype = c_void_p
PQfinish = libpq.PQfinish
PQstatus = libpq.PQstatus
PQexec = libpq.PQexec
PQexec.restype = c_void_p
PQresultStatus = libpq.PQresultStatus
PQputCopyData = libpq.PQputCopyData
PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
PQputCopyEnd = libpq.PQputCopyEnd
PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
# These perf imports are not used at present
#from perf_trace_context import *
#from Core import *
perf_db_export_mode = True
perf_db_export_calls = False
def usage():
print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>]"
print >> sys.stderr, "where: columns 'all' or 'branches'"
print >> sys.stderr, " calls 'calls' => create calls table"
raise Exception("Too few arguments")
if (len(sys.argv) < 2):
usage()
dbname = sys.argv[1]
if (len(sys.argv) >= 3):
columns = sys.argv[2]
else:
columns = "all"
if columns not in ("all", "branches"):
usage()
branches = (columns == "branches")
if (len(sys.argv) >= 4):
if (sys.argv[3] == "calls"):
perf_db_export_calls = True
else:
usage()
output_dir_name = os.getcwd() + "/" + dbname + "-perf-data"
os.mkdir(output_dir_name)
def do_query(q, s):
if (q.exec_(s)):
return
raise Exception("Query failed: " + q.lastError().text())
print datetime.datetime.today(), "Creating database..."
db = QSqlDatabase.addDatabase('QPSQL')
query = QSqlQuery(db)
db.setDatabaseName('postgres')
db.open()
try:
do_query(query, 'CREATE DATABASE ' + dbname)
except:
os.rmdir(output_dir_name)
raise
query.finish()
query.clear()
db.close()
db.setDatabaseName(dbname)
db.open()
query = QSqlQuery(db)
do_query(query, 'SET client_min_messages TO WARNING')
do_query(query, 'CREATE TABLE selected_events ('
'id bigint NOT NULL,'
'name varchar(80))')
do_query(query, 'CREATE TABLE machines ('
'id bigint NOT NULL,'
'pid integer,'
'root_dir varchar(4096))')
do_query(query, 'CREATE TABLE threads ('
'id bigint NOT NULL,'
'machine_id bigint,'
'process_id bigint,'
'pid integer,'
'tid integer)')
do_query(query, 'CREATE TABLE comms ('
'id bigint NOT NULL,'
'comm varchar(16))')
do_query(query, 'CREATE TABLE comm_threads ('
'id bigint NOT NULL,'
'comm_id bigint,'
'thread_id bigint)')
do_query(query, 'CREATE TABLE dsos ('
'id bigint NOT NULL,'
'machine_id bigint,'
'short_name varchar(256),'
'long_name varchar(4096),'
'build_id varchar(64))')
do_query(query, 'CREATE TABLE symbols ('
'id bigint NOT NULL,'
'dso_id bigint,'
'sym_start bigint,'
'sym_end bigint,'
'binding integer,'
'name varchar(2048))')
do_query(query, 'CREATE TABLE branch_types ('
'id integer NOT NULL,'
'name varchar(80))')
if branches:
do_query(query, 'CREATE TABLE samples ('
'id bigint NOT NULL,'
'evsel_id bigint,'
'machine_id bigint,'
'thread_id bigint,'
'comm_id bigint,'
'dso_id bigint,'
'symbol_id bigint,'
'sym_offset bigint,'
'ip bigint,'
'time bigint,'
'cpu integer,'
'to_dso_id bigint,'
'to_symbol_id bigint,'
'to_sym_offset bigint,'
'to_ip bigint,'
'branch_type integer,'
'in_tx boolean)')
else:
do_query(query, 'CREATE TABLE samples ('
'id bigint NOT NULL,'
'evsel_id bigint,'
'machine_id bigint,'
'thread_id bigint,'
'comm_id bigint,'
'dso_id bigint,'
'symbol_id bigint,'
'sym_offset bigint,'
'ip bigint,'
'time bigint,'
'cpu integer,'
'to_dso_id bigint,'
'to_symbol_id bigint,'
'to_sym_offset bigint,'
'to_ip bigint,'
'period bigint,'
'weight bigint,'
'transaction bigint,'
'data_src bigint,'
'branch_type integer,'
'in_tx boolean)')
if perf_db_export_calls:
do_query(query, 'CREATE TABLE call_paths ('
'id bigint NOT NULL,'
'parent_id bigint,'
'symbol_id bigint,'
'ip bigint)')
do_query(query, 'CREATE TABLE calls ('
'id bigint NOT NULL,'
'thread_id bigint,'
'comm_id bigint,'
'call_path_id bigint,'
'call_time bigint,'
'return_time bigint,'
'branch_count bigint,'
'call_id bigint,'
'return_id bigint,'
'parent_call_path_id bigint,'
'flags integer)')
do_query(query, 'CREATE VIEW samples_view AS '
'SELECT '
'id,'
'time,'
'cpu,'
'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
'(SELECT name FROM selected_events WHERE id = evsel_id) AS event,'
'to_hex(ip) AS ip_hex,'
'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
'sym_offset,'
'(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,'
'to_hex(to_ip) AS to_ip_hex,'
'(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,'
'to_sym_offset,'
'(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
'(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
'in_tx'
' FROM samples')
file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0)
file_trailer = "\377\377"
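# PGCOPY binary format: the header packed above is the 11-byte signature
# "PGCOPY\n\377\r\n\0" followed by a 32-bit flags field and a 32-bit header
# extension length (both zero); the trailer is a single 16-bit word holding -1.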
def open_output_file(file_name):
path_name = output_dir_name + "/" + file_name
file = open(path_name, "w+")
file.write(file_header)
return file
def close_output_file(file):
file.write(file_trailer)
file.close()
def copy_output_file_direct(file, table_name):
close_output_file(file)
sql = "COPY " + table_name + " FROM '" + file.name + "' (FORMAT 'binary')"
do_query(query, sql)
# Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
def copy_output_file(file, table_name):
conn = PQconnectdb("dbname = " + dbname)
if (PQstatus(conn)):
raise Exception("COPY FROM STDIN PQconnectdb failed")
file.write(file_trailer)
file.seek(0)
sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
res = PQexec(conn, sql)
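	# Result status 4 is PGRES_COPY_IN: the server is ready to receive COPY data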
if (PQresultStatus(res) != 4):
raise Exception("COPY FROM STDIN PQexec failed")
data = file.read(65536)
while (len(data)):
ret = PQputCopyData(conn, data, len(data))
if (ret != 1):
raise Exception("COPY FROM STDIN PQputCopyData failed, error " + str(ret))
data = file.read(65536)
ret = PQputCopyEnd(conn, None)
if (ret != 1):
raise Exception("COPY FROM STDIN PQputCopyEnd failed, error " + str(ret))
PQfinish(conn)
def remove_output_file(file):
name = file.name
file.close()
os.unlink(name)
evsel_file = open_output_file("evsel_table.bin")
machine_file = open_output_file("machine_table.bin")
thread_file = open_output_file("thread_table.bin")
comm_file = open_output_file("comm_table.bin")
comm_thread_file = open_output_file("comm_thread_table.bin")
dso_file = open_output_file("dso_table.bin")
symbol_file = open_output_file("symbol_table.bin")
branch_type_file = open_output_file("branch_type_table.bin")
sample_file = open_output_file("sample_table.bin")
if perf_db_export_calls:
call_path_file = open_output_file("call_path_table.bin")
call_file = open_output_file("call_table.bin")
def trace_begin():
print datetime.datetime.today(), "Writing to intermediate files..."
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
machine_table(0, 0, "unknown")
thread_table(0, 0, 0, -1, -1)
comm_table(0, "unknown")
dso_table(0, 0, "unknown", "unknown", "")
symbol_table(0, 0, 0, 0, 0, "unknown")
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls:
call_path_table(0, 0, 0, 0)
unhandled_count = 0
def trace_end():
print datetime.datetime.today(), "Copying to database..."
copy_output_file(evsel_file, "selected_events")
copy_output_file(machine_file, "machines")
copy_output_file(thread_file, "threads")
copy_output_file(comm_file, "comms")
copy_output_file(comm_thread_file, "comm_threads")
copy_output_file(dso_file, "dsos")
copy_output_file(symbol_file, "symbols")
copy_output_file(branch_type_file, "branch_types")
copy_output_file(sample_file, "samples")
if perf_db_export_calls:
copy_output_file(call_path_file, "call_paths")
copy_output_file(call_file, "calls")
print datetime.datetime.today(), "Removing intermediate files..."
remove_output_file(evsel_file)
remove_output_file(machine_file)
remove_output_file(thread_file)
remove_output_file(comm_file)
remove_output_file(comm_thread_file)
remove_output_file(dso_file)
remove_output_file(symbol_file)
remove_output_file(branch_type_file)
remove_output_file(sample_file)
if perf_db_export_calls:
remove_output_file(call_path_file)
remove_output_file(call_file)
os.rmdir(output_dir_name)
print datetime.datetime.today(), "Adding primary keys"
do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE comms ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE comm_threads ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE dsos ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE symbols ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE branch_types ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE samples ADD PRIMARY KEY (id)')
if perf_db_export_calls:
do_query(query, 'ALTER TABLE call_paths ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
print datetime.datetime.today(), "Adding foreign keys"
do_query(query, 'ALTER TABLE threads '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
do_query(query, 'ALTER TABLE comm_threads '
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id)')
do_query(query, 'ALTER TABLE dsos '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id)')
do_query(query, 'ALTER TABLE symbols '
'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id)')
do_query(query, 'ALTER TABLE samples '
'ADD CONSTRAINT evselfk FOREIGN KEY (evsel_id) REFERENCES selected_events (id),'
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id),'
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id),'
'ADD CONSTRAINT todsofk FOREIGN KEY (to_dso_id) REFERENCES dsos (id),'
'ADD CONSTRAINT tosymbolfk FOREIGN KEY (to_symbol_id) REFERENCES symbols (id)')
if perf_db_export_calls:
do_query(query, 'ALTER TABLE call_paths '
'ADD CONSTRAINT parentfk FOREIGN KEY (parent_id) REFERENCES call_paths (id),'
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id)')
do_query(query, 'ALTER TABLE calls '
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT call_pathfk FOREIGN KEY (call_path_id) REFERENCES call_paths (id),'
'ADD CONSTRAINT callfk FOREIGN KEY (call_id) REFERENCES samples (id),'
'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),'
'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)')
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
if (unhandled_count):
print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
print datetime.datetime.today(), "Done"
def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
unhandled_count += 1
def sched__sched_switch(*x):
pass
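# Each *_table function below writes one row in PostgreSQL binary COPY format:
# a 16-bit field count, then for every field a 32-bit byte length followed by
# the field data itself (hence struct formats such as "!hiqi" + str(n) + "s").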
def evsel_table(evsel_id, evsel_name, *x):
n = len(evsel_name)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
evsel_file.write(value)
def machine_table(machine_id, pid, root_dir, *x):
n = len(root_dir)
fmt = "!hiqiii" + str(n) + "s"
value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
machine_file.write(value)
def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
value = struct.pack("!hiqiqiqiiii", 5, 8, thread_id, 8, machine_id, 8, process_id, 4, pid, 4, tid)
thread_file.write(value)
def comm_table(comm_id, comm_str, *x):
n = len(comm_str)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, comm_id, n, comm_str)
comm_file.write(value)
def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
fmt = "!hiqiqiq"
value = struct.pack(fmt, 3, 8, comm_thread_id, 8, comm_id, 8, thread_id)
comm_thread_file.write(value)
def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
n1 = len(short_name)
n2 = len(long_name)
n3 = len(build_id)
fmt = "!hiqiqi" + str(n1) + "si" + str(n2) + "si" + str(n3) + "s"
value = struct.pack(fmt, 5, 8, dso_id, 8, machine_id, n1, short_name, n2, long_name, n3, build_id)
dso_file.write(value)
def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
n = len(symbol_name)
fmt = "!hiqiqiqiqiii" + str(n) + "s"
value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
symbol_file.write(value)
def branch_type_table(branch_type, name, *x):
n = len(name)
fmt = "!hiii" + str(n) + "s"
value = struct.pack(fmt, 2, 4, branch_type, n, name)
branch_type_file.write(value)
def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, *x):
if branches:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiB", 17, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx)
else:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiB", 21, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx)
sample_file.write(value)
def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
fmt = "!hiqiqiqiq"
value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip)
call_path_file.write(value)
def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, *x):
fmt = "!hiqiqiqiqiqiqiqiqiqiqii"
value = struct.pack(fmt, 11, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags)
call_file.write(value)
|
mominaripublic/jubileealbum
|
refs/heads/master
|
node_modules/node-gyp/gyp/pylib/gyp/MSVSUserFile.py
|
2710
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
"""Visual Studio XML user user file writer."""
def __init__(self, user_file_path, version, name):
"""Initializes the user file.
Args:
user_file_path: Path to the user file.
version: Version info.
name: Name of the user file.
"""
self.user_file_path = user_file_path
self.version = version
self.name = name
self.configurations = {}
def AddConfig(self, name):
"""Adds a configuration to the project.
Args:
name: Configuration name.
"""
self.configurations[name] = ['Configuration', {'Name': name}]
def AddDebugSettings(self, config_name, command, environment = {},
working_directory=""):
"""Adds a DebugSettings node to the user file for a particular config.
Args:
command: command line to run. First element in the list is the
executable. All elements of the command will be quoted if
necessary.
      working_directory: directory in which to run the command. (optional)
"""
command = _QuoteWin32CommandLineArgs(command)
abs_command = _FindCommandInPath(command[0])
if environment and isinstance(environment, dict):
env_list = ['%s="%s"' % (key, val)
for (key,val) in environment.iteritems()]
environment = ' '.join(env_list)
else:
environment = ''
n_cmd = ['DebugSettings',
{'Command': abs_command,
'WorkingDirectory': working_directory,
'CommandArguments': " ".join(command[1:]),
'RemoteMachine': socket.gethostname(),
'Environment': environment,
'EnvironmentMerge': 'true',
# Currently these are all "dummy" values that we're just setting
# in the default manner that MSVS does it. We could use some of
# these to add additional capabilities, I suppose, but they might
# not have parity with other platforms then.
'Attach': 'false',
'DebuggerType': '3', # 'auto' debugger
'Remote': '1',
'RemoteCommand': '',
'HttpUrl': '',
'PDBPath': '',
'SQLDebugging': '',
'DebuggerFlavor': '0',
'MPIRunCommand': '',
'MPIRunArguments': '',
'MPIRunWorkingDirectory': '',
'ApplicationCommand': '',
'ApplicationArguments': '',
'ShimCommand': '',
'MPIAcceptMode': '',
'MPIAcceptFilter': ''
}]
# Find the config, and add it if it doesn't exist.
if config_name not in self.configurations:
self.AddConfig(config_name)
# Add the DebugSettings onto the appropriate config.
self.configurations[config_name].append(n_cmd)
def WriteIfChanged(self):
"""Writes the user file."""
configs = ['Configurations']
for config, spec in sorted(self.configurations.iteritems()):
configs.append(spec)
content = ['VisualStudioUserFile',
{'Version': self.version.ProjectVersion(),
'Name': self.name
},
configs]
easy_xml.WriteXmlIfChanged(content, self.user_file_path,
encoding="Windows-1252")
|
slank/ansible
|
refs/heads/devel
|
test/units/playbook/test_conditional.py
|
78
|
from ansible.compat.tests import unittest
from units.mock.loader import DictDataLoader
from ansible.plugins.strategy import SharedPluginLoaderObj
from ansible.template import Templar
from ansible import errors
from ansible.playbook import conditional
class TestConditional(unittest.TestCase):
def setUp(self):
self.loader = DictDataLoader({})
self.cond = conditional.Conditional(loader=self.loader)
self.shared_loader = SharedPluginLoaderObj()
self.templar = Templar(loader=self.loader, variables={})
def _eval_con(self, when=None, variables=None):
when = when or []
variables = variables or {}
self.cond.when = when
ret = self.cond.evaluate_conditional(self.templar, variables)
return ret
def test_false(self):
when = [u"False"]
ret = self._eval_con(when, {})
self.assertFalse(ret)
def test_true(self):
when = [u"True"]
ret = self._eval_con(when, {})
self.assertTrue(ret)
def test_undefined(self):
when = [u"{{ some_undefined_thing }}"]
self.assertRaisesRegexp(errors.AnsibleError, "The conditional check '{{ some_undefined_thing }}' failed",
self._eval_con, when, {})
def test_defined(self):
variables = {'some_defined_thing': True}
when = [u"{{ some_defined_thing }}"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_dict_defined_values(self):
variables = {'dict_value': 1,
'some_defined_dict': {'key1': 'value1',
'key2': '{{ dict_value }}'}}
when = [u"some_defined_dict"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_dict_defined_values_is_defined(self):
variables = {'dict_value': 1,
'some_defined_dict': {'key1': 'value1',
'key2': '{{ dict_value }}'}}
when = [u"some_defined_dict.key1 is defined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_dict_defined_multiple_values_is_defined(self):
variables = {'dict_value': 1,
'some_defined_dict': {'key1': 'value1',
'key2': '{{ dict_value }}'}}
when = [u"some_defined_dict.key1 is defined",
u"some_defined_dict.key2 is not undefined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_dict_undefined_values(self):
variables = {'dict_value': 1,
'some_defined_dict_with_undefined_values': {'key1': 'value1',
'key2': '{{ dict_value }}',
'key3': '{{ undefined_dict_value }}'
}}
when = [u"some_defined_dict_with_undefined_values is defined"]
self.assertRaisesRegexp(errors.AnsibleError,
"The conditional check 'some_defined_dict_with_undefined_values is defined' failed.",
self._eval_con,
when, variables)
def test_nested_hostvars_undefined_values(self):
variables = {'dict_value': 1,
'hostvars': {'host1': {'key1': 'value1',
'key2': '{{ dict_value }}'},
'host2': '{{ dict_value }}',
'host3': '{{ undefined_dict_value }}',
# no host4
},
'some_dict': {'some_dict_key1': '{{ hostvars["host3"] }}'}
}
when = [u"some_dict.some_dict_key1 == hostvars['host3']"]
#self._eval_con(when, variables)
self.assertRaisesRegexp(errors.AnsibleError,
"The conditional check 'some_dict.some_dict_key1 == hostvars\['host3'\]' failed",
#"The conditional check 'some_dict.some_dict_key1 == hostvars['host3']' failed",
#"The conditional check 'some_dict.some_dict_key1 == hostvars['host3']' failed.",
self._eval_con,
when, variables)
def test_dict_undefined_values_bare(self):
variables = {'dict_value': 1,
'some_defined_dict_with_undefined_values': {'key1': 'value1',
'key2': '{{ dict_value }}',
'key3': '{{ undefined_dict_value }}'
}}
# raises an exception when a non-string conditional is passed to extract_defined_undefined()
when = [u"some_defined_dict_with_undefined_values"]
self.assertRaisesRegexp(errors.AnsibleError,
"The conditional check 'some_defined_dict_with_undefined_values' failed.",
self._eval_con,
when, variables)
def test_dict_undefined_values_is_defined(self):
variables = {'dict_value': 1,
'some_defined_dict_with_undefined_values': {'key1': 'value1',
'key2': '{{ dict_value }}',
'key3': '{{ undefined_dict_value }}'
}}
when = [u"some_defined_dict_with_undefined_values is defined"]
self.assertRaisesRegexp(errors.AnsibleError,
"The conditional check 'some_defined_dict_with_undefined_values is defined' failed.",
self._eval_con,
when, variables)
def test_is_defined(self):
variables = {'some_defined_thing': True}
when = [u"some_defined_thing is defined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_is_undefined(self):
variables = {'some_defined_thing': True}
when = [u"some_defined_thing is undefined"]
ret = self._eval_con(when, variables)
self.assertFalse(ret)
def test_is_undefined_and_defined(self):
variables = {'some_defined_thing': True}
when = [u"some_defined_thing is undefined", u"some_defined_thing is defined"]
ret = self._eval_con(when, variables)
self.assertFalse(ret)
def test_is_undefined_and_defined_reversed(self):
variables = {'some_defined_thing': True}
when = [u"some_defined_thing is defined", u"some_defined_thing is undefined"]
ret = self._eval_con(when, variables)
self.assertFalse(ret)
def test_is_not_undefined(self):
variables = {'some_defined_thing': True}
when = [u"some_defined_thing is not undefined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_is_not_defined(self):
variables = {'some_defined_thing': True}
when = [u"some_undefined_thing is not defined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_is_hostvars_quotes_is_defined(self):
variables = {'hostvars': {'some_host': {}},
'compare_targets_single': "hostvars['some_host']",
'compare_targets_double': 'hostvars["some_host"]',
'compare_targets': {'double': '{{ compare_targets_double }}',
'single': "{{ compare_targets_single }}"},
}
when = [u"hostvars['some_host'] is defined",
u'hostvars["some_host"] is defined',
u"{{ compare_targets.double }} is defined",
u"{{ compare_targets.single }} is defined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_is_hostvars_quotes_is_defined_but_is_not_defined(self):
variables = {'hostvars': {'some_host': {}},
'compare_targets_single': "hostvars['some_host']",
'compare_targets_double': 'hostvars["some_host"]',
'compare_targets': {'double': '{{ compare_targets_double }}',
'single': "{{ compare_targets_single }}"},
}
when = [u"hostvars['some_host'] is defined",
u'hostvars["some_host"] is defined',
u"{{ compare_targets.triple }} is defined",
u"{{ compare_targets.quadruple }} is defined"]
self.assertRaisesRegexp(errors.AnsibleError,
"The conditional check '{{ compare_targets.triple }} is defined' failed",
self._eval_con,
when, variables)
def test_is_hostvars_host_is_defined(self):
variables = {'hostvars': {'some_host': {}, }}
when = [u"hostvars['some_host'] is defined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_is_hostvars_host_undefined_is_defined(self):
variables = {'hostvars': {'some_host': {}, }}
when = [u"hostvars['some_undefined_host'] is defined"]
ret = self._eval_con(when, variables)
self.assertFalse(ret)
def test_is_hostvars_host_undefined_is_undefined(self):
variables = {'hostvars': {'some_host': {}, }}
when = [u"hostvars['some_undefined_host'] is undefined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_is_hostvars_host_undefined_is_not_defined(self):
variables = {'hostvars': {'some_host': {}, }}
when = [u"hostvars['some_undefined_host'] is not defined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
|
chouseknecht/openshift-restclient-python
|
refs/heads/master
|
openshift/ansiblegen/cli.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import argparse
import logging
import sys
from logging import config
from .. import __version__
from ..helper.exceptions import KubernetesException
from .docstrings import KubernetesDocStrings, OpenShiftDocStrings
from .modules import Modules
logger = logging.getLogger(__name__)
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
},
},
'loggers': {
'openshift.ansiblegen': {
'handlers': ['console'],
'level': 'INFO',
'propagate': False
},
'openshift.helper.ansible': {
'level': 'ERROR',
            'propagate': False
}
},
'root': {
'handlers': ['console'],
'level': 'ERROR'
}
}
AVAILABLE_COMMANDS = {
'help': 'Display this help message',
'version': 'Display version information',
'modules': 'Generates and writes to the filesystem one or more Ansible modules.',
'docstrings': 'Displays Ansible module doc strings for one or more models.',
}
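# Example invocations (illustrative; assumes the console script is installed as
# 'openshift-ansible-gen' and that the model names exist):
#
#   openshift-ansible-gen docstrings deployment_config -v v1
#   openshift-ansible-gen modules -o ./_modules --suppress-stdout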
def subcmd_modules_parser(_, subparser):
subparser.add_argument('models', action='store',
help=u'Optional list of models for which to generate modules. Specify '
u'the model Kind using either CamelCase or snake_case.',
nargs='*', )
subparser.add_argument('-v', '--api-version', action='store',
                           help=u'When specified, modules will only be generated for models '
                                u'that are part of the specified API version.',
dest='api_version', default=None)
subparser.add_argument('-o', '--output-path', action='store',
help=u'Specify a path to a directory where modules will be written. '
u'Defaults to ./_modules. If the directory does not exist, it '
u'will be created.',
dest='output_path', default="./_modules")
subparser.add_argument('-s', '--suppress-stdout', action='store_true',
help=u'Suppress writing progress messages to stdout.',
dest='suppress_stdout', default=False)
def subcmd_docstrings_parser(_, subparser):
subparser.add_argument('models', action='store',
help=u'List of models for which doc strings will be generated. Specify '
u'the model Kind using either CamelCase or snake_case.',
nargs='*', )
subparser.add_argument('-v', '--api-version', action='store',
help=u'API version. Defaults to v1.',
dest='api_version', default='v1')
def subcmd_version_parser(parser, subparser):
pass
def subcmd_help_parser(parser, subparser):
pass
def run_docstrings_cmd(**kwargs):
"""
    Print the DOCUMENTATION and RETURN doc strings to stdout for each requested model
:param kwargs: parser arguments
:return: None
"""
models = kwargs.pop('models')
api_version = kwargs.pop('api_version')
if not models:
raise KubernetesException(
"ERROR: you must provide one or more models for the docstrings command."
)
for model in models:
try:
strings = KubernetesDocStrings(model=model, api_version=api_version)
except KubernetesException:
try:
strings = OpenShiftDocStrings(model=model, api_version=api_version)
except KubernetesException:
raise
print("DOCUMENTATION = '''")
print(strings.documentation)
print("'''\n")
print("RETURN = '''")
print(strings.return_block)
print("'''\n")
def run_modules_cmd(**kwargs):
"""
Generate modules
:param kwargs: parser arguments
:return: None
"""
modules = Modules(**kwargs)
modules.generate_modules()
def commandline():
"""
Entrypoint for openshift-ansible-gen
:return: None
"""
parser = argparse.ArgumentParser(description=u'Uses the OpenShift API models to generate Ansible artifacts')
parser.add_argument('--debug', action='store_true', dest='debug',
help=u'enable debug output', default=False)
subparsers = parser.add_subparsers(title='subcommand', dest='subcommand')
subparsers.required = True
for subcommand in AVAILABLE_COMMANDS:
if globals().get('subcmd_%s_parser' % subcommand):
subparser = subparsers.add_parser(subcommand, help=AVAILABLE_COMMANDS[subcommand])
globals()['subcmd_%s_parser' % subcommand](parser, subparser)
args = parser.parse_args()
if args.debug:
# enable debug output
LOGGING['loggers']['openshift.ansiblegen']['level'] = 'DEBUG'
config.dictConfig(LOGGING)
if args.subcommand == 'help':
parser.print_help()
sys.exit(0)
elif args.subcommand == 'version':
logger.info("{0} version is {1}".format(__name__, __version__))
sys.exit(0)
elif args.subcommand == 'modules':
if args.suppress_stdout:
# disable output
LOGGING['loggers']['openshift.ansiblegen']['level'] = 'CRITICAL'
try:
globals()['run_{}_cmd'.format(args.subcommand)](**vars(args))
except Exception:
raise
sys.exit(0)
|
danielvdao/TheAnimalFarm
|
refs/heads/master
|
venv/lib/python2.7/site-packages/werkzeug/debug/__init__.py
|
310
|
# -*- coding: utf-8 -*-
"""
werkzeug.debug
~~~~~~~~~~~~~~
WSGI application traceback debugger.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import json
import mimetypes
from os.path import join, dirname, basename, isfile
from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
from werkzeug.debug.tbtools import get_current_traceback, render_console_html
from werkzeug.debug.console import Console
from werkzeug.security import gen_salt
#: import this here because it once was documented as being available
#: from this module. In case there are users left ...
from werkzeug.debug.repr import debug_repr
class _ConsoleFrame(object):
"""Helper class so that we can reuse the frame console code for the
standalone console.
"""
def __init__(self, namespace):
self.console = Console(namespace)
self.id = 0
class DebuggedApplication(object):
"""Enables debugging support for a given application::
from werkzeug.debug import DebuggedApplication
from myapp import app
app = DebuggedApplication(app, evalex=True)
The `evalex` keyword argument allows evaluating expressions in a
traceback's frame context.
.. versionadded:: 0.9
The `lodgeit_url` parameter was deprecated.
:param app: the WSGI application to run debugged.
:param evalex: enable exception evaluation feature (interactive
debugging). This requires a non-forking server.
    :param request_key: The key that points to the request object in this
environment. This parameter is ignored in current
versions.
:param console_path: the URL for a general purpose console.
:param console_init_func: the function that is executed before starting
the general purpose console. The return value
is used as initial namespace.
:param show_hidden_frames: by default hidden traceback frames are skipped.
You can show them by setting this parameter
to `True`.
"""
# this class is public
__module__ = 'werkzeug'
def __init__(self, app, evalex=False, request_key='werkzeug.request',
console_path='/console', console_init_func=None,
show_hidden_frames=False, lodgeit_url=None):
if lodgeit_url is not None:
from warnings import warn
warn(DeprecationWarning('Werkzeug now pastes into gists.'))
if not console_init_func:
console_init_func = dict
self.app = app
self.evalex = evalex
self.frames = {}
self.tracebacks = {}
self.request_key = request_key
self.console_path = console_path
self.console_init_func = console_init_func
self.show_hidden_frames = show_hidden_frames
self.secret = gen_salt(20)
def debug_application(self, environ, start_response):
"""Run the application and conserve the traceback frames."""
app_iter = None
try:
app_iter = self.app(environ, start_response)
for item in app_iter:
yield item
if hasattr(app_iter, 'close'):
app_iter.close()
except Exception:
if hasattr(app_iter, 'close'):
app_iter.close()
traceback = get_current_traceback(skip=1, show_hidden_frames=
self.show_hidden_frames,
ignore_system_exceptions=True)
for frame in traceback.frames:
self.frames[frame.id] = frame
self.tracebacks[traceback.id] = traceback
try:
start_response('500 INTERNAL SERVER ERROR', [
('Content-Type', 'text/html; charset=utf-8'),
# Disable Chrome's XSS protection, the debug
# output can cause false-positives.
('X-XSS-Protection', '0'),
])
except Exception:
# if we end up here there has been output but an error
# occurred. in that situation we can do nothing fancy any
# more, better log something into the error log and fall
# back gracefully.
environ['wsgi.errors'].write(
'Debugging middleware caught exception in streamed '
'response at a point where response headers were already '
'sent.\n')
else:
yield traceback.render_full(evalex=self.evalex,
secret=self.secret) \
.encode('utf-8', 'replace')
traceback.log(environ['wsgi.errors'])
def execute_command(self, request, command, frame):
"""Execute a command in a console."""
return Response(frame.console.eval(command), mimetype='text/html')
def display_console(self, request):
"""Display a standalone shell."""
if 0 not in self.frames:
self.frames[0] = _ConsoleFrame(self.console_init_func())
return Response(render_console_html(secret=self.secret),
mimetype='text/html')
def paste_traceback(self, request, traceback):
"""Paste the traceback and return a JSON response."""
rv = traceback.paste()
return Response(json.dumps(rv), mimetype='application/json')
def get_source(self, request, frame):
"""Render the source viewer."""
return Response(frame.render_source(), mimetype='text/html')
def get_resource(self, request, filename):
"""Return a static resource from the shared folder."""
filename = join(dirname(__file__), 'shared', basename(filename))
if isfile(filename):
mimetype = mimetypes.guess_type(filename)[0] \
or 'application/octet-stream'
f = open(filename, 'rb')
try:
return Response(f.read(), mimetype=mimetype)
finally:
f.close()
return Response('Not Found', status=404)
def __call__(self, environ, start_response):
"""Dispatch the requests."""
# important: don't ever access a function here that reads the incoming
# form data! Otherwise the application won't have access to that data
# any more!
request = Request(environ)
response = self.debug_application
if request.args.get('__debugger__') == 'yes':
cmd = request.args.get('cmd')
arg = request.args.get('f')
secret = request.args.get('s')
traceback = self.tracebacks.get(request.args.get('tb', type=int))
frame = self.frames.get(request.args.get('frm', type=int))
if cmd == 'resource' and arg:
response = self.get_resource(request, arg)
elif cmd == 'paste' and traceback is not None and \
secret == self.secret:
response = self.paste_traceback(request, traceback)
elif cmd == 'source' and frame and self.secret == secret:
response = self.get_source(request, frame)
elif self.evalex and cmd is not None and frame is not None and \
self.secret == secret:
response = self.execute_command(request, cmd, frame)
elif self.evalex and self.console_path is not None and \
request.path == self.console_path:
response = self.display_console(request)
return response(environ, start_response)
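# A minimal wiring sketch (illustrative only; 'my_wsgi_app' is a placeholder
# for any WSGI application):
#
#   from werkzeug.serving import run_simple
#   app = DebuggedApplication(my_wsgi_app, evalex=True)
#   run_simple('localhost', 5000, app)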
|
CottageLabs/finance
|
refs/heads/develop
|
migrations/versions/fa0f07475596_create_projects_table.py
|
1
|
"""create projects table
Revision ID: fa0f07475596
Revises: 370e142881f5
Create Date: 2016-03-04 18:48:01.297273
"""
# revision identifiers, used by Alembic.
revision = 'fa0f07475596'
down_revision = '370e142881f5'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('projects',
sa.Column('url', sa.String(length=255), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('contact', sa.String(length=255), nullable=True),
sa.Column('budget', sa.Integer(), nullable=True),
sa.Column('is_ir35', sa.Boolean(), nullable=True),
sa.Column('status', sa.String(length=255), nullable=True),
sa.Column('budget_units', sa.String(length=255), nullable=True),
sa.Column('normal_billing_rate', sa.Numeric(precision=10, scale=2), nullable=True),
sa.Column('hours_per_day', sa.Numeric(precision=10, scale=2), nullable=True),
sa.Column('uses_project_invoice_sequence', sa.Boolean(), nullable=True),
sa.Column('currency', sa.String(length=255), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('url')
)
def downgrade():
op.drop_table('projects')
|
inoshiro/symposion-demo
|
refs/heads/master
|
symposion_project/apps/symposion/email_lists/accepted_speakers.py
|
5
|
def email_list():
from symposion.schedule.models import Presentation
speakers = {}
for presentation in Presentation.objects.select_related("speaker__user"):
for speaker in presentation.speakers():
if speaker is not None and speaker.user is not None:
speakers[speaker.user.email] = {}
return speakers.iteritems()
|
damdam-s/bank-payment
|
refs/heads/8.0
|
account_banking_payment_transfer/model/payment_mode.py
|
11
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>).
# (C) 2011 - 2013 Therp BV (<http://therp.nl>).
# (C) 2014 Akretion (www.akretion.com)
#
# All other contributions are (C) by their respective contributors
#
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class PaymentMode(models.Model):
_inherit = "payment.mode"
transfer_account_id = fields.Many2one(
'account.account', string='Transfer account',
domain=[('type', '=', 'other'), ('reconcile', '=', True)],
help='Pay off lines in sent orders with a move on this '
'account. You can only select accounts of type regular '
'that are marked for reconciliation')
transfer_journal_id = fields.Many2one(
'account.journal', string='Transfer journal',
help='Journal to write payment entries when confirming '
'a debit order of this mode')
transfer_move_option = fields.Selection([
('date', 'One move per payment date'),
('line', 'One move per payment line'),
], string='Transfer move option', default='date')
|
paetzke/format-sql
|
refs/heads/master
|
tests/data/test_02/test_03_expected.py
|
1
|
def args():
X.objects.raw("""
SELECT
*
FROM
k; """)
|
chand3040/sree_odoo
|
refs/heads/master
|
openerp/addons/website_blog/__init__.py
|
373
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
import models
import wizard
|
balloob/home-assistant
|
refs/heads/dev
|
tests/components/fritzbox/test_climate.py
|
13
|
"""Tests for AVM Fritz!Box climate component."""
from datetime import timedelta
from requests.exceptions import HTTPError
from homeassistant.components.climate.const import (
ATTR_CURRENT_TEMPERATURE,
ATTR_HVAC_MODE,
ATTR_HVAC_MODES,
ATTR_MAX_TEMP,
ATTR_MIN_TEMP,
ATTR_PRESET_MODE,
ATTR_PRESET_MODES,
DOMAIN,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_COMFORT,
PRESET_ECO,
SERVICE_SET_HVAC_MODE,
SERVICE_SET_PRESET_MODE,
SERVICE_SET_TEMPERATURE,
)
from homeassistant.components.fritzbox.const import (
ATTR_STATE_BATTERY_LOW,
ATTR_STATE_DEVICE_LOCKED,
ATTR_STATE_HOLIDAY_MODE,
ATTR_STATE_LOCKED,
ATTR_STATE_SUMMER_MODE,
ATTR_STATE_WINDOW_OPEN,
DOMAIN as FB_DOMAIN,
)
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_TEMPERATURE,
)
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from . import MOCK_CONFIG, FritzDeviceClimateMock
from tests.async_mock import Mock, call
from tests.common import async_fire_time_changed
ENTITY_ID = f"{DOMAIN}.fake_name"
async def setup_fritzbox(hass: HomeAssistantType, config: dict):
"""Set up mock AVM Fritz!Box."""
assert await async_setup_component(hass, FB_DOMAIN, config) is True
await hass.async_block_till_done()
async def test_setup(hass: HomeAssistantType, fritz: Mock):
"""Test setup of platform."""
device = FritzDeviceClimateMock()
fritz().get_devices.return_value = [device]
await setup_fritzbox(hass, MOCK_CONFIG)
state = hass.states.get(ENTITY_ID)
assert state
assert state.attributes[ATTR_BATTERY_LEVEL] == 23
assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 18
assert state.attributes[ATTR_FRIENDLY_NAME] == "fake_name"
assert state.attributes[ATTR_HVAC_MODES] == [HVAC_MODE_HEAT, HVAC_MODE_OFF]
assert state.attributes[ATTR_MAX_TEMP] == 28
assert state.attributes[ATTR_MIN_TEMP] == 8
assert state.attributes[ATTR_PRESET_MODE] is None
assert state.attributes[ATTR_PRESET_MODES] == [PRESET_ECO, PRESET_COMFORT]
assert state.attributes[ATTR_STATE_BATTERY_LOW] is True
assert state.attributes[ATTR_STATE_DEVICE_LOCKED] == "fake_locked_device"
assert state.attributes[ATTR_STATE_HOLIDAY_MODE] == "fake_holiday"
assert state.attributes[ATTR_STATE_LOCKED] == "fake_locked"
assert state.attributes[ATTR_STATE_SUMMER_MODE] == "fake_summer"
assert state.attributes[ATTR_STATE_WINDOW_OPEN] == "fake_window"
assert state.attributes[ATTR_TEMPERATURE] == 19.5
assert state.state == HVAC_MODE_HEAT
async def test_target_temperature_on(hass: HomeAssistantType, fritz: Mock):
"""Test turn device on."""
device = FritzDeviceClimateMock()
fritz().get_devices.return_value = [device]
device.target_temperature = 127.0
await setup_fritzbox(hass, MOCK_CONFIG)
state = hass.states.get(ENTITY_ID)
assert state
assert state.attributes[ATTR_TEMPERATURE] == 30
async def test_target_temperature_off(hass: HomeAssistantType, fritz: Mock):
"""Test turn device on."""
device = FritzDeviceClimateMock()
fritz().get_devices.return_value = [device]
device.target_temperature = 126.5
await setup_fritzbox(hass, MOCK_CONFIG)
state = hass.states.get(ENTITY_ID)
assert state
assert state.attributes[ATTR_TEMPERATURE] == 0
async def test_update(hass: HomeAssistantType, fritz: Mock):
"""Test update with error."""
device = FritzDeviceClimateMock()
fritz().get_devices.return_value = [device]
await setup_fritzbox(hass, MOCK_CONFIG)
state = hass.states.get(ENTITY_ID)
assert state
assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 18
assert state.attributes[ATTR_MAX_TEMP] == 28
assert state.attributes[ATTR_MIN_TEMP] == 8
assert state.attributes[ATTR_TEMPERATURE] == 19.5
device.actual_temperature = 19
device.target_temperature = 20
next_update = dt_util.utcnow() + timedelta(seconds=200)
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert device.update.call_count == 1
assert state
assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 19
assert state.attributes[ATTR_TEMPERATURE] == 20
async def test_update_error(hass: HomeAssistantType, fritz: Mock):
"""Test update with error."""
device = FritzDeviceClimateMock()
device.update.side_effect = HTTPError("Boom")
fritz().get_devices.return_value = [device]
await setup_fritzbox(hass, MOCK_CONFIG)
assert device.update.call_count == 0
assert fritz().login.call_count == 1
next_update = dt_util.utcnow() + timedelta(seconds=200)
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert device.update.call_count == 1
assert fritz().login.call_count == 2
async def test_set_temperature_temperature(hass: HomeAssistantType, fritz: Mock):
"""Test setting temperature by temperature."""
device = FritzDeviceClimateMock()
fritz().get_devices.return_value = [device]
await setup_fritzbox(hass, MOCK_CONFIG)
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_TEMPERATURE: 123},
True,
)
assert device.set_target_temperature.call_args_list == [call(123)]
async def test_set_temperature_mode_off(hass: HomeAssistantType, fritz: Mock):
"""Test setting temperature by mode."""
device = FritzDeviceClimateMock()
fritz().get_devices.return_value = [device]
await setup_fritzbox(hass, MOCK_CONFIG)
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: ENTITY_ID,
ATTR_HVAC_MODE: HVAC_MODE_OFF,
ATTR_TEMPERATURE: 123,
},
True,
)
assert device.set_target_temperature.call_args_list == [call(0)]
async def test_set_temperature_mode_heat(hass: HomeAssistantType, fritz: Mock):
"""Test setting temperature by mode."""
device = FritzDeviceClimateMock()
fritz().get_devices.return_value = [device]
await setup_fritzbox(hass, MOCK_CONFIG)
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: ENTITY_ID,
ATTR_HVAC_MODE: HVAC_MODE_HEAT,
ATTR_TEMPERATURE: 123,
},
True,
)
assert device.set_target_temperature.call_args_list == [call(22)]
async def test_set_hvac_mode_off(hass: HomeAssistantType, fritz: Mock):
"""Test setting hvac mode."""
device = FritzDeviceClimateMock()
fritz().get_devices.return_value = [device]
await setup_fritzbox(hass, MOCK_CONFIG)
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_HVAC_MODE: HVAC_MODE_OFF},
True,
)
assert device.set_target_temperature.call_args_list == [call(0)]
async def test_set_hvac_mode_heat(hass: HomeAssistantType, fritz: Mock):
"""Test setting hvac mode."""
device = FritzDeviceClimateMock()
fritz().get_devices.return_value = [device]
await setup_fritzbox(hass, MOCK_CONFIG)
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_HVAC_MODE: HVAC_MODE_HEAT},
True,
)
assert device.set_target_temperature.call_args_list == [call(22)]
async def test_set_preset_mode_comfort(hass: HomeAssistantType, fritz: Mock):
"""Test setting preset mode."""
device = FritzDeviceClimateMock()
fritz().get_devices.return_value = [device]
await setup_fritzbox(hass, MOCK_CONFIG)
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_PRESET_MODE: PRESET_COMFORT},
True,
)
assert device.set_target_temperature.call_args_list == [call(22)]
async def test_set_preset_mode_eco(hass: HomeAssistantType, fritz: Mock):
"""Test setting preset mode."""
device = FritzDeviceClimateMock()
fritz().get_devices.return_value = [device]
await setup_fritzbox(hass, MOCK_CONFIG)
assert await hass.services.async_call(
DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_PRESET_MODE: PRESET_ECO},
True,
)
assert device.set_target_temperature.call_args_list == [call(16)]
async def test_preset_mode_update(hass: HomeAssistantType, fritz: Mock):
"""Test preset mode."""
device = FritzDeviceClimateMock()
device.comfort_temperature = 98
device.eco_temperature = 99
fritz().get_devices.return_value = [device]
await setup_fritzbox(hass, MOCK_CONFIG)
state = hass.states.get(ENTITY_ID)
assert state
assert state.attributes[ATTR_PRESET_MODE] is None
device.target_temperature = 98
next_update = dt_util.utcnow() + timedelta(seconds=200)
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert device.update.call_count == 1
assert state
assert state.attributes[ATTR_PRESET_MODE] == PRESET_COMFORT
device.target_temperature = 99
next_update = dt_util.utcnow() + timedelta(seconds=200)
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert device.update.call_count == 2
assert state
assert state.attributes[ATTR_PRESET_MODE] == PRESET_ECO
|
SophosBlitz/glacon
|
refs/heads/master
|
plugins/help.py
|
25
|
import re
from util import hook
@hook.command(autohelp=False)
def help(inp, bot=None, pm=None):
".help [command] -- gives a list of commands/help for a command"
funcs = {}
disabled = bot.config.get('disabled_plugins', [])
disabled_comm = bot.config.get('disabled_commands', [])
for command, (func, args) in bot.commands.iteritems():
fn = re.match(r'^plugins.(.+).py$', func._filename)
if fn.group(1).lower() not in disabled:
if command not in disabled_comm:
if func.__doc__ is not None:
if func in funcs:
if len(funcs[func]) < len(command):
funcs[func] = command
else:
funcs[func] = command
commands = dict((value, key) for key, value in funcs.iteritems())
if not inp:
pm('available commands: ' + ' '.join(sorted(commands)))
else:
if inp in commands:
pm(commands[inp].__doc__)
|
selivan/ansible-modules-core
|
refs/heads/devel
|
files/assemble.py
|
79
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Stephen Fromm <sfromm@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import os.path
import shutil
import tempfile
import re
DOCUMENTATION = '''
---
module: assemble
short_description: Assembles a configuration file from fragments
description:
- Assembles a configuration file from fragments. Often a particular
       program takes a single configuration file and does not support a
C(conf.d) style structure where it is easy to build up the configuration
from multiple sources. M(assemble) will take a directory of files that can be
local or have already been transferred to the system, and concatenate them
together to produce a destination file. Files are assembled in string sorting order.
Puppet calls this idea I(fragments).
version_added: "0.5"
options:
src:
description:
- An already existing directory full of source files.
required: true
default: null
aliases: []
dest:
description:
- A file to create using the concatenation of all of the source files.
required: true
default: null
backup:
description:
- Create a backup file (if C(yes)), including the timestamp information so
you can get the original file back if you somehow clobbered it
incorrectly.
required: false
choices: [ "yes", "no" ]
default: "no"
delimiter:
description:
- A delimiter to separate the file contents.
version_added: "1.4"
required: false
default: null
remote_src:
description:
      - If C(False), it will search for C(src) on the originating/master machine;
        if C(True), it will go to the remote/target machine for C(src).
        Default is C(True).
choices: [ "True", "False" ]
required: false
default: "True"
version_added: "1.4"
regexp:
description:
      - Assemble files only if C(regexp) matches the filename. If not set,
        all files are assembled. All "\\" (backslash) must be escaped as
        "\\\\" to comply with YAML syntax. Uses Python regular expressions; see
U(http://docs.python.org/2/library/re.html).
required: false
default: null
ignore_hidden:
description:
      - A boolean that controls whether files that start with a '.' are included.
required: false
default: false
version_added: "2.0"
validate:
description:
- The validation command to run before copying into place. The path to the file to
validate is passed in via '%s' which must be present as in the sshd example below.
The command is passed securely so shell features like expansion and pipes won't work.
required: false
default: null
version_added: "2.0"
author: "Stephen Fromm (@sfromm)"
extends_documentation_fragment:
- files
'''
EXAMPLES = '''
# Example from Ansible Playbooks
- assemble: src=/etc/someapp/fragments dest=/etc/someapp/someapp.conf
# When a delimiter is specified, it will be inserted in between each fragment
- assemble: src=/etc/someapp/fragments dest=/etc/someapp/someapp.conf delimiter='### START FRAGMENT ###'
# Copy a new "sshd_config" file into place, after passing validation with sshd
- assemble: src=/etc/ssh/conf.d/ dest=/etc/ssh/sshd_config validate='/usr/sbin/sshd -t -f %s'
'''
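# A hedged, illustrative addition (not from the module's own docs): the documented
# regexp, ignore_hidden and backup options can be combined in one task; the paths
# and the pattern below are hypothetical.
# - assemble: src=/etc/someapp/fragments dest=/etc/someapp/someapp.conf regexp='^[0-9]+_' ignore_hidden=yes backup=yes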
# ===========================================
# Support method
def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False):
''' assemble a file from a directory of fragments '''
tmpfd, temp_path = tempfile.mkstemp()
tmp = os.fdopen(tmpfd,'w')
delimit_me = False
add_newline = False
for f in sorted(os.listdir(src_path)):
if compiled_regexp and not compiled_regexp.search(f):
continue
fragment = "%s/%s" % (src_path, f)
if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
continue
fragment_content = file(fragment).read()
# always put a newline between fragments if the previous fragment didn't end with a newline.
if add_newline:
tmp.write('\n')
# delimiters should only appear between fragments
if delimit_me:
if delimiter:
# un-escape anything like newlines
delimiter = delimiter.decode('unicode-escape')
tmp.write(delimiter)
# always make sure there's a newline after the
# delimiter, so lines don't run together
if delimiter[-1] != '\n':
tmp.write('\n')
tmp.write(fragment_content)
delimit_me = True
if fragment_content.endswith('\n'):
add_newline = False
else:
add_newline = True
tmp.close()
return temp_path
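# Hedged usage sketch (not part of the original module); main() below is the real
# entry point, this only shows what the helper returns. The path and delimiter are
# hypothetical.
#
#   tmp_path = assemble_from_fragments('/etc/someapp/fragments',
#                                      delimiter='### START FRAGMENT ###')
#   # tmp_path names a temporary file holding the fragments concatenated in
#   # string-sorted order; the caller must copy it into place and remove it.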
# ==============================================================
# main
def main():
module = AnsibleModule(
# not checking because of daisy chain to file module
argument_spec = dict(
src = dict(required=True),
delimiter = dict(required=False),
dest = dict(required=True),
backup=dict(default=False, type='bool'),
remote_src=dict(default=False, type='bool'),
regexp = dict(required=False),
ignore_hidden = dict(default=False, type='bool'),
validate = dict(required=False, type='str'),
),
add_file_common_args=True
)
changed = False
path_md5 = None # Deprecated
path_hash = None
dest_hash = None
src = os.path.expanduser(module.params['src'])
dest = os.path.expanduser(module.params['dest'])
backup = module.params['backup']
delimiter = module.params['delimiter']
regexp = module.params['regexp']
compiled_regexp = None
ignore_hidden = module.params['ignore_hidden']
validate = module.params.get('validate', None)
if not os.path.exists(src):
module.fail_json(msg="Source (%s) does not exist" % src)
if not os.path.isdir(src):
module.fail_json(msg="Source (%s) is not a directory" % src)
if regexp != None:
try:
compiled_regexp = re.compile(regexp)
except re.error, e:
module.fail_json(msg="Invalid Regexp (%s) in \"%s\"" % (e, regexp))
path = assemble_from_fragments(src, delimiter, compiled_regexp, ignore_hidden)
path_hash = module.sha1(path)
if os.path.exists(dest):
dest_hash = module.sha1(dest)
if path_hash != dest_hash:
if backup and dest_hash is not None:
module.backup_local(dest)
if validate:
if "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % validate)
(rc, out, err) = module.run_command(validate % path)
if rc != 0:
module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc, err))
shutil.copy(path, dest)
changed = True
# Backwards compat. This won't return data if FIPS mode is active
try:
pathmd5 = module.md5(path)
except ValueError:
pathmd5 = None
os.remove(path)
file_args = module.load_file_common_arguments(module.params)
changed = module.set_fs_attributes_if_different(file_args, changed)
# Mission complete
module.exit_json(src=src, dest=dest, md5sum=pathmd5, checksum=path_hash, changed=changed, msg="OK")
# import module snippets
from ansible.module_utils.basic import *
main()
|
robertour/commas
|
refs/heads/master
|
plugins/cms_plugins.py
|
1
|
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from django.utils.translation import ugettext_lazy as _
from cms.plugins.text.models import Text
from cms.plugins.text.cms_plugins import TextPlugin as TextPluginCMS
from cmsplugin_filer_file.cms_plugins import FilerFilePlugin
from django.contrib.admin import TabularInline
from models import Credential, Reference, Service, Team, Member, Widget, SocialLink
class CredentialPlugin(CMSPluginBase):
model = Credential
name = _("Crendential Plugin")
render_template = "credential.html"
module = 'Commas & Industry'
def render(self, context, instance, placeholder):
context['instance'] = instance
return context
plugin_pool.register_plugin(CredentialPlugin)
class ReferencePlugin(CMSPluginBase):
model = Reference
name = _("Reference Plugin")
render_template = "reference.html"
module = 'Commas & Industry'
def render(self, context, instance, placeholder):
context['instance'] = instance
return context
plugin_pool.register_plugin(ReferencePlugin)
class ServicePlugin(CMSPluginBase):
model = Service
name = _("Service Plugin")
render_template = "service.html"
module = 'Commas & Industry'
def render(self, context, instance, placeholder):
context['instance'] = instance
return context
plugin_pool.register_plugin(ServicePlugin)
class TeamPlugin(CMSPluginBase):
model = Team
name = _("Team Plugin")
render_template = "team.html"
module = 'Commas & Industry'
def render(self, context, instance, placeholder):
context['instance'] = instance
return context
plugin_pool.register_plugin(TeamPlugin)
class SocialLinkInline(TabularInline):
model = SocialLink
extra = 1
class MemberPlugin(CMSPluginBase):
model = Member
name = _("Member Plugin")
render_template = "member.html"
module = 'Commas & Industry'
def render(self, context, instance, placeholder):
context['instance'] = instance
return context
inlines = [SocialLinkInline]
plugin_pool.register_plugin(MemberPlugin)
class TextPlugin(CMSPluginBase):
model = Text
name = _("Text/HTML Plugin")
render_template = "text.html"
plugin_pool.unregister_plugin(TextPluginCMS)
plugin_pool.register_plugin(TextPlugin)
class EditorTextPlugin(TextPluginCMS):
name = _("TinyMCE Text Plugin")
plugin_pool.register_plugin(EditorTextPlugin)
class FilerBackgroundImage(FilerFilePlugin):
name = _("Background Image")
exclude = ['title', 'target_blank']
render_template = "background_image.html"
plugin_pool.register_plugin(FilerBackgroundImage)
class WidgetPlugin(CMSPluginBase):
model = Widget
name = _("Sidebar Widget Plugin")
render_template = "widget.html"
def render(self, context, instance, placeholder):
context['instance'] = instance
return context
plugin_pool.register_plugin(WidgetPlugin)
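# Hedged note (not from the original file): every plugin above only exposes
# ``instance`` in the template context, so the referenced templates can stay
# minimal; a hypothetical credential.html might contain no more than
#   <div class="credential">{{ instance.name }}</div>
# (the field name is illustrative, it depends on the Credential model).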
|
alekz112/statsmodels
|
refs/heads/master
|
statsmodels/sandbox/distributions/tests/distparams.py
|
38
|
distcont = [
['alpha', (3.5704770516650459,)],
['anglit', ()],
['arcsine', ()],
['beta', (2.3098496451481823, 0.62687954300963677)],
['betaprime', (5, 6)], # avoid unbound error in entropy with (100, 86)],
['bradford', (0.29891359763170633,)],
['burr', (10.5, 4.3)], #incorrect mean and var for(0.94839838075366045, 4.3820284068855795)],
['cauchy', ()],
['chi', (78,)],
['chi2', (55,)],
['cosine', ()],
['dgamma', (1.1023326088288166,)],
['dweibull', (2.0685080649914673,)],
['erlang', (20,)], #correction numargs = 1
['expon', ()],
['exponpow', (2.697119160358469,)],
['exponweib', (2.8923945291034436, 1.9505288745913174)],
['f', (29, 18)],
#['fatiguelife', (29,)], #correction numargs = 1, variance very large
['fatiguelife', (2,)],
['fisk', (3.0857548622253179,)],
['foldcauchy', (4.7164673455831894,)],
['foldnorm', (1.9521253373555869,)],
['frechet_l', (3.6279911255583239,)],
['frechet_r', (1.8928171603534227,)],
['gamma', (1.9932305483800778,)],
['gausshyper', (13.763771604130699, 3.1189636648681431,
2.5145980350183019, 5.1811649903971615)], #veryslow
['genexpon', (9.1325976465418908, 16.231956600590632, 3.2819552690843983)],
['genextreme', (-0.1,)], # sample mean test fails for (3.3184017469423535,)],
['gengamma', (4.4162385429431925, 3.1193091679242761)],
['genhalflogistic', (0.77274727809929322,)],
['genlogistic', (0.41192440799679475,)],
['genpareto', (0.1,)], # use case with finite moments
['gilbrat', ()],
['gompertz', (0.94743713075105251,)],
['gumbel_l', ()],
['gumbel_r', ()],
['halfcauchy', ()],
['halflogistic', ()],
['halfnorm', ()],
['hypsecant', ()],
#['invgamma', (2.0668996136993067,)], #convergence problem with expect
#['invgamma', (3.0,)],
['invgamma', (5.0,)], #kurtosis requires alpha > 4
['invnorm', (0.14546264555347513,)],
['invweibull', (10.58,)], # sample mean test fails at(0.58847112119264788,)]
['johnsonsb', (4.3172675099141058, 3.1837781130785063)],
['johnsonsu', (2.554395574161155, 2.2482281679651965)],
['ksone', (1000,)], #replace 22 by 100 to avoid failing range, ticket 956
['kstwobign', ()],
['laplace', ()],
['levy', ()],
['levy_l', ()],
# ['levy_stable', (0.35667405469844993,
# -0.67450531578494011)], #NotImplementedError
# rvs not tested
['loggamma', (0.41411931826052117,)],
['logistic', ()],
['loglaplace', (3.2505926592051435,)],
['lognorm', (0.95368226960575331,)],
['lomax', (1.8771398388773268,)], #this has infinite variance
['lomax', (10,)], #first 4 moments are finite
['maxwell', ()],
['mielke', (10.4, 3.6)], # sample mean test fails for (4.6420495492121487, 0.59707419545516938)],
# mielke: good results if 2nd parameter >2, weird mean or var below
['nakagami', (4.9673794866666237,)],
['ncf', (27, 27, 0.41578441799226107)],
['nct', (14, 0.24045031331198066)],
['ncx2', (21, 1.0560465975116415)],
['norm', ()],
['pareto', (2.621716532144454,)],
['powerlaw', (1.6591133289905851,)],
['powerlognorm', (2.1413923530064087, 0.44639540782048337)],
['powernorm', (4.4453652254590779,)],
['rayleigh', ()],
['rdist', (0.9,)], # feels also slow
# ['rdist', (3.8266985793976525,)], #veryslow, especially rvs
#['rdist', (541.0,)], # from ticket #758 #veryslow
['recipinvgauss', (0.63004267809369119,)],
['reciprocal', (0.0062309367010521255, 1.0062309367010522)],
['rice', (0.7749725210111873,)],
['semicircular', ()],
['t', (2.7433514990818093,)],
['triang', (0.15785029824528218,)],
['truncexpon', (4.6907725456810478,)],
['truncnorm', (-1.0978730080013919, 2.7306754109031979)],
['tukeylambda', (3.1321477856738267,)],
['uniform', ()],
['vonmises', (3.9939042581071398,)],
['wald', ()],
['weibull_max', (2.8687961709100187,)],
['weibull_min', (1.7866166930421596,)],
['wrapcauchy', (0.031071279018614728,)]]
distdiscrete = [
['bernoulli',(0.3,)],
['binom', (5, 0.4)],
['boltzmann',(1.4, 19)],
['dlaplace', (0.8,)], #0.5
['geom', (0.5,)],
['hypergeom',(30, 12, 6)],
['hypergeom',(21,3,12)], #numpy.random (3,18,12) numpy ticket:921
['hypergeom',(21,18,11)], #numpy.random (18,3,11) numpy ticket:921
['logser', (0.6,)], # reenabled, numpy ticket:921
['nbinom', (5, 0.5)],
['nbinom', (0.4, 0.4)], #from tickets: 583
['planck', (0.51,)], #4.1
['poisson', (0.6,)],
['randint', (7, 31)],
['skellam', (15, 8)],
['zipf', (4,)] ] # arg=4 is ok,
# Zipf broken for arg = 2, e.g. weird .stats
# looking closer, mean, var should be inf for arg=2
distslow = ['rdist', 'gausshyper', 'recipinvgauss', 'ksone', 'genexpon',
'vonmises', 'rice', 'mielke', 'semicircular', 'cosine', 'invweibull',
'powerlognorm', 'johnsonsu', 'kstwobign']
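# Hedged usage sketch (not part of the original file): each distcont entry pairs a
# scipy.stats distribution name with the shape parameters the sandbox tests use.
#
#   from scipy import stats
#   name, args = distcont[0]                 # ('alpha', (3.5704770516650459,))
#   frozen = getattr(stats, name)(*args)     # freeze the distribution
#   sample = frozen.rvs(size=100)            # random variates for a moment check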
|
braintreeps/moto
|
refs/heads/master
|
moto/emr/urls.py
|
14
|
from __future__ import unicode_literals
from .responses import ElasticMapReduceResponse
url_bases = [
"https?://(.+).elasticmapreduce.amazonaws.com",
"https?://elasticmapreduce.(.+).amazonaws.com",
]
url_paths = {
'{0}/$': ElasticMapReduceResponse.dispatch,
}
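# Hedged illustration (not part of the original file): moto combines each
# url_bases entry with the url_paths keys to build the regexes it dispatches on,
# so a request such as POST https://elasticmapreduce.us-east-1.amazonaws.com/
# would be handled by ElasticMapReduceResponse.dispatch.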
|
DarioGT/OMS-PluginXML
|
refs/heads/master
|
org.modelsphere.sms/lib/jython-2.2.1/Lib/encodings/unicode_escape.py
|
3
|
""" Python 'unicode-escape' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.unicode_escape_encode
decode = codecs.unicode_escape_decode
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (Codec.encode,Codec.decode,StreamReader,StreamWriter)
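# Usage sketch (not part of the codec module itself); Python 2 syntax to match the
# surrounding library code.
#
#   >>> 'line1\\nline2'.decode('unicode_escape')
#   u'line1\nline2'
#   >>> u'tab\there'.encode('unicode_escape')
#   'tab\\there'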
|
KitKatXperience/platform_external_chromium_org
|
refs/heads/kk
|
tools/telemetry/telemetry/core/extension_unittest.py
|
23
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import shutil
import tempfile
import unittest
from telemetry.core import browser_finder
from telemetry.core import extension_to_load
from telemetry.core.chrome import extension_dict_backend
from telemetry.unittest import options_for_unittests
class ExtensionTest(unittest.TestCase):
def setUp(self):
extension_path = os.path.join(os.path.dirname(__file__),
'..', '..', 'unittest_data', 'simple_extension')
options = options_for_unittests.GetCopy()
load_extension = extension_to_load.ExtensionToLoad(
extension_path, options.browser_type)
options.extensions_to_load = [load_extension]
browser_to_create = browser_finder.FindBrowser(options)
self._browser = None
self._extension = None
if not browser_to_create:
# May not find a browser that supports extensions.
return
self._browser = browser_to_create.Create()
self._browser.Start()
self._extension = self._browser.extensions[load_extension]
self.assertTrue(self._extension)
def tearDown(self):
if self._browser:
self._browser.Close()
def testExtensionBasic(self):
"""Test ExtensionPage's ExecuteJavaScript and EvaluateJavaScript."""
if not self._extension:
logging.warning('Did not find a browser that supports extensions, '
'skipping test.')
return
self._extension.ExecuteJavaScript('setTestVar("abcdef")')
self.assertEquals('abcdef',
self._extension.EvaluateJavaScript('_testVar'))
def testDisconnect(self):
"""Test that ExtensionPage.Disconnect exists by calling it.
EvaluateJavaScript should reconnect."""
if not self._extension:
logging.warning('Did not find a browser that supports extensions, '
'skipping test.')
return
self._extension.Disconnect()
self.assertEquals(2, self._extension.EvaluateJavaScript('1+1'))
class NonExistentExtensionTest(unittest.TestCase):
def testNonExistentExtensionPath(self):
"""Test that a non-existent extension path will raise an exception."""
extension_path = os.path.join(os.path.dirname(__file__),
'..', '..', 'unittest_data', 'foo')
options = options_for_unittests.GetCopy()
self.assertRaises(extension_to_load.ExtensionPathNonExistentException,
lambda: extension_to_load.ExtensionToLoad(
extension_path, options.browser_type))
def testExtensionNotLoaded(self):
"""Querying an extension that was not loaded will return None"""
extension_path = os.path.join(os.path.dirname(__file__),
'..', '..', 'unittest_data', 'simple_extension')
options = options_for_unittests.GetCopy()
load_extension = extension_to_load.ExtensionToLoad(
extension_path, options.browser_type)
browser_to_create = browser_finder.FindBrowser(options)
with browser_to_create.Create() as b:
b.Start()
if b.supports_extensions:
self.assertRaises(extension_dict_backend.ExtensionNotFoundException,
lambda: b.extensions[load_extension])
class MultipleExtensionTest(unittest.TestCase):
def setUp(self):
""" Copy the manifest and background.js files of simple_extension to a
number of temporary directories to load as extensions"""
self._extension_dirs = [tempfile.mkdtemp()
for i in range(3)] # pylint: disable=W0612
src_extension_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', 'unittest_data', 'simple_extension'))
manifest_path = os.path.join(src_extension_dir, 'manifest.json')
script_path = os.path.join(src_extension_dir, 'background.js')
for d in self._extension_dirs:
shutil.copy(manifest_path, d)
shutil.copy(script_path, d)
options = options_for_unittests.GetCopy()
self._extensions_to_load = [extension_to_load.ExtensionToLoad(
d, options.browser_type)
for d in self._extension_dirs]
options.extensions_to_load = self._extensions_to_load
browser_to_create = browser_finder.FindBrowser(options)
self._browser = None
# May not find a browser that supports extensions.
if browser_to_create:
self._browser = browser_to_create.Create()
self._browser.Start()
def tearDown(self):
if self._browser:
self._browser.Close()
for d in self._extension_dirs:
shutil.rmtree(d)
def testMultipleExtensions(self):
if not self._browser:
logging.warning('Did not find a browser that supports extensions, '
'skipping test.')
return
# Test contains.
loaded_extensions = filter(lambda e: e in self._browser.extensions,
self._extensions_to_load)
self.assertEqual(len(loaded_extensions), len(self._extensions_to_load))
for load_extension in self._extensions_to_load:
extension = self._browser.extensions[load_extension]
assert extension
extension.ExecuteJavaScript('setTestVar("abcdef")')
self.assertEquals('abcdef', extension.EvaluateJavaScript('_testVar'))
class ComponentExtensionTest(unittest.TestCase):
def testComponentExtensionBasic(self):
extension_path = os.path.join(os.path.dirname(__file__),
'..', '..', 'unittest_data', 'component_extension')
options = options_for_unittests.GetCopy()
load_extension = extension_to_load.ExtensionToLoad(
extension_path, options.browser_type, is_component=True)
options.extensions_to_load = [load_extension]
browser_to_create = browser_finder.FindBrowser(options)
if not browser_to_create:
logging.warning('Did not find a browser that supports extensions, '
'skipping test.')
return
with browser_to_create.Create() as b:
b.Start()
extension = b.extensions[load_extension]
extension.ExecuteJavaScript('setTestVar("abcdef")')
self.assertEquals('abcdef', extension.EvaluateJavaScript('_testVar'))
def testComponentExtensionNoPublicKey(self):
# simple_extension does not have a public key.
extension_path = os.path.join(os.path.dirname(__file__),
'..', '..', 'unittest_data', 'simple_extension')
options = options_for_unittests.GetCopy()
self.assertRaises(extension_to_load.MissingPublicKeyException,
lambda: extension_to_load.ExtensionToLoad(
extension_path,
browser_type=options.browser_type,
is_component=True))
|
cliffe/SecGen
|
refs/heads/master
|
modules/utilities/unix/audit_tools/ghidra/files/release/Ghidra/Features/Python/data/jython-2.7.1/Lib/encodings/euc_jis_2004.py
|
816
|
#
# euc_jis_2004.py: Python Unicode Codec for EUC_JIS_2004
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('euc_jis_2004')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='euc_jis_2004',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
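# Usage sketch (not part of the codec module): once registered, the codec can be
# reached through the normal codecs machinery.
#
#   import codecs
#   data = codecs.encode(u'\u3042', 'euc_jis_2004')   # HIRAGANA LETTER A
#   assert codecs.decode(data, 'euc_jis_2004') == u'\u3042'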
|