| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable) |
|---|---|---|---|---|
fake-name/ReadableWebProxy
|
refs/heads/master
|
WebMirror/management/rss_parser_funcs/feed_parse_extractThrillerparadisetranslationsWordpressCom.py
|
1
|
def extractThrillerparadisetranslationsWordpressCom(item):
'''
Parser for 'thrillerparadisetranslations.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
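# Illustrative walk-through (hypothetical item, not from the source): an
# RSS entry {'title': 'TE Chapter 12', 'tags': ['PRC']} should parse to a
# chapter number, match the 'PRC' row of tagmap, and produce a
# 'translated' release message; a title containing 'preview', or one with
# no volume/chapter, is dropped by returning None.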
|
elimence/edx-platform
|
refs/heads/master
|
lms/djangoapps/courseware/migrations/0009_add_field_default.py
|
80
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'XModuleContentField.value'
db.alter_column('courseware_xmodulecontentfield', 'value', self.gf('django.db.models.fields.TextField')())
# Changing field 'XModuleStudentInfoField.value'
db.alter_column('courseware_xmodulestudentinfofield', 'value', self.gf('django.db.models.fields.TextField')())
# Changing field 'XModuleSettingsField.value'
db.alter_column('courseware_xmodulesettingsfield', 'value', self.gf('django.db.models.fields.TextField')())
# Changing field 'XModuleStudentPrefsField.value'
db.alter_column('courseware_xmodulestudentprefsfield', 'value', self.gf('django.db.models.fields.TextField')())
def backwards(self, orm):
# Changing field 'XModuleContentField.value'
db.alter_column('courseware_xmodulecontentfield', 'value', self.gf('django.db.models.fields.TextField')(null=True))
# Changing field 'XModuleStudentInfoField.value'
db.alter_column('courseware_xmodulestudentinfofield', 'value', self.gf('django.db.models.fields.TextField')(null=True))
# Changing field 'XModuleSettingsField.value'
db.alter_column('courseware_xmodulesettingsfield', 'value', self.gf('django.db.models.fields.TextField')(null=True))
# Changing field 'XModuleStudentPrefsField.value'
db.alter_column('courseware_xmodulestudentprefsfield', 'value', self.gf('django.db.models.fields.TextField')(null=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'courseware.studentmodule': {
'Meta': {'unique_together': "(('student', 'module_state_key', 'course_id'),)", 'object_name': 'StudentModule'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'done': ('django.db.models.fields.CharField', [], {'default': "'na'", 'max_length': '8', 'db_index': 'True'}),
'grade': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_grade': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'module_state_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_column': "'module_id'", 'db_index': 'True'}),
'module_type': ('django.db.models.fields.CharField', [], {'default': "'problem'", 'max_length': '32', 'db_index': 'True'}),
'state': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'courseware.xmodulecontentfield': {
'Meta': {'unique_together': "(('definition_id', 'field_name'),)", 'object_name': 'XModuleContentField'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'definition_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'default': "'null'"})
},
'courseware.xmodulesettingsfield': {
'Meta': {'unique_together': "(('usage_id', 'field_name'),)", 'object_name': 'XModuleSettingsField'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'usage_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'default': "'null'"})
},
'courseware.xmodulestudentinfofield': {
'Meta': {'unique_together': "(('student', 'field_name'),)", 'object_name': 'XModuleStudentInfoField'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'value': ('django.db.models.fields.TextField', [], {'default': "'null'"})
},
'courseware.xmodulestudentprefsfield': {
'Meta': {'unique_together': "(('student', 'module_type', 'field_name'),)", 'object_name': 'XModuleStudentPrefsField'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'module_type': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'value': ('django.db.models.fields.TextField', [], {'default': "'null'"})
}
}
complete_apps = ['courseware']
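# For orientation only, a hedged sketch of the same forwards change
# expressed as a modern Django migration operation (this file itself is a
# South migration; the sketch is not part of the repo):
#
#   migrations.AlterField(
#       model_name='xmodulecontentfield',
#       name='value',
#       field=models.TextField(),
#   )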
|
phalax4/CarnotKE
|
refs/heads/master
|
jyhton/lib-python/2.7/contextlib.py
|
261
|
"""Utilities for with-statement contexts. See PEP 343."""
import sys
from functools import wraps
from warnings import warn
__all__ = ["contextmanager", "nested", "closing"]
class GeneratorContextManager(object):
"""Helper for @contextmanager decorator."""
def __init__(self, gen):
self.gen = gen
def __enter__(self):
try:
return self.gen.next()
except StopIteration:
raise RuntimeError("generator didn't yield")
def __exit__(self, type, value, traceback):
if type is None:
try:
self.gen.next()
except StopIteration:
return
else:
raise RuntimeError("generator didn't stop")
else:
if value is None:
# Need to force instantiation so we can reliably
# tell if we get the same exception back
value = type()
try:
self.gen.throw(type, value, traceback)
raise RuntimeError("generator didn't stop after throw()")
except StopIteration, exc:
# Suppress the exception *unless* it's the same exception that
# was passed to throw(). This prevents a StopIteration
# raised inside the "with" statement from being suppressed
return exc is not value
except:
# only re-raise if it's *not* the exception that was
# passed to throw(), because __exit__() must not raise
# an exception unless __exit__() itself failed. But throw()
# has to raise the exception to signal propagation, so this
# fixes the impedance mismatch between the throw() protocol
# and the __exit__() protocol.
#
if sys.exc_info()[1] is not value:
raise
def contextmanager(func):
"""@contextmanager decorator.
Typical usage:
@contextmanager
def some_generator(<arguments>):
<setup>
try:
yield <value>
finally:
<cleanup>
This makes this:
with some_generator(<arguments>) as <variable>:
<body>
equivalent to this:
<setup>
try:
<variable> = <value>
<body>
finally:
<cleanup>
"""
@wraps(func)
def helper(*args, **kwds):
return GeneratorContextManager(func(*args, **kwds))
return helper
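# A minimal usage sketch of @contextmanager (illustrative, not part of the
# module): everything before the yield runs via __enter__, everything
# after it via __exit__.
#
#   @contextmanager
#   def tag(name):
#       print "<%s>" % name
#       try:
#           yield name
#       finally:
#           print "</%s>" % name
#
#   with tag("h1") as t:
#       print "Hello,", t    # prints <h1>, then "Hello, h1", then </h1>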
@contextmanager
def nested(*managers):
"""Combine multiple context managers into a single nested context manager.
This function has been deprecated in favour of the multiple manager form
of the with statement.
The one advantage of this function over the multiple manager form of the
with statement is that argument unpacking allows it to be
used with a variable number of context managers as follows:
with nested(*managers):
do_something()
"""
warn("With-statements now directly support multiple context managers",
DeprecationWarning, 3)
exits = []
vars = []
exc = (None, None, None)
try:
for mgr in managers:
exit = mgr.__exit__
enter = mgr.__enter__
vars.append(enter())
exits.append(exit)
yield vars
except:
exc = sys.exc_info()
finally:
while exits:
exit = exits.pop()
try:
if exit(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
if exc != (None, None, None):
# Don't rely on sys.exc_info() still containing
# the right information. Another exception may
# have been raised and caught by an exit method
raise exc[0], exc[1], exc[2]
class closing(object):
"""Context to automatically close something at the end of a block.
Code like this:
with closing(<module>.open(<arguments>)) as f:
<block>
is equivalent to this:
f = <module>.open(<arguments>)
try:
<block>
finally:
f.close()
"""
def __init__(self, thing):
self.thing = thing
def __enter__(self):
return self.thing
def __exit__(self, *exc_info):
self.thing.close()
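# Usage sketch for closing() (illustrative): a 2.x urllib handle exposes
# close() but is not itself a context manager, so closing() adapts it.
#
#   from contextlib import closing
#   import urllib
#   with closing(urllib.urlopen('http://www.python.org')) as page:
#       data = page.read()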
|
modelica-3rdparty/netCDF-DataReader
|
refs/heads/master
|
NcDataReader2/Resources/examples/GenerateFile.py
|
1
|
#!/usr/bin/env python
# This does exactly the same as GenerateFile.c.
# Several netCDF interfaces are available for Python; this script tries
# the one from scipy first and falls back to pupynere
# (http://pypi.python.org/pypi/pupynere/).
try:
from scipy.io import *
except:
from pupynere import *
from math import pi, sin, tanh
from random import random
DIM = 100
NPOINTS = 1000
def createFile(fileName):
f = netcdf_file(fileName, 'w')
f.doc = 'test file for ncDataReader2, created by GenerateFile.py'
f.version = '2.5.1'
f.foo = 42.42
f.bar = 1234
return f
def create1DVar(f):
f.createDimension('time', DIM)
time = f.createVariable('time', 'd', ('time', ))
time.scale_factor = 2.5
time.add_offset = 0.0
time.extrapolation = 'periodic'
test = f.createVariable('test1D', 'd', ('time', ))
test.scale_factor = 2.0
test.add_offset = 3.0
test.interpolation = 'akima'
test.smoothing = 500.0
step = 2.0 * pi / (DIM - 1.0)
for i in range(DIM):
x = pi + i*step + 0.3*random()*step
time[i] = x
test[i] = sin(x) + 0.1*random()
def create2DVar(f):
f.createDimension('npoints', NPOINTS)
f.createDimension('xyz', 3)
points = f.createVariable('points', 'd', ('npoints', 'xyz'))
for i in range(NPOINTS):
x = 10.0 * random() - 5.0
y = 10.0 * random() - 5.0
points[i] = (x, y, tanh(x*y))
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
f = createFile(sys.argv[1])
else:
f = createFile('testfile.nc')
create1DVar(f)
create2DVar(f)
f.sync()
f.close()
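# To sanity-check the result (assuming the standard netCDF command-line
# utilities are installed), dump the header of the generated file:
#   ncdump -h testfile.nc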
|
m1093782566/openstack_org_ceilometer
|
refs/heads/dev
|
ceilometer/alarm/storage/models.py
|
6
|
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Author: Doug Hellmann <doug.hellmann@dreamhost.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Model classes for use in the storage API.
"""
from ceilometer.storage import base
class Alarm(base.Model):
"""
An alarm to monitor.
:param alarm_id: UUID of the alarm
:param type: type of the alarm
:param name: the name of the alarm
:param description: user-friendly description of the alarm
:param enabled: whether the alarm is enabled
:param state: alarm state (ok/alarm/insufficient data)
:param rule: a rule that defines when the alarm fires
:param user_id: the owner/creator of the alarm
:param project_id: the project_id of the creator
:param evaluation_periods: the number of evaluation periods
:param period: the time period in seconds
:param time_constraints: the list of the alarm's time constraints, if any
:param timestamp: the timestamp when the alarm was last updated
:param state_timestamp: the timestamp of the last state change
:param ok_actions: the list of webhooks to call when entering the ok state
:param alarm_actions: the list of webhooks to call when entering the
alarm state
:param insufficient_data_actions: the list of webhooks to call when
entering the insufficient data state
:param repeat_actions: whether the actions should be triggered on each
alarm evaluation
"""
ALARM_INSUFFICIENT_DATA = 'insufficient data'
ALARM_OK = 'ok'
ALARM_ALARM = 'alarm'
ALARM_ACTIONS_MAP = {
ALARM_INSUFFICIENT_DATA: 'insufficient_data_actions',
ALARM_OK: 'ok_actions',
ALARM_ALARM: 'alarm_actions',
}
def __init__(self, alarm_id, type, enabled, name, description,
timestamp, user_id, project_id, state, state_timestamp,
ok_actions, alarm_actions, insufficient_data_actions,
repeat_actions, rule, time_constraints):
base.Model.__init__(
self,
alarm_id=alarm_id,
type=type,
enabled=enabled,
name=name,
description=description,
timestamp=timestamp,
user_id=user_id,
project_id=project_id,
state=state,
state_timestamp=state_timestamp,
ok_actions=ok_actions,
alarm_actions=alarm_actions,
insufficient_data_actions=insufficient_data_actions,
repeat_actions=repeat_actions,
rule=rule,
time_constraints=time_constraints)
class AlarmChange(base.Model):
"""Record of an alarm change.
:param event_id: UUID of the change event
:param alarm_id: UUID of the alarm
:param type: The type of change
:param detail: JSON fragment describing change
:param user_id: the user ID of the initiating identity
:param project_id: the project ID of the initiating identity
:param on_behalf_of: the tenant on behalf of which the change
is being made
:param timestamp: the timestamp of the change
"""
CREATION = 'creation'
RULE_CHANGE = 'rule change'
STATE_TRANSITION = 'state transition'
DELETION = 'deletion'
def __init__(self,
event_id,
alarm_id,
type,
detail,
user_id,
project_id,
on_behalf_of,
timestamp=None
):
base.Model.__init__(
self,
event_id=event_id,
alarm_id=alarm_id,
type=type,
detail=detail,
user_id=user_id,
project_id=project_id,
on_behalf_of=on_behalf_of,
timestamp=timestamp)
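# Illustrative lookup (values from the map above): for an alarm currently
# in the 'alarm' state, the attribute holding its webhooks is resolved via
#   Alarm.ALARM_ACTIONS_MAP[Alarm.ALARM_ALARM]   # -> 'alarm_actions'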
|
jylaxp/django
|
refs/heads/master
|
django/contrib/gis/geos/coordseq.py
|
374
|
"""
This module houses the GEOSCoordSeq object, which is used internally
by GEOSGeometry to house the actual coordinates of the Point,
LineString, and LinearRing geometries.
"""
from ctypes import byref, c_double, c_uint
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.libgeos import CS_PTR
from django.contrib.gis.shortcuts import numpy
from django.utils.six.moves import range
class GEOSCoordSeq(GEOSBase):
"The internal representation of a list of coordinates inside a Geometry."
ptr_type = CS_PTR
def __init__(self, ptr, z=False):
"Initializes from a GEOS pointer."
if not isinstance(ptr, CS_PTR):
raise TypeError('Coordinate sequence should initialize with a CS_PTR.')
self._ptr = ptr
self._z = z
def __iter__(self):
"Iterates over each point in the coordinate sequence."
for i in range(self.size):
yield self[i]
def __len__(self):
"Returns the number of points in the coordinate sequence."
return int(self.size)
def __str__(self):
"Returns the string representation of the coordinate sequence."
return str(self.tuple)
def __getitem__(self, index):
"Returns the coordinate sequence value at the given index."
coords = [self.getX(index), self.getY(index)]
if self.dims == 3 and self._z:
coords.append(self.getZ(index))
return tuple(coords)
def __setitem__(self, index, value):
"Sets the coordinate sequence value at the given index."
# Checking the input value
if isinstance(value, (list, tuple)):
pass
elif numpy and isinstance(value, numpy.ndarray):
pass
else:
raise TypeError('Must set coordinate with a sequence (list, tuple, or numpy array).')
# Checking the dims of the input
if self.dims == 3 and self._z:
n_args = 3
set_3d = True
else:
n_args = 2
set_3d = False
if len(value) != n_args:
raise TypeError('Dimension of value does not match.')
# Setting the X, Y, Z
self.setX(index, value[0])
self.setY(index, value[1])
if set_3d:
self.setZ(index, value[2])
# #### Internal Routines ####
def _checkindex(self, index):
"Checks the given index."
sz = self.size
if (sz < 1) or (index < 0) or (index >= sz):
raise IndexError('invalid GEOS Geometry index: %s' % str(index))
def _checkdim(self, dim):
"Checks the given dimension."
if dim < 0 or dim > 2:
raise GEOSException('invalid ordinate dimension "%d"' % dim)
# #### Ordinate getting and setting routines ####
def getOrdinate(self, dimension, index):
"Returns the value for the given dimension and index."
self._checkindex(index)
self._checkdim(dimension)
return capi.cs_getordinate(self.ptr, index, dimension, byref(c_double()))
def setOrdinate(self, dimension, index, value):
"Sets the value for the given dimension and index."
self._checkindex(index)
self._checkdim(dimension)
capi.cs_setordinate(self.ptr, index, dimension, value)
def getX(self, index):
"Get the X value at the index."
return self.getOrdinate(0, index)
def setX(self, index, value):
"Set X with the value at the given index."
self.setOrdinate(0, index, value)
def getY(self, index):
"Get the Y value at the given index."
return self.getOrdinate(1, index)
def setY(self, index, value):
"Set Y with the value at the given index."
self.setOrdinate(1, index, value)
def getZ(self, index):
"Get Z with the value at the given index."
return self.getOrdinate(2, index)
def setZ(self, index, value):
"Set Z with the value at the given index."
self.setOrdinate(2, index, value)
# ### Dimensions ###
@property
def size(self):
"Returns the size of this coordinate sequence."
return capi.cs_getsize(self.ptr, byref(c_uint()))
@property
def dims(self):
"Returns the dimensions of this coordinate sequence."
return capi.cs_getdims(self.ptr, byref(c_uint()))
@property
def hasz(self):
"""
Returns whether this coordinate sequence is 3D. This property value is
inherited from the parent Geometry.
"""
return self._z
# ### Other Methods ###
def clone(self):
"Clones this coordinate sequence."
return GEOSCoordSeq(capi.cs_clone(self.ptr), self.hasz)
@property
def kml(self):
"Returns the KML representation for the coordinates."
# Getting the substitution string depending on whether the coordinates have
# a Z dimension.
if self.hasz:
substr = '%s,%s,%s '
else:
substr = '%s,%s,0 '
return '<coordinates>%s</coordinates>' % \
''.join(substr % self[i] for i in range(len(self))).strip()
@property
def tuple(self):
"Returns a tuple version of this coordinate sequence."
n = self.size
if n == 1:
return self[0]
else:
return tuple(self[i] for i in range(n))
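# Usage sketch (assumes GEOS is available; a GEOSCoordSeq is normally
# reached through a geometry rather than constructed from a raw pointer):
#
#   from django.contrib.gis.geos import LineString
#   ls = LineString((0, 0), (1, 1))
#   cs = ls.coord_seq    # the GEOSCoordSeq behind the line's points
#   cs[0]                # (0.0, 0.0)
#   cs.size              # 2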
|
DavidCain/find-great-books
|
refs/heads/master
|
bibliophile/__init__.py
|
1
|
# Importing grequests will monkeypatch SSL
# Recursion errors occur if this import does not come before module imports
import grequests # NoQA
|
dcroc16/skunk_works
|
refs/heads/master
|
google_appengine/google/appengine/api/labs/taskqueue/taskqueue_stub.py
|
2
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub version of the Task Queue API.
This stub stores tasks and runs them via dev_appserver's AddEvent capability.
It also validates the tasks by checking their queue name against the queue.yaml.
As well as implementing Task Queue API functions, the stub exposes various other
functions that are used by the dev_appserver's admin console to display the
application's queues and tasks.
"""
import StringIO
import base64
import bisect
import datetime
import logging
import os
import random
import string
import time
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import queueinfo
from google.appengine.api.labs.taskqueue import taskqueue_service_pb
from google.appengine.runtime import apiproxy_errors
DEFAULT_RATE = '5.00/s'
DEFAULT_BUCKET_SIZE = 5
MAX_ETA_DELTA_DAYS = 30
admin_console_dummy_tasks = {}
BUILT_IN_HEADERS = set(['x-appengine-queuename',
'x-appengine-taskname',
'x-appengine-taskretrycount',
'x-appengine-development-payload',
'content-length'])
DEFAULT_QUEUE_NAME = 'default'
CRON_QUEUE_NAME = '__cron'
class _DummyTaskStore(object):
"""A class that encapsulates a sorted store of tasks.
Used for testing the admin console.
"""
def __init__(self):
"""Constructor."""
self._sorted_by_name = []
self._sorted_by_eta = []
def _InsertTask(self, task):
"""Insert a task into the dummy store, keeps lists sorted.
Args:
task: the new task.
"""
eta = task.eta_usec()
name = task.task_name()
bisect.insort_left(self._sorted_by_eta, (eta, name, task))
bisect.insort_left(self._sorted_by_name, (name, task))
def Lookup(self, maximum, name=None, eta=None):
"""Lookup a number of sorted tasks from the store.
If 'eta' is specified, the tasks are looked up in a list sorted by 'eta',
then 'name'. Otherwise they are sorted by 'name'. We need to be able to
sort by 'eta' and 'name' because tasks can have identical eta. If you had
20 tasks with the same ETA, you wouldn't be able to page past them, since
the 'next eta' would give the first one again. Names are unique, though.
Args:
maximum: the maximum number of tasks to return.
name: a task name to start with.
eta: an eta to start with.
Returns:
A list of up to 'maximum' tasks.
Raises:
ValueError: if the task store gets corrupted.
"""
if eta is None:
pos = bisect.bisect_left(self._sorted_by_name, (name,))
tasks = (x[1] for x in self._sorted_by_name[pos:pos + maximum])
return list(tasks)
if name is None:
raise ValueError('must supply name or eta')
pos = bisect.bisect_left(self._sorted_by_eta, (eta, name))
tasks = (x[2] for x in self._sorted_by_eta[pos:pos + maximum])
return list(tasks)
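# Why the sort key is (eta, name) rather than eta alone (hypothetical
# example): with tasks t1..t3 all at eta=100, resuming a page by "next
# eta" would land on t1 again every time; the unique name gives bisect a
# strictly increasing position to resume from.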
def Count(self):
"""Returns the number of tasks in the store."""
return len(self._sorted_by_name)
def Oldest(self):
"""Returns the oldest eta in the store, or None if no tasks."""
if self._sorted_by_eta:
return self._sorted_by_eta[0][0]
return None
def Add(self, request):
"""Inserts a new task into the store.
Args:
request: A taskqueue_service_pb.TaskQueueAddRequest.
Raises:
apiproxy_errors.ApplicationError: If a task with the same name is already
in the store.
"""
pos = bisect.bisect_left(self._sorted_by_name, (request.task_name(),))
if (pos < len(self._sorted_by_name) and
self._sorted_by_name[pos][0] == request.task_name()):
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.TASK_ALREADY_EXISTS)
now = datetime.datetime.utcnow()
now_sec = time.mktime(now.timetuple())
task = taskqueue_service_pb.TaskQueueQueryTasksResponse_Task()
task.set_task_name(request.task_name())
task.set_eta_usec(request.eta_usec())
task.set_creation_time_usec(now_sec * 1e6)
task.set_url(request.url())
task.set_method(request.method())
for keyvalue in task.header_list():
header = task.add_header()
header.set_key(keyvalue.key())
header.set_value(keyvalue.value())
if request.has_description():
task.set_description(request.description())
if request.has_body():
task.set_body(request.body())
if request.has_crontimetable():
task.mutable_crontimetable().set_schedule(
request.crontimetable().schedule())
task.mutable_crontimetable().set_timezone(
request.crontimetable().timezone())
self._InsertTask(task)
def Delete(self, name):
"""Deletes a task from the store by name.
Args:
name: the name of the task to delete.
Returns:
TaskQueueServiceError.UNKNOWN_TASK: if the task is unknown.
TaskQueueServiceError.INTERNAL_ERROR: if the store is corrupted.
TaskQueueServiceError.OK: otherwise.
"""
pos = bisect.bisect_left(self._sorted_by_name, (name,))
if pos >= len(self._sorted_by_name):
return taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK
if self._sorted_by_name[pos][1].task_name() != name:
logging.info('looking for task name %s, got task name %s', name,
self._sorted_by_name[pos][1].task_name())
return taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK
old_task = self._sorted_by_name.pop(pos)[1]
eta = old_task.eta_usec()
pos = bisect.bisect_left(self._sorted_by_eta, (eta, name, None))
if self._sorted_by_eta[pos][2] is not old_task:
logging.error('task store corrupted')
return taskqueue_service_pb.TaskQueueServiceError.INTERNAL_ERROR
self._sorted_by_eta.pop(pos)
return taskqueue_service_pb.TaskQueueServiceError.OK
def Populate(self, num_tasks):
"""Populates the store with a number of tasks.
Args:
num_tasks: the number of tasks to insert.
"""
now = datetime.datetime.utcnow()
now_sec = time.mktime(now.timetuple())
def RandomTask():
"""Creates a new task and randomly populates values."""
task = taskqueue_service_pb.TaskQueueQueryTasksResponse_Task()
task.set_task_name(''.join(random.choice(string.ascii_lowercase)
for x in range(20)))
task.set_eta_usec(int(now_sec * 1e6) + random.randint(-10e6, 600e6))
task.set_creation_time_usec(min(now_sec * 1e6, task.eta_usec()) -
random.randint(0, 2e7))
task.set_url(random.choice(['/a', '/b', '/c', '/d']))
if random.random() < 0.2:
task.set_method(
taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.POST)
task.set_body('A' * 2000)
else:
task.set_method(
taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.GET)
task.set_retry_count(max(0, random.randint(-10, 5)))
if random.random() < 0.3:
random_headers = [('nexus', 'one'),
('foo', 'bar'),
('content-type', 'text/plain'),
('from', 'user@email.com')]
for _ in xrange(random.randint(1, 4)):
elem = random.randint(0, len(random_headers)-1)
key, value = random_headers.pop(elem)
header_proto = task.add_header()
header_proto.set_key(key)
header_proto.set_value(value)
return task
for _ in range(num_tasks):
self._InsertTask(RandomTask())
def _ParseQueueYaml(unused_self, root_path):
"""Loads the queue.yaml file and parses it.
Args:
unused_self: Allows this function to be bound to a class member. Not used.
root_path: Directory containing queue.yaml.
Returns:
None if queue.yaml doesn't exist, otherwise a queueinfo.QueueEntry object
populated from the queue.yaml.
"""
if root_path is None:
return None
for queueyaml in ('queue.yaml', 'queue.yml'):
try:
fh = open(os.path.join(root_path, queueyaml), 'r')
except IOError:
continue
try:
queue_info = queueinfo.LoadSingleQueue(fh)
return queue_info
finally:
fh.close()
return None
def _CompareTasksByEta(a, b):
"""Python sort comparator for tasks by estimated time of arrival (ETA).
Args:
a: A taskqueue_service_pb.TaskQueueAddRequest.
b: A taskqueue_service_pb.TaskQueueAddRequest.
Returns:
Standard 1/0/-1 comparison result.
"""
if a.eta_usec() > b.eta_usec():
return 1
if a.eta_usec() < b.eta_usec():
return -1
return 0
def _FormatEta(eta_usec):
"""Formats a task ETA as a date string in UTC."""
eta = datetime.datetime.fromtimestamp(eta_usec/1000000)
return eta.strftime('%Y/%m/%d %H:%M:%S')
def _EtaDelta(eta_usec):
"""Formats a task ETA as a relative time string."""
eta = datetime.datetime.fromtimestamp(eta_usec/1000000)
now = datetime.datetime.utcnow()
if eta > now:
return str(eta - now) + ' from now'
else:
return str(now - eta) + ' ago'
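# Caveat, observed from the two helpers above: eta_usec is converted with
# datetime.fromtimestamp(), which yields local time, while _EtaDelta
# compares against utcnow(); the formatted strings are therefore exact
# only when the local timezone is UTC.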
class TaskQueueServiceStub(apiproxy_stub.APIProxyStub):
"""Python only task queue service stub.
This stub executes tasks when enabled by using the dev_appserver's AddEvent
capability. When task running is disabled this stub will store tasks for
display on a console, where the user may manually execute the tasks.
"""
queue_yaml_parser = _ParseQueueYaml
def __init__(self,
service_name='taskqueue',
root_path=None,
auto_task_running=False,
task_retry_seconds=30,
_all_queues_valid=False):
"""Constructor.
Args:
service_name: Service name expected for all calls.
root_path: Root path to the directory of the application which may contain
a queue.yaml file. If None, then it's assumed no queue.yaml file is
available.
auto_task_running: When True, the dev_appserver should automatically
run tasks after they are enqueued.
task_retry_seconds: How long to wait between task executions after a
task fails.
"""
super(TaskQueueServiceStub, self).__init__(service_name)
self._taskqueues = {}
self._next_task_id = 1
self._root_path = root_path
self._all_queues_valid = _all_queues_valid
self._add_event = None
self._auto_task_running = auto_task_running
self._task_retry_seconds = task_retry_seconds
self._app_queues = {}
class _QueueDetails(taskqueue_service_pb.TaskQueueUpdateQueueRequest):
def __init__(self, paused=False):
self.paused = paused
def _ChooseTaskName(self):
"""Returns a string containing a unique task name."""
self._next_task_id += 1
return 'task%d' % (self._next_task_id - 1)
def _VerifyTaskQueueAddRequest(self, request):
"""Checks that a TaskQueueAddRequest is valid.
Checks that a TaskQueueAddRequest specifies a valid eta and a valid queue.
Args:
request: The taskqueue_service_pb.TaskQueueAddRequest to validate.
Returns:
A taskqueue_service_pb.TaskQueueServiceError indicating any problems with
the request or taskqueue_service_pb.TaskQueueServiceError.OK if it is
valid.
"""
if request.eta_usec() < 0:
return taskqueue_service_pb.TaskQueueServiceError.INVALID_ETA
eta = datetime.datetime.utcfromtimestamp(request.eta_usec() / 1e6)
max_eta = (datetime.datetime.utcnow() +
datetime.timedelta(days=MAX_ETA_DELTA_DAYS))
if eta > max_eta:
return taskqueue_service_pb.TaskQueueServiceError.INVALID_ETA
return taskqueue_service_pb.TaskQueueServiceError.OK
def _Dynamic_Add(self, request, response):
bulk_request = taskqueue_service_pb.TaskQueueBulkAddRequest()
bulk_response = taskqueue_service_pb.TaskQueueBulkAddResponse()
bulk_request.add_add_request().CopyFrom(request)
self._Dynamic_BulkAdd(bulk_request, bulk_response)
assert bulk_response.taskresult_size() == 1
result = bulk_response.taskresult(0).result()
if result != taskqueue_service_pb.TaskQueueServiceError.OK:
raise apiproxy_errors.ApplicationError(result)
elif bulk_response.taskresult(0).has_chosen_task_name():
response.set_chosen_task_name(
bulk_response.taskresult(0).chosen_task_name())
def _Dynamic_BulkAdd(self, request, response):
"""Add many tasks to a queue using a single request.
Args:
request: The taskqueue_service_pb.TaskQueueBulkAddRequest. See
taskqueue_service.proto.
response: The taskqueue_service_pb.TaskQueueBulkAddResponse. See
taskqueue_service.proto.
"""
assert request.add_request_size(), 'taskqueue should prevent empty requests'
app_id = None
if request.add_request(0).has_app_id():
app_id = request.add_request(0).app_id()
if not self._IsValidQueue(request.add_request(0).queue_name(), app_id):
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
error_found = False
task_results_with_chosen_names = []
for add_request in request.add_request_list():
task_result = response.add_taskresult()
error = self._VerifyTaskQueueAddRequest(add_request)
if error == taskqueue_service_pb.TaskQueueServiceError.OK:
if not add_request.task_name():
chosen_name = self._ChooseTaskName()
add_request.set_task_name(chosen_name)
task_results_with_chosen_names.append(task_result)
task_result.set_result(
taskqueue_service_pb.TaskQueueServiceError.SKIPPED)
else:
error_found = True
task_result.set_result(error)
if error_found:
return
if request.add_request(0).has_transaction():
self._TransactionalBulkAdd(request)
elif request.add_request(0).has_app_id():
self._DummyTaskStoreBulkAdd(request, response)
else:
self._NonTransactionalBulkAdd(request, response)
for add_request, task_result in zip(request.add_request_list(),
response.taskresult_list()):
if (task_result.result() ==
taskqueue_service_pb.TaskQueueServiceError.SKIPPED):
task_result.set_result(taskqueue_service_pb.TaskQueueServiceError.OK)
if task_result in task_results_with_chosen_names:
task_result.set_chosen_task_name(add_request.task_name())
def _TransactionalBulkAdd(self, request):
"""Uses datastore.AddActions to associate tasks with a transaction.
Args:
request: The taskqueue_service_pb.TaskQueueBulkAddRequest containing the
tasks to add. N.B. all tasks in the request have been validated and
assigned unique names.
"""
try:
apiproxy_stub_map.MakeSyncCall(
'datastore_v3', 'AddActions', request, api_base_pb.VoidProto())
except apiproxy_errors.ApplicationError, e:
raise apiproxy_errors.ApplicationError(
e.application_error +
taskqueue_service_pb.TaskQueueServiceError.DATASTORE_ERROR,
e.error_detail)
def _DummyTaskStoreBulkAdd(self, request, response):
"""Adds tasks to the appropriate DummyTaskStore.
Args:
request: The taskqueue_service_pb.TaskQueueBulkAddRequest containing the
tasks to add. N.B. all tasks in the request have been validated and
those with empty names have been assigned unique names.
response: The taskqueue_service_pb.TaskQueueBulkAddResponse to populate
with the results. N.B. the chosen_task_name field in the response will
not be filled-in.
"""
store = self.GetDummyTaskStore(request.add_request(0).app_id(),
request.add_request(0).queue_name())
for add_request, task_result in zip(request.add_request_list(),
response.taskresult_list()):
try:
store.Add(add_request)
except apiproxy_errors.ApplicationError, e:
task_result.set_result(e.application_error)
else:
task_result.set_result(taskqueue_service_pb.TaskQueueServiceError.OK)
def _NonTransactionalBulkAdd(self, request, response):
"""Adds tasks to the appropriate list in in self._taskqueues.
Args:
request: The taskqueue_service_pb.TaskQueueBulkAddRequest containing the
tasks to add. N.B. all tasks in the request have been validated and
those with empty names have been assigned unique names.
response: The taskqueue_service_pb.TaskQueueBulkAddResponse to populate
with the results. N.B. the chosen_task_name field in the response will
not be filled-in.
"""
existing_tasks = self._taskqueues.setdefault(
request.add_request(0).queue_name(), [])
existing_task_names = set(task.task_name() for task in existing_tasks)
def DefineCallback(queue_name, task_name):
return lambda: self._RunTask(queue_name, task_name)
for add_request, task_result in zip(request.add_request_list(),
response.taskresult_list()):
if add_request.task_name() in existing_task_names:
task_result.set_result(
taskqueue_service_pb.TaskQueueServiceError.TASK_ALREADY_EXISTS)
else:
existing_tasks.append(add_request)
if self._add_event and self._auto_task_running:
self._add_event(
add_request.eta_usec() / 1000000.0,
DefineCallback(add_request.queue_name(), add_request.task_name()))
existing_tasks.sort(_CompareTasksByEta)
def _IsValidQueue(self, queue_name, app_id):
"""Determines whether a queue is valid, i.e. tasks can be added to it.
Valid queues are the 'default' queue, plus any queues in the queue.yaml
file.
Args:
queue_name: the name of the queue to validate.
app_id: the app_id. Can be None.
Returns:
True iff queue is valid.
"""
if self._all_queues_valid:
return True
if queue_name == DEFAULT_QUEUE_NAME or queue_name == CRON_QUEUE_NAME:
return True
queue_info = self.queue_yaml_parser(self._root_path)
if queue_info and queue_info.queue:
for entry in queue_info.queue:
if entry.name == queue_name:
return True
if app_id is not None:
queues = self._app_queues.get(app_id, {})
return queues.get(queue_name, None) is not None
return False
def _RunTask(self, queue_name, task_name):
"""Returns a fake request for running a task in the dev_appserver.
Args:
queue_name: The queue the task is in.
task_name: The name of the task to run.
Returns:
None if this task no longer exists or tuple (connection, addrinfo) of
a fake connection and address information used to run this task. The
task will be deleted after it runs or re-enqueued in the future on
failure.
"""
task_list = self.GetTasks(queue_name)
for task in task_list:
if task['name'] == task_name:
break
else:
return None
class FakeConnection(object):
def __init__(self, input_buffer):
self.rfile = StringIO.StringIO(input_buffer)
self.wfile = StringIO.StringIO()
self.wfile_close = self.wfile.close
self.wfile.close = self.connection_done
def connection_done(myself):
result = myself.wfile.getvalue()
myself.wfile_close()
first_line, rest = (result.split('\n', 1) + ['', ''])[:2]
version, code, rest = (first_line.split(' ', 2) + ['', '500', ''])[:3]
try:
code = int(code)
except ValueError:
code = 500
if 200 <= int(code) <= 299:
self.DeleteTask(queue_name, task_name)
return
logging.warning('Task named "%s" on queue "%s" failed with code %s; '
'will retry in %d seconds',
task_name, queue_name, code, self._task_retry_seconds)
self._add_event(
time.time() + self._task_retry_seconds,
lambda: self._RunTask(queue_name, task_name))
def close(self):
pass
def makefile(self, mode, buffsize):
if mode.startswith('w'):
return self.wfile
else:
return self.rfile
payload = StringIO.StringIO()
payload.write('%s %s HTTP/1.1\r\n' % (task['method'], task['url']))
for key, value in task['headers']:
payload.write('%s: %s\r\n' % (key, value))
payload.write('\r\n')
payload.write(task['body'])
return FakeConnection(payload.getvalue()), ('0.1.0.2', 80)
def GetQueues(self):
"""Gets all the applications's queues.
Returns:
A list of dictionaries, where each dictionary contains one queue's
attributes. E.g.:
[{'name': 'some-queue',
'max_rate': '1/s',
'bucket_size': 5,
'oldest_task': '2009/02/02 05:37:42',
'eta_delta': '0:00:06.342511 ago',
'tasks_in_queue': 12}, ...]
The list of queues always includes the default queue.
"""
queues = []
queue_info = self.queue_yaml_parser(self._root_path)
has_default = False
if queue_info and queue_info.queue:
for entry in queue_info.queue:
if entry.name == DEFAULT_QUEUE_NAME:
has_default = True
queue = {}
queues.append(queue)
queue['name'] = entry.name
queue['max_rate'] = entry.rate
if entry.bucket_size:
queue['bucket_size'] = entry.bucket_size
else:
queue['bucket_size'] = DEFAULT_BUCKET_SIZE
tasks = self._taskqueues.setdefault(entry.name, [])
if tasks:
queue['oldest_task'] = _FormatEta(tasks[0].eta_usec())
queue['eta_delta'] = _EtaDelta(tasks[0].eta_usec())
else:
queue['oldest_task'] = ''
queue['tasks_in_queue'] = len(tasks)
if not has_default:
queue = {}
queues.append(queue)
queue['name'] = DEFAULT_QUEUE_NAME
queue['max_rate'] = DEFAULT_RATE
queue['bucket_size'] = DEFAULT_BUCKET_SIZE
tasks = self._taskqueues.get(DEFAULT_QUEUE_NAME, [])
if tasks:
queue['oldest_task'] = _FormatEta(tasks[0].eta_usec())
queue['eta_delta'] = _EtaDelta(tasks[0].eta_usec())
else:
queue['oldest_task'] = ''
queue['tasks_in_queue'] = len(tasks)
return queues
def GetTasks(self, queue_name):
"""Gets a queue's tasks.
Args:
queue_name: Queue's name to return tasks for.
Returns:
A list of dictionaries, where each dictionary contains one task's
attributes. E.g.
[{'name': 'task-123',
'queue_name': 'default',
'url': '/update',
'method': 'GET',
'eta': '2009/02/02 05:37:42',
'eta_delta': '0:00:06.342511 ago',
'body': '',
'headers': [('user-header', 'some-value'),
('X-AppEngine-QueueName', 'update-queue'),
('X-AppEngine-TaskName', 'task-123'),
('X-AppEngine-TaskRetryCount', '0'),
('X-AppEngine-Development-Payload', '1'),
('Content-Length', 0),
('Content-Type', 'application/octet-stream')]}, ...]
Raises:
ValueError: A task request contains an unknown HTTP method type.
"""
tasks = self._taskqueues.get(queue_name, [])
result_tasks = []
for task_request in tasks:
task = {}
result_tasks.append(task)
task['name'] = task_request.task_name()
task['queue_name'] = queue_name
task['url'] = task_request.url()
method = task_request.method()
if method == taskqueue_service_pb.TaskQueueAddRequest.GET:
task['method'] = 'GET'
elif method == taskqueue_service_pb.TaskQueueAddRequest.POST:
task['method'] = 'POST'
elif method == taskqueue_service_pb.TaskQueueAddRequest.HEAD:
task['method'] = 'HEAD'
elif method == taskqueue_service_pb.TaskQueueAddRequest.PUT:
task['method'] = 'PUT'
elif method == taskqueue_service_pb.TaskQueueAddRequest.DELETE:
task['method'] = 'DELETE'
else:
raise ValueError('Unexpected method: %d' % method)
task['eta'] = _FormatEta(task_request.eta_usec())
task['eta_delta'] = _EtaDelta(task_request.eta_usec())
task['body'] = base64.b64encode(task_request.body())
headers = [(header.key(), header.value())
for header in task_request.header_list()
if header.key().lower() not in BUILT_IN_HEADERS]
headers.append(('X-AppEngine-QueueName', queue_name))
headers.append(('X-AppEngine-TaskName', task['name']))
headers.append(('X-AppEngine-TaskRetryCount', '0'))
headers.append(('X-AppEngine-Development-Payload', '1'))
headers.append(('Content-Length', len(task['body'])))
if 'content-type' not in frozenset(key.lower() for key, _ in headers):
headers.append(('Content-Type', 'application/octet-stream'))
task['headers'] = headers
return result_tasks
def DeleteTask(self, queue_name, task_name):
"""Deletes a task from a queue.
Args:
queue_name: the name of the queue to delete the task from.
task_name: the name of the task to delete.
"""
tasks = self._taskqueues.get(queue_name, [])
for task in tasks:
if task.task_name() == task_name:
tasks.remove(task)
return
def FlushQueue(self, queue_name):
"""Removes all tasks from a queue.
Args:
queue_name: the name of the queue to remove tasks from.
"""
self._taskqueues[queue_name] = []
def _Dynamic_UpdateQueue(self, request, unused_response):
"""Local implementation of the UpdateQueue RPC in TaskQueueService.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueUpdateQueueRequest.
unused_response: A taskqueue_service_pb.TaskQueueUpdateQueueResponse.
Not used.
"""
queues = self._app_queues.setdefault(request.app_id(), {})
if request.queue_name() in queues and queues[request.queue_name()] is None:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_QUEUE)
defensive_copy = self._QueueDetails()
defensive_copy.CopyFrom(request)
queues[request.queue_name()] = defensive_copy
def _Dynamic_FetchQueues(self, request, response):
"""Local implementation of the FetchQueues RPC in TaskQueueService.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueFetchQueuesRequest.
response: A taskqueue_service_pb.TaskQueueFetchQueuesResponse.
"""
queues = self._app_queues.get(request.app_id(), {})
for unused_key, queue in sorted(queues.items()):
if request.max_rows() == response.queue_size():
break
if queue is None:
continue
response_queue = response.add_queue()
response_queue.set_queue_name(queue.queue_name())
response_queue.set_bucket_refill_per_second(
queue.bucket_refill_per_second())
response_queue.set_bucket_capacity(queue.bucket_capacity())
response_queue.set_user_specified_rate(queue.user_specified_rate())
if queue.has_max_concurrent_requests():
response_queue.set_max_concurrent_requests(
queue.max_concurrent_requests())
response_queue.set_paused(queue.paused)
def _Dynamic_FetchQueueStats(self, request, response):
"""Local 'random' implementation of the TaskQueueService.FetchQueueStats.
This implementation loads some stats from the dummy store and fills in
the rest with random numbers.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueFetchQueueStatsRequest.
response: A taskqueue_service_pb.TaskQueueFetchQueueStatsResponse.
"""
for queue in request.queue_name_list():
store = self.GetDummyTaskStore(request.app_id(), queue)
stats = response.add_queuestats()
stats.set_num_tasks(store.Count())
if stats.num_tasks() == 0:
stats.set_oldest_eta_usec(-1)
else:
stats.set_oldest_eta_usec(store.Oldest())
if random.randint(0, 9) > 0:
scanner_info = stats.mutable_scanner_info()
scanner_info.set_executed_last_minute(random.randint(0, 10))
scanner_info.set_executed_last_hour(scanner_info.executed_last_minute()
+ random.randint(0, 100))
scanner_info.set_sampling_duration_seconds(random.random() * 10000.0)
scanner_info.set_requests_in_flight(random.randint(0, 10))
def GetDummyTaskStore(self, app_id, queue_name):
"""Get the dummy task store for this app_id/queue_name pair.
Creates and populates an entry if one does not already exist.
Args:
app_id: the app_id.
queue_name: the queue_name.
Returns:
the existing or the new dummy store.
"""
task_store_key = (app_id, queue_name)
if task_store_key not in admin_console_dummy_tasks:
store = _DummyTaskStore()
if not self._all_queues_valid and queue_name != CRON_QUEUE_NAME:
store.Populate(random.randint(10, 100))
admin_console_dummy_tasks[task_store_key] = store
else:
store = admin_console_dummy_tasks[task_store_key]
return store
def _Dynamic_QueryTasks(self, request, response):
"""Local implementation of the TaskQueueService.QueryTasks RPC.
Uses the dummy store, creating tasks if this is the first time the
queue has been seen.
Args:
request: A taskqueue_service_pb.TaskQueueQueryTasksRequest.
response: A taskqueue_service_pb.TaskQueueQueryTasksResponse.
"""
store = self.GetDummyTaskStore(request.app_id(), request.queue_name())
if request.has_start_eta_usec():
tasks = store.Lookup(request.max_rows(), name=request.start_task_name(),
eta=request.start_eta_usec())
else:
tasks = store.Lookup(request.max_rows(), name=request.start_task_name())
for task in tasks:
response.add_task().MergeFrom(task)
def _Dynamic_Delete(self, request, response):
"""Local delete implementation of TaskQueueService.Delete.
Deletes tasks from the dummy store, with a 1/20 chance of a transient error.
Args:
request: A taskqueue_service_pb.TaskQueueDeleteRequest.
response: A taskqueue_service_pb.TaskQueueDeleteResponse.
"""
task_store_key = (request.app_id(), request.queue_name())
if task_store_key not in admin_console_dummy_tasks:
for _ in request.task_name_list():
response.add_result(
taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
return
store = admin_console_dummy_tasks[task_store_key]
for taskname in request.task_name_list():
if random.random() <= 0.05:
response.add_result(
taskqueue_service_pb.TaskQueueServiceError.TRANSIENT_ERROR)
else:
response.add_result(store.Delete(taskname))
def _Dynamic_ForceRun(self, request, response):
"""Local force run implementation of TaskQueueService.ForceRun.
Forces running of a task in a queue. This is a no-op here.
This will fail randomly for testing.
Args:
request: A taskqueue_service_pb.TaskQueueForceRunRequest.
response: A taskqueue_service_pb.TaskQueueForceRunResponse.
"""
if random.random() <= 0.05:
response.set_result(
taskqueue_service_pb.TaskQueueServiceError.TRANSIENT_ERROR)
elif random.random() <= 0.052:
response.set_result(
taskqueue_service_pb.TaskQueueServiceError.INTERNAL_ERROR)
else:
response.set_result(
taskqueue_service_pb.TaskQueueServiceError.OK)
def _Dynamic_DeleteQueue(self, request, response):
"""Local delete implementation of TaskQueueService.DeleteQueue.
Args:
request: A taskqueue_service_pb.TaskQueueDeleteQueueRequest.
response: A taskqueue_service_pb.TaskQueueDeleteQueueResponse.
"""
if not request.queue_name():
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_NAME)
queues = self._app_queues.get(request.app_id(), {})
if request.queue_name() not in queues:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
elif queues[request.queue_name()] is None:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_QUEUE)
queues[request.queue_name()] = None
def _Dynamic_PauseQueue(self, request, response):
"""Local pause implementation of TaskQueueService.PauseQueue.
Args:
request: A taskqueue_service_pb.TaskQueuePauseQueueRequest.
response: A taskqueue_service_pb.TaskQueuePauseQueueResponse.
"""
if not request.queue_name():
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_NAME)
queues = self._app_queues.get(request.app_id(), {})
if request.queue_name() != DEFAULT_QUEUE_NAME:
if request.queue_name() not in queues:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
elif queues[request.queue_name()] is None:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_QUEUE)
queues[request.queue_name()].paused = request.pause()
def _Dynamic_PurgeQueue(self, request, response):
"""Local purge implementation of TaskQueueService.PurgeQueue.
Args:
request: A taskqueue_service_pb.TaskQueuePurgeQueueRequest.
response: A taskqueue_service_pb.TaskQueuePurgeQueueResponse.
"""
if not request.queue_name():
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_NAME)
if request.has_app_id():
queues = self._app_queues.get(request.app_id(), {})
if request.queue_name() != DEFAULT_QUEUE_NAME:
if request.queue_name() not in queues:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
elif queues[request.queue_name()] is None:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_QUEUE)
store = self.GetDummyTaskStore(request.app_id(), request.queue_name())
for task in store.Lookup(store.Count()):
store.Delete(task.task_name())
elif (not self._IsValidQueue(request.queue_name(), None)
and not request.queue_name() in self._taskqueues):
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
self.FlushQueue(request.queue_name())
def _Dynamic_DeleteGroup(self, request, response):
"""Local delete implementation of TaskQueueService.DeleteGroup.
Args:
request: A taskqueue_service_pb.TaskQueueDeleteGroupRequest.
response: A taskqueue_service_pb.TaskQueueDeleteGroupResponse.
"""
queues = self._app_queues.get(request.app_id(), {})
for queue in queues.iterkeys():
store = self.GetDummyTaskStore(request.app_id(), queue)
for task in store.Lookup(store.Count()):
store.Delete(task.task_name())
self.FlushQueue(queue)
self._app_queues[request.app_id()] = {}
def _Dynamic_UpdateStorageLimit(self, request, response):
"""Local implementation of TaskQueueService.UpdateStorageLimit.
Args:
request: A taskqueue_service_pb.TaskQueueUpdateStorageLimitRequest.
response: A taskqueue_service_pb.TaskQueueUpdateStorageLimitResponse.
"""
if request.limit() < 0 or request.limit() > 1000 * (1024 ** 4):
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST)
response.set_new_limit(request.limit())
|
shownomercy/django
|
refs/heads/master
|
tests/gis_tests/test_geoip.py
|
48
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import unittest
from unittest import skipUnless
from django.conf import settings
from django.contrib.gis.geoip import HAS_GEOIP
from django.contrib.gis.geos import HAS_GEOS, GEOSGeometry
from django.utils import six
if HAS_GEOIP:
from django.contrib.gis.geoip import GeoIP, GeoIPException
# Note: Requires use of both the GeoIP country and city datasets.
# The GEOIP_PATH setting should be the only setting set (the directory
# should contain links or the actual database files 'GeoIP.dat' and
# 'GeoLiteCity.dat').
@skipUnless(HAS_GEOIP and getattr(settings, "GEOIP_PATH", None),
"GeoIP is required along with the GEOIP_PATH setting.")
class GeoIPTest(unittest.TestCase):
def test01_init(self):
"Testing GeoIP initialization."
g1 = GeoIP() # Everything inferred from GeoIP path
path = settings.GEOIP_PATH
g2 = GeoIP(path, 0) # Passing in data path explicitly.
g3 = GeoIP.open(path, 0) # MaxMind Python API syntax.
for g in (g1, g2, g3):
self.assertTrue(g._country)
self.assertTrue(g._city)
# Only passing in the location of one database.
city = os.path.join(path, 'GeoLiteCity.dat')
cntry = os.path.join(path, 'GeoIP.dat')
g4 = GeoIP(city, country='')
self.assertIsNone(g4._country)
g5 = GeoIP(cntry, city='')
self.assertIsNone(g5._city)
# Improper parameters.
bad_params = (23, 'foo', 15.23)
for bad in bad_params:
self.assertRaises(GeoIPException, GeoIP, cache=bad)
if isinstance(bad, six.string_types):
e = GeoIPException
else:
e = TypeError
self.assertRaises(e, GeoIP, bad, 0)
def test02_bad_query(self):
"Testing GeoIP query parameter checking."
cntry_g = GeoIP(city='<foo>')
# No city database available, these calls should fail.
self.assertRaises(GeoIPException, cntry_g.city, 'google.com')
self.assertRaises(GeoIPException, cntry_g.coords, 'yahoo.com')
# Non-string query should raise TypeError
self.assertRaises(TypeError, cntry_g.country_code, 17)
self.assertRaises(TypeError, cntry_g.country_name, GeoIP)
def test03_country(self):
"Testing GeoIP country querying methods."
g = GeoIP(city='<foo>')
fqdn = 'www.google.com'
addr = '12.215.42.19'
for query in (fqdn, addr):
for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
self.assertEqual('US', func(query))
for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
self.assertEqual('United States', func(query))
self.assertEqual({'country_code': 'US', 'country_name': 'United States'},
g.country(query))
@skipUnless(HAS_GEOS, "Geos is required")
def test04_city(self):
"Testing GeoIP city querying methods."
g = GeoIP(country='<foo>')
addr = '128.249.1.1'
fqdn = 'tmc.edu'
for query in (fqdn, addr):
# Country queries should still work.
for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
self.assertEqual('US', func(query))
for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
self.assertEqual('United States', func(query))
self.assertEqual({'country_code': 'US', 'country_name': 'United States'},
g.country(query))
# City information dictionary.
d = g.city(query)
self.assertEqual('USA', d['country_code3'])
self.assertEqual('Houston', d['city'])
self.assertEqual('TX', d['region'])
self.assertEqual(713, d['area_code'])
geom = g.geos(query)
self.assertIsInstance(geom, GEOSGeometry)
lon, lat = (-95.4010, 29.7079)
lat_lon = g.lat_lon(query)
lat_lon = (lat_lon[1], lat_lon[0])
for tup in (geom.tuple, g.coords(query), g.lon_lat(query), lat_lon):
self.assertAlmostEqual(lon, tup[0], 4)
self.assertAlmostEqual(lat, tup[1], 4)
def test05_unicode_response(self):
"Testing that GeoIP strings are properly encoded, see #16553."
g = GeoIP()
d = g.city("duesseldorf.de")
self.assertEqual('Düsseldorf', d['city'])
d = g.country('200.26.205.1')
# Some databases have only unaccented countries
self.assertIn(d['country_name'], ('Curaçao', 'Curacao'))
|
agaffney/ansible
|
refs/heads/devel
|
test/units/utils/display/test_display.py
|
52
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.utils.display import Display
def test_display_basic_message(capsys, mocker):
# Disable logging
mocker.patch('ansible.utils.display.logger', return_value=None)
d = Display()
d.display(u'Some displayed message')
out, err = capsys.readouterr()
assert out == 'Some displayed message\n'
assert err == ''
|
witwall/gyp
|
refs/heads/master
|
test/win/gyptest-link-large-pdb.py
|
218
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure msvs_large_pdb works correctly.
"""
import TestGyp
import struct
import sys
CHDIR = 'large-pdb'
def CheckImageAndPdb(test, image_basename, expected_page_size,
pdb_basename=None):
if not pdb_basename:
pdb_basename = image_basename + '.pdb'
test.built_file_must_exist(image_basename, chdir=CHDIR)
test.built_file_must_exist(pdb_basename, chdir=CHDIR)
# We expect the PDB to have the given page size. For full details of the
# header look here: https://code.google.com/p/pdbparser/wiki/MSF_Format
# We read the little-endian 4-byte unsigned integer at position 32 of the
# file.
pdb_path = test.built_file_path(pdb_basename, chdir=CHDIR)
  pdb_file = open(pdb_path, 'rb')
  pdb_file.seek(32, 0)
  page_size = struct.unpack('<I', pdb_file.read(4))[0]
  pdb_file.close()
  if page_size != expected_page_size:
    print "Expected page size of %d, got %d for PDB file `%s'." % (
        expected_page_size, page_size, pdb_path)
    test.fail_test()
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
test.run_gyp('large-pdb.gyp', chdir=CHDIR)
test.build('large-pdb.gyp', 'large_pdb_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'large_pdb_exe.exe', 4096)
test.build('large-pdb.gyp', 'small_pdb_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'small_pdb_exe.exe', 1024)
test.build('large-pdb.gyp', 'large_pdb_dll', chdir=CHDIR)
CheckImageAndPdb(test, 'large_pdb_dll.dll', 4096)
test.build('large-pdb.gyp', 'small_pdb_dll', chdir=CHDIR)
CheckImageAndPdb(test, 'small_pdb_dll.dll', 1024)
test.build('large-pdb.gyp', 'large_pdb_implicit_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'large_pdb_implicit_exe.exe', 4096)
# This target has a different PDB name because it uses an
# 'msvs_large_pdb_path' variable.
test.build('large-pdb.gyp', 'large_pdb_variable_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'large_pdb_variable_exe.exe', 4096,
pdb_basename='foo.pdb')
# This target has a different output name because it uses 'product_name'.
test.build('large-pdb.gyp', 'large_pdb_product_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'bar.exe', 4096)
test.pass_test()
|
IITM-DONLAB/python-dnn
|
refs/heads/master
|
src/pythonDnn/run/run_DBN.py
|
1
|
#!/usr/bin/env python2.7
# Copyright 2014 G.K SUDHARSHAN <sudharpun90@gmail.com> IIT Madras
# Copyright 2014 Abil N George <mail@abilng.in> IIT Madras
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
#lib imports
import sys
import time
import numpy
import theano
from theano.tensor.shared_randomstreams import RandomStreams
#module imports
from pythonDnn.utils.load_conf import load_model,load_rbm_spec,load_data_spec
from pythonDnn.models.dbn import DBN
from pythonDnn.io_modules.file_reader import read_dataset
from pythonDnn.io_modules.data_exporter import export_data
from pythonDnn.io_modules import setLogger
from pythonDnn.utils.utils import parse_activation
from pythonDnn.run import fineTunning,testing,exportFeatures
from pythonDnn.run import createDir
import logging
logger = logging.getLogger(__name__)
def getFunction(dbn):
(op,k) = dbn.rbm_layers[-1].propdown(dbn.features);
in_x = dbn.x.type('in_x');
fn = theano.function(inputs=[in_x],outputs=op,
givens={dbn.x: in_x},name='re')#,on_unused_input='warn')
return fn
def preTraining(dbn,train_sets,pretrain_config):
train_xy = train_sets.shared_xy
train_x = train_sets.shared_x
logger.info('Getting the pretraining functions....')
batch_size = train_sets.batch_size;
pretrainingEpochs = pretrain_config['epochs']
keep_layer_num=pretrain_config['keep_layer_num']
initialMomentum = pretrain_config['initial_momentum']
initMomentumEpochs = pretrain_config['initial_momentum_epoch']
finalMomentum = pretrain_config['final_momentum']
pretraining_fns = dbn.pretraining_functions(train_set_x=train_x,
batch_size=batch_size,
weight_cost = 0.0002)
logger.info('Pre-training the model ...')
start_time = time.clock()
## Pre-train layer-wise
for i in range(keep_layer_num, dbn.nPreTrainLayers):
if (dbn.rbm_layers[i].is_gbrbm()):
pretrain_lr = pretrain_config['gbrbm_learning_rate']
else:
pretrain_lr = pretrain_config['learning_rate']
# go through pretraining epochs
momentum = initialMomentum
for epoch in xrange(pretrainingEpochs):
# go through the training set
if (epoch > initMomentumEpochs):
momentum = finalMomentum
r_c, fe_c = [], [] # keep record of reconstruction and free-energy cost
while not train_sets.is_finish():
for batch_index in xrange(train_sets.cur_frame_num / batch_size):
# loop over mini-batches
#logger.info("Training For epoch %d and batch %d",epoch,batch_index)
[reconstruction_cost, free_energy_cost] = pretraining_fns[i](index=batch_index,
lr=pretrain_lr,
momentum=momentum)
logger.debug('Training batch %d reconstruction cost=%f,free_energy_cost=%f',
batch_index,reconstruction_cost,free_energy_cost);
r_c.append(reconstruction_cost)
fe_c.append(free_energy_cost)
train_sets.read_next_partition_data()
logger.info('Training layer %i, epoch %d, reconstruction cost=%f,free_energy_cost=%f',
i, epoch, numpy.mean(r_c), numpy.mean(fe_c))
train_sets.initialize_read()
end_time = time.clock()
    logger.info('The PreTraining ran for %.2fm' % ((end_time - start_time) / 60.))
def runRBM(arg):
if type(arg) is dict:
model_config = arg
else :
model_config = load_model(arg,'RBM')
rbm_config = load_rbm_spec(model_config['nnet_spec'])
data_spec = load_data_spec(model_config['data_spec'],model_config['batch_size']);
#generating Random
numpy_rng = numpy.random.RandomState(model_config['random_seed'])
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
activationFn = parse_activation(rbm_config['activation']);
    #create working dir
    createDir(model_config['wdir']);
batch_size = model_config['batch_size']
wdir = model_config['wdir']
dbn = DBN(numpy_rng=numpy_rng, theano_rng = theano_rng, n_ins=model_config['n_ins'],
hidden_layers_sizes=rbm_config['hidden_layers'],n_outs=model_config['n_outs'],
first_layer_gb = rbm_config['first_layer_gb'],
pretrainedLayers=rbm_config['pretrained_layers'],
activation=activationFn)
logger.info("Loading Pretrained network weights")
try:
# pretraining
ptr_file = model_config['input_file']
dbn.load(filename=ptr_file)
except KeyError, e:
logger.info("KeyMissing:"+str(e));
logger.info("Pretrained network Missing in configFile: Skipping Loading");
except IOError, e:
logger.error("IOError:"+str(e));
        logger.error('Model cannot be initialized from input file')
sys.exit(2)
#########################
# PRETRAINING THE MODEL #
#########################
if model_config['processes']['pretraining']:
train_sets = read_dataset(data_spec['training'])
preTraining(dbn,train_sets,model_config['pretrain_params'])
del train_sets;
########################
# FINETUNING THE MODEL #
########################
if model_config['processes']['finetuning']:
fineTunning(dbn,model_config,data_spec)
########################
# TESTING THE MODEL #
########################
if model_config['processes']['testing']:
testing(dbn,data_spec)
##########################
# Export Features ##
##########################
if model_config['processes']['export_data']:
exportFeatures(dbn,model_config,data_spec)
logger.info('Saving model to ' + str(model_config['output_file']) + '....')
dbn.save(filename=model_config['output_file'])
logger.info('Saved model to ' + str(model_config['output_file']))
if __name__ == '__main__':
import sys
setLogger();
runRBM(sys.argv[1])
|
alistairlow/tensorflow
|
refs/heads/master
|
tensorflow/python/kernel_tests/bcast_ops_test.py
|
102
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.bcast_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.gen_array_ops import _broadcast_args
from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args
from tensorflow.python.platform import test
class BcastOpsTest(test.TestCase):
def _GetBroadcastShape(self, xs, ys):
with self.test_session() as sess:
return sess.run(_broadcast_args(xs, ys))
def _GetGradientArgs(self, xs, ys):
with self.test_session() as sess:
return sess.run(_broadcast_gradient_args(xs, ys))
def testBasic(self):
r = self._GetBroadcastShape([2, 3, 5], [1])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([1], [2, 3, 5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([2, 3, 5], [5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([5], [2, 3, 5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([2, 3, 5], [3, 5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([3, 5], [2, 3, 5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([2, 3, 5], [3, 1])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([3, 1], [2, 3, 5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([2, 1, 5], [3, 1])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([3, 1], [2, 1, 5])
self.assertAllEqual(r, [2, 3, 5])
def testBasicGradient(self):
r0, r1 = self._GetGradientArgs([2, 3, 5], [1])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1, 2])
r0, r1 = self._GetGradientArgs([1], [2, 3, 5])
self.assertAllEqual(r0, [0, 1, 2])
self.assertAllEqual(r1, [])
r0, r1 = self._GetGradientArgs([2, 3, 5], [5])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1])
r0, r1 = self._GetGradientArgs([5], [2, 3, 5])
self.assertAllEqual(r0, [0, 1])
self.assertAllEqual(r1, [])
r0, r1 = self._GetGradientArgs([2, 3, 5], [3, 5])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0])
r0, r1 = self._GetGradientArgs([3, 5], [2, 3, 5])
self.assertAllEqual(r0, [0])
self.assertAllEqual(r1, [])
r0, r1 = self._GetGradientArgs([2, 3, 5], [3, 1])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 2])
r0, r1 = self._GetGradientArgs([3, 1], [2, 3, 5])
self.assertAllEqual(r0, [0, 2])
self.assertAllEqual(r1, [])
r0, r1 = self._GetGradientArgs([2, 1, 5], [3, 1])
self.assertAllEqual(r0, [1])
self.assertAllEqual(r1, [0, 2])
r0, r1 = self._GetGradientArgs([3, 1], [2, 1, 5])
self.assertAllEqual(r0, [0, 2])
self.assertAllEqual(r1, [1])
def testZeroDims(self):
r = self._GetBroadcastShape([2, 0, 3, 0, 5], [3, 0, 5])
self.assertAllEqual(r, [2, 0, 3, 0, 5])
r = self._GetBroadcastShape([3, 0, 5], [2, 0, 3, 0, 5])
self.assertAllEqual(r, [2, 0, 3, 0, 5])
r = self._GetBroadcastShape([2, 0, 3, 0, 5], [3, 1, 5])
self.assertAllEqual(r, [2, 0, 3, 0, 5])
r = self._GetBroadcastShape([3, 1, 5], [2, 0, 3, 0, 5])
self.assertAllEqual(r, [2, 0, 3, 0, 5])
def testZeroDimsGradient(self):
r0, r1 = self._GetGradientArgs([2, 0, 3, 0, 5], [3, 0, 5])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1])
r0, r1 = self._GetGradientArgs([3, 0, 5], [2, 0, 3, 0, 5])
self.assertAllEqual(r0, [0, 1])
self.assertAllEqual(r1, [])
r0, r1 = self._GetGradientArgs([2, 0, 3, 0, 5], [3, 1, 5])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1, 3])
r0, r1 = self._GetGradientArgs([3, 1, 5], [2, 0, 3, 0, 5])
self.assertAllEqual(r0, [0, 1, 3])
self.assertAllEqual(r1, [])
if __name__ == "__main__":
test.main()
|
GNUtn/eventoL
|
refs/heads/master
|
eventol/manager/context_processors.py
|
2
|
from django.conf import settings
from manager.models import EventolSetting
def eventol_settings(request):
"""Add eventol settings"""
return {
"PRIVATE_ACTIVITIES": settings.PRIVATE_ACTIVITIES,
"EVENTOL_SETTINGS": EventolSetting.load()
}
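# Registration sketch (project settings assumed): add
# 'manager.context_processors.eventol_settings' to the 'context_processors'
# option of the TEMPLATES setting so templates receive these keys.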
|
unseenlaser/linuxcnc
|
refs/heads/add-hal-ethercat
|
src/emc/usr_intf/axis/scripts/teach-in.py
|
12
|
#!/usr/bin/python
"""Usage:
python teach.py nmlfile outputfile
If outputfile is not specified, writes to standard output.
You must ". scripts/rip-environment" before running this script, if you use
run-in-place.
"""
# Copyright 2007 Jeff Epler <jepler@unpythonic.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import linuxcnc
import Tkinter
import sys
linenumber = 1
if len(sys.argv) > 1:
linuxcnc.nmlfile = sys.argv[1]
if len(sys.argv) > 2:
outfile = sys.argv[2]
sys.stdout = open(outfile, 'w')
s = linuxcnc.stat()
def get_cart():
s.poll()
position = " ".join(["%-8.4f"] * s.axes)
return position % s.position[:s.axes]
def get_joint():
s.poll()
position = " ".join(["%-8.4f"] * s.axes)
return position % s.joint_actual_position[:s.axes]
def log():
    global linenumber
    if world.get():
        p = get_cart()
    else:
        p = get_joint()
    label1.configure(text='Learned: %s' % p)
    print linenumber, p, s.flood, s.mist, s.lube, s.spindle_enabled
    linenumber += 1
def show():
s.poll()
if world.get():
p = get_cart()
else:
p = get_joint()
label2.configure(text='Position: %s' % p)
app.after(100, show)
app = Tkinter.Tk(); app.wm_title('LinuxCNC Teach-In')
world = Tkinter.IntVar(app)
button = Tkinter.Button(app, command=log, text='Learn', font=("helvetica", 14))
button.pack(side='left')
label2 = Tkinter.Label(app, width=60, font='fixed', anchor="w")
label2.pack(side='top')
label1 = Tkinter.Label(app, width=60, font='fixed', text="Learned: (nothing yet)", anchor="w")
label1.pack(side='top')
r1 = Tkinter.Radiobutton(app, text="Joint", variable=world, value=0)
r1.pack(side='left')
r2 = Tkinter.Radiobutton(app, text="World", variable=world, value=1)
r2.pack(side='left')
show()
app.mainloop()
|
int19h/PTVS
|
refs/heads/master
|
Python/Tests/TestData/DebuggerProject/B/module1.py
|
14
|
def do_something():
for j in range(200):
print(j)
|
jpshort/odoo
|
refs/heads/8.0
|
openerp/addons/test_new_api/__openerp__.py
|
204
|
# -*- coding: utf-8 -*-
{
'name': 'Test New API',
'version': '1.0',
'category': 'Tests',
'description': """A module to test the new API.""",
'author': 'OpenERP SA',
'maintainer': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['base'],
'installable': True,
'auto_install': False,
'data': [
'ir.model.access.csv',
'views.xml',
'demo_data.xml',
],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
kivy/kivy
|
refs/heads/master
|
examples/shader/rotated.py
|
9
|
'''
Rotated Shader
==============
This shader example is a modified version of plasma.py that shows how to
rotate areas of fragment shaders bounded by vertex_instructions.
'''
from kivy.app import App
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.graphics import RenderContext
from kivy.properties import StringProperty
from kivy.uix.widget import Widget
# imported early for side effects needed for Shader
import kivy.core.window
shared_code = '''
$HEADER$
uniform float time;
vec4 tex(void)
{
return frag_color * texture2D(texture0, tex_coord0);
}
float plasmaFunc(float n1, float n2, float n3, float n4)
{
vec4 fPos = frag_modelview_mat * gl_FragCoord;
return abs(sin(
sin(sin(fPos.x / n1) + time) +
sin(fPos.y / n2 + time) +
n4 * sin((fPos.x + fPos.y) / n3)));
}
'''
plasma_shader = shared_code + '''
void main(void)
{
float green = plasmaFunc(40., 30., 100., 3.5);
gl_FragColor = vec4(1.0, green, 1.0, 1.0) * tex();
}
'''
plasma_shader2 = shared_code + '''
void main(void)
{
float red = plasmaFunc(30., 20., 10., .5);
gl_FragColor = vec4(red, 1.0, 1.0, 1.0) * tex();
}
'''
class ShaderWidget(Widget):
# property to set the source code for fragment shader
fs = StringProperty(None)
def __init__(self, **kwargs):
# Instead of using Canvas, we will use a RenderContext,
# and change the default fragment shader used.
self.canvas = RenderContext(use_parent_projection=True,
use_parent_modelview=True,
use_parent_frag_modelview=True)
        # call the constructor of the parent;
        # if there are any graphics objects, they will be added to our new canvas
super(ShaderWidget, self).__init__(**kwargs)
# We'll update our glsl variables in a clock
Clock.schedule_interval(self.update_glsl, 1 / 60.)
def update_glsl(self, *largs):
self.canvas['time'] = Clock.get_boottime()
def on_fs(self, instance, value):
# set the fragment shader to our source code
shader = self.canvas.shader
old_value = shader.fs
shader.fs = value
if not shader.success:
shader.fs = old_value
raise Exception('failed')
class RotatedApp(App):
def build(self):
main_widget = Factory.MainWidget()
main_widget.fs = plasma_shader
main_widget.mini.fs = plasma_shader2
return main_widget
if __name__ == '__main__':
RotatedApp().run()
|
DailyActie/Surrogate-Model
|
refs/heads/master
|
surrogate/sorting/utils/__init__.py
|
1
|
# MIT License
#
# Copyright (c) 2016 Daily Actie
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: Quan Pan <quanpan302@hotmail.com>
# License: MIT License
# Create: 2016-12-02
from .isDominated import isDominated
from .splitA import splitA
from .splitB import splitB
from .sweepA import sweepA
from .sweepB import sweepB
__all__ = [
'isDominated',
'sweepA', 'splitA',
'sweepB', 'splitB'
]
|
hzlf/openbroadcast
|
refs/heads/master
|
tools/identify/requests/packages/chardet/langgreekmodel.py
|
235
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
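# Remaining values are frequency ranks: the lower the number, the more
# frequent the character is in Greek text (the convention these models use).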
# Character Mapping Table:
Latin7_CharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
win1253_CharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences: 1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
GreekLangModel = ( \
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
Latin7GreekModel = { \
'charToOrderMap': Latin7_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': constants.False,
'charsetName': "ISO-8859-7"
}
Win1253GreekModel = { \
'charToOrderMap': win1253_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': constants.False,
'charsetName': "windows-1253"
}
|
ryansb/boto
|
refs/heads/master
|
boto/ec2/instance.py
|
94
|
# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an EC2 Instance
"""
import boto
from boto.ec2.ec2object import EC2Object, TaggedEC2Object
from boto.resultset import ResultSet
from boto.ec2.address import Address
from boto.ec2.blockdevicemapping import BlockDeviceMapping
from boto.ec2.image import ProductCodes
from boto.ec2.networkinterface import NetworkInterface
from boto.ec2.group import Group
import base64
class InstanceState(object):
"""
The state of the instance.
:ivar code: The low byte represents the state. The high byte is an
opaque internal value and should be ignored. Valid values:
* 0 (pending)
* 16 (running)
* 32 (shutting-down)
* 48 (terminated)
* 64 (stopping)
* 80 (stopped)
:ivar name: The name of the state of the instance. Valid values:
* "pending"
* "running"
* "shutting-down"
* "terminated"
* "stopping"
* "stopped"
"""
def __init__(self, code=0, name=None):
self.code = code
self.name = name
def __repr__(self):
return '%s(%d)' % (self.name, self.code)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'code':
self.code = int(value)
elif name == 'name':
self.name = value
else:
setattr(self, name, value)
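# Illustrative mapping (sketch only): a terminated instance is reported as
# InstanceState(code=48, name='terminated'), whose repr is "terminated(48)".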
class InstancePlacement(object):
"""
The location where the instance launched.
:ivar zone: The Availability Zone of the instance.
:ivar group_name: The name of the placement group the instance is
in (for cluster compute instances).
:ivar tenancy: The tenancy of the instance (if the instance is
running within a VPC). An instance with a tenancy of dedicated
runs on single-tenant hardware.
"""
def __init__(self, zone=None, group_name=None, tenancy=None):
self.zone = zone
self.group_name = group_name
self.tenancy = tenancy
def __repr__(self):
return self.zone
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'availabilityZone':
self.zone = value
elif name == 'groupName':
self.group_name = value
elif name == 'tenancy':
self.tenancy = value
else:
setattr(self, name, value)
class Reservation(EC2Object):
"""
Represents a Reservation response object.
:ivar id: The unique ID of the Reservation.
:ivar owner_id: The unique ID of the owner of the Reservation.
:ivar groups: A list of Group objects representing the security
groups associated with launched instances.
:ivar instances: A list of Instance objects launched in this
Reservation.
"""
def __init__(self, connection=None):
super(Reservation, self).__init__(connection)
self.id = None
self.owner_id = None
self.groups = []
self.instances = []
def __repr__(self):
return 'Reservation:%s' % self.id
def startElement(self, name, attrs, connection):
if name == 'instancesSet':
self.instances = ResultSet([('item', Instance)])
return self.instances
elif name == 'groupSet':
self.groups = ResultSet([('item', Group)])
return self.groups
else:
return None
def endElement(self, name, value, connection):
if name == 'reservationId':
self.id = value
elif name == 'ownerId':
self.owner_id = value
else:
setattr(self, name, value)
def stop_all(self, dry_run=False):
for instance in self.instances:
instance.stop(dry_run=dry_run)
class Instance(TaggedEC2Object):
"""
Represents an instance.
:ivar id: The unique ID of the Instance.
:ivar groups: A list of Group objects representing the security
groups associated with the instance.
:ivar public_dns_name: The public dns name of the instance.
:ivar private_dns_name: The private dns name of the instance.
:ivar state: The string representation of the instance's current state.
:ivar state_code: An integer representation of the instance's
current state.
:ivar previous_state: The string representation of the instance's
previous state.
    :ivar previous_state_code: An integer representation of the
        instance's previous state.
:ivar key_name: The name of the SSH key associated with the instance.
:ivar instance_type: The type of instance (e.g. m1.small).
:ivar launch_time: The time the instance was launched.
:ivar image_id: The ID of the AMI used to launch this instance.
:ivar placement: The availability zone in which the instance is running.
:ivar placement_group: The name of the placement group the instance
is in (for cluster compute instances).
:ivar placement_tenancy: The tenancy of the instance, if the instance
is running within a VPC. An instance with a tenancy of dedicated
        runs on single-tenant hardware.
:ivar kernel: The kernel associated with the instance.
:ivar ramdisk: The ramdisk associated with the instance.
:ivar architecture: The architecture of the image (i386|x86_64).
:ivar hypervisor: The hypervisor used.
:ivar virtualization_type: The type of virtualization used.
:ivar product_codes: A list of product codes associated with this instance.
    :ivar ami_launch_index: This instance's position within its launch group.
:ivar monitored: A boolean indicating whether monitoring is enabled or not.
:ivar monitoring_state: A string value that contains the actual value
of the monitoring element returned by EC2.
:ivar spot_instance_request_id: The ID of the spot instance request
if this is a spot instance.
:ivar subnet_id: The VPC Subnet ID, if running in VPC.
:ivar vpc_id: The VPC ID, if running in VPC.
:ivar private_ip_address: The private IP address of the instance.
:ivar ip_address: The public IP address of the instance.
:ivar platform: Platform of the instance (e.g. Windows)
:ivar root_device_name: The name of the root device.
:ivar root_device_type: The root device type (ebs|instance-store).
:ivar block_device_mapping: The Block Device Mapping for the instance.
:ivar state_reason: The reason for the most recent state transition.
:ivar groups: List of security Groups associated with the instance.
:ivar interfaces: List of Elastic Network Interfaces associated with
this instance.
:ivar ebs_optimized: Whether instance is using optimized EBS volumes
or not.
:ivar instance_profile: A Python dict containing the instance
profile id and arn associated with this instance.
"""
def __init__(self, connection=None):
super(Instance, self).__init__(connection)
self.id = None
self.dns_name = None
self.public_dns_name = None
self.private_dns_name = None
self.key_name = None
self.instance_type = None
self.launch_time = None
self.image_id = None
self.kernel = None
self.ramdisk = None
self.product_codes = ProductCodes()
self.ami_launch_index = None
self.monitored = False
self.monitoring_state = None
self.spot_instance_request_id = None
self.subnet_id = None
self.vpc_id = None
self.private_ip_address = None
self.ip_address = None
self.requester_id = None
self._in_monitoring_element = False
self.persistent = False
self.root_device_name = None
self.root_device_type = None
self.block_device_mapping = None
self.state_reason = None
self.group_name = None
self.client_token = None
self.eventsSet = None
self.groups = []
self.platform = None
self.interfaces = []
self.hypervisor = None
self.virtualization_type = None
self.architecture = None
self.instance_profile = None
self._previous_state = None
self._state = InstanceState()
self._placement = InstancePlacement()
def __repr__(self):
return 'Instance:%s' % self.id
@property
def state(self):
return self._state.name
@property
def state_code(self):
return self._state.code
@property
def previous_state(self):
if self._previous_state:
return self._previous_state.name
return None
@property
def previous_state_code(self):
if self._previous_state:
return self._previous_state.code
return 0
@property
def placement(self):
return self._placement.zone
@property
def placement_group(self):
return self._placement.group_name
@property
def placement_tenancy(self):
return self._placement.tenancy
def startElement(self, name, attrs, connection):
retval = super(Instance, self).startElement(name, attrs, connection)
if retval is not None:
return retval
if name == 'monitoring':
self._in_monitoring_element = True
elif name == 'blockDeviceMapping':
self.block_device_mapping = BlockDeviceMapping()
return self.block_device_mapping
elif name == 'productCodes':
return self.product_codes
elif name == 'stateReason':
self.state_reason = SubParse('stateReason')
return self.state_reason
elif name == 'groupSet':
self.groups = ResultSet([('item', Group)])
return self.groups
elif name == "eventsSet":
self.eventsSet = SubParse('eventsSet')
return self.eventsSet
elif name == 'networkInterfaceSet':
self.interfaces = ResultSet([('item', NetworkInterface)])
return self.interfaces
elif name == 'iamInstanceProfile':
self.instance_profile = SubParse('iamInstanceProfile')
return self.instance_profile
elif name == 'currentState':
return self._state
elif name == 'previousState':
self._previous_state = InstanceState()
return self._previous_state
elif name == 'instanceState':
return self._state
elif name == 'placement':
return self._placement
return None
def endElement(self, name, value, connection):
if name == 'instanceId':
self.id = value
elif name == 'imageId':
self.image_id = value
elif name == 'dnsName' or name == 'publicDnsName':
self.dns_name = value # backwards compatibility
self.public_dns_name = value
elif name == 'privateDnsName':
self.private_dns_name = value
elif name == 'keyName':
self.key_name = value
elif name == 'amiLaunchIndex':
self.ami_launch_index = value
elif name == 'instanceType':
self.instance_type = value
elif name == 'rootDeviceName':
self.root_device_name = value
elif name == 'rootDeviceType':
self.root_device_type = value
elif name == 'launchTime':
self.launch_time = value
elif name == 'platform':
self.platform = value
elif name == 'kernelId':
self.kernel = value
elif name == 'ramdiskId':
self.ramdisk = value
elif name == 'state':
if self._in_monitoring_element:
self.monitoring_state = value
if value == 'enabled':
self.monitored = True
self._in_monitoring_element = False
elif name == 'spotInstanceRequestId':
self.spot_instance_request_id = value
elif name == 'subnetId':
self.subnet_id = value
elif name == 'vpcId':
self.vpc_id = value
elif name == 'privateIpAddress':
self.private_ip_address = value
elif name == 'ipAddress':
self.ip_address = value
elif name == 'requesterId':
self.requester_id = value
elif name == 'persistent':
if value == 'true':
self.persistent = True
else:
self.persistent = False
elif name == 'groupName':
if self._in_monitoring_element:
self.group_name = value
elif name == 'clientToken':
self.client_token = value
elif name == "eventsSet":
self.events = value
elif name == 'hypervisor':
self.hypervisor = value
elif name == 'virtualizationType':
self.virtualization_type = value
elif name == 'architecture':
self.architecture = value
elif name == 'ebsOptimized':
self.ebs_optimized = (value == 'true')
else:
setattr(self, name, value)
def _update(self, updated):
self.__dict__.update(updated.__dict__)
def update(self, validate=False, dry_run=False):
"""
Update the instance's state information by making a call to fetch
the current instance attributes from the service.
:type validate: bool
:param validate: By default, if EC2 returns no data about the
instance the update method returns quietly. If
the validate param is True, however, it will
raise a ValueError exception if no data is
returned from EC2.
"""
rs = self.connection.get_all_reservations([self.id], dry_run=dry_run)
if len(rs) > 0:
r = rs[0]
for i in r.instances:
if i.id == self.id:
self._update(i)
elif validate:
raise ValueError('%s is not a valid Instance ID' % self.id)
return self.state
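    # Illustrative usage (instance object assumed already fetched):
    #   state = instance.update(validate=True)  # ValueError if EC2 returns no data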
def terminate(self, dry_run=False):
"""
Terminate the instance
"""
rs = self.connection.terminate_instances([self.id], dry_run=dry_run)
if len(rs) > 0:
self._update(rs[0])
def stop(self, force=False, dry_run=False):
"""
Stop the instance
:type force: bool
:param force: Forces the instance to stop
:rtype: list
:return: A list of the instances stopped
"""
rs = self.connection.stop_instances([self.id], force, dry_run=dry_run)
if len(rs) > 0:
self._update(rs[0])
def start(self, dry_run=False):
"""
Start the instance.
"""
rs = self.connection.start_instances([self.id], dry_run=dry_run)
if len(rs) > 0:
self._update(rs[0])
def reboot(self, dry_run=False):
return self.connection.reboot_instances([self.id], dry_run=dry_run)
def get_console_output(self, dry_run=False):
"""
Retrieves the console output for the instance.
:rtype: :class:`boto.ec2.instance.ConsoleOutput`
:return: The console output as a ConsoleOutput object
"""
return self.connection.get_console_output(self.id, dry_run=dry_run)
def confirm_product(self, product_code, dry_run=False):
return self.connection.confirm_product_instance(
self.id,
product_code,
dry_run=dry_run
)
def use_ip(self, ip_address, dry_run=False):
"""
Associates an Elastic IP to the instance.
:type ip_address: Either an instance of
:class:`boto.ec2.address.Address` or a string.
:param ip_address: The IP address to associate
with the instance.
:rtype: bool
:return: True if successful
"""
if isinstance(ip_address, Address):
ip_address = ip_address.public_ip
return self.connection.associate_address(
self.id,
ip_address,
dry_run=dry_run
)
def monitor(self, dry_run=False):
return self.connection.monitor_instance(self.id, dry_run=dry_run)
def unmonitor(self, dry_run=False):
return self.connection.unmonitor_instance(self.id, dry_run=dry_run)
def get_attribute(self, attribute, dry_run=False):
"""
Gets an attribute from this instance.
:type attribute: string
:param attribute: The attribute you need information about
Valid choices are:
* instanceType
* kernel
* ramdisk
* userData
* disableApiTermination
* instanceInitiatedShutdownBehavior
* rootDeviceName
* blockDeviceMapping
* productCodes
* sourceDestCheck
* groupSet
* ebsOptimized
:rtype: :class:`boto.ec2.image.InstanceAttribute`
:return: An InstanceAttribute object representing the value of the
attribute requested
"""
return self.connection.get_instance_attribute(
self.id,
attribute,
dry_run=dry_run
)
def modify_attribute(self, attribute, value, dry_run=False):
"""
Changes an attribute of this instance
:type attribute: string
:param attribute: The attribute you wish to change.
* instanceType - A valid instance type (m1.small)
* kernel - Kernel ID (None)
* ramdisk - Ramdisk ID (None)
* userData - Base64 encoded String (None)
* disableApiTermination - Boolean (true)
* instanceInitiatedShutdownBehavior - stop|terminate
* sourceDestCheck - Boolean (true)
* groupSet - Set of Security Groups or IDs
* ebsOptimized - Boolean (false)
:type value: string
:param value: The new value for the attribute
:rtype: bool
:return: Whether the operation succeeded or not
"""
return self.connection.modify_instance_attribute(
self.id,
attribute,
value,
dry_run=dry_run
)
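    # Illustrative call (attribute/value pair is hypothetical):
    #   instance.modify_attribute('instanceType', 'm1.small')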
def reset_attribute(self, attribute, dry_run=False):
"""
Resets an attribute of this instance to its default value.
:type attribute: string
:param attribute: The attribute to reset. Valid values are:
kernel|ramdisk
:rtype: bool
:return: Whether the operation succeeded or not
"""
return self.connection.reset_instance_attribute(
self.id,
attribute,
dry_run=dry_run
)
def create_image(self, name, description=None, no_reboot=False,
dry_run=False):
"""
Will create an AMI from the instance in the running or stopped
state.
:type name: string
:param name: The name of the new image
:type description: string
:param description: An optional human-readable string describing
the contents and purpose of the AMI.
:type no_reboot: bool
:param no_reboot: An optional flag indicating that the bundling process
should not attempt to shutdown the instance before
bundling. If this flag is True, the responsibility
of maintaining file system integrity is left to the
owner of the instance.
:rtype: string
:return: The new image id
"""
return self.connection.create_image(
self.id,
name,
description,
no_reboot,
dry_run=dry_run
)
class ConsoleOutput(object):
def __init__(self, parent=None):
self.parent = parent
self.instance_id = None
self.timestamp = None
self.output = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'instanceId':
self.instance_id = value
elif name == 'timestamp':
self.timestamp = value
elif name == 'output':
self.output = base64.b64decode(value)
else:
setattr(self, name, value)
class InstanceAttribute(dict):
ValidValues = ['instanceType', 'kernel', 'ramdisk', 'userData',
'disableApiTermination',
'instanceInitiatedShutdownBehavior',
'rootDeviceName', 'blockDeviceMapping', 'sourceDestCheck',
'groupSet']
def __init__(self, parent=None):
dict.__init__(self)
self.instance_id = None
self.request_id = None
self._current_value = None
def startElement(self, name, attrs, connection):
if name == 'blockDeviceMapping':
self[name] = BlockDeviceMapping()
return self[name]
elif name == 'groupSet':
self[name] = ResultSet([('item', Group)])
return self[name]
else:
return None
def endElement(self, name, value, connection):
if name == 'instanceId':
self.instance_id = value
elif name == 'requestId':
self.request_id = value
elif name == 'value':
if value == 'true':
value = True
elif value == 'false':
value = False
self._current_value = value
elif name in self.ValidValues:
self[name] = self._current_value
class SubParse(dict):
def __init__(self, section, parent=None):
dict.__init__(self)
self.section = section
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name != self.section:
self[name] = value
|
mglukhikh/intellij-community
|
refs/heads/master
|
python/lib/Lib/asyncore.py
|
70
|
# -*- Mode: Python -*-
# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
# Author: Sam Rushing <rushing@nightmare.com>
# ======================================================================
# Copyright 1996 by Sam Rushing
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
"""Basic infrastructure for asynchronous socket service clients and servers.
There are only two ways to have a program on a single processor do "more
than one thing at a time". Multi-threaded programming is the simplest and
most popular way to do it, but there is another very different technique
that lets you have nearly all the advantages of multi-threading, without
actually using multiple threads. It's really only practical if your program
is largely I/O bound. If your program is CPU bound, then pre-emptive
scheduled threads are probably what you really need. Network servers are
rarely CPU-bound, however.
If your operating system supports the select() system call in its I/O
library (and nearly all do), then you can use it to juggle multiple
communication channels at once; doing other work while your I/O is taking
place in the "background." Although this strategy can seem strange and
complex, especially at first, it is in many ways easier to understand and
control than multi-threaded programming. The module documented here solves
many of the difficult problems for you, making the task of building
sophisticated high-performance network servers and clients a snap.
"""
import select
import socket
import sys
import time
import os
from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, \
ENOTCONN, ESHUTDOWN, EINTR, EISCONN, errorcode
try:
socket_map
except NameError:
socket_map = {}
class ExitNow(Exception):
pass
def read(obj):
try:
obj.handle_read_event()
except ExitNow:
raise
except:
obj.handle_error()
def write(obj):
try:
obj.handle_write_event()
except ExitNow:
raise
except:
obj.handle_error()
def _exception (obj):
try:
obj.handle_expt_event()
except ExitNow:
raise
except:
obj.handle_error()
def readwrite(obj, flags):
try:
if flags & (select.POLLIN | select.POLLPRI):
obj.handle_read_event()
if flags & select.POLLOUT:
obj.handle_write_event()
if flags & (select.POLLERR | select.POLLHUP | select.POLLNVAL):
obj.handle_expt_event()
except ExitNow:
raise
except:
obj.handle_error()
def poll(timeout=0.0, map=None):
if map is None:
map = socket_map
if map:
r = []; w = []; e = []
for fd, obj in map.items():
is_r = obj.readable()
is_w = obj.writable()
if is_r:
r.append(fd)
if is_w:
w.append(fd)
if is_r or is_w:
e.append(fd)
if [] == r == w == e:
time.sleep(timeout)
else:
try:
r, w, e = select.select(r, w, e, timeout)
except select.error, err:
if err[0] != EINTR:
raise
else:
return
for fd in r:
obj = map.get(fd)
if obj is None:
continue
read(obj)
for fd in w:
obj = map.get(fd)
if obj is None:
continue
write(obj)
for fd in e:
obj = map.get(fd)
if obj is None:
continue
_exception(obj)
def poll2(timeout=0.0, map=None):
# Use the poll() support added to the select module in Python 2.0
if map is None:
map = socket_map
if timeout is not None:
# timeout is in milliseconds
timeout = int(timeout*1000)
pollster = select.poll()
if map:
for fd, obj in map.items():
flags = 0
if obj.readable():
flags |= select.POLLIN | select.POLLPRI
if obj.writable():
flags |= select.POLLOUT
if flags:
# Only check for exceptions if object was either readable
# or writable.
flags |= select.POLLERR | select.POLLHUP | select.POLLNVAL
pollster.register(fd, flags)
try:
r = pollster.poll(timeout)
except select.error, err:
if err[0] != EINTR:
raise
r = []
for fd, flags in r:
obj = map.get(fd)
if obj is None:
continue
readwrite(obj, flags)
poll3 = poll2 # Alias for backward compatibility
def loop(timeout=30.0, use_poll=True, map=None, count=None):
if map is None:
map = socket_map
if use_poll and hasattr(select, 'poll'):
poll_fun = poll2
else:
poll_fun = poll
if count is None:
while map:
poll_fun(timeout, map)
else:
while map and count > 0:
poll_fun(timeout, map)
count = count - 1
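# Usage sketch: after creating one or more dispatcher instances (defined
# below), a single call to loop() polls every registered channel until the
# socket map is empty; loop(timeout=1.0, count=10) would poll at most ten
# times and then return.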
class dispatcher:
debug = False
connected = False
accepting = False
closing = False
addr = None
def __init__(self, sock=None, map=None):
if map is None:
self._map = socket_map
else:
self._map = map
if sock:
self.set_socket(sock, map)
# I think it should inherit this anyway
self.socket.setblocking(0)
self.connected = True
# XXX Does the constructor require that the socket passed
# be connected?
try:
self.addr = sock.getpeername()
except socket.error:
# The addr isn't crucial
pass
else:
self.socket = None
def __repr__(self):
status = [self.__class__.__module__+"."+self.__class__.__name__]
if self.accepting and self.addr:
status.append('listening')
elif self.connected:
status.append('connected')
if self.addr is not None:
try:
status.append('%s:%d' % self.addr)
except TypeError:
status.append(repr(self.addr))
return '<%s at %#x>' % (' '.join(status), id(self))
def add_channel(self, map=None):
#self.log_info('adding channel %s' % self)
if map is None:
map = self._map
map[self._fileno] = self
def del_channel(self, map=None):
fd = self._fileno
if map is None:
map = self._map
if map.has_key(fd):
#self.log_info('closing channel %d:%s' % (fd, self))
del map[fd]
self._fileno = None
def create_socket(self, family, type):
self.family_and_type = family, type
self.socket = socket.socket(family, type)
self.socket.setblocking(0)
self._fileno = self.socket
self.add_channel()
def set_socket(self, sock, map=None):
self.socket = sock
## self.__dict__['socket'] = sock
self._fileno = sock
self.add_channel(map)
def set_reuse_addr(self):
# try to re-use a server port if possible
try:
self.socket.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR,
self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR) | 1
)
except socket.error:
pass
# ==================================================
# predicates for select()
# these are used as filters for the lists of sockets
# to pass to select().
# ==================================================
def readable(self):
return True
def writable(self):
return True
# ==================================================
# socket object methods.
# ==================================================
def listen(self, num):
self.accepting = True
if os.name == 'nt' and num > 5:
num = 1
return self.socket.listen(num)
def bind(self, addr):
self.addr = addr
return self.socket.bind(addr)
def connect(self, address):
self.connected = False
err = self.socket.connect_ex(address)
# XXX Should interpret Winsock return values
if err in (EINPROGRESS, EALREADY, EWOULDBLOCK):
return
if err in (0, EISCONN):
self.addr = address
self.connected = True
self.handle_connect()
else:
raise socket.error, (err, errorcode[err])
def accept(self):
# XXX can return either an address pair or None
try:
conn, addr = self.socket.accept()
return conn, addr
except socket.error, why:
if why[0] == EWOULDBLOCK:
pass
else:
raise
def send(self, data):
try:
result = self.socket.send(data)
return result
except socket.error, why:
if why[0] == EWOULDBLOCK:
return 0
else:
raise
return 0
def recv(self, buffer_size):
try:
data = self.socket.recv(buffer_size)
if not data:
# a closed connection is indicated by signaling
# a read condition, and having recv() return 0.
self.handle_close()
return ''
else:
return data
except socket.error, why:
# winsock sometimes throws ENOTCONN
if why[0] in [ECONNRESET, ENOTCONN, ESHUTDOWN]:
self.handle_close()
return ''
else:
raise
def close(self):
self.del_channel()
self.socket.close()
# cheap inheritance, used to pass all other attribute
# references to the underlying socket object.
def __getattr__(self, attr):
return getattr(self.socket, attr)
# log and log_info may be overridden to provide more sophisticated
# logging and warning methods. In general, log is for 'hit' logging
# and 'log_info' is for informational, warning and error logging.
def log(self, message):
sys.stderr.write('log: %s\n' % str(message))
def log_info(self, message, type='info'):
if __debug__ or type != 'info':
print '%s: %s' % (type, message)
def handle_read_event(self):
if self.accepting:
# for an accepting socket, getting a read implies
# that we are connected
if not self.connected:
self.connected = True
self.handle_accept()
elif not self.connected:
self.handle_connect()
self.connected = True
self.handle_read()
else:
self.handle_read()
def handle_write_event(self):
# getting a write implies that we are connected
if not self.connected:
self.handle_connect()
self.connected = True
self.handle_write()
def handle_expt_event(self):
self.handle_expt()
def handle_error(self):
nil, t, v, tbinfo = compact_traceback()
# sometimes a user repr method will crash.
try:
self_repr = repr(self)
except:
self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
self.log_info(
'uncaptured python exception, closing channel %s (%s:%s %s)' % (
self_repr,
t,
v,
tbinfo
),
'error'
)
self.close()
def handle_expt(self):
self.log_info('unhandled exception', 'warning')
def handle_read(self):
self.log_info('unhandled read event', 'warning')
def handle_write(self):
self.log_info('unhandled write event', 'warning')
def handle_connect(self):
self.log_info('unhandled connect event', 'warning')
def handle_accept(self):
self.log_info('unhandled accept event', 'warning')
def handle_close(self):
self.log_info('unhandled close event', 'warning')
self.close()
# ---------------------------------------------------------------------------
# adds simple buffered output capability, useful for simple clients.
# [for more sophisticated usage use asynchat.async_chat]
# ---------------------------------------------------------------------------
class dispatcher_with_send(dispatcher):
def __init__(self, sock=None, map=None):
dispatcher.__init__(self, sock, map)
self.out_buffer = ''
def initiate_send(self):
num_sent = 0
num_sent = dispatcher.send(self, self.out_buffer[:512])
self.out_buffer = self.out_buffer[num_sent:]
def handle_write(self):
self.initiate_send()
def writable(self):
return (not self.connected) or len(self.out_buffer)
def send(self, data):
if self.debug:
self.log_info('sending %s' % repr(data))
self.out_buffer = self.out_buffer + data
self.initiate_send()
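# Illustrative sketch (not part of the original module): a minimal HTTP
# client built on dispatcher_with_send. The host name in the driving code
# below is hypothetical; the class is only defined here, never
# instantiated, so importing this module behaves exactly as before.
class _example_http_client(dispatcher_with_send):
    def __init__(self, host, path):
        dispatcher_with_send.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connect((host, 80))
        self.request = 'GET %s HTTP/1.0\r\nHost: %s\r\n\r\n' % (path, host)
        self.received = ''
    def handle_connect(self):
        # the connection is established, so the buffered request can go out
        self.send(self.request)
    def handle_read(self):
        self.received = self.received + self.recv(8192)
    def handle_close(self):
        self.close()
# Typical driving code (sketch):
#     _example_http_client('www.example.com', '/index.html')
#     loop()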
# ---------------------------------------------------------------------------
# used for debugging.
# ---------------------------------------------------------------------------
def compact_traceback():
t, v, tb = sys.exc_info()
tbinfo = []
assert tb # Must have a traceback
while tb:
tbinfo.append((
tb.tb_frame.f_code.co_filename,
tb.tb_frame.f_code.co_name,
str(tb.tb_lineno)
))
tb = tb.tb_next
# just to be safe
del tb
file, function, line = tbinfo[-1]
info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])
return (file, function, line), t, v, info
def close_all(map=None):
if map is None:
map = socket_map
for x in map.values():
x.socket.close()
map.clear()
# Asynchronous File I/O:
#
# After a little research (reading man pages on various unixen, and
# digging through the linux kernel), I've determined that select()
# isn't meant for doing asynchronous file i/o.
# Heartening, though - reading linux/mm/filemap.c shows that linux
# supports asynchronous read-ahead. So _MOST_ of the time, the data
# will be sitting in memory for us already when we go to read it.
#
# What other OS's (besides NT) support async file i/o? [VMS?]
#
# Regardless, this is useful for pipes, and stdin/stdout...
if os.name == 'posix':
import fcntl
class file_wrapper:
# here we override just enough to make a file
# look like a socket for the purposes of asyncore.
def __init__(self, fd):
self.fd = fd
def recv(self, *args):
return os.read(self.fd, *args)
def send(self, *args):
return os.write(self.fd, *args)
read = recv
write = send
def close(self):
os.close(self.fd)
def fileno(self):
return self.fd
class file_dispatcher(dispatcher):
def __init__(self, fd, map=None):
dispatcher.__init__(self, None, map)
self.connected = True
self.set_file(fd)
# set it to non-blocking mode
flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
flags = flags | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
def set_file(self, fd):
self._fileno = fd
self.socket = file_wrapper(fd)
self.add_channel()
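    # Usage sketch (posix only, matching the guard above): wrap stdin in a
    # file_dispatcher and override handle_read to consume data as it
    # arrives, e.g.
    #
    #     class stdin_watcher(file_dispatcher):
    #         def handle_read(self):
    #             data = self.recv(512)
    #             if data:
    #                 sys.stdout.write('got: %r\n' % data)
    #
    #     stdin_watcher(0)
    #     loop()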
|
cchurch/ansible
|
refs/heads/devel
|
test/lib/ansible_test/_data/sanity/code-smell/no-illegal-filenames.py
|
68
|
#!/usr/bin/env python
# A script to check for illegal filenames on various operating systems. The
# main rules are derived from restrictions on Windows:
# https://msdn.microsoft.com/en-us/library/aa365247#naming_conventions
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import struct
import sys
from ansible.module_utils.basic import to_bytes
ILLEGAL_CHARS = [
b'<',
b'>',
b':',
b'"',
b'/',
b'\\',
b'|',
b'?',
b'*'
] + [struct.pack("b", i) for i in range(32)]
ILLEGAL_NAMES = [
"CON",
"PRN",
"AUX",
"NUL",
"COM1",
"COM2",
"COM3",
"COM4",
"COM5",
"COM6",
"COM7",
"COM8",
"COM9",
"LPT1",
"LPT2",
"LPT3",
"LPT4",
"LPT5",
"LPT6",
"LPT7",
"LPT8",
"LPT9",
]
ILLEGAL_END_CHARS = [
'.',
' ',
]
def check_path(path, is_dir=False):
type_name = 'directory' if is_dir else 'file'
file_name = os.path.basename(path.rstrip(os.path.sep))
name = os.path.splitext(file_name)[0]
if name.upper() in ILLEGAL_NAMES:
print("%s: illegal %s name %s" % (path, type_name, name.upper()))
if file_name[-1] in ILLEGAL_END_CHARS:
print("%s: illegal %s name end-char '%s'" % (path, type_name, file_name[-1]))
bfile = to_bytes(file_name, encoding='utf-8')
for char in ILLEGAL_CHARS:
if char in bfile:
bpath = to_bytes(path, encoding='utf-8')
print("%s: illegal char '%s' in %s name" % (bpath, char, type_name))
def main():
for path in sys.argv[1:] or sys.stdin.read().splitlines():
check_path(path, is_dir=path.endswith(os.path.sep))
if __name__ == '__main__':
main()
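# Example invocation (hypothetical paths): running
#     python no-illegal-filenames.py "docs/aux.rst" "notes/draft "
# reports an illegal name for "aux" (a reserved device name on Windows) and
# an illegal trailing-space end-char for "draft ".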
|
dladd/pyFormex
|
refs/heads/master
|
pyformex/gui/camera.py
|
1
|
# $Id$
##
## This file is part of pyFormex 0.8.9 (Fri Nov 9 10:49:51 CET 2012)
## pyFormex is a tool for generating, manipulating and transforming 3D
## geometrical models by sequences of mathematical operations.
## Home page: http://pyformex.org
## Project page: http://savannah.nongnu.org/projects/pyformex/
## Copyright 2004-2012 (C) Benedict Verhegghe (benedict.verhegghe@ugent.be)
## Distributed under the GNU General Public License version 3 or later.
##
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
##
"""OpenGL camera handling
"""
from __future__ import print_function
from numpy import *
import arraytools as at
inverse = linalg.linalg.inv
multiply = dot
def tand(arg):
"""Return the tan of an angle in degrees."""
return tan(arg*pi/180.)
import copy
import OpenGL.GL as GL
import OpenGL.GLU as GLU
def printModelviewMatrix(s="%s"):
print(s % GL.glGetFloatv(GL.GL_MODELVIEW_MATRIX))
built_in_views = {
'front': (0.,0.,0.),
'back': (180.,0.,0.),
'right': (90.,0.,0.),
'left': (270.,0.,0.),
'top': (0.,90.,0.),
'bottom': (0.,-90.,0.),
'iso0': (45.,45.,0.),
'iso1': (45.,135.,0.),
'iso2': (45.,225.,0.),
'iso3': (45.,315.,0.),
'iso4': (-45.,45.,0.),
'iso5': (-45.,135.,0.),
'iso6': (-45.,225.,0.),
'iso7': (-45.,315.,0.),
}
class ViewAngles(dict):
"""A dict to keep named camera angle settings.
This class keeps a dictionary of named angle settings. Each value is
a tuple of (longitude, latitude, twist) camera angles.
This is a static class which should not need to be instantiated.
    There are fifteen predefined values: six for looking along the global
    coordinate axes, eight isometric views ('iso0' through 'iso7') and an
    'iso' alias for 'iso0'.
"""
def __init__(self,data = built_in_views):
dict.__init__(self,data)
self['iso'] = self['iso0']
def get(self,name):
"""Get the angles for a named view.
Returns a tuple of angles (longitude, latitude, twist) if the
named view was defined, or None otherwise
"""
return dict.get(self,name,None)
view_angles = ViewAngles()
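# Typical use (sketch): view_angles.get('front') returns (0., 0., 0.),
# view_angles.get('iso') returns the same angles as 'iso0', and an unknown
# name returns None.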
class Camera(object):
"""A camera for OpenGL rendering.
    The Camera class holds all the camera settings related to
    the rendering of a scene in OpenGL. These include camera position,
the viewing direction of the camera, and the lens parameters (opening
angle, front and back clipping planes).
This class also provides convenient methods to change the settings so as
to get smooth camera manipulation.
Camera position and orientation:
The camera viewing line is defined by two points: the position of
the camera and the center of the scene the camera is looking at.
We use the center of the scene as the origin of a local coordinate
system to define the camera position. For convenience, this could be
stored in spherical coordinates, as a distance value and two angles:
longitude and latitude. Furthermore, the camera can also rotate around
its viewing line. We can define this by a third angle, the twist.
From these four values, the needed translation vector and rotation
matrix for the scene rendering may be calculated.
    Inversely however, we cannot compute a unique set of angles from
    a given rotation matrix (a problem related to 'gimbal lock').
As a result, continuous (smooth) camera rotation by e.g. mouse control
requires that the camera orientation be stored as the full rotation
matrix, rather than as three angles. Therefore we store the camera
position and orientation as follows:
- `focus`: `[ x,y,z ]` : the reference point of the camera:
this is always a point on the viewing axis. Usually, it is set to
the center of the scene you are looking at.
- `dist`: distance of the camera to the reference point.
- `rot`: a 3x3 rotation matrix, rotating the global coordinate system
      such that the z-direction is oriented from center to camera.
These values have influence on the ModelView matrix.
Camera lens settings:
The lens parameters define the volume that is seen by the camera.
It is described by the following parameters:
- `fovy`: the vertical lens opening angle (Field Of View Y),
- `aspect`: the aspect ratio (width/height) of the lens. The product
`fovy * aspect` is the horizontal field of view.
- `near, far`: the position of the front and back clipping planes.
They are given as distances from the camera and should both be
strictly positive. Anything that is closer to the camera than
the `near` plane or further away than the `far` plane, will not be
shown on the canvas.
Camera methods that change these values will not directly change
the ModelView matrix. The :meth:`loadModelView` method has to be called
    explicitly to make the settings active.
These values have influence on the Projection matrix.
Methods that change the camera position, orientation or lens parameters
will not directly change the related ModelView or Projection matrix.
They will just flag a change in the camera settings. The changes are
only activated by a call to the :meth:`loadModelView` or
:meth:`loadProjection` method, which will test the flags to see whether
the corresponding matrix needs a rebuild.
The default camera is at distance 1.0 of the center point [0.,0.,0.] and
looking in the -z direction.
Near and far clipping planes are by default set to 0.1, resp 10 times
the camera distance.
"""
# DEVELOPERS:
# The camera class assumes that matrixmode is always ModelView on entry.
# For operations in other modes, an explicit switch before the operations
# and afterwards back to ModelView should be performed.
def __init__(self,center=[0.,0.,0.], long=0., lat=0., twist=0., dist=1.):
"""Create a new camera at position (0,0,0) looking along the -z axis"""
self.locked = False
self.focus = center
self.dist = dist
self.setRotation(long,lat,twist)
self.setLens(45.,4./3.)
self.setClip(0.1,10.)
self.area = None
self.resetArea()
self.keep_aspect = True
self.setPerspective(True)
self.viewChanged = True
self.tracking = False
self.m = self.p = self.v = None
@property
def focus(self):
"""Return the camera reference (focus) point (the scene center)."""
return self._focus
@focus.setter
def focus(self,vector):
"""Set the camera reference point (the focus point).
        The focus is the point the camera is looking at. It is a point on
the camera's optical axis.
- `vector`: (3,) float array: the global coordinates of the focus.
"""
if not self.locked:
self._focus = at.checkArray(vector,(3,),'f')
self.viewChanged = True
@property
def dist(self):
"""Return the camera distance."""
return self._dist
@dist.setter
def dist(self,dist):
"""Set the camera distance."""
if not self.locked:
if dist > 0.0 and dist != inf:
self._dist = dist
self.viewChanged = True
def getRot(self):
"""Return the camera rotation matrix."""
return self.rot
@property
def eye(self):
"""Return the absolute position of the camera."""
return self.toWorld([0.,0.,0.])
@property
def position(self):
"""Return the position of the camera relative to the focus."""
return self.eye - self.focus
def upVector(self):
return self.rot[:3,1].reshape(3)
def lock(self,onoff=True):
"""Lock/unlock a camera.
        When a camera is locked, its position and lens parameters cannot be
changed.
This can e.g. be used in multiple viewports layouts to create fixed
views from different angles.
"""
self.locked = bool(onoff)
def setAngles(self,angles):
"""Set the rotation angles.
angles is either:
- a tuple of angles (long,lat,twist)
- a named view corresponding to angles in view_angles
- None
"""
if not self.locked:
if type(angles) is str:
angles = view_angles.get(angles)
if angles is None:
return
self.setRotation(*angles)
def setRotation(self,long,lat,twist=0):
"""Set the rotation matrix of the camera from three angles."""
if not self.locked:
GL.glMatrixMode(GL.GL_MODELVIEW)
GL.glLoadIdentity()
GL.glRotatef(-twist % 360, 0.0, 0.0, 1.0)
GL.glRotatef(lat % 360, 1.0, 0.0, 0.0)
GL.glRotatef(-long % 360, 0.0, 1.0, 0.0)
self.rot = GL.glGetFloatv(GL.GL_MODELVIEW_MATRIX)
self.viewChanged = True
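        # Note: glRotatef post-multiplies the current matrix, so a point is
        # effectively rotated first over -long about y, then lat about x,
        # then -twist about z; setRotation(0., 0., 0.) yields the identity,
        # i.e. the default front view.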
def report(self):
"""Return a report of the current camera settings."""
return """Camera Settings:
Focus: %s
Position: %s
Distance: %s
Rotation Matrix:
%s
Up Vector: %s
Field of View y: %s
Aspect Ratio: %s
Area: %s, %s
Near/Far Clip: %s, %s
""" % (self.focus,self.eye,self.dist,self.rot,self.upVector(),self.fovy,self.aspect,self.area[0],self.area[1],self.near,self.far)
def dolly(self,val):
"""Move the camera eye towards/away from the scene center.
This has the effect of zooming. A value > 1 zooms out,
a value < 1 zooms in. The resulting enlargement of the view
will approximately be 1/val.
A zero value will move the camera to the center of the scene.
The front and back clipping planes may need adjustment after
a dolly operation.
"""
if not self.locked:
self.dist *= val
#print("DIST %s" % self.dist)
self.viewChanged = True
## def pan(self,val,axis=0):
## """Rotate the camera around axis through its eye.
## The camera is rotated around an axis through the eye point.
## For axes 0 and 1, this will move the center, creating a panning
## effect. The default axis is parallel to the y-axis, resulting in
## horizontal panning. For vertical panning (axis=1) a convenience
## alias tilt is created.
## For axis = 2 the operation is equivalent to the rotate operation.
## """
## if not self.locked:
## if axis==0 or axis ==1:
## pos = self.eye
## self.eye[axis] = (self.eye[axis] + val) % 360
## self.focus = diff(pos,sphericalToCartesian(self.eye))
## elif axis==2:
## self.twist = (self.twist + val) % 360
## self.viewChanged = True
## def tilt(self,val):
## """Rotate the camera up/down around its own horizontal axis.
## The camera is rotated around and perpendicular to the plane of the
## y-axis and the viewing axis. This has the effect of a vertical pan.
## A positive value tilts the camera up, shifting the scene down.
## The value is specified in degrees.
## """
## if not self.locked:
## self.pan(val,1)
## self.viewChanged = True
def move(self,dx,dy,dz):
"""Move the camera over translation (dx,dy,dz) in global coordinates.
The center of the camera is moved over the specified translation
vector. This has the effect of moving the scene in opposite direction.
"""
if not self.locked:
self.focus += [dx,dy,dz]
## def truck(self,dx,dy,dz):
## """Move the camera translation vector in local coordinates.
## This has the effect of moving the scene in opposite direction.
## Positive coordinates mean:
## first coordinate : truck right,
## second coordinate : pedestal up,
## third coordinate : dolly out.
## """
## pos = self.eye
## ang = self.getAngles()
## tr = [dx,dy,dz]
## for i in [1,0,2]:
## r = rotationMatrix(i,ang[i])
## tr = multiply(tr, r)
## self.move(*tr)
## self.viewChanged = True
def lookAt(self,eye,center,up):
if not self.locked:
GL.glMatrixMode(GL.GL_MODELVIEW)
GL.glLoadIdentity()
GLU.gluLookAt(*concatenate([eye,center,up]))
self.saveModelView()
def rotate(self,val,vx,vy,vz):
"""Rotate the camera around current axis (vx,vy,vz)."""
if not self.locked:
GL.glMatrixMode(GL.GL_MODELVIEW)
self.saveModelView()
GL.glLoadIdentity()
GL.glTranslatef(0,0,-self.dist)
GL.glRotatef(val,vx,vy,vz)
GL.glMultMatrixf(self.rot)
dx,dy,dz = self.focus
GL.glTranslatef(-dx,-dy,-dz)
self.saveModelView()
def saveModelView (self):
"""Save the ModelView matrix."""
if not self.locked:
self.m = GL.glGetFloatv(GL.GL_MODELVIEW_MATRIX)
self.rot = copy.deepcopy(self.m)
self.trl = copy.deepcopy(self.rot[3,0:3])
#print("Translation: %s" % self.trl)
self.rot[3,0:3] = [0.,0.,0.]
#print "Rotation: %s" % self.rot
def setModelView(self):
"""Set the ModelView matrix from camera parameters.
"""
if not self.locked:
# The camera operations are applied on the model space
# Arguments should be taken negative and applied in backwards order
GL.glMatrixMode(GL.GL_MODELVIEW)
GL.glLoadIdentity()
# translate over camera distance
GL.glTranslate(0,0,-self.dist)
# rotate according to current rotation matrix
GL.glMultMatrixf(self.rot)
# translate to center
dx,dy,dz = self.focus
GL.glTranslatef(-dx,-dy,-dz)
def loadModelView (self,m=None):
"""Load the ModelView matrix.
There are three uses of this function:
- Without argument and if the viewing parameters have not changed
since the last save of the ModelView matrix, this will just reload
the ModelView matrix from the saved value.
- If an argument is supplied, it should be a legal ModelView matrix
and that matrix will be loaded (and saved) as the new ModelView
matrix.
- Else, a new ModelView matrix is set up from the camera parameters,
and it is loaded and saved.
In the latter two cases, the new ModelView matrix is saved, and if
a camera attribute `modelview_callback` has been set, a call to
this function is done, passing the camera instance as parameter.
"""
if not self.locked:
GL.glMatrixMode(GL.GL_MODELVIEW)
if m is not None or self.viewChanged:
if m is not None:
GL.glLoadMatrixf(m)
else:
self.setModelView()
self.saveModelView()
try:
self.modelview_callback(self)
except:
pass
self.viewChanged = False
else:
GL.glLoadMatrixf(self.m)
def loadCurrentRotation (self):
"""Load the current ModelView matrix with translations canceled out."""
rot = GL.glGetFloatv(GL.GL_MODELVIEW_MATRIX)
rot[3,0:3] = [0.,0.,0.]
GL.glLoadMatrixf(rot)
def translate(self,vx,vy,vz,local=True):
if not self.locked:
if local:
                vx,vy,vz = self.toWorld([vx,vy,vz])  # toWorld expects 3 components
self.move(-vx,-vy,-vz)
def transform(self,v):
"""Transform a vertex using the currently saved Modelview matrix."""
if len(v) == 3:
v = v + [ 1. ]
v = multiply([v],self.m)[0]
return [ a/v[3] for a in v[0:3] ]
## def toWorld(self,v,trl=False):
## """Transform a vertex from camera to world coordinates.
    ## The specified vector can have 3 or 4 (homogeneous) components.
## This uses the currently saved rotation matrix.
## """
## a = inverse(array(self.rot))
## if len(v) == 3:
## v = v + [ 1. ]
## v = multiply(array(v),a)
## return v[0:3] / v[3]
# TO MATRIX4?
def toWorld(self,v):
"""Transform a vertex from camera to world coordinates.
        The specified vector should have 3 components: the camera distance
        is added along the viewing axis, the result is rotated with the
        currently saved rotation matrix and translated over the focus point.
"""
v = at.checkArray(v,(3,),'f') + [0.,0.,self.dist]
return dot(v,transpose(self.rot[:3,:3])) + self.focus
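        # Worked example (sketch): with the default identity rotation and
        # distance d, toWorld([0., 0., 0.]) returns focus + [0., 0., d],
        # which is exactly the eye position used by the `eye` property.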
def setLens(self,fovy=None,aspect=None):
"""Set the field of view of the camera.
We set the field of view by the vertical opening angle fovy
and the aspect ratio (width/height) of the viewing volume.
A parameter that is not specified is left unchanged.
"""
if fovy:
self.fovy = min(abs(fovy),180)
if aspect:
self.aspect = abs(aspect)
self.lensChanged = True
def resetArea(self):
"""Set maximal camera area.
Resets the camera window area to its maximum values corresponding
to the fovy setting, symmetrical about the camera axes.
"""
self.setArea(0.,0.,1.,1.,False)
def setArea(self,hmin,vmin,hmax,vmax,relative=True,center=False,clip=True):
"""Set the viewable area of the camera."""
area = array([hmin,vmin,hmax,vmax])
if clip:
area = area.clip(0.,1.)
if area[0] < area[2] and area[1] < area[3]:
area = area.reshape(2,2)
mean = (area[1]+area[0]) * 0.5
diff = (area[1]-area[0]) * 0.5
if relative:
if center:
mean = zeros(2)
if self.keep_aspect:
aspect = diff[0] / diff[1]
if aspect > 1.0:
diff[1] = diff[0] #/ self.aspect
# no aspect factor: this is relative!!!
else:
diff[0] = diff[1] #* self.aspect
area[0] = mean-diff
area[1] = mean+diff
#print("RELATIVE AREA %s" % (area))
area = (1.-area) * self.area[0] + area * self.area[1]
#print("OLD ZOOM AREA %s (aspect %s)" % (self.area,self.aspect))
#print("NEW ZOOM AREA %s" % (area))
self.area = area
self.lensChanged = True
def zoomArea(self,val=0.5,area=None):
"""Zoom in/out by shrinking/enlarging the camera view area.
The zoom factor is relative to the current setting.
Values smaller than 1.0 zoom in, larger values zoom out.
"""
if val>0:
#val = (1.-val) * 0.5
#self.setArea(val,val,1.-val,1.-val,center=center)
if area is None:
area = self.area
#print("ZOOM AREA %s (%s)" % (area.tolist(),val))
mean = (area[1]+area[0]) * 0.5
diff = (area[1]-area[0]) * 0.5 * val
area[0] = mean-diff
area[1] = mean+diff
self.area = area
#print("CAMERA AREA %s" % self.area.tolist())
self.lensChanged = True
def transArea(self,dx,dy):
"""Pan by moving the vamera area.
dx and dy are relative movements in fractions of the
current area size.
"""
#print("TRANSAREA %s,%s" % (dx,dy))
area = self.area
diff = (area[1]-area[0]) * array([dx,dy])
area += diff
self.area = area
self.lensChanged = True
def setClip(self,near,far):
"""Set the near and far clipping planes"""
if near > 0 and near < far:
self.near,self.far = near,far
self.lensChanged = True
else:
print("Error: Invalid Near/Far clipping values")
## def setClipRel(self,near,far):
## """Set the near and far clipping planes"""
## if near > 0 and near < far:
## self.near,self.far = near,far
## self.lensChanged = True
## else:
## print("Error: Invalid Near/Far clipping values")
def setPerspective(self,on=True):
"""Set perspective on or off"""
self.perspective = on
self.lensChanged = True
## def zoom(self,val=0.5):
## """Zoom in/out by shrinking/enlarging the camera view angle.
## The zoom factor is relative to the current setting.
## Use setFovy() to specify an absolute setting.
## """
## if val>0:
## self.fovy *= val
## self.lensChanged = True
def loadProjection(self,force=False,pick=None,keepmode=False):
"""Load the projection/perspective matrix.
        The caller will have to set up the correct GL environment beforehand.
        No need to set matrix mode though: this function will switch to
        GL_PROJECTION mode before loading the matrix.
        If keepmode=True, it does not switch back to GL_MODELVIEW mode.
A pick region can be defined to use the camera in picking mode.
pick defines the picking region center and size (x,y,w,h).
        This function does its best at autodetecting changes in the lens
settings, and will only reload the matrix if such changes are
detected. You can optionally force loading the matrix.
"""
GL.glMatrixMode(GL.GL_PROJECTION)
if self.lensChanged or force:
GL.glLoadIdentity()
if pick:
GLU.gluPickMatrix(*pick)
fv = tand(self.fovy*0.5)
if self.perspective:
fv *= self.near
else:
fv *= self.dist
fh = fv * self.aspect
x0,x1 = 2*self.area - 1.0
frustum = (fh*x0[0],fh*x1[0],fv*x0[1],fv*x1[1],self.near,self.far)
if self.perspective:
GL.glFrustum(*frustum)
else:
GL.glOrtho(*frustum)
try:
self.projection_callback(self)
except:
pass
if not keepmode:
GL.glMatrixMode(GL.GL_MODELVIEW)
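        # Worked example (sketch): with fovy=45., aspect=4./3., near=0.1 and
        # the full area [[0.,0.],[1.,1.]], x0 and x1 become [-1.,-1.] and
        # [1.,1.], so the frustum is (-fh, fh, -fv, fv, near, far) with
        # fv = 0.1*tand(22.5) and fh = fv*4./3.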
#### global manipulation ###################
def set3DMatrices(self):
self.loadProjection()
self.loadModelView()
# this is saved by loadModelView
#self.m = GL.glGetDoublev(GL.GL_MODELVIEW_MATRIX)
##!!! self.p and self.v should be saved as we do with self.m
self.p = GL.glGetDoublev(GL.GL_PROJECTION_MATRIX)
self.v = GL.glGetIntegerv(GL.GL_VIEWPORT)
def project(self,x,y,z):
"Map the object coordinates (x,y,z) to window coordinates."""
self.set3DMatrices()
return GLU.gluProject(x,y,z,self.m.astype(double),self.p,self.v)
def unProject(self,x,y,z):
"Map the window coordinates (x,y,z) to object coordinates."""
self.set3DMatrices()
return GLU.gluUnProject(x,y,z,self.m.astype(double),self.p,self.v)
def setTracking(self,onoff=True):
"""Enable/disable coordinate tracking using the camera"""
if onoff:
self.tracking = True
self.set3DMatrices()
else:
self.tracking = False
#############################################################################
if __name__ == "__main__":
from OpenGL.GLUT import *
import sys
def init():
GL.glClearColor (0.0, 0.0, 0.0, 0.0)
GL.glShadeModel (GL.GL_FLAT)
def display():
global cam
GL.glClear (GL.GL_COLOR_BUFFER_BIT)
GL.glColor3f (1.0, 1.0, 1.0)
GL.glLoadIdentity () # clear the matrix
cam.loadProjection()
cam.loadModelView()
glutWireCube (1.0)
GL.glFlush ()
def reshape (w, h):
GL.glViewport (0, 0, w, h)
GL.glMatrixMode (GL.GL_PROJECTION)
GL.glLoadIdentity ()
GL.glFrustum (-1.0, 1.0, -1.0, 1.0, 1.5, 20.0)
GL.glMatrixMode (GL.GL_MODELVIEW)
def keyboard(key, x, y):
global cam
        if key == chr(27):  # GLUT passes the key as a one-char string, not an int
sys.exit()
elif key == 'd':
cam.dolly(1.1)
elif key == 'D':
cam.dolly(0.9)
        elif key == 'r':
            # rotate() takes an explicit axis vector; the old axis indices
            # 0/1/2 map to the x/y/z axes
            cam.rotate(5., 1., 0., 0.)
        elif key == 'R':
            cam.rotate(-5., 1., 0., 0.)
        elif key == 's':
            cam.rotate(5., 0., 1., 0.)
        elif key == 'S':
            cam.rotate(-5., 0., 1., 0.)
        elif key == 'w':
            cam.rotate(5., 0., 0., 1.)
        elif key == 'W':
            cam.rotate(-5., 0., 0., 1.)
        # pan()/tilt() are commented out in the Camera class above;
        # approximate them here by panning the view area
        elif key == 'p':
            cam.transArea(0.05, 0.)
        elif key == 'P':
            cam.transArea(-0.05, 0.)
        elif key == 't':
            cam.transArea(0., 0.05)
        elif key == 'T':
            cam.transArea(0., -0.05)
        elif key == 'h':
            cam.move(0.2, 0., 0.)
        elif key == 'H':
            cam.move(-0.2, 0., 0.)
        elif key == 'v':
            cam.move(0., 0.2, 0.)
        elif key == 'V':
            cam.move(0., -0.2, 0.)
        elif key == '+':
            # zoom() is commented out as well; zoomArea() has the same effect
            cam.zoomArea(0.8)
        elif key == '-':
            cam.zoomArea(1.25)
## elif key == 'x':
## cam.truck([0.5,0.,0.])
## elif key == 'X':
## cam.truck([-0.5,0.,0.])
## elif key == 'y':
## cam.truck([0.,0.5,0.])
## elif key == 'Y':
## cam.truck([0.,-0.5,0.])
## elif key == 'z':
## cam.truck([0.,0.,0.5])
## elif key == 'Z':
## cam.truck([0.,0.,-0.5])
elif key == 'o':
cam.setPerspective(not cam.perspective)
else:
print(key)
display()
def main():
global cam
glutInit(sys.argv)
glutInitDisplayMode (GLUT_SINGLE | GLUT_RGB)
glutInitWindowSize (500, 500)
#glutInitWindowPosition (100, 100)
glutCreateWindow (sys.argv[0])
init ()
cam = Camera(center=[0.,0.,0.],dist=5.)
cam.setLens(45.,1.)
glutDisplayFunc(display)
glutReshapeFunc(reshape)
glutKeyboardFunc(keyboard)
glutMainLoop()
return 0
main()
# End
|
opendxl/opendxl-epo-service-python
|
refs/heads/master
|
clean.py
|
1
|
from __future__ import absolute_import
from __future__ import print_function
import os
# pylint: disable=no-name-in-module, import-error
from distutils.dir_util import remove_tree
from shutil import copyfile
def clean_dir(src_dir, directory):
if os.path.exists(directory):
print("Cleaning directory: " + directory + "\n")
for f in os.listdir(directory):
target_file = os.path.join(directory, f)
if not os.path.isdir(target_file) \
and not f.lower().endswith(".py"):
os.remove(os.path.join(directory, f))
for f in os.listdir(src_dir):
src_file = os.path.join(src_dir, f)
if not os.path.isdir(src_file) and \
not f.lower().endswith(".py") and \
not f.lower().endswith(".pyc"):
copyfile(os.path.join(src_dir, f), os.path.join(directory, f))
print("Starting clean.\n")
DIST_PY_FILE_LOCATION = os.path.dirname(os.path.realpath(__file__))
DIST_DIRECTORY = os.path.join(DIST_PY_FILE_LOCATION, "dist")
CONFIG_DIRECTORY = os.path.join(DIST_PY_FILE_LOCATION, "config")
SAMPLE_DIRECTORY = os.path.join(DIST_PY_FILE_LOCATION, "sample")
CONFIG_SRC_DIRECTORY = os.path.join(DIST_PY_FILE_LOCATION, "dxleposervice",
"_config", "app")
SAMPLE_SRC_DIRECTORY = os.path.join(DIST_PY_FILE_LOCATION, "dxleposervice",
"_config", "sample")
# Remove the dist directory if it exists
if os.path.exists(DIST_DIRECTORY):
print("Removing dist directory: " + DIST_DIRECTORY + "\n")
remove_tree(DIST_DIRECTORY, verbose=1)
# Clean the config directory
clean_dir(CONFIG_SRC_DIRECTORY, CONFIG_DIRECTORY)
# Clean the samples directory
clean_dir(SAMPLE_SRC_DIRECTORY, SAMPLE_DIRECTORY)
# Clean .pyc files
print("Cleaning .pyc files")
for root, dirs, files in os.walk(DIST_PY_FILE_LOCATION):
for source_file in files:
full_path = os.path.join(root, source_file)
if full_path.lower().endswith(".pyc"):
os.remove(full_path)
|
anilmuthineni/tensorflow
|
refs/heads/master
|
tensorflow/examples/tutorials/input_fn/boston.py
|
12
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNNRegressor with custom input_fn for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import pandas as pd
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age",
"dis", "tax", "ptratio", "medv"]
FEATURES = ["crim", "zn", "indus", "nox", "rm",
"age", "dis", "tax", "ptratio"]
LABEL = "medv"
def input_fn(data_set):
feature_cols = {k: tf.constant(data_set[k].values) for k in FEATURES}
labels = tf.constant(data_set[LABEL].values)
return feature_cols, labels
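# Sketch of what input_fn produces: a dict mapping each FEATURES column to a
# constant tensor of its values, plus a label tensor for "medv"; the
# estimator below calls it lazily at fit/evaluate/predict time.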
def main(unused_argv):
# Load datasets
training_set = pd.read_csv("boston_train.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
test_set = pd.read_csv("boston_test.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Set of 6 examples for which to predict median house values
prediction_set = pd.read_csv("boston_predict.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Feature cols
feature_cols = [tf.contrib.layers.real_valued_column(k)
for k in FEATURES]
# Build 2 layer fully connected DNN with 10, 10 units respectively.
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_cols, hidden_units=[10, 10])
# Fit
regressor.fit(input_fn=lambda: input_fn(training_set), steps=5000)
# Score accuracy
ev = regressor.evaluate(input_fn=lambda: input_fn(test_set), steps=1)
loss_score = ev["loss"]
print("Loss: {0:f}".format(loss_score))
# Print out predictions
y = regressor.predict(input_fn=lambda: input_fn(prediction_set))
# .predict() returns an iterator; convert to a list and print predictions
predictions = list(itertools.islice(y, 6))
print("Predictions: {}".format(str(predictions)))
if __name__ == "__main__":
tf.app.run()
|
gram526/VTK
|
refs/heads/master
|
Imaging/Core/Testing/Python/TestAllBlends.py
|
26
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class TestAllBlends(vtk.test.Testing.vtkTest):
def testAllBlends(self):
# This script calculates the luminance of an image
renWin = vtk.vtkRenderWindow()
renWin.SetSize(512, 256)
# Image pipeline
image1 = vtk.vtkTIFFReader()
image1.SetFileName(VTK_DATA_ROOT + "/Data/beach.tif")
# "beach.tif" image contains ORIENTATION tag which is
# ORIENTATION_TOPLEFT (row 0 top, col 0 lhs) type. The TIFF
# reader parses this tag and sets the internal TIFF image
# orientation accordingly. To overwrite this orientation with a vtk
# convention of ORIENTATION_BOTLEFT (row 0 bottom, col 0 lhs ), invoke
# SetOrientationType method with parameter value of 4.
image1.SetOrientationType(4)
image2 = vtk.vtkBMPReader()
image2.SetFileName(VTK_DATA_ROOT + "/Data/masonry.bmp")
# shrink the images to a reasonable size
color = vtk.vtkImageShrink3D()
color.SetInputConnection(image1.GetOutputPort())
color.SetShrinkFactors(2, 2, 1)
backgroundColor = vtk.vtkImageShrink3D()
backgroundColor.SetInputConnection(image2.GetOutputPort())
backgroundColor.SetShrinkFactors(2, 2, 1)
# create a greyscale version
luminance = vtk.vtkImageLuminance()
luminance.SetInputConnection(color.GetOutputPort())
backgroundLuminance = vtk.vtkImageLuminance()
backgroundLuminance.SetInputConnection(backgroundColor.GetOutputPort())
# create an alpha mask
table = vtk.vtkLookupTable()
table.SetTableRange(220, 255)
table.SetValueRange(1, 0)
table.SetSaturationRange(0, 0)
table.Build()
alpha = vtk.vtkImageMapToColors()
alpha.SetInputConnection(luminance.GetOutputPort())
alpha.SetLookupTable(table)
alpha.SetOutputFormatToLuminance()
# make luminanceAlpha and colorAlpha versions
luminanceAlpha = vtk.vtkImageAppendComponents()
luminanceAlpha.AddInputConnection(luminance.GetOutputPort())
luminanceAlpha.AddInputConnection(alpha.GetOutputPort())
colorAlpha = vtk.vtkImageAppendComponents()
colorAlpha.AddInputConnection(color.GetOutputPort())
colorAlpha.AddInputConnection(alpha.GetOutputPort())
foregrounds = ["luminance", "luminanceAlpha", "color", "colorAlpha"]
backgrounds = ["backgroundColor", "backgroundLuminance"]
deltaX = 1.0 / 4.0
deltaY = 1.0 / 2.0
blend = dict()
mapper = dict()
actor = dict()
imager = dict()
for row, bg in enumerate(backgrounds):
for column, fg in enumerate(foregrounds):
blend.update({bg:{fg:vtk.vtkImageBlend()}})
blend[bg][fg].AddInputConnection(eval(bg + '.GetOutputPort()'))
if bg == "backgroundColor" or fg == "luminance" or fg == "luminanceAlpha":
blend[bg][fg].AddInputConnection(eval(fg + '.GetOutputPort()'))
blend[bg][fg].SetOpacity(1, 0.8)
mapper.update({bg:{fg:vtk.vtkImageMapper()}})
mapper[bg][fg].SetInputConnection(blend[bg][fg].GetOutputPort())
mapper[bg][fg].SetColorWindow(255)
mapper[bg][fg].SetColorLevel(127.5)
actor.update({bg:{fg:vtk.vtkActor2D()}})
actor[bg][fg].SetMapper(mapper[bg][fg])
imager.update({bg:{fg:vtk.vtkRenderer()}})
imager[bg][fg].AddActor2D(actor[bg][fg])
imager[bg][fg].SetViewport(column * deltaX, row * deltaY, (column + 1) * deltaX, (row + 1) * deltaY)
renWin.AddRenderer(imager[bg][fg])
column += 1
# render and interact with data
iRen = vtk.vtkRenderWindowInteractor()
        iRen.SetRenderWindow(renWin)
renWin.Render()
img_file = "TestAllBlends.png"
vtk.test.Testing.compareImage(iRen.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=25)
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(TestAllBlends, 'test')])
|
writefaruq/lionface-app
|
refs/heads/master
|
django/__init__.py
|
9
|
VERSION = (1, 3, 0, 'alpha', 1)
def get_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version = '%s.%s' % (version, VERSION[2])
if VERSION[3:] == ('alpha', 0):
version = '%s pre-alpha' % version
else:
if VERSION[3] != 'final':
version = '%s %s %s' % (version, VERSION[3], VERSION[4])
from django.utils.version import get_svn_revision
svn_rev = get_svn_revision()
if svn_rev != u'SVN-unknown':
version = "%s %s" % (version, svn_rev)
return version
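# Worked example (sketch): with VERSION = (1, 3, 0, 'alpha', 1) as above,
# get_version() returns '1.3 alpha 1', suffixed with the SVN revision when
# one can be detected.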
|
justinpotts/mozillians
|
refs/heads/master
|
vendor-local/lib/python/celery/task/schedules.py
|
32
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import warnings
from ..schedules import schedule, crontab_parser, crontab # noqa
from ..exceptions import CDeprecationWarning
warnings.warn(CDeprecationWarning(
"celery.task.schedules is deprecated and renamed to celery.schedules"))
|
takeflight/wagtail
|
refs/heads/master
|
wagtail/contrib/frontend_cache/backends.py
|
7
|
import logging
import uuid
from collections import defaultdict
from urllib.error import HTTPError, URLError
from urllib.parse import urlparse, urlunparse
from urllib.request import Request, urlopen
import requests
from django.core.exceptions import ImproperlyConfigured
from wagtail import __version__
logger = logging.getLogger('wagtail.frontendcache')
class PurgeRequest(Request):
def get_method(self):
return 'PURGE'
class BaseBackend:
def purge(self, url):
raise NotImplementedError
def purge_batch(self, urls):
# Fallback for backends that do not support batch purging
for url in urls:
self.purge(url)
class HTTPBackend(BaseBackend):
def __init__(self, params):
location_url_parsed = urlparse(params.pop('LOCATION'))
self.cache_scheme = location_url_parsed.scheme
self.cache_netloc = location_url_parsed.netloc
def purge(self, url):
url_parsed = urlparse(url)
host = url_parsed.hostname
# Append port to host if it is set in the original URL
if url_parsed.port:
host += (':' + str(url_parsed.port))
request = PurgeRequest(
url=urlunparse([
self.cache_scheme,
self.cache_netloc,
url_parsed.path,
url_parsed.params,
url_parsed.query,
url_parsed.fragment
]),
headers={
'Host': host,
'User-Agent': 'Wagtail-frontendcache/' + __version__
}
)
try:
urlopen(request)
except HTTPError as e:
logger.error("Couldn't purge '%s' from HTTP cache. HTTPError: %d %s", url, e.code, e.reason)
except URLError as e:
logger.error("Couldn't purge '%s' from HTTP cache. URLError: %s", url, e.reason)
class CloudflareBackend(BaseBackend):
CHUNK_SIZE = 30
def __init__(self, params):
self.cloudflare_email = params.pop("EMAIL", None)
self.cloudflare_api_key = (
params.pop("TOKEN", None)
or params.pop("API_KEY", None)
)
self.cloudflare_token = params.pop("BEARER_TOKEN", None)
self.cloudflare_zoneid = params.pop("ZONEID")
if (
(not self.cloudflare_email and self.cloudflare_api_key)
or (self.cloudflare_email and not self.cloudflare_api_key)
or (not any([self.cloudflare_email, self.cloudflare_api_key, self.cloudflare_token]))
):
raise ImproperlyConfigured(
"The setting 'WAGTAILFRONTENDCACHE' requires both 'EMAIL' and 'API_KEY', or 'BEARER_TOKEN' to be specified."
)
def _purge_urls(self, urls):
try:
purge_url = 'https://api.cloudflare.com/client/v4/zones/{0}/purge_cache'.format(self.cloudflare_zoneid)
headers = {"Content-Type": "application/json"}
if self.cloudflare_token:
headers["Authorization"] = "Bearer {}".format(self.cloudflare_token)
else:
headers["X-Auth-Email"] = self.cloudflare_email
headers["X-Auth-Key"] = self.cloudflare_api_key
data = {"files": urls}
response = requests.delete(
purge_url,
json=data,
headers=headers,
)
try:
response_json = response.json()
except ValueError:
if response.status_code != 200:
response.raise_for_status()
else:
for url in urls:
logger.error("Couldn't purge '%s' from Cloudflare. Unexpected JSON parse error.", url)
except requests.exceptions.HTTPError as e:
for url in urls:
logging.exception("Couldn't purge '%s' from Cloudflare. HTTPError: %d", url, e.response.status_code)
return
if response_json['success'] is False:
error_messages = ', '.join([str(err['message']) for err in response_json['errors']])
for url in urls:
logger.error("Couldn't purge '%s' from Cloudflare. Cloudflare errors '%s'", url, error_messages)
return
def purge_batch(self, urls):
        # Break the batched URLs into chunks to fit within Cloudflare's maximum size for
# the purge_cache call (https://api.cloudflare.com/#zone-purge-files-by-url)
for i in range(0, len(urls), self.CHUNK_SIZE):
chunk = urls[i:i + self.CHUNK_SIZE]
self._purge_urls(chunk)
def purge(self, url):
self.purge_batch([url])
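# Configuration sketch (hypothetical token and zone id) for the Django
# settings this backend reads its params from:
#
#     WAGTAILFRONTENDCACHE = {
#         'cloudflare': {
#             'BACKEND': 'wagtail.contrib.frontend_cache.backends.CloudflareBackend',
#             'BEARER_TOKEN': 'your-api-token',
#             'ZONEID': 'your-zone-id',
#         },
#     }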
class CloudfrontBackend(BaseBackend):
def __init__(self, params):
import boto3
self.client = boto3.client('cloudfront')
try:
self.cloudfront_distribution_id = params.pop('DISTRIBUTION_ID')
except KeyError:
raise ImproperlyConfigured(
"The setting 'WAGTAILFRONTENDCACHE' requires the object 'DISTRIBUTION_ID'."
)
def purge_batch(self, urls):
paths_by_distribution_id = defaultdict(list)
for url in urls:
url_parsed = urlparse(url)
distribution_id = None
if isinstance(self.cloudfront_distribution_id, dict):
host = url_parsed.hostname
if host in self.cloudfront_distribution_id:
distribution_id = self.cloudfront_distribution_id.get(host)
else:
logger.info(
"Couldn't purge '%s' from CloudFront. Hostname '%s' not found in the DISTRIBUTION_ID mapping",
url, host)
else:
distribution_id = self.cloudfront_distribution_id
if distribution_id:
paths_by_distribution_id[distribution_id].append(url_parsed.path)
for distribution_id, paths in paths_by_distribution_id.items():
self._create_invalidation(distribution_id, paths)
def purge(self, url):
self.purge_batch([url])
def _create_invalidation(self, distribution_id, paths):
import botocore
try:
self.client.create_invalidation(
DistributionId=distribution_id,
InvalidationBatch={
'Paths': {
'Quantity': len(paths),
'Items': paths
},
'CallerReference': str(uuid.uuid4())
}
)
except botocore.exceptions.ClientError as e:
for path in paths:
logger.error(
"Couldn't purge path '%s' from CloudFront (DistributionId=%s). ClientError: %s %s",
path,
distribution_id,
e.response['Error']['Code'],
e.response['Error']['Message']
)
|
codecop/diy-lang
|
refs/heads/try2
|
site-packages/nose/plugins/capture.py
|
69
|
"""
This plugin captures stdout during test execution. If the test fails
or raises an error, the captured output will be appended to the error
or failure output. It is enabled by default but can be disabled with
the options ``-s`` or ``--nocapture``.
:Options:
``--nocapture``
Don't capture stdout (any stdout output will be printed immediately)
"""
import logging
import os
import sys
from nose.plugins.base import Plugin
from nose.pyversion import exc_to_unicode, force_unicode
from nose.util import ln
from StringIO import StringIO
log = logging.getLogger(__name__)
class Capture(Plugin):
"""
Output capture plugin. Enabled by default. Disable with ``-s`` or
``--nocapture``. This plugin captures stdout during test execution,
appending any output captured to the error or failure output,
should the test fail or raise an error.
"""
enabled = True
env_opt = 'NOSE_NOCAPTURE'
name = 'capture'
score = 1600
def __init__(self):
self.stdout = []
self._buf = None
def options(self, parser, env):
"""Register commandline options
"""
parser.add_option(
"-s", "--nocapture", action="store_false",
default=not env.get(self.env_opt), dest="capture",
help="Don't capture stdout (any stdout output "
"will be printed immediately) [NOSE_NOCAPTURE]")
def configure(self, options, conf):
"""Configure plugin. Plugin is enabled by default.
"""
self.conf = conf
if not options.capture:
self.enabled = False
def afterTest(self, test):
"""Clear capture buffer.
"""
self.end()
self._buf = None
def begin(self):
"""Replace sys.stdout with capture buffer.
"""
self.start() # get an early handle on sys.stdout
def beforeTest(self, test):
"""Flush capture buffer.
"""
self.start()
def formatError(self, test, err):
"""Add captured output to error report.
"""
test.capturedOutput = output = self.buffer
self._buf = None
if not output:
            # Don't return None as that would prevent other formatters from
            # formatting and would discard earlier formatters' output;
            # instead return the err we got
return err
ec, ev, tb = err
return (ec, self.addCaptureToErr(ev, output), tb)
def formatFailure(self, test, err):
"""Add captured output to failure report.
"""
return self.formatError(test, err)
def addCaptureToErr(self, ev, output):
ev = exc_to_unicode(ev)
output = force_unicode(output)
return u'\n'.join([ev, ln(u'>> begin captured stdout <<'),
output, ln(u'>> end captured stdout <<')])
def start(self):
self.stdout.append(sys.stdout)
self._buf = StringIO()
sys.stdout = self._buf
def end(self):
if self.stdout:
sys.stdout = self.stdout.pop()
def finalize(self, result):
"""Restore stdout.
"""
while self.stdout:
self.end()
def _get_buffer(self):
if self._buf is not None:
return self._buf.getvalue()
buffer = property(_get_buffer, None, None,
"""Captured stdout output.""")
|
papercapp/django-banner-rotator
|
refs/heads/master
|
banner_rotator/south_migrations/0004_auto__del_field_banner_impressions.py
|
2
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Banner.impressions'
db.delete_column('banner_rotator_banner', 'impressions')
def backwards(self, orm):
# Adding field 'Banner.impressions'
db.add_column('banner_rotator_banner', 'impressions', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
},
'banner_rotator.banner': {
'Meta': {'object_name': 'Banner'},
'campaign': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'banners'", 'to': "orm['banner_rotator.Campaign']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'weight': ('django.db.models.fields.IntegerField', [], {})
},
'banner_rotator.campaign': {
'Meta': {'object_name': 'Campaign'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'banner_rotator.click': {
'Meta': {'object_name': 'Click'},
'banner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'clicks'", 'to': "orm['banner_rotator.Banner']"}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'referrer': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'clicks'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'user_agent': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['banner_rotator']
|
zdary/intellij-community
|
refs/heads/master
|
python/testData/copyPaste/Selection3.after.py
|
83
|
if True:
a = 1
b = 2
def f():
if True:
a = 1
b = 2
|
alexandrucoman/vbox-nova-driver
|
refs/heads/master
|
nova/virt/libvirt/firewall.py
|
4
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from lxml import etree
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from nova.cloudpipe import pipelib
from nova.i18n import _LI
from nova.i18n import _LW
import nova.virt.firewall as base_firewall
from nova.virt import netutils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
libvirt = None
class NWFilterFirewall(base_firewall.FirewallDriver):
"""This class implements a network filtering mechanism by using
libvirt's nwfilter.
    All instances get a filter ("nova-base") applied. This filter
provides some basic security such as protection against MAC
spoofing, IP spoofing, and ARP spoofing.
"""
def __init__(self, virtapi, host, **kwargs):
"""Create an NWFilter firewall driver
:param virtapi: nova.virt.virtapi.VirtAPI instance
:param host: nova.virt.libvirt.host.Host instance
:param kwargs: currently unused
"""
super(NWFilterFirewall, self).__init__(virtapi)
global libvirt
if libvirt is None:
try:
libvirt = importutils.import_module('libvirt')
except ImportError:
LOG.warn(_LW("Libvirt module could not be loaded. "
"NWFilterFirewall will not work correctly."))
self._host = host
self.static_filters_configured = False
self.handle_security_groups = False
def apply_instance_filter(self, instance, network_info):
"""No-op. Everything is done in prepare_instance_filter."""
pass
def _get_connection(self):
return self._host.get_connection()
_conn = property(_get_connection)
def nova_no_nd_reflection_filter(self):
"""This filter protects false positives on IPv6 Duplicate Address
Detection(DAD).
"""
uuid = self._get_filter_uuid('nova-no-nd-reflection')
return '''<filter name='nova-no-nd-reflection' chain='ipv6'>
<!-- no nd reflection -->
<!-- drop if destination mac is v6 mcast mac addr and
we sent it. -->
<uuid>%s</uuid>
<rule action='drop' direction='in'>
<mac dstmacaddr='33:33:00:00:00:00'
dstmacmask='ff:ff:00:00:00:00' srcmacaddr='$MAC'/>
</rule>
</filter>''' % uuid
def nova_dhcp_filter(self):
"""The standard allow-dhcp-server filter is an <ip> one, so it uses
ebtables to allow traffic through. Without a corresponding rule in
iptables, it'll get blocked anyway.
"""
uuid = self._get_filter_uuid('nova-allow-dhcp-server')
return '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
<uuid>%s</uuid>
<rule action='accept' direction='out'
priority='100'>
<udp srcipaddr='0.0.0.0'
dstipaddr='255.255.255.255'
srcportstart='68'
dstportstart='67'/>
</rule>
<rule action='accept' direction='in'
priority='100'>
<udp srcipaddr='$DHCPSERVER'
srcportstart='67'
dstportstart='68'/>
</rule>
</filter>''' % uuid
def setup_basic_filtering(self, instance, network_info):
"""Set up basic filtering (MAC, IP, and ARP spoofing protection)."""
LOG.info(_LI('Called setup_basic_filtering in nwfilter'),
instance=instance)
if self.handle_security_groups:
# No point in setting up a filter set that we'll be overriding
# anyway.
return
LOG.info(_LI('Ensuring static filters'), instance=instance)
self._ensure_static_filters()
nodhcp_base_filter = self.get_base_filter_list(instance, False)
dhcp_base_filter = self.get_base_filter_list(instance, True)
for vif in network_info:
_base_filter = nodhcp_base_filter
for subnet in vif['network']['subnets']:
if subnet.get_meta('dhcp_server'):
_base_filter = dhcp_base_filter
break
self._define_filter(self._get_instance_filter_xml(instance,
_base_filter,
vif))
def _get_instance_filter_parameters(self, vif):
parameters = []
def format_parameter(parameter, value):
return ("<parameter name='%s' value='%s'/>" % (parameter, value))
        network = vif['network']
        if not network or not network['subnets']:
            return parameters
v4_subnets = [s for s in network['subnets'] if s['version'] == 4]
v6_subnets = [s for s in network['subnets'] if s['version'] == 6]
for subnet in v4_subnets:
for ip in subnet['ips']:
parameters.append(format_parameter('IP', ip['address']))
dhcp_server = subnet.get_meta('dhcp_server')
if dhcp_server:
parameters.append(format_parameter('DHCPSERVER', dhcp_server))
if CONF.use_ipv6:
for subnet in v6_subnets:
gateway = subnet.get('gateway')
if gateway:
ra_server = gateway['address'] + "/128"
parameters.append(format_parameter('RASERVER', ra_server))
if CONF.allow_same_net_traffic:
for subnet in v4_subnets:
ipv4_cidr = subnet['cidr']
net, mask = netutils.get_net_and_mask(ipv4_cidr)
parameters.append(format_parameter('PROJNET', net))
parameters.append(format_parameter('PROJMASK', mask))
if CONF.use_ipv6:
for subnet in v6_subnets:
ipv6_cidr = subnet['cidr']
net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
parameters.append(format_parameter('PROJNET6', net))
parameters.append(format_parameter('PROJMASK6', prefix))
return parameters
def _get_instance_filter_xml(self, instance, filters, vif):
nic_id = vif['address'].replace(':', '')
instance_filter_name = self._instance_filter_name(instance, nic_id)
parameters = self._get_instance_filter_parameters(vif)
uuid = self._get_filter_uuid(instance_filter_name)
xml = '''<filter name='%s' chain='root'>''' % instance_filter_name
xml += '<uuid>%s</uuid>' % uuid
for f in filters:
xml += '''<filterref filter='%s'>''' % f
xml += ''.join(parameters)
xml += '</filterref>'
xml += '</filter>'
return xml
def get_base_filter_list(self, instance, allow_dhcp):
"""Obtain a list of base filters to apply to an instance.
The return value should be a list of strings, each
specifying a filter name. Subclasses can override this
function to add additional filters as needed. Additional
filters added to the list must also be correctly defined
within the subclass.
"""
if pipelib.is_vpn_image(instance.image_ref):
base_filter = 'nova-vpn'
elif allow_dhcp:
base_filter = 'nova-base'
else:
base_filter = 'nova-nodhcp'
return [base_filter]
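    # In short (per the branches above): a VPN image yields ['nova-vpn'],
    # a vif on a dhcp-enabled subnet ['nova-base'], and anything else
    # ['nova-nodhcp'].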
def _ensure_static_filters(self):
"""Static filters are filters that have no need to be IP aware.
There is no configuration or tuneability of these filters, so they
can be set up once and forgotten about.
"""
if self.static_filters_configured:
return
filter_set = ['no-mac-spoofing',
'no-ip-spoofing',
'no-arp-spoofing']
self._define_filter(self.nova_no_nd_reflection_filter())
filter_set.append('nova-no-nd-reflection')
self._define_filter(self._filter_container('nova-nodhcp', filter_set))
filter_set.append('allow-dhcp-server')
self._define_filter(self._filter_container('nova-base', filter_set))
self._define_filter(self._filter_container('nova-vpn',
['allow-dhcp-server']))
self._define_filter(self.nova_dhcp_filter())
self.static_filters_configured = True
def _filter_container(self, name, filters):
uuid = self._get_filter_uuid(name)
xml = '''<filter name='%s' chain='root'>
<uuid>%s</uuid>
%s
</filter>''' % (name, uuid,
''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
return xml
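    # For example, _filter_container('nova-vpn', ['allow-dhcp-server'])
    # produces (uuid and whitespace elided):
    #   <filter name='nova-vpn' chain='root'>
    #     <uuid>...</uuid>
    #     <filterref filter='allow-dhcp-server'/>
    #   </filter>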
def _get_filter_uuid(self, name):
try:
flt = self._conn.nwfilterLookupByName(name)
xml = flt.XMLDesc(0)
doc = etree.fromstring(xml)
u = doc.find("./uuid").text
except Exception as e:
LOG.debug(u"Cannot find UUID for filter '%(name)s': '%(e)s'",
{'name': name, 'e': e})
u = uuid.uuid4().hex
LOG.debug("UUID for filter '%s' is '%s'" % (name, u))
return u
def _define_filter(self, xml):
if callable(xml):
xml = xml()
self._conn.nwfilterDefineXML(xml)
def unfilter_instance(self, instance, network_info):
"""Clear out the nwfilter rules."""
for vif in network_info:
nic_id = vif['address'].replace(':', '')
instance_filter_name = self._instance_filter_name(instance, nic_id)
try:
_nw = self._conn.nwfilterLookupByName(instance_filter_name)
_nw.undefine()
except libvirt.libvirtError as e:
errcode = e.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
# This happens when the instance filter is still in
                    # use (i.e. when the instance has not terminated properly)
raise
LOG.debug('The nwfilter(%s) is not found.',
instance_filter_name, instance=instance)
@staticmethod
def _instance_filter_name(instance, nic_id=None):
if not nic_id:
return 'nova-instance-%s' % (instance.name)
return 'nova-instance-%s-%s' % (instance.name, nic_id)
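    # e.g. an instance named 'instance-00000001' with nic MAC
    # fa:16:3e:12:34:56 (hypothetical values) is filtered by
    # 'nova-instance-instance-00000001-fa163e123456'.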
def instance_filter_exists(self, instance, network_info):
"""Check nova-instance-instance-xxx exists."""
for vif in network_info:
nic_id = vif['address'].replace(':', '')
instance_filter_name = self._instance_filter_name(instance, nic_id)
try:
self._conn.nwfilterLookupByName(instance_filter_name)
except libvirt.libvirtError:
name = instance.name
LOG.debug('The nwfilter(%(instance_filter_name)s) for'
'%(name)s is not found.',
{'instance_filter_name': instance_filter_name,
'name': name},
instance=instance)
return False
return True
class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
def __init__(self, virtapi, execute=None, **kwargs):
"""Create an IP tables firewall driver instance
:param virtapi: nova.virt.virtapi.VirtAPI instance
:param execute: unused, pass None
:param kwargs: extra arguments
The @kwargs parameter must contain a key 'host' that
maps to an instance of the nova.virt.libvirt.host.Host
class.
"""
super(IptablesFirewallDriver, self).__init__(virtapi, **kwargs)
self.nwfilter = NWFilterFirewall(virtapi, kwargs['host'])
def setup_basic_filtering(self, instance, network_info):
"""Set up provider rules and basic NWFilter."""
self.nwfilter.setup_basic_filtering(instance, network_info)
if not self.basically_filtered:
LOG.debug('iptables firewall: Setup Basic Filtering',
instance=instance)
self.refresh_provider_fw_rules()
self.basically_filtered = True
def apply_instance_filter(self, instance, network_info):
"""No-op. Everything is done in prepare_instance_filter."""
pass
def unfilter_instance(self, instance, network_info):
# NOTE(salvatore-orlando):
# Overriding base class method for applying nwfilter operation
if self.instance_info.pop(instance.id, None):
self.remove_filters_for_instance(instance)
self.iptables.apply()
self.nwfilter.unfilter_instance(instance, network_info)
else:
LOG.info(_LI('Attempted to unfilter instance which is not '
'filtered'), instance=instance)
def instance_filter_exists(self, instance, network_info):
"""Check nova-instance-instance-xxx exists."""
return self.nwfilter.instance_filter_exists(instance, network_info)
|
alexbrasetvik/Piped
|
refs/heads/develop
|
setup.py
|
1
|
# Copyright (c) 2010-2011, Found IT A/S and Piped Project Contributors.
# See LICENSE for details.
import os
import sys
from setuptools import setup, find_packages
# add piped to the package path so we can get the version from the source tree
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here)
import piped
packages = find_packages(where=here)
package_data = {
'piped': ['conf.yml', 'logging.yaml', 'service.tac'],
'piped.test': [
'data/bar.txt', 'data/foo.txt', 'data/test_conf.yml',
'data/test_includes.yml', 'data/baz/bar.baz', 'data/baz/foo.bar',
'data/test_config_nesting.yml'
],
'piped.providers.test': [
'data/bar', 'data/foo',
'data/persisted_json.json', 'data/persisted_pickle.pickle',
'data/server.crt', 'data/server.key'
]
}
# sdist completely ignores package_data, so we generate a MANIFEST.in
# http://docs.python.org/distutils/sourcedist.html#specifying-the-files-to-distribute
with open(os.path.join(here, 'MANIFEST.in'), 'w') as manifest:
for package, files in package_data.items():
for file in files:
            manifest.write('include %s\n' % os.path.join(package.replace('.', os.path.sep), file))
manifest.write('include LICENSE\n')
manifest.write('include README.rst\n')
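# e.g. the loop above emits one include line per data file, such as:
#   include piped/conf.yml
#   include piped/test/data/bar.txt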
version_specific_requirements = []
if sys.version_info < (2, 7):
version_specific_requirements += ['ordereddict']
setup(
name = 'piped',
license = 'MIT',
author = 'Piped Project Contributors',
author_email = 'piped@librelist.com',
url = 'http://piped.io',
packages = packages,
package_data = package_data,
zip_safe = False,
version = str(piped.version),
classifiers = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Operating System :: OS Independent',
'Framework :: Twisted',
'Topic :: Software Development :: Libraries :: Application Frameworks'
],
description = 'A framework and application server with pipelines.',
long_description = """
See http://piped.io for more details.
""",
entry_points = dict(
console_scripts = [
'piped = piped.scripts:run_piped',
]
),
install_requires = version_specific_requirements +
['Twisted>=11', 'argparse', 'pyOpenSSL', 'PyYAML', 'networkx>=1.4', 'setuptools', 'mock']
)
|
morlay/shadowsocks
|
refs/heads/master
|
shadowsocks/asyncdns.py
|
655
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import socket
import struct
import re
import logging
from shadowsocks import common, lru_cache, eventloop, shell
CACHE_SWEEP_INTERVAL = 30
VALID_HOSTNAME = re.compile(br"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
common.patch_socket()
# rfc1035
# format
# +---------------------+
# | Header |
# +---------------------+
# | Question | the question for the name server
# +---------------------+
# | Answer | RRs answering the question
# +---------------------+
# | Authority | RRs pointing toward an authority
# +---------------------+
# | Additional | RRs holding additional information
# +---------------------+
#
# header
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ID |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |QR| Opcode |AA|TC|RD|RA| Z | RCODE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | QDCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ANCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | NSCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ARCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
QTYPE_ANY = 255
QTYPE_A = 1
QTYPE_AAAA = 28
QTYPE_CNAME = 5
QTYPE_NS = 2
QCLASS_IN = 1
def build_address(address):
address = address.strip(b'.')
labels = address.split(b'.')
results = []
for label in labels:
l = len(label)
if l > 63:
return None
results.append(common.chr(l))
results.append(label)
results.append(b'\0')
return b''.join(results)
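# For example, build_address(b'www.google.com') returns
# b'\x03www\x06google\x03com\x00' -- length-prefixed labels followed by a
# terminating zero byte (RFC 1035 wire format).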
def build_request(address, qtype):
request_id = os.urandom(2)
header = struct.pack('!BBHHHH', 1, 0, 1, 0, 0, 0)
addr = build_address(address)
qtype_qclass = struct.pack('!HH', qtype, QCLASS_IN)
return request_id + header + addr + qtype_qclass
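# The fixed header packed above sets only the RD (recursion desired) bit
# and QDCOUNT=1, i.e. a single-question recursive query; the two random
# bytes in front of it are the transaction ID.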
def parse_ip(addrtype, data, length, offset):
if addrtype == QTYPE_A:
return socket.inet_ntop(socket.AF_INET, data[offset:offset + length])
elif addrtype == QTYPE_AAAA:
return socket.inet_ntop(socket.AF_INET6, data[offset:offset + length])
elif addrtype in [QTYPE_CNAME, QTYPE_NS]:
return parse_name(data, offset)[1]
else:
return data[offset:offset + length]
def parse_name(data, offset):
p = offset
labels = []
l = common.ord(data[p])
while l > 0:
if (l & (128 + 64)) == (128 + 64):
# pointer
pointer = struct.unpack('!H', data[p:p + 2])[0]
pointer &= 0x3FFF
r = parse_name(data, pointer)
labels.append(r[1])
p += 2
# pointer is the end
return p - offset, b'.'.join(labels)
else:
labels.append(data[p + 1:p + 1 + l])
p += 1 + l
l = common.ord(data[p])
return p - offset + 1, b'.'.join(labels)
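# A label byte whose top two bits are set (l & 0xC0 == 0xC0) is not a
# length but the first byte of a 14-bit compression pointer back into the
# message (RFC 1035 section 4.1.4) -- hence the 0x3FFF mask and the
# recursive parse_name call above.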
# rfc1035
# record
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | |
# / /
# / NAME /
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TYPE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | CLASS |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TTL |
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | RDLENGTH |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
# / RDATA /
# / /
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
def parse_record(data, offset, question=False):
nlen, name = parse_name(data, offset)
if not question:
record_type, record_class, record_ttl, record_rdlength = struct.unpack(
'!HHiH', data[offset + nlen:offset + nlen + 10]
)
ip = parse_ip(record_type, data, record_rdlength, offset + nlen + 10)
return nlen + 10 + record_rdlength, \
(name, ip, record_type, record_class, record_ttl)
else:
record_type, record_class = struct.unpack(
'!HH', data[offset + nlen:offset + nlen + 4]
)
return nlen + 4, (name, None, record_type, record_class, None, None)
def parse_header(data):
if len(data) >= 12:
header = struct.unpack('!HBBHHHH', data[:12])
res_id = header[0]
res_qr = header[1] & 128
res_tc = header[1] & 2
res_ra = header[2] & 128
res_rcode = header[2] & 15
# assert res_tc == 0
# assert res_rcode in [0, 3]
res_qdcount = header[3]
res_ancount = header[4]
res_nscount = header[5]
res_arcount = header[6]
return (res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount,
res_ancount, res_nscount, res_arcount)
return None
def parse_response(data):
try:
if len(data) >= 12:
header = parse_header(data)
if not header:
return None
res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount, \
res_ancount, res_nscount, res_arcount = header
qds = []
ans = []
offset = 12
for i in range(0, res_qdcount):
l, r = parse_record(data, offset, True)
offset += l
if r:
qds.append(r)
for i in range(0, res_ancount):
l, r = parse_record(data, offset)
offset += l
if r:
ans.append(r)
for i in range(0, res_nscount):
l, r = parse_record(data, offset)
offset += l
for i in range(0, res_arcount):
l, r = parse_record(data, offset)
offset += l
response = DNSResponse()
if qds:
response.hostname = qds[0][0]
for an in qds:
response.questions.append((an[1], an[2], an[3]))
for an in ans:
response.answers.append((an[1], an[2], an[3]))
return response
except Exception as e:
shell.print_exception(e)
return None
def is_valid_hostname(hostname):
if len(hostname) > 255:
return False
    # indexing bytes yields an int on Python 3, so compare with endswith
    if hostname.endswith(b'.'):
        hostname = hostname[:-1]
return all(VALID_HOSTNAME.match(x) for x in hostname.split(b'.'))
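# Illustrative behaviour: is_valid_hostname(b'www.example.com') is True;
# names longer than 255 bytes, or with a label outside 1-63 characters of
# [A-Za-z0-9-] (or starting/ending with '-'), are rejected.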
class DNSResponse(object):
def __init__(self):
self.hostname = None
self.questions = [] # each: (addr, type, class)
self.answers = [] # each: (addr, type, class)
def __str__(self):
return '%s: %s' % (self.hostname, str(self.answers))
STATUS_IPV4 = 0
STATUS_IPV6 = 1
class DNSResolver(object):
def __init__(self, server_list=None):
self._loop = None
self._hosts = {}
self._hostname_status = {}
self._hostname_to_cb = {}
self._cb_to_hostname = {}
self._cache = lru_cache.LRUCache(timeout=300)
self._sock = None
if server_list is None:
self._servers = None
self._parse_resolv()
else:
self._servers = server_list
self._parse_hosts()
# TODO monitor hosts change and reload hosts
# TODO parse /etc/gai.conf and follow its rules
def _parse_resolv(self):
self._servers = []
try:
with open('/etc/resolv.conf', 'rb') as f:
content = f.readlines()
for line in content:
line = line.strip()
if line:
if line.startswith(b'nameserver'):
parts = line.split()
if len(parts) >= 2:
server = parts[1]
if common.is_ip(server) == socket.AF_INET:
if type(server) != str:
server = server.decode('utf8')
self._servers.append(server)
except IOError:
pass
if not self._servers:
self._servers = ['8.8.4.4', '8.8.8.8']
def _parse_hosts(self):
etc_path = '/etc/hosts'
if 'WINDIR' in os.environ:
etc_path = os.environ['WINDIR'] + '/system32/drivers/etc/hosts'
try:
with open(etc_path, 'rb') as f:
for line in f.readlines():
line = line.strip()
parts = line.split()
if len(parts) >= 2:
ip = parts[0]
if common.is_ip(ip):
for i in range(1, len(parts)):
hostname = parts[i]
if hostname:
self._hosts[hostname] = ip
except IOError:
self._hosts['localhost'] = '127.0.0.1'
def add_to_loop(self, loop):
if self._loop:
            raise Exception('already added to loop')
self._loop = loop
# TODO when dns server is IPv6
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
self._sock.setblocking(False)
loop.add(self._sock, eventloop.POLL_IN, self)
loop.add_periodic(self.handle_periodic)
def _call_callback(self, hostname, ip, error=None):
callbacks = self._hostname_to_cb.get(hostname, [])
for callback in callbacks:
if callback in self._cb_to_hostname:
del self._cb_to_hostname[callback]
if ip or error:
callback((hostname, ip), error)
else:
callback((hostname, None),
Exception('unknown hostname %s' % hostname))
if hostname in self._hostname_to_cb:
del self._hostname_to_cb[hostname]
if hostname in self._hostname_status:
del self._hostname_status[hostname]
def _handle_data(self, data):
response = parse_response(data)
if response and response.hostname:
hostname = response.hostname
ip = None
for answer in response.answers:
if answer[1] in (QTYPE_A, QTYPE_AAAA) and \
answer[2] == QCLASS_IN:
ip = answer[0]
break
if not ip and self._hostname_status.get(hostname, STATUS_IPV6) \
== STATUS_IPV4:
self._hostname_status[hostname] = STATUS_IPV6
self._send_req(hostname, QTYPE_AAAA)
else:
if ip:
self._cache[hostname] = ip
self._call_callback(hostname, ip)
elif self._hostname_status.get(hostname, None) == STATUS_IPV6:
for question in response.questions:
if question[1] == QTYPE_AAAA:
self._call_callback(hostname, None)
break
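    # Resolution flow (sketch): a hostname starts in STATUS_IPV4; if the A
    # response carries no usable address it is flipped to STATUS_IPV6 and
    # retried once as an AAAA query, after which the callbacks fire (with
    # an 'unknown hostname' error if that also failed).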
def handle_event(self, sock, fd, event):
if sock != self._sock:
return
if event & eventloop.POLL_ERR:
logging.error('dns socket err')
self._loop.remove(self._sock)
self._sock.close()
# TODO when dns server is IPv6
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
self._sock.setblocking(False)
self._loop.add(self._sock, eventloop.POLL_IN, self)
else:
data, addr = sock.recvfrom(1024)
if addr[0] not in self._servers:
                logging.warn('received a packet from an address other than '
                             'our dns servers')
return
self._handle_data(data)
def handle_periodic(self):
self._cache.sweep()
def remove_callback(self, callback):
hostname = self._cb_to_hostname.get(callback)
if hostname:
del self._cb_to_hostname[callback]
arr = self._hostname_to_cb.get(hostname, None)
if arr:
arr.remove(callback)
if not arr:
del self._hostname_to_cb[hostname]
if hostname in self._hostname_status:
del self._hostname_status[hostname]
def _send_req(self, hostname, qtype):
req = build_request(hostname, qtype)
for server in self._servers:
logging.debug('resolving %s with type %d using server %s',
hostname, qtype, server)
self._sock.sendto(req, (server, 53))
def resolve(self, hostname, callback):
if type(hostname) != bytes:
hostname = hostname.encode('utf8')
if not hostname:
callback(None, Exception('empty hostname'))
elif common.is_ip(hostname):
callback((hostname, hostname), None)
elif hostname in self._hosts:
logging.debug('hit hosts: %s', hostname)
ip = self._hosts[hostname]
callback((hostname, ip), None)
elif hostname in self._cache:
logging.debug('hit cache: %s', hostname)
ip = self._cache[hostname]
callback((hostname, ip), None)
else:
if not is_valid_hostname(hostname):
callback(None, Exception('invalid hostname: %s' % hostname))
return
arr = self._hostname_to_cb.get(hostname, None)
if not arr:
self._hostname_status[hostname] = STATUS_IPV4
self._send_req(hostname, QTYPE_A)
self._hostname_to_cb[hostname] = [callback]
self._cb_to_hostname[callback] = hostname
else:
arr.append(callback)
# TODO send again only if waited too long
self._send_req(hostname, QTYPE_A)
def close(self):
if self._sock:
if self._loop:
self._loop.remove_periodic(self.handle_periodic)
self._loop.remove(self._sock)
self._sock.close()
self._sock = None
def test():
dns_resolver = DNSResolver()
loop = eventloop.EventLoop()
dns_resolver.add_to_loop(loop)
global counter
counter = 0
def make_callback():
global counter
def callback(result, error):
global counter
# TODO: what can we assert?
print(result, error)
counter += 1
if counter == 9:
dns_resolver.close()
loop.stop()
a_callback = callback
return a_callback
assert(make_callback() != make_callback())
dns_resolver.resolve(b'google.com', make_callback())
dns_resolver.resolve('google.com', make_callback())
dns_resolver.resolve('example.com', make_callback())
dns_resolver.resolve('ipv6.google.com', make_callback())
dns_resolver.resolve('www.facebook.com', make_callback())
dns_resolver.resolve('ns2.google.com', make_callback())
dns_resolver.resolve('invalid.@!#$%^&$@.hostname', make_callback())
dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'long.hostname', make_callback())
dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'long.hostname', make_callback())
loop.run()
if __name__ == '__main__':
test()
|
andykimpe/chromium-test-npapi
|
refs/heads/master
|
tools/perf/page_sets/browser_control_click.py
|
9
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class BrowserControlClickPage(page_module.Page):
""" Why: Use a JavaScript .click() call to attach and detach a DOM tree
from a basic document.
"""
def __init__(self, page_set):
super(BrowserControlClickPage, self).__init__(
url='file://endure/browser_control_click.html',
page_set=page_set,
name='browser_control_click')
self.user_agent_type = 'desktop'
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForElement('#attach')
def RunEndure(self, action_runner):
action_runner.ClickElement('#attach')
action_runner.Wait(0.5)
action_runner.ClickElement('#detach')
action_runner.Wait(0.5)
class BrowserControlClickPageSet(page_set_module.PageSet):
""" Chrome Endure control test for the browser. """
def __init__(self):
super(BrowserControlClickPageSet, self).__init__(
user_agent_type='desktop')
self.AddPage(BrowserControlClickPage(self))
|
edx/edx-analytics-data-api-client
|
refs/heads/master
|
analyticsclient/course_totals.py
|
2
|
from analyticsclient.base import PostableCourseIDsEndpoint
from analyticsclient.constants import data_formats
class CourseTotals(PostableCourseIDsEndpoint):
"""Course aggregate data."""
path = 'course_totals/'
def course_totals(self, course_ids=None, data_format=data_formats.JSON):
"""
Get aggregate data about courses.
For more detailed parameter and return type descriptions, see the
edX Analytics Data API documentation.
Arguments:
course_ids (list[str]): Course IDs to filter by.
data_format (str): Data format for response.
                Must be data_formats.JSON or data_formats.CSV.
"""
return self.do_request(course_ids=course_ids, data={}, data_format=data_format)
|
philoniare/horizon
|
refs/heads/master
|
openstack_dashboard/test/test_data/ceilometer_data.py
|
56
|
# Copyright 2012 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometerclient.v2 import meters
from ceilometerclient.v2 import resources
from ceilometerclient.v2 import samples
from ceilometerclient.v2 import statistics
from keystoneclient.v2_0 import tenants
from keystoneclient.v2_0 import users
from openstack_dashboard.api import ceilometer
from openstack_dashboard.test.test_data import utils
def data(TEST):
TEST.ceilometer_users = utils.TestDataContainer()
TEST.ceilometer_tenants = utils.TestDataContainer()
TEST.resources = utils.TestDataContainer()
TEST.api_resources = utils.TestDataContainer()
TEST.samples = utils.TestDataContainer()
TEST.meters = utils.TestDataContainer()
TEST.statistics = utils.TestDataContainer()
TEST.global_disk_usages = utils.TestDataContainer()
TEST.global_network_usages = utils.TestDataContainer()
TEST.global_network_traffic_usages = utils.TestDataContainer()
TEST.global_object_store_usages = utils.TestDataContainer()
TEST.statistics_array = utils.TestDataContainer()
# users
ceilometer_user_dict1 = {'id': "1",
'name': 'user',
'email': 'test@example.com',
'password': 'password',
'token': 'test_token',
'project_id': '1',
'enabled': True,
'domain_id': "1"}
ceilometer_user_dict2 = {'id': "2",
'name': 'user2',
'email': 'test2@example.com',
'password': 'password',
'token': 'test_token',
'project_id': '2',
'enabled': True,
'domain_id': "2"}
TEST.ceilometer_users.add(users.User(None,
ceilometer_user_dict1))
TEST.ceilometer_users.add(users.User(None,
ceilometer_user_dict2))
# Tenants.
tenant_dict = {'id': "1",
'name': 'test_tenant',
'description': "a test tenant.",
'enabled': True,
'domain_id': '1'}
tenant_dict_2 = {'id': "2",
'name': 'disabled_tenant',
'description': "a disabled test tenant.",
'enabled': False,
'domain_id': '2'}
tenant_dict_3 = {'id': "3",
'name': u'\u4e91\u89c4\u5219',
'description': "an unicode-named tenant.",
'enabled': True,
'domain_id': '2'}
ceilometer_tenant = tenants.Tenant(tenants.TenantManager,
tenant_dict)
ceilometer_disabled_tenant = tenants.Tenant(tenants.TenantManager,
tenant_dict_2)
ceilometer_tenant_unicode = tenants.Tenant(tenants.TenantManager,
tenant_dict_3)
TEST.ceilometer_tenants.add(ceilometer_tenant,
ceilometer_disabled_tenant,
ceilometer_tenant_unicode)
# resources
resource_dict_1 = dict(
resource_id='fake_resource_id',
project_id='fake_project_id',
user_id="fake_user_id",
timestamp='2012-07-02T10:42:00.000000',
metadata={'tag': 'self.counter3', 'display_name': 'test-server'},
links=[{'url': 'test_url', 'rel': 'storage.objects'}],
)
resource_dict_2 = dict(
resource_id='fake_resource_id2',
project_id='fake_project_id',
user_id="fake_user_id",
timestamp='2012-07-02T10:42:00.000000',
metadata={'tag': 'self.counter3', 'display_name': 'test-server'},
links=[{'url': 'test_url', 'rel': 'storage.objects'}],
)
resource_dict_3 = dict(
resource_id='fake_resource_id3',
project_id='fake_project_id',
user_id="fake_user_id",
timestamp='2012-07-02T10:42:00.000000',
metadata={'tag': 'self.counter3', 'display_name': 'test-server'},
links=[{'url': 'test_url', 'rel': 'instance'}],
)
resource_dict_4 = dict(
resource_id='fake_resource_id3',
project_id='fake_project_id',
user_id="fake_user_id",
timestamp='2012-07-02T10:42:00.000000',
metadata={'tag': 'self.counter3', 'display_name': 'test-server'},
links=[{'url': 'test_url', 'rel': 'memory'}],
)
resource_1 = resources.Resource(resources.ResourceManager(None),
resource_dict_1)
resource_2 = resources.Resource(resources.ResourceManager(None),
resource_dict_2)
resource_3 = resources.Resource(resources.ResourceManager(None),
resource_dict_3)
resource_4 = resources.Resource(resources.ResourceManager(None),
resource_dict_4)
TEST.resources.add(resource_1)
TEST.resources.add(resource_2)
TEST.resources.add(resource_3)
# Having a separate set of fake objects for openstack_dashboard
# api Resource class. This is required because of additional methods
# defined in openstack_dashboard.api.ceilometer.Resource
api_resource_1 = ceilometer.Resource(resource_1)
api_resource_2 = ceilometer.Resource(resource_2)
api_resource_3 = ceilometer.Resource(resource_3)
api_resource_4 = ceilometer.Resource(resource_4)
TEST.api_resources.add(api_resource_1)
TEST.api_resources.add(api_resource_2)
TEST.api_resources.add(api_resource_3)
TEST.api_resources.add(api_resource_4)
# samples
sample_dict_1 = {'resource_id': 'fake_resource_id',
'project_id': 'fake_project_id',
'user_id': 'fake_user_id',
'counter_name': 'image',
'counter_type': 'gauge',
'counter_unit': 'image',
'counter_volume': 1,
'timestamp': '2012-12-21T11:00:55.000000',
'metadata': {'name1': 'value1', 'name2': 'value2'},
'message_id': 'fake_message_id'}
sample_dict_2 = {'resource_id': 'fake_resource_id2',
'project_id': 'fake_project_id',
'user_id': 'fake_user_id',
'counter_name': 'image',
'counter_type': 'gauge',
'counter_unit': 'image',
'counter_volume': 1,
'timestamp': '2012-12-21T11:00:55.000000',
'metadata': {'name1': 'value1', 'name2': 'value2'},
'message_id': 'fake_message_id'}
sample_1 = samples.Sample(samples.SampleManager(None), sample_dict_1)
sample_2 = samples.Sample(samples.SampleManager(None), sample_dict_2)
TEST.samples.add(sample_1)
TEST.samples.add(sample_2)
# meters
meter_dict_1 = {'name': 'instance',
'type': 'gauge',
'unit': 'instance',
'resource_id': 'fake_resource_id',
'project_id': 'fake_project_id',
'user_id': 'fake_user_id'}
meter_dict_2 = {'name': 'instance',
'type': 'gauge',
'unit': 'instance',
'resource_id': 'fake_resource_id',
'project_id': 'fake_project_id',
'user_id': 'fake_user_id'}
meter_dict_3 = {'name': 'disk.read.bytes',
'type': 'gauge',
'unit': 'instance',
'resource_id': 'fake_resource_id',
'project_id': 'fake_project_id',
'user_id': 'fake_user_id'}
meter_dict_4 = {'name': 'disk.write.bytes',
'type': 'gauge',
'unit': 'instance',
'resource_id': 'fake_resource_id',
'project_id': 'fake_project_id',
'user_id': 'fake_user_id'}
meter_1 = meters.Meter(meters.MeterManager(None), meter_dict_1)
meter_2 = meters.Meter(meters.MeterManager(None), meter_dict_2)
meter_3 = meters.Meter(meters.MeterManager(None), meter_dict_3)
meter_4 = meters.Meter(meters.MeterManager(None), meter_dict_4)
TEST.meters.add(meter_1)
TEST.meters.add(meter_2)
TEST.meters.add(meter_3)
TEST.meters.add(meter_4)
# statistic
statistic_dict_1 = {'min': 1,
'max': 9,
'avg': 4.55,
'sum': 45,
'count': 10,
'duration_start': '2012-12-21T11:00:55.000000',
'duration_end': '2012-12-21T11:00:55.000000',
'period': 7200,
'period_start': '2012-12-21T11:00:55.000000',
'period_end': '2012-12-21T11:00:55.000000'}
statistic_1 = statistics.Statistics(statistics.StatisticsManager(None),
statistic_dict_1)
TEST.statistics.add(statistic_1)
|
912/M-new
|
refs/heads/master
|
virtualenvironment/experimental/lib/python2.7/site-packages/django/contrib/gis/db/backends/postgis/models.py
|
3
|
"""
The GeometryColumns and SpatialRefSys models for the PostGIS backend.
"""
from django.db import models
from django.contrib.gis.db.backends.base import SpatialRefSysMixin
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class GeometryColumns(models.Model):
"""
    The 'geometry_columns' table from PostGIS. See the PostGIS
documentation at Ch. 4.2.2.
"""
f_table_catalog = models.CharField(max_length=256)
f_table_schema = models.CharField(max_length=256)
f_table_name = models.CharField(max_length=256)
f_geometry_column = models.CharField(max_length=256)
coord_dimension = models.IntegerField()
srid = models.IntegerField(primary_key=True)
type = models.CharField(max_length=30)
class Meta:
db_table = 'geometry_columns'
managed = False
@classmethod
def table_name_col(cls):
"""
Returns the name of the metadata column used to store the feature table
name.
"""
return 'f_table_name'
@classmethod
def geom_col_name(cls):
"""
Returns the name of the metadata column used to store the feature
geometry column.
"""
return 'f_geometry_column'
def __str__(self):
return "%s.%s - %dD %s field (SRID: %d)" % \
(self.f_table_name, self.f_geometry_column,
self.coord_dimension, self.type, self.srid)
class SpatialRefSys(models.Model, SpatialRefSysMixin):
"""
The 'spatial_ref_sys' table from PostGIS. See the PostGIS
    documentation at Ch. 4.2.1.
"""
srid = models.IntegerField(primary_key=True)
auth_name = models.CharField(max_length=256)
auth_srid = models.IntegerField()
srtext = models.CharField(max_length=2048)
proj4text = models.CharField(max_length=2048)
class Meta:
db_table = 'spatial_ref_sys'
managed = False
@property
def wkt(self):
return self.srtext
@classmethod
def wkt_col(cls):
return 'srtext'
|
jvassev/dd-agent
|
refs/heads/master
|
utils/debug.py
|
14
|
# stdlib
from functools import wraps
from pprint import pprint
import inspect
import os
import sys
# datadog
from config import get_checksd_path, get_confd_path
from util import get_os
def log_exceptions(logger):
"""
A decorator that catches any exceptions thrown by the decorated function and
logs them along with a traceback.
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
except Exception:
logger.exception(
u"Uncaught exception while running {0}".format(func.__name__)
)
raise
return result
return wrapper
return decorator
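# Illustrative usage (hypothetical function; assumes a stdlib logger):
#
#   @log_exceptions(logging.getLogger(__name__))
#   def flush_metrics():
#       ...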
def run_check(name, path=None):
"""
Test custom checks on Windows.
"""
# Read the config file
confd_path = path or os.path.join(get_confd_path(get_os()), '%s.yaml' % name)
    try:
        with open(confd_path) as f:
            config_str = f.read()
    except IOError:
        raise Exception('Unable to open configuration at %s' % confd_path)
# Run the check
check, instances = get_check(name, config_str)
if not instances:
raise Exception('YAML configuration returned no instances.')
for instance in instances:
check.check(instance)
if check.has_events():
print "Events:\n"
pprint(check.get_events(), indent=4)
print "Metrics:\n"
pprint(check.get_metrics(), indent=4)
def get_check(name, config_str):
from checks import AgentCheck
checksd_path = get_checksd_path(get_os())
if checksd_path not in sys.path:
sys.path.append(checksd_path)
check_module = __import__(name)
check_class = None
classes = inspect.getmembers(check_module, inspect.isclass)
    # note: don't shadow the `name` argument -- it is reused in the error below
    for _name, clsmember in classes:
if AgentCheck in clsmember.__bases__:
check_class = clsmember
break
if check_class is None:
raise Exception("Unable to import check %s. Missing a class that inherits AgentCheck" % name)
agentConfig = {
'version': '0.1',
'api_key': 'tota'
}
return check_class.from_yaml(yaml_text=config_str, check_name=name,
agentConfig=agentConfig)
|
psobot/SampleScanner
|
refs/heads/master
|
lib/audio_helpers.py
|
1
|
import time
import numpy
from utils import note_name, percent_to_db
from record import record
from constants import CLIPPING_THRESHOLD, \
CLIPPING_CHECK_NOTE, \
EXIT_ON_CLIPPING, \
SAMPLE_RATE
from midi_helpers import all_notes_off, CHANNEL_OFFSET
def generate_sample(
limit,
midiout,
note,
velocity,
midi_channel,
threshold,
print_progress=False,
audio_interface_name=None,
sample_rate=SAMPLE_RATE,
):
all_notes_off(midiout, midi_channel)
def after_start():
midiout.send_message([
CHANNEL_OFFSET + midi_channel, note, velocity
])
def on_time_up():
midiout.send_message([
CHANNEL_OFFSET + midi_channel, note, 0
])
return True # Get the release after keyup
return record(
limit=limit,
after_start=after_start,
on_time_up=on_time_up,
threshold=threshold,
print_progress=print_progress,
audio_interface_name=audio_interface_name,
sample_rate=sample_rate,
)
def sample_threshold_from_noise_floor(bit_depth, audio_interface_name):
time.sleep(1)
print "Sampling noise floor..."
sample_width, data, release_time = record(
limit=2.0,
after_start=None,
on_time_up=None,
threshold=0.1,
print_progress=True,
allow_empty_return=True,
audio_interface_name=audio_interface_name,
)
noise_floor = (
numpy.amax(numpy.absolute(data)) /
float(2 ** (bit_depth - 1))
)
print "Noise floor has volume %8.8f dBFS" % percent_to_db(noise_floor)
threshold = noise_floor * 1.1
print "Setting threshold to %8.8f dBFS" % percent_to_db(threshold)
return threshold
def check_for_clipping(
midiout,
midi_channel,
threshold,
bit_depth,
audio_interface_name,
):
time.sleep(1)
print "Checking for clipping and balance on note %s..." % (
note_name(CLIPPING_CHECK_NOTE)
)
sample_width, data, release_time = generate_sample(
limit=2.0,
midiout=midiout,
note=CLIPPING_CHECK_NOTE,
velocity=127,
midi_channel=midi_channel,
threshold=threshold,
print_progress=True,
audio_interface_name=audio_interface_name,
)
if data is None:
raise Exception(
"Can't check for clipping because all we recorded was silence.")
max_volume = (
numpy.amax(numpy.absolute(data)) /
float(2 ** (bit_depth - 1))
)
# All notes off, but like, a lot, again
for _ in xrange(0, 2):
all_notes_off(midiout, midi_channel)
print "Maximum volume is around %8.8f dBFS" % percent_to_db(max_volume)
if max_volume >= CLIPPING_THRESHOLD:
print "Clipping detected (%2.2f dBFS >= %2.2f dBFS) at max volume!" % (
percent_to_db(max_volume), percent_to_db(CLIPPING_THRESHOLD)
)
if EXIT_ON_CLIPPING:
raise ValueError("Clipping detected at max volume!")
# TODO: Finish implementing left/right balance check.
#
# max_volume_per_channel = [(
    #     numpy.amax(numpy.absolute(channel)) /
# float(2 ** (bit_depth - 1))
# ) for channel in data]
# avg_volume = (
# float(sum(max_volume_per_channel)) /
# float(len(max_volume_per_channel))
# )
# print 'avg', avg_volume
# print 'max', max_volume_per_channel
# volume_diff_per_channel = [
# float(x) / avg_volume for x in max_volume_per_channel
# ]
# if any([x > VOLUME_DIFF_THRESHOLD for x in volume_diff_per_channel]):
# print "Balance is skewed! Expected 50/50 volume, got %2.2f/%2.2f" % (
# volume_diff_per_channel[0] * 100,
# volume_diff_per_channel[1] * 100,
# )
# if EXIT_ON_BALANCE_BAD:
# raise ValueError("Balance skewed!")
# time.sleep(1)
def fundamental_frequency(signal, sampling_rate=1):
    # (parameter renamed from `list` to avoid shadowing the builtin)
    w = numpy.fft.rfft(signal)
    # rfftfreq matches the bin layout of rfft; fftfreq(len(w)) would
    # mis-scale the bins for a real-input FFT
    freqs = numpy.fft.rfftfreq(len(signal))
    # Find the peak in the coefficients
    # idx = numpy.argmax(numpy.abs(w[:len(w) / 2]))
    idx = numpy.argmax(numpy.abs(w))
    freq = freqs[idx]
    return abs(freq * sampling_rate)
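# Sanity check (illustrative, not part of the library): one second of a
# pure 440 Hz sine sampled at 44100 Hz should report ~440.0:
#   t = numpy.arange(44100) / 44100.0
#   fundamental_frequency(numpy.sin(2 * numpy.pi * 440 * t), 44100)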
|
cinashp/whatsappbot
|
refs/heads/master
|
yowsup/layers/stanzaregulator/layer.py
|
18
|
from yowsup.layers import YowLayer, YowLayerEvent, EventCallback
from yowsup.layers.network import YowNetworkLayer
class YowStanzaRegulator(YowLayer):
'''
send: bytearray -> bytearray
receive: bytearray -> bytearray
'''
def __init__(self):
super(YowStanzaRegulator, self).__init__()
self.buf = bytearray()
self.enabled = False
@EventCallback(YowNetworkLayer.EVENT_STATE_CONNECTED)
def onConnected(self, yowLayerEvent):
self.enabled = True
self.buf = bytearray()
@EventCallback(YowNetworkLayer.EVENT_STATE_DISCONNECTED)
def onDisconnected(self, yowLayerEvent):
self.enabled = False
def send(self, data):
self.toLower(data)
def receive(self, data):
if self.enabled:
self.buf.extend(data)
self.processReceived()
else:
self.toLower(data)
    def processReceived(self):
        if len(self.buf) < 3:
            # not even a full 3-byte stanza header buffered yet
            return
        metaData = self.buf[:3]
recvData = self.buf[3:]
firstByte = metaData[0]
stanzaFlag = (firstByte & 0xF0) >> 4
stanzaSize = ((metaData[1] << 8) + metaData[2]) | ((firstByte & 0x0F) << 16)
if len(recvData) < stanzaSize:
            # incomplete stanza: leave the data in the buffer until the rest arrives
return
oneMessageData = metaData + recvData[:stanzaSize]
self.buf = self.buf[len(oneMessageData):]
self.toUpper(oneMessageData)
        if len(self.buf) > 3:  # at least another header's worth of data to process
self.processReceived()
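    # Framing recap (as decoded above): each stanza has a 3-byte header;
    # the high nibble of byte 0 is a flags field (stanzaFlag) and the
    # remaining 20 bits (low nibble of byte 0 plus bytes 1-2) give the
    # payload length.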
def __str__(self):
return "Stanza Regulator Layer"
|
andialbrecht/sentry-comments
|
refs/heads/master
|
docs/conf.py
|
1
|
# -*- coding: utf-8 -*-
#
# sentry-comments documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 25 10:43:59 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sentry-comments'
copyright = u'2012, Andi Albrecht'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sentry-commentsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'sentry-comments.tex', u'sentry-comments Documentation',
u'Andi Albrecht', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sentry-comments', u'sentry-comments Documentation',
[u'Andi Albrecht'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sentry-comments', u'sentry-comments Documentation',
u'Andi Albrecht', 'sentry-comments', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
flipjack/django
|
refs/heads/master
|
oscar/apps/dashboard/shipping/config.py
|
58
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class ShippingDashboardConfig(AppConfig):
label = 'shipping_dashboard'
name = 'oscar.apps.dashboard.shipping'
verbose_name = _('Shipping dashboard')
|
mt2d2/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/html5lib/html5lib/treewalkers/genshistream.py
|
1730
|
from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName
from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT
from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
from . import _base
from ..constants import voidElements, namespaces
class TreeWalker(_base.TreeWalker):
def __iter__(self):
# Buffer the events so we can pass in the following one
previous = None
for event in self.tree:
if previous is not None:
for token in self.tokens(previous, event):
yield token
previous = event
# Don't forget the final event!
if previous is not None:
for token in self.tokens(previous, None):
yield token
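    # The one-event lookahead above lets tokens() inspect what follows a
    # START event, so emptyTag() can flag an HTML void element that was
    # (incorrectly) given content, i.e. a START not immediately followed
    # by its own END.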
def tokens(self, event, next):
kind, data, pos = event
if kind == START:
tag, attribs = data
name = tag.localname
namespace = tag.namespace
converted_attribs = {}
for k, v in attribs:
if isinstance(k, QName):
converted_attribs[(k.namespace, k.localname)] = v
else:
converted_attribs[(None, k)] = v
if namespace == namespaces["html"] and name in voidElements:
for token in self.emptyTag(namespace, name, converted_attribs,
not next or next[0] != END
or next[1] != tag):
yield token
else:
yield self.startTag(namespace, name, converted_attribs)
elif kind == END:
name = data.localname
namespace = data.namespace
if name not in voidElements:
yield self.endTag(namespace, name)
elif kind == COMMENT:
yield self.comment(data)
elif kind == TEXT:
for token in self.text(data):
yield token
elif kind == DOCTYPE:
yield self.doctype(*data)
elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS,
START_CDATA, END_CDATA, PI):
pass
else:
yield self.unknown(kind)
|
f3r/scikit-learn
|
refs/heads/master
|
sklearn/cluster/tests/test_spectral.py
|
262
|
"""Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
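    # S is a block-structured affinity matrix: samples 0-2 and 3-6 form two
    # tightly coupled groups joined only by weak (0.2) links, so every
    # solver/labeling combination should recover the [1, 1, 1, 0, 0, 0, 0]
    # split asserted below.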
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01)
S = rbf_kernel(X, gamma=1)
S = np.maximum(S - 1e-4, 0)
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
assert_equal(adjusted_rand_score(y, sp.labels_), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
# Test the discretize using a noise assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.random_integers(0, n_class, n_samples)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.toarray()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
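def _discretize_sketch():
    # Hedged illustration, not part of the original suite: discretize maps a
    # near-indicator matrix (one column per class) back to integer labels,
    # one label per row.
    rng = np.random.RandomState(0)
    indicator = np.eye(3)[[0, 0, 1, 2, 2]]  # 5 samples, 3 classes
    labels = discretize(indicator + 0.01 * rng.randn(5, 3), random_state=rng)
    assert_equal(labels.shape, (5,))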
|
tersmitten/ansible
|
refs/heads/devel
|
lib/ansible/modules/crypto/acme/acme_challenge_cert_helper.py
|
17
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018 Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: acme_challenge_cert_helper
author: "Felix Fontein (@felixfontein)"
version_added: "2.7"
short_description: Prepare certificates required for ACME challenges such as C(tls-alpn-01)
description:
- "Prepares certificates for ACME challenges such as C(tls-alpn-01)."
- "The raw data is provided by the M(acme_certificate) module, and needs to be
converted to a certificate to be used for challenge validation. This module
provides a simple way to generate the required certificates."
- "The C(tls-alpn-01) implementation is based on
L(the draft-05 version of the specification,https://tools.ietf.org/html/draft-ietf-acme-tls-alpn-05)."
seealso:
- name: Automatic Certificate Management Environment (ACME)
description: The specification of the ACME protocol (RFC 8555).
link: https://tools.ietf.org/html/rfc8555
- name: ACME TLS ALPN Challenge Extension
description: The current draft specification of the C(tls-alpn-01) challenge.
link: https://tools.ietf.org/html/draft-ietf-acme-tls-alpn-05
requirements:
- "cryptography >= 1.3"
options:
challenge:
description:
- "The challenge type."
type: str
required: yes
choices:
- tls-alpn-01
challenge_data:
description:
- "The C(challenge_data) entry provided by M(acme_certificate) for the challenge."
type: dict
required: yes
private_key_src:
description:
- "Path to a file containing the private key file to use for this challenge
certificate."
- "Mutually exclusive with C(private_key_content)."
type: path
private_key_content:
description:
- "Content of the private key to use for this challenge certificate."
- "Mutually exclusive with C(private_key_src)."
type: str
'''
EXAMPLES = '''
- name: Create challenges for a given CSR for sample.com
acme_certificate:
account_key_src: /etc/pki/cert/private/account.key
challenge: tls-alpn-01
csr: /etc/pki/cert/csr/sample.com.csr
dest: /etc/httpd/ssl/sample.com.crt
register: sample_com_challenge
- name: Create certificates for challenges
acme_challenge_cert_helper:
challenge: tls-alpn-01
challenge_data: "{{ item.value['tls-alpn-01'] }}"
private_key_src: /etc/pki/cert/key/sample.com.key
loop: "{{ sample_com_challenge.challenge_data | dictsort }}"
register: sample_com_challenge_certs
- name: Install challenge certificates
# We need to set up HTTPS such that for the domain,
# regular_certificate is delivered for regular connections,
# except if ALPN selects the "acme-tls/1"; then, the
# challenge_certificate must be delivered.
# This can for example be achieved with very new versions
# of NGINX; search for ssl_preread and
# ssl_preread_alpn_protocols for information on how to
# route by ALPN protocol.
...:
domain: "{{ item.domain }}"
challenge_certificate: "{{ item.challenge_certificate }}"
regular_certificate: "{{ item.regular_certificate }}"
private_key: /etc/pki/cert/key/sample.com.key
loop: "{{ sample_com_challenge_certs.results }}"
- name: Create certificate for a given CSR for sample.com
acme_certificate:
account_key_src: /etc/pki/cert/private/account.key
challenge: tls-alpn-01
csr: /etc/pki/cert/csr/sample.com.csr
dest: /etc/httpd/ssl/sample.com.crt
data: "{{ sample_com_challenge }}"
'''
RETURN = '''
domain:
description:
- "The domain the challenge is for. The certificate should be provided if
this is specified in the request's the C(Host) header."
returned: always
type: str
identifier_type:
description:
- "The identifier type for the actual resource identifier. Will be C(dns)
or C(ip)."
returned: always
type: str
version_added: "2.8"
identifier:
description:
- "The identifier for the actual resource. Will be a domain name if the
type is C(dns), or an IP address if the type is C(ip)."
returned: always
type: str
version_added: "2.8"
challenge_certificate:
description:
- "The challenge certificate in PEM format."
returned: always
type: str
regular_certificate:
description:
- "A self-signed certificate for the challenge domain."
- "If no existing certificate exists, can be used to set-up
https in the first place if that is needed for providing
the challenge."
returned: always
type: str
'''
from ansible.module_utils.acme import (
ModuleFailException,
read_file,
)
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_bytes, to_text
import base64
import datetime
import sys
import traceback
CRYPTOGRAPHY_IMP_ERR = None
try:
import cryptography
import cryptography.hazmat.backends
import cryptography.hazmat.primitives.serialization
import cryptography.hazmat.primitives.asymmetric.rsa
import cryptography.hazmat.primitives.asymmetric.ec
import cryptography.hazmat.primitives.asymmetric.padding
import cryptography.hazmat.primitives.hashes
import cryptography.hazmat.primitives.asymmetric.utils
import cryptography.x509
import cryptography.x509.oid
import ipaddress
from distutils.version import LooseVersion
HAS_CRYPTOGRAPHY = (LooseVersion(cryptography.__version__) >= LooseVersion('1.3'))
_cryptography_backend = cryptography.hazmat.backends.default_backend()
except ImportError as e:
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
HAS_CRYPTOGRAPHY = False
# Convert byte string to ASN1 encoded octet string
if sys.version_info[0] >= 3:
def encode_octet_string(octet_string):
if len(octet_string) >= 128:
            raise ModuleFailException('Cannot handle octet strings of 128 bytes or more')
return bytes([0x4, len(octet_string)]) + octet_string
else:
def encode_octet_string(octet_string):
if len(octet_string) >= 128:
            raise ModuleFailException('Cannot handle octet strings of 128 bytes or more')
return b'\x04' + chr(len(octet_string)) + octet_string
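# Worked example (not part of the original module): on either branch,
# encode_octet_string(b'abc') returns b'\x04\x03abc' -- the DER tag byte
# 0x04, a single short-form length byte, then the payload. The short form
# only encodes lengths 0-127, which is why 128 bytes or more are rejected.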
def main():
module = AnsibleModule(
argument_spec=dict(
challenge=dict(type='str', required=True, choices=['tls-alpn-01']),
challenge_data=dict(type='dict', required=True),
private_key_src=dict(type='path'),
private_key_content=dict(type='str', no_log=True),
),
required_one_of=(
['private_key_src', 'private_key_content'],
),
mutually_exclusive=(
['private_key_src', 'private_key_content'],
),
)
if not HAS_CRYPTOGRAPHY:
module.fail_json(msg=missing_required_lib('cryptography >= 1.3'), exception=CRYPTOGRAPHY_IMP_ERR)
try:
# Get parameters
challenge = module.params['challenge']
challenge_data = module.params['challenge_data']
# Get hold of private key
private_key_content = module.params.get('private_key_content')
if private_key_content is None:
private_key_content = read_file(module.params['private_key_src'])
else:
private_key_content = to_bytes(private_key_content)
try:
private_key = cryptography.hazmat.primitives.serialization.load_pem_private_key(private_key_content, password=None, backend=_cryptography_backend)
except Exception as e:
raise ModuleFailException('Error while loading private key: {0}'.format(e))
# Some common attributes
domain = to_text(challenge_data['resource'])
identifier_type, identifier = to_text(challenge_data.get('resource_original', 'dns:' + challenge_data['resource'])).split(':', 1)
subject = issuer = cryptography.x509.Name([])
not_valid_before = datetime.datetime.utcnow()
not_valid_after = datetime.datetime.utcnow() + datetime.timedelta(days=10)
if identifier_type == 'dns':
san = cryptography.x509.DNSName(identifier)
elif identifier_type == 'ip':
san = cryptography.x509.IPAddress(ipaddress.ip_address(identifier))
else:
raise ModuleFailException('Unsupported identifier type "{0}"'.format(identifier_type))
# Generate regular self-signed certificate
regular_certificate = cryptography.x509.CertificateBuilder().subject_name(
subject
).issuer_name(
issuer
).public_key(
private_key.public_key()
).serial_number(
cryptography.x509.random_serial_number()
).not_valid_before(
not_valid_before
).not_valid_after(
not_valid_after
).add_extension(
cryptography.x509.SubjectAlternativeName([san]),
critical=False,
).sign(
private_key,
cryptography.hazmat.primitives.hashes.SHA256(),
_cryptography_backend
)
# Process challenge
if challenge == 'tls-alpn-01':
value = base64.b64decode(challenge_data['resource_value'])
challenge_certificate = cryptography.x509.CertificateBuilder().subject_name(
subject
).issuer_name(
issuer
).public_key(
private_key.public_key()
).serial_number(
cryptography.x509.random_serial_number()
).not_valid_before(
not_valid_before
).not_valid_after(
not_valid_after
).add_extension(
cryptography.x509.SubjectAlternativeName([san]),
critical=False,
).add_extension(
cryptography.x509.UnrecognizedExtension(
cryptography.x509.ObjectIdentifier("1.3.6.1.5.5.7.1.31"),
encode_octet_string(value),
),
critical=True,
).sign(
private_key,
cryptography.hazmat.primitives.hashes.SHA256(),
_cryptography_backend
)
module.exit_json(
changed=True,
domain=domain,
identifier_type=identifier_type,
identifier=identifier,
challenge_certificate=challenge_certificate.public_bytes(cryptography.hazmat.primitives.serialization.Encoding.PEM),
regular_certificate=regular_certificate.public_bytes(cryptography.hazmat.primitives.serialization.Encoding.PEM)
)
except ModuleFailException as e:
e.do_fail(module)
if __name__ == '__main__':
main()
|
RPGOne/Skynet
|
refs/heads/Miho
|
scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/datasets/tests/test_svmlight_format.py
|
5
|
from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
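def _inline_format_sketch():
    # Hedged sketch, not part of the original suite: each svmlight line reads
    # "<label> <index>:<value> ...". Loading two such lines from memory gives
    # a 2x3 sparse matrix and a dense label vector.
    f = BytesIO(b("1 0:1.5 2:-2\n-1 1:0.25\n"))
    X, y = load_svmlight_file(f, zero_based=True)
    assert_equal(X.shape, (2, 3))
    assert_array_equal(y, [1, -1])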
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
    # test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
shutil.copyfileobj(open(datafile, "rb"), gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
shutil.copyfileobj(open(datafile, "rb"), BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
|
Arcanemagus/SickRage
|
refs/heads/master
|
lib/babelfish/converters/alpha2.py
|
88
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 the BabelFish authors. All rights reserved.
# Use of this source code is governed by the 3-clause BSD license
# that can be found in the LICENSE file.
#
from __future__ import unicode_literals
from . import LanguageEquivalenceConverter
from ..language import LANGUAGE_MATRIX
class Alpha2Converter(LanguageEquivalenceConverter):
CASE_SENSITIVE = True
SYMBOLS = {}
for iso_language in LANGUAGE_MATRIX:
if iso_language.alpha2:
SYMBOLS[iso_language.alpha3] = iso_language.alpha2
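# Illustrative note (not part of the original file): once registered under
# the name 'alpha2', this converter lets babelfish round-trip two-letter
# codes; assuming the standard babelfish API,
# Language.fromalpha2('en') == Language('eng') and Language('eng').alpha2 == 'en'.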
|
wscullin/spack
|
refs/heads/qmcpack
|
var/spack/repos/builtin/packages/intel/package.py
|
3
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
from spack import *
from spack.environment import EnvironmentModifications
class Intel(IntelPackage):
"""Intel Compilers."""
homepage = "https://software.intel.com/en-us/intel-parallel-studio-xe"
version('18.0.0', '31ba768fba6e7322957b03feaa3add28',
url='http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/12067/parallel_studio_xe_2018_composer_edition.tgz')
version('17.0.4', 'd03d351809e182c481dc65e07376d9a2',
url='http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/11541/parallel_studio_xe_2017_update4_composer_edition.tgz')
version('17.0.3', '52344df122c17ddff3687f84ceb21623',
url='http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/11464/parallel_studio_xe_2017_update3_composer_edition.tgz')
version('17.0.2', '2891ab1ece43eb61b6ab892f07c47f01',
url='http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/11302/parallel_studio_xe_2017_update2_composer_edition.tgz')
version('17.0.1', '1f31976931ed8ec424ac7c3ef56f5e85',
url='http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/10978/parallel_studio_xe_2017_update1_composer_edition.tgz')
version('17.0.0', 'b67da0065a17a05f110ed1d15c3c6312',
url='http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/9656/parallel_studio_xe_2017_composer_edition.tgz')
version('16.0.4', '2bc9bfc9be9c1968a6e42efb4378f40e',
url='http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/9785/parallel_studio_xe_2016_composer_edition_update4.tgz')
version('16.0.3', '3208eeabee951fc27579177b593cefe9',
url='http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/9063/parallel_studio_xe_2016_composer_edition_update3.tgz')
version('16.0.2', '1133fb831312eb519f7da897fec223fa',
url='http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/8680/parallel_studio_xe_2016_composer_edition_update2.tgz')
variant('rpath', default=True, description='Add rpath to .cfg files')
components = [
# Common files
'intel-comp-',
'intel-openmp',
# C/C++
'intel-icc',
# Fortran
'intel-ifort',
]
@property
def license_files(self):
return [
'Licenses/license.lic',
join_path('compilers_and_libraries', 'linux', 'bin',
'intel64', 'license.lic')
]
@run_after('install')
def rpath_configuration(self):
if '+rpath' in self.spec:
bin_dir = join_path(self.prefix, 'compilers_and_libraries',
'linux', 'bin', 'intel64')
lib_dir = join_path(self.prefix, 'compilers_and_libraries',
'linux', 'compiler', 'lib', 'intel64_lin')
for compiler in ['icc', 'icpc', 'ifort']:
cfgfilename = join_path(bin_dir, '{0}.cfg'.format(compiler))
with open(cfgfilename, 'w') as f:
f.write('-Xlinker -rpath -Xlinker {0}\n'.format(lib_dir))
def setup_environment(self, spack_env, run_env):
"""Adds environment variables to the generated module file.
These environment variables come from running:
.. code-block:: console
$ source bin/compilervars.sh intel64
"""
# NOTE: Spack runs setup_environment twice, once pre-build to set up
# the build environment, and once post-installation to determine
# the environment variables needed at run-time to add to the module
# file. The script we need to source is only present post-installation,
# so check for its existence before sourcing.
# TODO: At some point we should split setup_environment into
# setup_build_environment and setup_run_environment to get around
# this problem.
compilervars = os.path.join(self.prefix.bin, 'compilervars.sh')
if os.path.isfile(compilervars):
run_env.extend(EnvironmentModifications.from_sourcing_file(
compilervars, 'intel64'))
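# Illustrative note (not part of the original package): with the default
# '+rpath' variant, e.g. ``spack install intel@18.0.0``, each compiler reads
# its generated .cfg file and embeds an rpath to its own runtime libraries,
# so binaries it builds run without LD_LIBRARY_PATH adjustments.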
|
Soya93/Extract-Refactoring
|
refs/heads/master
|
python/testData/inspections/ArgumentEqualDefault_after.py
|
83
|
def foo(a, b = 345, c = 1):
pass
#PY-3261
foo(1, c=22)
|
ArthurGarnier/SickRage
|
refs/heads/master
|
lib/tornado/curl_httpclient.py
|
19
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Non-blocking HTTP client implementation using pycurl."""
from __future__ import absolute_import, division, print_function
import collections
import functools
import logging
import pycurl # type: ignore
import threading
import time
from io import BytesIO
from tornado import httputil
from tornado import ioloop
from tornado import stack_context
from tornado.escape import utf8, native_str
from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main
curl_log = logging.getLogger('tornado.curl_httpclient')
class CurlAsyncHTTPClient(AsyncHTTPClient):
def initialize(self, io_loop, max_clients=10, defaults=None):
super(CurlAsyncHTTPClient, self).initialize(io_loop, defaults=defaults)
self._multi = pycurl.CurlMulti()
self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
self._curls = [self._curl_create() for i in range(max_clients)]
self._free_list = self._curls[:]
self._requests = collections.deque()
self._fds = {}
self._timeout = None
# libcurl has bugs that sometimes cause it to not report all
# relevant file descriptors and timeouts to TIMERFUNCTION/
# SOCKETFUNCTION. Mitigate the effects of such bugs by
# forcing a periodic scan of all active requests.
self._force_timeout_callback = ioloop.PeriodicCallback(
self._handle_force_timeout, 1000, io_loop=io_loop)
self._force_timeout_callback.start()
# Work around a bug in libcurl 7.29.0: Some fields in the curl
# multi object are initialized lazily, and its destructor will
# segfault if it is destroyed without having been used. Add
# and remove a dummy handle to make sure everything is
# initialized.
dummy_curl_handle = pycurl.Curl()
self._multi.add_handle(dummy_curl_handle)
self._multi.remove_handle(dummy_curl_handle)
def close(self):
self._force_timeout_callback.stop()
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
for curl in self._curls:
curl.close()
self._multi.close()
super(CurlAsyncHTTPClient, self).close()
def fetch_impl(self, request, callback):
self._requests.append((request, callback))
self._process_queue()
self._set_timeout(0)
def _handle_socket(self, event, fd, multi, data):
"""Called by libcurl when it wants to change the file descriptors
it cares about.
"""
event_map = {
pycurl.POLL_NONE: ioloop.IOLoop.NONE,
pycurl.POLL_IN: ioloop.IOLoop.READ,
pycurl.POLL_OUT: ioloop.IOLoop.WRITE,
pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE
}
if event == pycurl.POLL_REMOVE:
if fd in self._fds:
self.io_loop.remove_handler(fd)
del self._fds[fd]
else:
ioloop_event = event_map[event]
# libcurl sometimes closes a socket and then opens a new
# one using the same FD without giving us a POLL_NONE in
# between. This is a problem with the epoll IOLoop,
# because the kernel can tell when a socket is closed and
# removes it from the epoll automatically, causing future
# update_handler calls to fail. Since we can't tell when
# this has happened, always use remove and re-add
# instead of update.
if fd in self._fds:
self.io_loop.remove_handler(fd)
self.io_loop.add_handler(fd, self._handle_events,
ioloop_event)
self._fds[fd] = ioloop_event
def _set_timeout(self, msecs):
"""Called by libcurl to schedule a timeout."""
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = self.io_loop.add_timeout(
self.io_loop.time() + msecs / 1000.0, self._handle_timeout)
def _handle_events(self, fd, events):
"""Called by IOLoop when there is activity on one of our
file descriptors.
"""
action = 0
if events & ioloop.IOLoop.READ:
action |= pycurl.CSELECT_IN
if events & ioloop.IOLoop.WRITE:
action |= pycurl.CSELECT_OUT
while True:
try:
ret, num_handles = self._multi.socket_action(fd, action)
except pycurl.error as e:
ret = e.args[0]
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
self._finish_pending_requests()
def _handle_timeout(self):
"""Called by IOLoop when the requested timeout has passed."""
with stack_context.NullContext():
self._timeout = None
while True:
try:
ret, num_handles = self._multi.socket_action(
pycurl.SOCKET_TIMEOUT, 0)
except pycurl.error as e:
ret = e.args[0]
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
self._finish_pending_requests()
# In theory, we shouldn't have to do this because curl will
# call _set_timeout whenever the timeout changes. However,
# sometimes after _handle_timeout we will need to reschedule
# immediately even though nothing has changed from curl's
# perspective. This is because when socket_action is
# called with SOCKET_TIMEOUT, libcurl decides internally which
# timeouts need to be processed by using a monotonic clock
# (where available) while tornado uses python's time.time()
# to decide when timeouts have occurred. When those clocks
# disagree on elapsed time (as they will whenever there is an
# NTP adjustment), tornado might call _handle_timeout before
# libcurl is ready. After each timeout, resync the scheduled
# timeout with libcurl's current state.
new_timeout = self._multi.timeout()
if new_timeout >= 0:
self._set_timeout(new_timeout)
def _handle_force_timeout(self):
"""Called by IOLoop periodically to ask libcurl to process any
events it may have forgotten about.
"""
with stack_context.NullContext():
while True:
try:
ret, num_handles = self._multi.socket_all()
except pycurl.error as e:
ret = e.args[0]
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
self._finish_pending_requests()
def _finish_pending_requests(self):
"""Process any requests that were completed by the last
call to multi.socket_action.
"""
while True:
num_q, ok_list, err_list = self._multi.info_read()
for curl in ok_list:
self._finish(curl)
for curl, errnum, errmsg in err_list:
self._finish(curl, errnum, errmsg)
if num_q == 0:
break
self._process_queue()
def _process_queue(self):
with stack_context.NullContext():
while True:
started = 0
while self._free_list and self._requests:
started += 1
curl = self._free_list.pop()
(request, callback) = self._requests.popleft()
curl.info = {
"headers": httputil.HTTPHeaders(),
"buffer": BytesIO(),
"request": request,
"callback": callback,
"curl_start_time": time.time(),
}
try:
self._curl_setup_request(
curl, request, curl.info["buffer"],
curl.info["headers"])
except Exception as e:
# If there was an error in setup, pass it on
# to the callback. Note that allowing the
# error to escape here will appear to work
# most of the time since we are still in the
# caller's original stack frame, but when
# _process_queue() is called from
# _finish_pending_requests the exceptions have
# nowhere to go.
self._free_list.append(curl)
callback(HTTPResponse(
request=request,
code=599,
error=e))
else:
self._multi.add_handle(curl)
if not started:
break
def _finish(self, curl, curl_error=None, curl_message=None):
info = curl.info
curl.info = None
self._multi.remove_handle(curl)
self._free_list.append(curl)
buffer = info["buffer"]
if curl_error:
error = CurlError(curl_error, curl_message)
code = error.code
effective_url = None
buffer.close()
buffer = None
else:
error = None
code = curl.getinfo(pycurl.HTTP_CODE)
effective_url = curl.getinfo(pycurl.EFFECTIVE_URL)
buffer.seek(0)
# the various curl timings are documented at
# http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html
time_info = dict(
queue=info["curl_start_time"] - info["request"].start_time,
namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME),
connect=curl.getinfo(pycurl.CONNECT_TIME),
pretransfer=curl.getinfo(pycurl.PRETRANSFER_TIME),
starttransfer=curl.getinfo(pycurl.STARTTRANSFER_TIME),
total=curl.getinfo(pycurl.TOTAL_TIME),
redirect=curl.getinfo(pycurl.REDIRECT_TIME),
)
try:
info["callback"](HTTPResponse(
request=info["request"], code=code, headers=info["headers"],
buffer=buffer, effective_url=effective_url, error=error,
reason=info['headers'].get("X-Http-Reason", None),
request_time=time.time() - info["curl_start_time"],
time_info=time_info))
except Exception:
self.handle_callback_exception(info["callback"])
def handle_callback_exception(self, callback):
self.io_loop.handle_callback_exception(callback)
def _curl_create(self):
curl = pycurl.Curl()
if curl_log.isEnabledFor(logging.DEBUG):
curl.setopt(pycurl.VERBOSE, 1)
curl.setopt(pycurl.DEBUGFUNCTION, self._curl_debug)
if hasattr(pycurl, 'PROTOCOLS'): # PROTOCOLS first appeared in pycurl 7.19.5 (2014-07-12)
curl.setopt(pycurl.PROTOCOLS, pycurl.PROTO_HTTP | pycurl.PROTO_HTTPS)
curl.setopt(pycurl.REDIR_PROTOCOLS, pycurl.PROTO_HTTP | pycurl.PROTO_HTTPS)
return curl
def _curl_setup_request(self, curl, request, buffer, headers):
curl.setopt(pycurl.URL, native_str(request.url))
# libcurl's magic "Expect: 100-continue" behavior causes delays
# with servers that don't support it (which include, among others,
# Google's OpenID endpoint). Additionally, this behavior has
# a bug in conjunction with the curl_multi_socket_action API
# (https://sourceforge.net/tracker/?func=detail&atid=100976&aid=3039744&group_id=976),
# which increases the delays. It's more trouble than it's worth,
# so just turn off the feature (yes, setting Expect: to an empty
# value is the official way to disable this)
if "Expect" not in request.headers:
request.headers["Expect"] = ""
# libcurl adds Pragma: no-cache by default; disable that too
if "Pragma" not in request.headers:
request.headers["Pragma"] = ""
curl.setopt(pycurl.HTTPHEADER,
["%s: %s" % (native_str(k), native_str(v))
for k, v in request.headers.get_all()])
curl.setopt(pycurl.HEADERFUNCTION,
functools.partial(self._curl_header_callback,
headers, request.header_callback))
if request.streaming_callback:
def write_function(chunk):
self.io_loop.add_callback(request.streaming_callback, chunk)
else:
write_function = buffer.write
if bytes is str: # py2
curl.setopt(pycurl.WRITEFUNCTION, write_function)
else: # py3
# Upstream pycurl doesn't support py3, but ubuntu 12.10 includes
# a fork/port. That version has a bug in which it passes unicode
# strings instead of bytes to the WRITEFUNCTION. This means that
# if you use a WRITEFUNCTION (which tornado always does), you cannot
# download arbitrary binary data. This needs to be fixed in the
# ported pycurl package, but in the meantime this lambda will
# make it work for downloading (utf8) text.
curl.setopt(pycurl.WRITEFUNCTION, lambda s: write_function(utf8(s)))
curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects)
curl.setopt(pycurl.MAXREDIRS, request.max_redirects)
curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(1000 * request.connect_timeout))
curl.setopt(pycurl.TIMEOUT_MS, int(1000 * request.request_timeout))
if request.user_agent:
curl.setopt(pycurl.USERAGENT, native_str(request.user_agent))
else:
curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)")
if request.network_interface:
curl.setopt(pycurl.INTERFACE, request.network_interface)
if request.decompress_response:
curl.setopt(pycurl.ENCODING, "gzip,deflate")
else:
curl.setopt(pycurl.ENCODING, "none")
if request.proxy_host and request.proxy_port:
curl.setopt(pycurl.PROXY, request.proxy_host)
curl.setopt(pycurl.PROXYPORT, request.proxy_port)
if request.proxy_username:
credentials = '%s:%s' % (request.proxy_username,
request.proxy_password)
curl.setopt(pycurl.PROXYUSERPWD, credentials)
if (request.proxy_auth_mode is None or
request.proxy_auth_mode == "basic"):
curl.setopt(pycurl.PROXYAUTH, pycurl.HTTPAUTH_BASIC)
elif request.proxy_auth_mode == "digest":
curl.setopt(pycurl.PROXYAUTH, pycurl.HTTPAUTH_DIGEST)
else:
raise ValueError(
"Unsupported proxy_auth_mode %s" % request.proxy_auth_mode)
else:
curl.setopt(pycurl.PROXY, '')
curl.unsetopt(pycurl.PROXYUSERPWD)
if request.validate_cert:
curl.setopt(pycurl.SSL_VERIFYPEER, 1)
curl.setopt(pycurl.SSL_VERIFYHOST, 2)
else:
curl.setopt(pycurl.SSL_VERIFYPEER, 0)
curl.setopt(pycurl.SSL_VERIFYHOST, 0)
if request.ca_certs is not None:
curl.setopt(pycurl.CAINFO, request.ca_certs)
else:
# There is no way to restore pycurl.CAINFO to its default value
# (Using unsetopt makes it reject all certificates).
# I don't see any way to read the default value from python so it
# can be restored later. We'll have to just leave CAINFO untouched
# if no ca_certs file was specified, and require that if any
# request uses a custom ca_certs file, they all must.
pass
if request.allow_ipv6 is False:
# Curl behaves reasonably when DNS resolution gives an ipv6 address
# that we can't reach, so allow ipv6 unless the user asks to disable.
curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)
else:
curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER)
# Set the request method through curl's irritating interface which makes
# up names for almost every single method
curl_options = {
"GET": pycurl.HTTPGET,
"POST": pycurl.POST,
"PUT": pycurl.UPLOAD,
"HEAD": pycurl.NOBODY,
}
custom_methods = set(["DELETE", "OPTIONS", "PATCH"])
for o in curl_options.values():
curl.setopt(o, False)
if request.method in curl_options:
curl.unsetopt(pycurl.CUSTOMREQUEST)
curl.setopt(curl_options[request.method], True)
elif request.allow_nonstandard_methods or request.method in custom_methods:
curl.setopt(pycurl.CUSTOMREQUEST, request.method)
else:
raise KeyError('unknown method ' + request.method)
body_expected = request.method in ("POST", "PATCH", "PUT")
body_present = request.body is not None
if not request.allow_nonstandard_methods:
# Some HTTP methods nearly always have bodies while others
# almost never do. Fail in this case unless the user has
# opted out of sanity checks with allow_nonstandard_methods.
if ((body_expected and not body_present) or
(body_present and not body_expected)):
raise ValueError(
'Body must %sbe None for method %s (unless '
'allow_nonstandard_methods is true)' %
('not ' if body_expected else '', request.method))
if body_expected or body_present:
if request.method == "GET":
# Even with `allow_nonstandard_methods` we disallow
# GET with a body (because libcurl doesn't allow it
# unless we use CUSTOMREQUEST). While the spec doesn't
# forbid clients from sending a body, it arguably
# disallows the server from doing anything with them.
raise ValueError('Body must be None for GET request')
request_buffer = BytesIO(utf8(request.body or ''))
def ioctl(cmd):
if cmd == curl.IOCMD_RESTARTREAD:
request_buffer.seek(0)
curl.setopt(pycurl.READFUNCTION, request_buffer.read)
curl.setopt(pycurl.IOCTLFUNCTION, ioctl)
if request.method == "POST":
curl.setopt(pycurl.POSTFIELDSIZE, len(request.body or ''))
else:
curl.setopt(pycurl.UPLOAD, True)
curl.setopt(pycurl.INFILESIZE, len(request.body or ''))
if request.auth_username is not None:
userpwd = "%s:%s" % (request.auth_username, request.auth_password or '')
if request.auth_mode is None or request.auth_mode == "basic":
curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
elif request.auth_mode == "digest":
curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_DIGEST)
else:
raise ValueError("Unsupported auth_mode %s" % request.auth_mode)
curl.setopt(pycurl.USERPWD, native_str(userpwd))
curl_log.debug("%s %s (username: %r)", request.method, request.url,
request.auth_username)
else:
curl.unsetopt(pycurl.USERPWD)
curl_log.debug("%s %s", request.method, request.url)
if request.client_cert is not None:
curl.setopt(pycurl.SSLCERT, request.client_cert)
if request.client_key is not None:
curl.setopt(pycurl.SSLKEY, request.client_key)
if request.ssl_options is not None:
raise ValueError("ssl_options not supported in curl_httpclient")
if threading.activeCount() > 1:
# libcurl/pycurl is not thread-safe by default. When multiple threads
# are used, signals should be disabled. This has the side effect
# of disabling DNS timeouts in some environments (when libcurl is
# not linked against ares), so we don't do it when there is only one
# thread. Applications that use many short-lived threads may need
# to set NOSIGNAL manually in a prepare_curl_callback since
# there may not be any other threads running at the time we call
# threading.activeCount.
curl.setopt(pycurl.NOSIGNAL, 1)
if request.prepare_curl_callback is not None:
request.prepare_curl_callback(curl)
def _curl_header_callback(self, headers, header_callback, header_line):
header_line = native_str(header_line.decode('latin1'))
if header_callback is not None:
self.io_loop.add_callback(header_callback, header_line)
# header_line as returned by curl includes the end-of-line characters.
# whitespace at the start should be preserved to allow multi-line headers
header_line = header_line.rstrip()
if header_line.startswith("HTTP/"):
headers.clear()
try:
(__, __, reason) = httputil.parse_response_start_line(header_line)
header_line = "X-Http-Reason: %s" % reason
except httputil.HTTPInputError:
return
if not header_line:
return
headers.parse_line(header_line)
def _curl_debug(self, debug_type, debug_msg):
debug_types = ('I', '<', '>', '<', '>')
if debug_type == 0:
curl_log.debug('%s', debug_msg.strip())
elif debug_type in (1, 2):
for line in debug_msg.splitlines():
curl_log.debug('%s %s', debug_types[debug_type], line)
elif debug_type == 4:
curl_log.debug('%s %r', debug_types[debug_type], debug_msg)
class CurlError(HTTPError):
def __init__(self, errno, message):
HTTPError.__init__(self, 599, message)
self.errno = errno
if __name__ == "__main__":
AsyncHTTPClient.configure(CurlAsyncHTTPClient)
main()
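# Hedged usage sketch (not part of the original module): configure this
# implementation before the first AsyncHTTPClient is created, e.g.
#
#     AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
#     client = AsyncHTTPClient()
#     client.fetch("http://example.com/", callback=handle_response)
#
# (handle_response being a hypothetical application callback), after which
# all fetches go through libcurl instead of the default simple_httpclient.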
|
JioCloud/nova
|
refs/heads/master
|
nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py
|
146
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from sqlalchemy import Index, MetaData, Table
from nova.i18n import _LI
LOG = logging.getLogger(__name__)
def _get_deleted_expire_index(table):
members = sorted(['deleted', 'expire'])
for idx in table.indexes:
if sorted(idx.columns.keys()) == members:
return idx
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
reservations = Table('reservations', meta, autoload=True)
if _get_deleted_expire_index(reservations):
LOG.info(_LI('Skipped adding reservations_deleted_expire_idx '
'because an equivalent index already exists.'))
return
# Based on expire_reservations query
# from: nova/db/sqlalchemy/api.py
index = Index('reservations_deleted_expire_idx',
reservations.c.deleted, reservations.c.expire)
index.create(migrate_engine)
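# For illustration (not part of the original migration): the composite index
# above serves the expire_reservations() query in nova/db/sqlalchemy/api.py,
# which has the shape
#     SELECT ... FROM reservations WHERE deleted = 0 AND expire < :now
# so both filtered columns are covered by a single index scan.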
|
msebire/intellij-community
|
refs/heads/master
|
python/testData/docstrings/simpleGoogleDocString.py
|
53
|
def func(x, y, *args, **kwargs):
"""Summary
Parameters:
x (int) : first parameter
y: second parameter
with longer description
Raises:
Exception: if anything bad happens
Returns:
None: always
"""
pass
|
jbeezley/girder
|
refs/heads/master
|
test/test_notification.py
|
3
|
# -*- coding: utf-8 -*-
import datetime
import pytest
from pytest_girder.assertions import assertStatus, assertStatusOk
from girder.models.notification import Notification
OLD_TIME = datetime.datetime.utcnow() - datetime.timedelta(days=3)
SINCE = OLD_TIME + datetime.timedelta(days=1)
@pytest.fixture
def notifications(user):
model = Notification()
doc1 = model.createNotification('type', {}, user)
doc2 = model.createNotification('type', {}, user)
doc2['updated'] = OLD_TIME
doc2['time'] = OLD_TIME
model.save(doc2)
yield [doc1, doc2]
def testListAllNotifications(server, user, notifications):
resp = server.request(path='/notification', user=user)
assertStatusOk(resp)
n1, n2 = notifications
# Make sure we get results back in chronological order
assert [n['_id'] for n in resp.json] == [str(n2['_id']), str(n1['_id'])]
def testListNotificationsSinceTime(server, user, notifications):
resp = server.request(path='/notification', user=user, params={'since': SINCE.isoformat()})
assertStatusOk(resp)
assert len(resp.json) == 1
assert resp.json[0]['_id'] == str(notifications[0]['_id'])
def testListNotificationsAuthError(server):
resp = server.request(path='/notification')
assertStatus(resp, 401)
|
lacs-ufpa/free-mtrix
|
refs/heads/master
|
data_analysis_helpers/variability_analysis.py
|
1
|
# -*- coding: utf-8 -*-
"""
Free-mtrix - Free cultural selection and social behavior experiments.
Copyright (C) 2016-2019 Carlos Rafael Fernandes Picanço.
Copyright (C) 2016-2019 Thais Maria Monteiro Guimarães.
Copyright (C) 2016-2019 Universidade Federal do Pará.
The present file is distributed under the terms of the GNU General Public License (GPL v3.0).
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
from math import factorial as f
import numpy as np
from entropy_analysis import entropy
from porcentagem_movel import load_file
from copy import deepcopy
# lines
# amarelo = [1,8]
# verde = [2,7]
# vermelho = [3,6]
# azul = [4,9]
# roxo = [5,10]
# preto = [0]
# color names
amarelo = 'amarela '
verde = 'verde '
vermelho = 'vermelha '
azul = 'azul '
roxo = 'rosa '
preto = 'preto '
COLOR_SET = {
1: [azul + verde + amarelo, 0],
2: [azul + verde + vermelho, 0],
3: [azul + verde + roxo, 0],
4: [azul + amarelo + vermelho, 0],
5: [azul + amarelo + roxo, 0],
6: [azul + vermelho + roxo, 0],
7: [amarelo + verde + vermelho, 0],
8: [amarelo + verde + roxo, 0],
9: [amarelo + roxo + vermelho, 0],
10: [vermelho + verde + roxo, 0],
11: [preto + azul + amarelo, 0],
12: [preto + azul + vermelho, 0],
13: [preto + azul + verde, 0],
14: [preto + azul + roxo, 0],
15: [preto + amarelo + vermelho, 0],
16: [preto + amarelo + verde, 0],
17: [preto + amarelo + roxo, 0],
18: [preto + vermelho + verde, 0],
19: [preto + vermelho + roxo, 0],
20: [preto + verde + roxo, 0]
}
def analyse_condition(session, c):
print(len(session['LA']))
all_data = zip(session['LA'][c[0]:c[1]], session['LB'][c[0]:c[1]],session['LC'][c[0]:c[1]])
total_count = len(session['LA'][c[0]:c[1]])
print('All combinations in condition:', total_count)
unique = set(all_data)
unique = sorted(list(unique))
unique_count = len(unique)
print('Unique combinations in condition:', unique_count)
    print(unique_count/total_count*100, '% unique')
print('')
def count_combinations(session, c, print_each_combination=False,offset=0):
"""
Count combinations from color names
"""
color_set = deepcopy(COLOR_SET)
all_data = zip(session['LAC'][c[0]:c[1]], session['LBC'][c[0]:c[1]],session['LCC'][c[0]:c[1]])
i = 0
last_a = None
    for a, b, cc in all_data:  # renamed from `c` to avoid shadowing the condition tuple
        i += 1
        if a != last_a:
            for key, value in color_set.items():
                if a != b != cc != a:
                    if a.decode() in value[0]:
                        if b.decode() in value[0]:
                            if cc.decode() in value[0]:
color_set[key][1] += 1
if print_each_combination:
print(str(i+offset)+'\t'+str(key)+'\t'+str(color_set[key][1]))
last_a = a
return color_set, i+offset
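def _combination_count_sketch():
    # Hedged arithmetic note, not part of the original script: COLOR_SET has
    # 20 entries because choosing 3 of the 5 non-black colors gives
    # C(5, 3) = 10 sets (keys 1-10), and pairing black with 2 of the 5
    # gives C(5, 2) = 10 more (keys 11-20).
    assert f(5) // (f(3) * f(2)) == 10
    assert len(COLOR_SET) == 20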
if __name__ == "__main__":
session_names = [
'variability_data_mc3.txt'
]
conditions = [
(0,100),
(100,150),
(150,250),
(250,350)
]
for session_name in session_names:
print(session_name)
session = load_file(session_name, 0, 0)
offset = 0
for i, condition in enumerate(conditions):
            # cumulative count per cycle
print('Condition ', i + 1, ' cumulative count per cycle')
color_set, offset = count_combinations(session, condition, True, offset)
print('\n''Condition ', i + 1, 'bins')
dtypes = []
entropy_input = []
for key, value in color_set.items():
print(key,'\t',value[1])
entropy_input.append(value[1])
            # relative entropy of the combinations
print('Entropy:', entropy(entropy_input[0:10],conditions[i][1]-conditions[i][0]),'\n')
# max = 220 :
# print(f(11+3-1)/(f(3)*f(11-1)))
# unique = set()
# for session_name in session_names:
# print(session_name)
# session = load_file(session_name, 0, 0)
# for i, condition in enumerate(conditions):
# print('Condition ', i+1)
# analyse_condition(session, condition)
# all_data = zip(session['LA'], session['LB'],session['LC'])
# total_count = len(session['LA'])
# print('All combinations in session:', total_count)
# unique = set(all_data)
# unique = sorted(list(unique))
# unique_count = len(unique)
# print('Unique combinations in session:', unique_count)
# print(unique_count/total_count*100,'% unique')
# print('')
|
RevansChen/online-judge
|
refs/heads/master
|
Codewars/8kyu/cis-122-number-1-simple-printing/Python/solution1.py
|
1
|
# Python - 2.7.6
# Uncomment the following line to test this code in Python 3.0:
#x_print = print
# Comment Create Python 2.7 code for all the following Display statements.
# Display 'Hello World!'
print 'Hello World!'
# Declare String course = 'CIS 122'
# Declare String name = 'Intro to Software Design'
#
# Display 'Welcome to', course, ':', name
course = 'CIS 122'
name = 'Intro to Software Design'
print 'Welcome to ' + course + ': ' + name
# Declare Real a = 1.1
# Declare Integer b = 3
# Declare Real c
#
# Set c = a + b
#
# Display 'The sum of', a, 'and', b, 'is', c
a = 1.1
b = 3
c = a + b
print 'The sum of ' + str(a) + ' and ' + str(b) + ' is ' + str(c)
# Comment Create Python 3 code for all the following Display statements
# Display 'Hello World!'
x_print('Hello World!')
# Declare String language = 'Python'
# Declare String adjective = 'Fun'
#
# Display 'Learning', language, 'is', adjective
language = 'Python'
adjective = 'Fun'
x_print('Learning ' + language + ' is ' + adjective)
# Declare Integer pizzas = 10
# Declare Integer slices_per_pizza = 8
# Declare Integer total_slices
#
# Set total_slices = pizzas * slices_per_pizza
#
# Display 'There are', total_slices, 'slices in', pizzas, 'pizzas.'
# Display total_slices, ': It must be dinner time!'
pizzas = 10
slices_per_pizza = 8
total_slices = pizzas * slices_per_pizza
x_print('There are ' + str(total_slices) + ' slices in ' + str(pizzas) + ' pizzas.')
x_print(str(total_slices) + ': It must be dinner time!')
|
TiVoMaker/boto
|
refs/heads/develop
|
tests/unit/kms/__init__.py
|
473
|
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
|
katajakasa/utuputki2
|
refs/heads/master
|
utuputki/webui/handlers/profile.py
|
1
|
# -*- coding: utf-8 -*-
import logging
from passlib.hash import pbkdf2_sha256
from sqlalchemy.orm.exc import NoResultFound
from handlerbase import HandlerBase
from common.db import db_session, User
log = logging.getLogger(__name__)
class ProfileHandler(HandlerBase):
def handle(self, packet_msg):
if not self.is_user_auth():
return
password = packet_msg.get('password')
password2 = packet_msg.get('password2')
email = packet_msg.get('email', '')
nickname = packet_msg.get('nickname')
if len(nickname) < 5 or len(nickname) > 32:
self.send_error('Nickname should be between 5 and 32 characters long', 400)
return
if email and len(email) > 0:
if len(email) < 3 or len(email) > 128:
self.send_error('Email should be between 3 and 128 characters long', 400)
return
if (password and len(password) > 0) or (password2 and len(password2) > 0):
if (password and len(password) < 8) or (password2 and len(password2) < 8):
self.send_error('Password should be at least 8 characters long', 400)
return
if password != password2:
self.send_error('Passwords don\'t match!', 400)
return
# Make sure the nickname is not yet reserved
s = db_session()
try:
s.query(User).filter(User.nickname == nickname).filter(User.id != self.sock.uid).one()
self.send_error('Nickname is already reserved', 405)
return
except NoResultFound:
pass
finally:
s.close()
# Get user
try:
user = s.query(User).filter_by(id=self.sock.uid).one()
username = user.username
user.nickname = nickname
user.email = email
if password:
user.password = pbkdf2_sha256.encrypt(password)
s.add(user)
s.commit()
self.send_message(message={
'user': user.serialize()
})
except NoResultFound:
            log.warning(u"User %d not found.", self.sock.uid)
return
finally:
s.close()
# Send simple notification
log.info(u"Updated profile for user %s.", username)
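# Illustrative note (not part of the original handler): passlib verifies
# hashes produced above with, e.g.,
#     pbkdf2_sha256.verify(candidate_password, user.password)
# which is what a login handler would be expected to call.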
|
crossgovernmentservices/csdigital-prototype
|
refs/heads/master
|
application/skills/models.py
|
1
|
import datetime
import mongoengine as db
class Audit(db.Document):
owner = db.ReferenceField('User')
created_date = db.DateTimeField(default=datetime.datetime.utcnow)
commercial = db.IntField()
digital = db.IntField()
delivery = db.IntField()
leadership = db.IntField()
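# Illustrative usage sketch (not part of the original module), assuming a
# mongoengine connection is established and `user` is a saved User document:
#
#     audit = Audit(owner=user, commercial=3, digital=4,
#                   delivery=2, leadership=3)
#     audit.save()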
|
ibelem/crosswalk-test-suite
|
refs/heads/master
|
webapi/tct-csp-w3c-tests/csp-py/csp_frame-src_cross-origin_blocked_int-manual.py
|
30
|
def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
_CSP = "frame-src " + url1
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <yunfeix.hao@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_frame-src_cross-origin_blocked_int</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#frame-src"/>
<meta name="flags" content=""/>
<meta charset="utf-8"/>
</head>
<body>
<p>Test passes if there is <strong>no red</strong>.</p>
<iframe src="support/red-100x100.png"/>
</body>
</html> """
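# A brief note on the policy above, assuming the usual config.json layout:
# the response carries a header of the shape
#     Content-Security-Policy: frame-src http://<host>:<port1>
# The iframe in the returned page uses a relative (same-origin) src, which is
# not in that allow list, so the frame should be blocked and no red square
# should render.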
|
jchevin/MissionPlanner-master
|
refs/heads/master
|
Lib/doctest.py
|
41
|
# Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'
__all__ = [
    # 0. Option Flags
'register_optionflag',
'DONT_ACCEPT_TRUE_FOR_1',
'DONT_ACCEPT_BLANKLINE',
'NORMALIZE_WHITESPACE',
'ELLIPSIS',
'SKIP',
'IGNORE_EXCEPTION_DETAIL',
'COMPARISON_FLAGS',
'REPORT_UDIFF',
'REPORT_CDIFF',
'REPORT_NDIFF',
'REPORT_ONLY_FIRST_FAILURE',
'REPORTING_FLAGS',
# 1. Utility Functions
# 2. Example & DocTest
'Example',
'DocTest',
# 3. Doctest Parser
'DocTestParser',
# 4. Doctest Finder
'DocTestFinder',
# 5. Doctest Runner
'DocTestRunner',
'OutputChecker',
'DocTestFailure',
'UnexpectedException',
'DebugRunner',
# 6. Test Functions
'testmod',
'testfile',
'run_docstring_examples',
# 7. Tester
'Tester',
# 8. Unittest Support
'DocTestSuite',
'DocFileSuite',
'set_unittest_reportflags',
# 9. Debugging Support
'script_from_examples',
'testsource',
'debug_src',
'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re
import unittest, difflib, pdb, tempfile
import warnings
from StringIO import StringIO
from collections import namedtuple
TestResults = namedtuple('TestResults', 'failed attempted')
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
OPTIONFLAGS_BY_NAME = {}
def register_optionflag(name):
# Create a new flag unless `name` is already known.
return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME))
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
SKIP = register_optionflag('SKIP')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
DONT_ACCEPT_BLANKLINE |
NORMALIZE_WHITESPACE |
ELLIPSIS |
SKIP |
IGNORE_EXCEPTION_DETAIL)
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
REPORTING_FLAGS = (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF |
REPORT_ONLY_FIRST_FAILURE)
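# A minimal sketch of how the flags compose, assuming a hypothetical custom
# flag name:
#
#     >>> MY_FLAG = register_optionflag('MY_FLAG')
#     >>> flags = ELLIPSIS | MY_FLAG
#     >>> bool(flags & ELLIPSIS), bool(flags & SKIP)
#     (True, False)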
# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def _extract_future_flags(globs):
"""
Return the compiler-flags associated with the future features that
have been imported into the given namespace (globs).
"""
flags = 0
for fname in __future__.all_feature_names:
feature = globs.get(fname, None)
if feature is getattr(__future__, fname):
flags |= feature.compiler_flag
return flags
def _normalize_module(module, depth=2):
"""
Return the module specified by `module`. In particular:
- If `module` is a module, then return module.
- If `module` is a string, then import and return the
module with that name.
- If `module` is None, then return the calling module.
The calling module is assumed to be the module of
the stack frame at the given depth in the call stack.
"""
if inspect.ismodule(module):
return module
elif isinstance(module, (str, unicode)):
return __import__(module, globals(), locals(), ["*"])
elif module is None:
return sys.modules[sys._getframe(depth).f_globals['__name__']]
else:
raise TypeError("Expected a module, string, or None")
def _load_testfile(filename, package, module_relative):
if module_relative:
package = _normalize_module(package, 3)
filename = _module_relative_path(package, filename)
if hasattr(package, '__loader__'):
if hasattr(package.__loader__, 'get_data'):
file_contents = package.__loader__.get_data(filename)
# get_data() opens files as 'rb', so one must do the equivalent
# conversion as universal newlines would do.
return file_contents.replace(os.linesep, '\n'), filename
with open(filename) as f:
return f.read(), filename
# Use sys.stdout encoding for output.
_encoding = getattr(sys.__stdout__, 'encoding', None) or 'utf-8'
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning of
every non-blank line in `s`, and return the result.
If the string `s` is Unicode, it is encoded using the stdout
encoding and the `backslashreplace` error handler.
"""
if isinstance(s, unicode):
s = s.encode(_encoding, 'backslashreplace')
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
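# A minimal sketch of _indent's behavior (blank lines are left alone):
#
#     >>> _indent('foo\nbar\n')
#     '    foo\n    bar\n'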
def _exception_traceback(exc_info):
"""
Return a string containing a traceback message for the given
exc_info tuple (as returned by sys.exc_info()).
"""
# Get a traceback message.
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
def getvalue(self):
result = StringIO.getvalue(self)
# If anything at all was written, make sure there's a trailing
# newline. There's no way for the expected output to indicate
# that a trailing newline is missing.
if result and not result.endswith("\n"):
result += "\n"
# Prevent softspace from screwing up the next test case, in
# case they used print with a trailing comma in an example.
if hasattr(self, "softspace"):
del self.softspace
return result
def truncate(self, size=None):
StringIO.truncate(self, size)
if hasattr(self, "softspace"):
del self.softspace
if not self.buf:
# Reset it to an empty string, to make sure it's not unicode.
self.buf = ''
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
"""
Essentially the only subtle case:
>>> _ellipsis_match('aa...aa', 'aaa')
False
"""
if ELLIPSIS_MARKER not in want:
return want == got
# Find "the real" strings.
ws = want.split(ELLIPSIS_MARKER)
assert len(ws) >= 2
# Deal with exact matches possibly needed at one or both ends.
startpos, endpos = 0, len(got)
w = ws[0]
if w: # starts with exact match
if got.startswith(w):
startpos = len(w)
del ws[0]
else:
return False
w = ws[-1]
if w: # ends with exact match
if got.endswith(w):
endpos -= len(w)
del ws[-1]
else:
return False
if startpos > endpos:
# Exact end matches required more characters than we have, as in
# _ellipsis_match('aa...aa', 'aaa')
return False
# For the rest, we only need to find the leftmost non-overlapping
# match for each piece. If there's no overall match that way alone,
# there's no overall match period.
for w in ws:
# w may be '' at times, if there are consecutive ellipses, or
# due to an ellipsis at the start or end of `want`. That's OK.
# Search for an empty string succeeds, and doesn't change startpos.
startpos = got.find(w, startpos, endpos)
if startpos < 0:
return False
startpos += len(w)
return True
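# Two more sketches of the matching rules, in the spirit of the docstring
# example above:
#
#     >>> _ellipsis_match('a...z', 'abcz')
#     True
#     >>> _ellipsis_match('...c...', 'abc')
#     True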
def _comment_line(line):
"Return a commented form of the given line"
line = line.rstrip()
if line:
return '# '+line
else:
return '#'
class _OutputRedirectingPdb(pdb.Pdb):
"""
A specialized version of the python debugger that redirects stdout
to a given stream when interacting with the user. Stdout is *not*
redirected when traced code is executed.
"""
def __init__(self, out):
self.__out = out
self.__debugger_used = False
pdb.Pdb.__init__(self, stdout=out)
# still use input() to get user input
self.use_rawinput = 1
def set_trace(self, frame=None):
self.__debugger_used = True
if frame is None:
frame = sys._getframe().f_back
pdb.Pdb.set_trace(self, frame)
def set_continue(self):
# Calling set_continue unconditionally would break unit test
# coverage reporting, as Bdb.set_continue calls sys.settrace(None).
if self.__debugger_used:
pdb.Pdb.set_continue(self)
def trace_dispatch(self, *args):
# Redirect stdout to the given stream.
save_stdout = sys.stdout
sys.stdout = self.__out
# Call Pdb's trace dispatch method.
try:
return pdb.Pdb.trace_dispatch(self, *args)
finally:
sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
    if not inspect.ismodule(module):
        raise TypeError('Expected a module: %r' % module)
    if path.startswith('/'):
        raise ValueError('Module-relative files may not have absolute paths')
# Find the base directory for the path.
if hasattr(module, '__file__'):
# A normal module/package
basedir = os.path.split(module.__file__)[0]
elif module.__name__ == '__main__':
# An interactive session.
if len(sys.argv)>0 and sys.argv[0] != '':
basedir = os.path.split(sys.argv[0])[0]
else:
basedir = os.curdir
else:
# A module w/o __file__ (this includes builtins)
raise ValueError("Can't resolve paths relative to the module " +
module + " (it has no __file__)")
# Combine the base directory and the path.
return os.path.join(basedir, *(path.split('/')))
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
"""
A single doctest example, consisting of source code and expected
output. `Example` defines the following attributes:
- source: A single Python statement, always ending with a newline.
The constructor adds a newline if needed.
- want: The expected output from running the source code (either
from stdout, or a traceback in case of exception). `want` ends
with a newline unless it's empty, in which case it's an empty
string. The constructor adds a newline if needed.
- exc_msg: The exception message generated by the example, if
the example is expected to generate an exception; or `None` if
it is not expected to generate an exception. This exception
message is compared against the return value of
`traceback.format_exception_only()`. `exc_msg` ends with a
newline unless it's `None`. The constructor adds a newline
if needed.
- lineno: The line number within the DocTest string containing
this Example where the Example begins. This line number is
zero-based, with respect to the beginning of the DocTest.
- indent: The example's indentation in the DocTest string.
      I.e., the number of space characters that precede the
example's first prompt.
- options: A dictionary mapping from option flags to True or
False, which is used to override default options for this
example. Any option flags not contained in this dictionary
are left at their default value (as specified by the
DocTestRunner's optionflags). By default, no options are set.
"""
def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
options=None):
# Normalize inputs.
if not source.endswith('\n'):
source += '\n'
if want and not want.endswith('\n'):
want += '\n'
if exc_msg is not None and not exc_msg.endswith('\n'):
exc_msg += '\n'
# Store properties.
self.source = source
self.want = want
self.lineno = lineno
self.indent = indent
if options is None: options = {}
self.options = options
self.exc_msg = exc_msg
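# A minimal construction sketch: the constructor normalizes the trailing
# newlines described above.
#
#     >>> ex = Example('1 + 1', '2')
#     >>> ex.source, ex.want
#     ('1 + 1\n', '2\n')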
class DocTest:
"""
A collection of doctest examples that should be run in a single
namespace. Each `DocTest` defines the following attributes:
- examples: the list of examples.
- globs: The namespace (aka globals) that the examples should
be run in.
- name: A name identifying the DocTest (typically, the name of
the object whose docstring this DocTest was extracted from).
- filename: The name of the file that this DocTest was extracted
from, or `None` if the filename is unknown.
- lineno: The line number within filename where this DocTest
begins, or `None` if the line number is unavailable. This
line number is zero-based, with respect to the beginning of
the file.
- docstring: The string that the examples were extracted from,
or `None` if the string is unavailable.
"""
def __init__(self, examples, globs, name, filename, lineno, docstring):
"""
Create a new DocTest containing the given examples. The
DocTest's globals are initialized with a copy of `globs`.
"""
assert not isinstance(examples, basestring), \
"DocTest no longer accepts str; use DocTestParser instead"
self.examples = examples
self.docstring = docstring
self.globs = globs.copy()
self.name = name
self.filename = filename
self.lineno = lineno
def __repr__(self):
if len(self.examples) == 0:
examples = 'no examples'
elif len(self.examples) == 1:
examples = '1 example'
else:
examples = '%d examples' % len(self.examples)
return ('<DocTest %s from %s:%s (%s)>' %
(self.name, self.filename, self.lineno, examples))
# This lets us sort tests by name:
def __cmp__(self, other):
if not isinstance(other, DocTest):
return -1
return cmp((self.name, self.filename, self.lineno, id(self)),
(other.name, other.filename, other.lineno, id(other)))
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
"""
A class used to parse strings containing doctest examples.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.*$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
# A regular expression for handling `want` strings that contain
# expected exceptions. It divides `want` into three pieces:
# - the traceback header line (`hdr`)
# - the traceback stack (`stack`)
# - the exception message (`msg`), as generated by
# traceback.format_exception_only()
# `msg` may have multiple lines. We assume/require that the
# exception message is the first non-indented line starting with a word
# character following the traceback header line.
_EXCEPTION_RE = re.compile(r"""
# Grab the traceback header. Different versions of Python have
# said different things on the first traceback line.
^(?P<hdr> Traceback\ \(
(?: most\ recent\ call\ last
| innermost\ last
) \) :
)
\s* $ # toss trailing whitespace on the header.
(?P<stack> .*?) # don't blink: absorb stuff until...
^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
""", re.VERBOSE | re.MULTILINE | re.DOTALL)
# A callable returning a true value iff its argument is a blank line
# or contains a single comment.
_IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
def parse(self, string, name='<string>'):
"""
Divide the given string into examples and intervening text,
and return them as a list of alternating Examples and strings.
Line numbers for the Examples are 0-based. The optional
argument `name` is a name identifying this string, and is only
used for error messages.
"""
string = string.expandtabs()
# If all lines begin with the same indentation, then strip it.
min_indent = self._min_indent(string)
if min_indent > 0:
string = '\n'.join([l[min_indent:] for l in string.split('\n')])
output = []
charno, lineno = 0, 0
# Find all doctest examples in the string:
for m in self._EXAMPLE_RE.finditer(string):
# Add the pre-example text to `output`.
output.append(string[charno:m.start()])
# Update lineno (lines before this example)
lineno += string.count('\n', charno, m.start())
# Extract info from the regexp match.
(source, options, want, exc_msg) = \
self._parse_example(m, name, lineno)
# Create an Example, and add it to the list.
if not self._IS_BLANK_OR_COMMENT(source):
output.append( Example(source, want, exc_msg,
lineno=lineno,
indent=min_indent+len(m.group('indent')),
options=options) )
# Update lineno (lines inside this example)
lineno += string.count('\n', m.start(), m.end())
# Update charno.
charno = m.end()
# Add any remaining post-example text to `output`.
output.append(string[charno:])
return output
def get_doctest(self, string, globs, name, filename, lineno):
"""
Extract all doctest examples from the given string, and
collect them into a `DocTest` object.
`globs`, `name`, `filename`, and `lineno` are attributes for
the new `DocTest` object. See the documentation for `DocTest`
for more information.
"""
return DocTest(self.get_examples(string, name), globs,
name, filename, lineno, string)
def get_examples(self, string, name='<string>'):
"""
Extract all doctest examples from the given string, and return
them as a list of `Example` objects. Line numbers are
0-based, because it's most common in doctests that nothing
interesting appears on the same line as opening triple-quote,
and so the first interesting line is called \"line 1\" then.
The optional argument `name` is a name identifying this
string, and is only used for error messages.
"""
return [x for x in self.parse(string, name)
if isinstance(x, Example)]
def _parse_example(self, m, name, lineno):
"""
Given a regular expression match from `_EXAMPLE_RE` (`m`),
return a pair `(source, want)`, where `source` is the matched
example's source code (with prompts and indentation stripped);
and `want` is the example's expected output (with indentation
stripped).
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
# Get the example's indentation level.
indent = len(m.group('indent'))
# Divide source into lines; check that they're properly
# indented; and then strip their indentation & prompts.
source_lines = m.group('source').split('\n')
self._check_prompt_blank(source_lines, indent, name, lineno)
self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
source = '\n'.join([sl[indent+4:] for sl in source_lines])
# Divide want into lines; check that it's properly indented; and
# then strip the indentation. Spaces before the last newline should
# be preserved, so plain rstrip() isn't good enough.
want = m.group('want')
want_lines = want.split('\n')
if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
del want_lines[-1] # forget final newline & spaces after it
self._check_prefix(want_lines, ' '*indent, name,
lineno + len(source_lines))
want = '\n'.join([wl[indent:] for wl in want_lines])
# If `want` contains a traceback message, then extract it.
m = self._EXCEPTION_RE.match(want)
if m:
exc_msg = m.group('msg')
else:
exc_msg = None
# Extract options from the source.
options = self._find_options(source, name, lineno)
return source, options, want, exc_msg
# This regular expression looks for option directives in the
# source code of an example. Option directives are comments
# starting with "doctest:". Warning: this may give false
# positives for string-literals that contain the string
# "#doctest:". Eliminating these false positives would require
# actually parsing the string; but we limit them by ignoring any
# line containing "#doctest:" that is *followed* by a quote mark.
_OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
re.MULTILINE)
def _find_options(self, source, name, lineno):
"""
Return a dictionary containing option overrides extracted from
option directives in the given source string.
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
options = {}
# (note: with the current regexp, this will match at most once:)
for m in self._OPTION_DIRECTIVE_RE.finditer(source):
option_strings = m.group(1).replace(',', ' ').split()
for option in option_strings:
if (option[0] not in '+-' or
option[1:] not in OPTIONFLAGS_BY_NAME):
raise ValueError('line %r of the doctest for %s '
'has an invalid option: %r' %
(lineno+1, name, option))
flag = OPTIONFLAGS_BY_NAME[option[1:]]
options[flag] = (option[0] == '+')
if options and self._IS_BLANK_OR_COMMENT(source):
raise ValueError('line %r of the doctest for %s has an option '
'directive on a line with no example: %r' %
(lineno, name, source))
return options
# This regular expression finds the indentation of every non-blank
# line in a string.
    _INDENT_RE = re.compile(r'^([ ]*)(?=\S)', re.MULTILINE)
def _min_indent(self, s):
"Return the minimum indentation of any non-blank line in `s`"
indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
if len(indents) > 0:
return min(indents)
else:
return 0
def _check_prompt_blank(self, lines, indent, name, lineno):
"""
Given the lines of a source string (including prompts and
leading indentation), check to make sure that every prompt is
followed by a space character. If any line is not followed by
a space character, then raise ValueError.
"""
for i, line in enumerate(lines):
if len(line) >= indent+4 and line[indent+3] != ' ':
raise ValueError('line %r of the docstring for %s '
'lacks blank after %s: %r' %
(lineno+i+1, name,
line[indent:indent+3], line))
def _check_prefix(self, lines, prefix, name, lineno):
"""
Check that every line in the given list starts with the given
prefix; if any line does not, then raise a ValueError.
"""
for i, line in enumerate(lines):
if line and not line.startswith(prefix):
raise ValueError('line %r of the docstring for %s has '
'inconsistent leading whitespace: %r' %
(lineno+i+1, name, line))
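# A minimal parsing sketch using the class above:
#
#     >>> parser = DocTestParser()
#     >>> [ex] = parser.get_examples('>>> 6 * 7\n42\n')
#     >>> ex.source, ex.want, ex.lineno
#     ('6 * 7\n', '42\n', 0)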
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
"""
def __init__(self, verbose=False, parser=DocTestParser(),
recurse=True, exclude_empty=True):
"""
Create a new doctest finder.
The optional argument `parser` specifies a class or
function that should be used to create new DocTest objects (or
objects that implement the same interface as DocTest). The
signature for this factory function should match the signature
of the DocTest constructor.
If the optional argument `recurse` is false, then `find` will
only examine the given object, and not any contained objects.
If the optional argument `exclude_empty` is false, then `find`
will include tests for objects with empty docstrings.
"""
self._parser = parser
self._verbose = verbose
self._recurse = recurse
self._exclude_empty = exclude_empty
def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
"""
Return a list of the DocTests that are defined by the given
object's docstring, or by any of its contained objects'
docstrings.
The optional parameter `module` is the module that contains
the given object. If the module is not specified or is None, then
the test finder will attempt to automatically determine the
correct module. The object's module is used:
- As a default namespace, if `globs` is not specified.
- To prevent the DocTestFinder from extracting DocTests
from objects that are imported from other modules.
- To find the name of the file containing the object.
- To help find the line number of the object within its
file.
Contained objects whose module does not match `module` are ignored.
If `module` is False, no attempt to find the module will be made.
This is obscure, of use mostly in tests: if `module` is False, or
is None but cannot be found automatically, then all objects are
considered to belong to the (non-existent) module, so all contained
objects will (recursively) be searched for doctests.
        The globals for each DocTest are formed by combining `globs`
and `extraglobs` (bindings in `extraglobs` override bindings
in `globs`). A new copy of the globals dictionary is created
for each DocTest. If `globs` is not specified, then it
defaults to the module's `__dict__`, if specified, or {}
otherwise. If `extraglobs` is not specified, then it defaults
to {}.
"""
# If name was not specified, then extract it from the object.
if name is None:
name = getattr(obj, '__name__', None)
if name is None:
raise ValueError("DocTestFinder.find: name must be given "
"when obj.__name__ doesn't exist: %r" %
(type(obj),))
# Find the module that contains the given object (if obj is
# a module, then module=obj.). Note: this may fail, in which
# case module will be None.
if module is False:
module = None
elif module is None:
module = inspect.getmodule(obj)
# Read the module's source code. This is used by
# DocTestFinder._find_lineno to find the line number for a
# given object's docstring.
try:
file = inspect.getsourcefile(obj) or inspect.getfile(obj)
if module is not None:
# Supply the module globals in case the module was
# originally loaded via a PEP 302 loader and
# file is not a valid filesystem path
source_lines = linecache.getlines(file, module.__dict__)
else:
# No access to a loader, so assume it's a normal
# filesystem path
source_lines = linecache.getlines(file)
if not source_lines:
source_lines = None
except TypeError:
source_lines = None
# Initialize globals, and merge in extraglobs.
if globs is None:
if module is None:
globs = {}
else:
globs = module.__dict__.copy()
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if '__name__' not in globs:
globs['__name__'] = '__main__' # provide a default module name
        # Recursively explore `obj`, extracting DocTests.
tests = []
self._find(tests, obj, name, module, source_lines, globs, {})
# Sort the tests by alpha order of names, for consistency in
# verbose-mode output. This was a feature of doctest in Pythons
# <= 2.3 that got lost by accident in 2.4. It was repaired in
# 2.4.4 and 2.5.
tests.sort()
return tests
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
return True
elif inspect.isfunction(object):
return module.__dict__ is object.func_globals
elif inspect.isclass(object):
return module.__name__ == object.__module__
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
            return True # [XX] no way to be sure.
else:
raise ValueError("object must be a class or function")
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
if self._verbose:
print 'Finding tests in %s' % name
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
# Look for tests in a module's contained objects.
if inspect.ismodule(obj) and self._recurse:
for valname, val in obj.__dict__.items():
valname = '%s.%s' % (name, valname)
# Recurse to functions & classes.
if ((inspect.isfunction(val) or inspect.isclass(val)) and
self._from_module(module, val)):
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a module's __test__ dictionary.
if inspect.ismodule(obj) and self._recurse:
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, basestring):
raise ValueError("DocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, basestring)):
raise ValueError("DocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).im_func
# Recurse to methods, properties, and nested classes.
if ((inspect.isfunction(val) or inspect.isclass(val) or
isinstance(val, property)) and
self._from_module(module, val)):
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, basestring):
docstring = obj
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, basestring):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Find the docstring's location in the file.
lineno = self._find_lineno(obj, source_lines)
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
def _find_lineno(self, obj, source_lines):
"""
Return a line number of the given object's docstring. Note:
this method assumes that the object has a docstring.
"""
lineno = None
# Find the line number for modules.
if inspect.ismodule(obj):
lineno = 0
# Find the line number for classes.
# Note: this could be fooled if a class is defined multiple
# times in a single file.
if inspect.isclass(obj):
if source_lines is None:
return None
pat = re.compile(r'^\s*class\s*%s\b' %
getattr(obj, '__name__', '-'))
for i, line in enumerate(source_lines):
if pat.match(line):
lineno = i
break
# Find the line number for functions & methods.
if inspect.ismethod(obj): obj = obj.im_func
if inspect.isfunction(obj): obj = obj.func_code
if inspect.istraceback(obj): obj = obj.tb_frame
if inspect.isframe(obj): obj = obj.f_code
if inspect.iscode(obj):
lineno = getattr(obj, 'co_firstlineno', None)-1
# Find the line number where the docstring starts. Assume
# that it's the first line that begins with a quote mark.
# Note: this could be fooled by a multiline function
# signature, where a continuation line begins with a quote
# mark.
if lineno is not None:
if source_lines is None:
return lineno+1
            pat = re.compile(r'(^|.*:)\s*\w*("|\')')
for lineno in range(lineno, len(source_lines)):
if pat.match(source_lines[lineno]):
return lineno
# We couldn't find the line number.
return None
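# A minimal finding sketch: _ellipsis_match above carries one docstring
# example, so it yields a single DocTest.
#
#     >>> tests = DocTestFinder().find(_ellipsis_match)
#     >>> len(tests), len(tests[0].examples)
#     (1, 1)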
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
"""
A class used to run DocTest test cases, and accumulate statistics.
The `run` method is used to process a single DocTest case. It
returns a tuple `(f, t)`, where `t` is the number of test cases
tried, and `f` is the number of test cases that failed.
>>> tests = DocTestFinder().find(_TestClass)
>>> runner = DocTestRunner(verbose=False)
>>> tests.sort(key = lambda test: test.name)
>>> for test in tests:
... print test.name, '->', runner.run(test)
_TestClass -> TestResults(failed=0, attempted=2)
_TestClass.__init__ -> TestResults(failed=0, attempted=2)
_TestClass.get -> TestResults(failed=0, attempted=2)
_TestClass.square -> TestResults(failed=0, attempted=1)
The `summarize` method prints a summary of all the test cases that
have been run by the runner, and returns an aggregated `(f, t)`
tuple:
>>> runner.summarize(verbose=1)
4 items passed all tests:
2 tests in _TestClass
2 tests in _TestClass.__init__
2 tests in _TestClass.get
1 tests in _TestClass.square
7 tests in 4 items.
7 passed and 0 failed.
Test passed.
TestResults(failed=0, attempted=7)
The aggregated number of tried examples and failed examples is
also available via the `tries` and `failures` attributes:
>>> runner.tries
7
>>> runner.failures
0
The comparison between expected outputs and actual outputs is done
by an `OutputChecker`. This comparison may be customized with a
number of option flags; see the documentation for `testmod` for
more information. If the option flags are insufficient, then the
comparison may also be customized by passing a subclass of
`OutputChecker` to the constructor.
The test runner's display output can be controlled in two ways.
    First, an output function (`out`) can be passed to
    `DocTestRunner.run`; this function will be called with strings that
should be displayed. It defaults to `sys.stdout.write`. If
capturing the output is not sufficient, then the display output
can be also customized by subclassing DocTestRunner, and
overriding the methods `report_start`, `report_success`,
`report_unexpected_exception`, and `report_failure`.
"""
# This divider string is used to separate failure messages, and to
# separate sections of the summary.
DIVIDER = "*" * 70
def __init__(self, checker=None, verbose=None, optionflags=0):
"""
Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg 'verbose' prints lots of stuff if true,
only failures if false; by default, it's true iff '-v' is in
sys.argv.
Optional argument `optionflags` can be used to control how the
test runner compares expected output to actual output, and how
it displays failures. See the documentation for `testmod` for
more information.
"""
self._checker = checker or OutputChecker()
if verbose is None:
verbose = '-v' in sys.argv
self._verbose = verbose
self.optionflags = optionflags
self.original_optionflags = optionflags
# Keep track of the examples we've run.
self.tries = 0
self.failures = 0
self._name2ft = {}
# Create a fake output target for capturing doctest output.
self._fakeout = _SpoofOut()
#/////////////////////////////////////////////////////////////////
# Reporting methods
#/////////////////////////////////////////////////////////////////
def report_start(self, out, test, example):
"""
Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)
"""
if self._verbose:
if example.want:
out('Trying:\n' + _indent(example.source) +
'Expecting:\n' + _indent(example.want))
else:
out('Trying:\n' + _indent(example.source) +
'Expecting nothing\n')
def report_success(self, out, test, example, got):
"""
Report that the given example ran successfully. (Only
displays a message if verbose=True)
"""
if self._verbose:
out("ok\n")
def report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
out(self._failure_header(test, example) +
self._checker.output_difference(example, got, self.optionflags))
def report_unexpected_exception(self, out, test, example, exc_info):
"""
Report that the given example raised an unexpected exception.
"""
out(self._failure_header(test, example) +
'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
def _failure_header(self, test, example):
out = [self.DIVIDER]
if test.filename:
if test.lineno is not None and example.lineno is not None:
lineno = test.lineno + example.lineno + 1
else:
lineno = '?'
out.append('File "%s", line %s, in %s' %
(test.filename, lineno, test.name))
else:
out.append('Line %s, in %s' % (example.lineno+1, test.name))
out.append('Failed example:')
source = example.source
out.append(_indent(source))
return '\n'.join(out)
#/////////////////////////////////////////////////////////////////
# DocTest Running
#/////////////////////////////////////////////////////////////////
def __run(self, test, compileflags, out):
"""
Run the examples in `test`. Write the outcome of each example
with one of the `DocTestRunner.report_*` methods, using the
writer function `out`. `compileflags` is the set of compiler
flags that should be used to execute examples. Return a tuple
`(f, t)`, where `t` is the number of examples tried, and `f`
is the number of examples that failed. The examples are run
in the namespace `test.globs`.
"""
# Keep track of the number of failures and tries.
failures = tries = 0
# Save the option flags (since option directives can be used
# to modify them).
original_optionflags = self.optionflags
SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
check = self._checker.check_output
# Process each example.
for examplenum, example in enumerate(test.examples):
# If REPORT_ONLY_FIRST_FAILURE is set, then suppress
# reporting after the first failure.
quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
failures > 0)
# Merge in the example's options.
self.optionflags = original_optionflags
if example.options:
for (optionflag, val) in example.options.items():
if val:
self.optionflags |= optionflag
else:
self.optionflags &= ~optionflag
# If 'SKIP' is set, then skip this example.
if self.optionflags & SKIP:
continue
# Record that we started this example.
tries += 1
if not quiet:
self.report_start(out, test, example)
# Use a special filename for compile(), so we can retrieve
# the source code during interactive debugging (see
# __patched_linecache_getlines).
filename = '<doctest %s[%d]>' % (test.name, examplenum)
# Run the example in the given context (globs), and record
# any exception that gets raised. (But don't intercept
# keyboard interrupts.)
try:
# Don't blink! This is where the user's code gets run.
exec compile(example.source, filename, "single",
compileflags, 1) in test.globs
self.debugger.set_continue() # ==== Example Finished ====
exception = None
except KeyboardInterrupt:
raise
except:
exception = sys.exc_info()
self.debugger.set_continue() # ==== Example Finished ====
got = self._fakeout.getvalue() # the actual output
self._fakeout.truncate(0)
outcome = FAILURE # guilty until proved innocent or insane
# If the example executed without raising any exceptions,
# verify its output.
if exception is None:
if check(example.want, got, self.optionflags):
outcome = SUCCESS
# The example raised an exception: check if it was expected.
else:
exc_info = sys.exc_info()
exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
if not quiet:
got += _exception_traceback(exc_info)
# If `example.exc_msg` is None, then we weren't expecting
# an exception.
if example.exc_msg is None:
outcome = BOOM
# We expected an exception: see whether it matches.
elif check(example.exc_msg, exc_msg, self.optionflags):
outcome = SUCCESS
# Another chance if they didn't care about the detail.
elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
m1 = re.match(r'(?:[^:]*\.)?([^:]*:)', example.exc_msg)
m2 = re.match(r'(?:[^:]*\.)?([^:]*:)', exc_msg)
if m1 and m2 and check(m1.group(1), m2.group(1),
self.optionflags):
outcome = SUCCESS
# Report the outcome.
if outcome is SUCCESS:
if not quiet:
self.report_success(out, test, example, got)
elif outcome is FAILURE:
if not quiet:
self.report_failure(out, test, example, got)
failures += 1
elif outcome is BOOM:
if not quiet:
self.report_unexpected_exception(out, test, example,
exc_info)
failures += 1
else:
assert False, ("unknown outcome", outcome)
# Restore the option flags (in case they were modified)
self.optionflags = original_optionflags
# Record and return the number of failures and tries.
self.__record_outcome(test, failures, tries)
return TestResults(failures, tries)
def __record_outcome(self, test, f, t):
"""
Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.
"""
f2, t2 = self._name2ft.get(test.name, (0,0))
self._name2ft[test.name] = (f+f2, t+t2)
self.failures += f
self.tries += t
__LINECACHE_FILENAME_RE = re.compile(r'<doctest '
r'(?P<name>.+)'
r'\[(?P<examplenum>\d+)\]>$')
def __patched_linecache_getlines(self, filename, module_globals=None):
m = self.__LINECACHE_FILENAME_RE.match(filename)
if m and m.group('name') == self.test.name:
example = self.test.examples[int(m.group('examplenum'))]
source = example.source
if isinstance(source, unicode):
source = source.encode('ascii', 'backslashreplace')
return source.splitlines(True)
else:
return self.save_linecache_getlines(filename, module_globals)
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in `test`, and display the results using the
writer function `out`.
The examples are run in the namespace `test.globs`. If
`clear_globs` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use `clear_globs=False`.
`compileflags` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to `globs`.
The output of each example is checked using
        `OutputChecker.check_output`, and the results are formatted by
the `DocTestRunner.report_*` methods.
"""
self.test = test
if compileflags is None:
compileflags = _extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = _OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
# Make sure sys.displayhook just prints the value to stdout
save_displayhook = sys.displayhook
sys.displayhook = sys.__displayhook__
try:
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
sys.displayhook = save_displayhook
if clear_globs:
test.globs.clear()
#/////////////////////////////////////////////////////////////////
# Summarization
#/////////////////////////////////////////////////////////////////
def summarize(self, verbose=None):
"""
Print a summary of all the test cases that have been run by
this DocTestRunner, and return a tuple `(f, t)`, where `f` is
the total number of failed examples, and `t` is the total
number of tried examples.
The optional `verbose` argument controls how detailed the
summary is. If the verbosity is not specified, then the
DocTestRunner's verbosity is used.
"""
if verbose is None:
verbose = self._verbose
notests = []
passed = []
failed = []
totalt = totalf = 0
for x in self._name2ft.items():
name, (f, t) = x
assert f <= t
totalt += t
totalf += f
if t == 0:
notests.append(name)
elif f == 0:
passed.append( (name, t) )
else:
failed.append(x)
if verbose:
if notests:
print len(notests), "items had no tests:"
notests.sort()
for thing in notests:
print " ", thing
if passed:
print len(passed), "items passed all tests:"
passed.sort()
for thing, count in passed:
print " %3d tests in %s" % (count, thing)
if failed:
print self.DIVIDER
print len(failed), "items had failures:"
failed.sort()
for thing, (f, t) in failed:
print " %3d of %3d in %s" % (f, t, thing)
if verbose:
print totalt, "tests in", len(self._name2ft), "items."
print totalt - totalf, "passed and", totalf, "failed."
if totalf:
print "***Test Failed***", totalf, "failures."
elif verbose:
print "Test passed."
return TestResults(totalf, totalt)
#/////////////////////////////////////////////////////////////////
# Backward compatibility cruft to maintain doctest.master.
#/////////////////////////////////////////////////////////////////
def merge(self, other):
d = self._name2ft
for name, (f, t) in other._name2ft.items():
if name in d:
# Don't print here by default, since doing
# so breaks some of the buildbots
#print "*** DocTestRunner.merge: '" + name + "' in both" \
# " testers; summing outcomes."
f2, t2 = d[name]
f = f + f2
t = t + t2
d[name] = f, t
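# A minimal find/run/summarize round trip, reusing the finder sketch above:
#
#     >>> runner = DocTestRunner(verbose=False)
#     >>> [test] = DocTestFinder().find(_ellipsis_match)
#     >>> runner.run(test)
#     TestResults(failed=0, attempted=1)
#     >>> runner.summarize(verbose=False)
#     TestResults(failed=0, attempted=1)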
class OutputChecker:
"""
    A class used to check whether the actual output from a doctest
example matches the expected output. `OutputChecker` defines two
methods: `check_output`, which compares a given pair of outputs,
and returns true if they match; and `output_difference`, which
returns a string describing the differences between two outputs.
"""
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
        documentation for `DocTestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# The values True and False replaced 1 and 0 as the return
# value for boolean comparisons in Python 2.3.
if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
if (got,want) == ("True\n", "1\n"):
return True
if (got,want) == ("False\n", "0\n"):
return True
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub('(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & ELLIPSIS:
if _ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
# Should we do a fancy diff?
def _do_a_fancy_diff(self, want, got, optionflags):
# Not unless they asked for a fancy diff.
if not optionflags & (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF):
return False
# If expected output uses ellipsis, a meaningful fancy diff is
# too hard ... or maybe not. In two real-life failures Tim saw,
# a diff was a major help anyway, so this is commented out.
# [todo] _ellipsis_match() knows which pieces do and don't match,
# and could be the basis for a kick-ass diff in this case.
##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
## return False
# ndiff does intraline difference marking, so can be useful even
# for 1-line differences.
if optionflags & REPORT_NDIFF:
return True
# The other diff types need at least a few lines to be helpful.
return want.count('\n') > 2 and got.count('\n') > 2
def output_difference(self, example, got, optionflags):
"""
Return a string describing the differences between the
expected output for a given example (`example`) and the actual
output (`got`). `optionflags` is the set of option flags used
to compare `want` and `got`.
"""
want = example.want
# If <BLANKLINE>s are being used, then replace blank lines
# with <BLANKLINE> in the actual output string.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
# Check if we should use diff.
if self._do_a_fancy_diff(want, got, optionflags):
# Split want & got into lines.
want_lines = want.splitlines(True) # True == keep line ends
got_lines = got.splitlines(True)
# Use difflib to find their differences.
if optionflags & REPORT_UDIFF:
diff = difflib.unified_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'unified diff with -expected +actual'
elif optionflags & REPORT_CDIFF:
diff = difflib.context_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'context diff with expected followed by actual'
elif optionflags & REPORT_NDIFF:
engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
diff = list(engine.compare(want_lines, got_lines))
kind = 'ndiff with -expected +actual'
else:
assert 0, 'Bad diff option'
# Remove trailing whitespace on diff output.
diff = [line.rstrip() + '\n' for line in diff]
return 'Differences (%s):\n' % kind + _indent(''.join(diff))
# If we're not using diff, then simply list the expected
# output followed by the actual output.
if want and got:
return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
elif want:
return 'Expected:\n%sGot nothing\n' % _indent(want)
elif got:
return 'Expected nothing\nGot:\n%s' % _indent(got)
else:
return 'Expected nothing\nGot nothing\n'
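# A minimal checking sketch: the exact comparison fails, but
# NORMALIZE_WHITESPACE relaxes it.
#
#     >>> checker = OutputChecker()
#     >>> checker.check_output('1  2\n', '1 2\n', 0)
#     False
#     >>> checker.check_output('1  2\n', '1 2\n', NORMALIZE_WHITESPACE)
#     True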
class DocTestFailure(Exception):
"""A DocTest example has failed in debugging mode.
The exception instance has variables:
- test: the DocTest object being run
- example: the Example object that failed
- got: the actual output
"""
def __init__(self, test, example, got):
self.test = test
self.example = example
self.got = got
def __str__(self):
return str(self.test)
class UnexpectedException(Exception):
"""A DocTest example has encountered an unexpected exception
The exception instance has variables:
- test: the DocTest object being run
- example: the Example object that failed
- exc_info: the exception info
"""
def __init__(self, test, example, exc_info):
self.test = test
self.example = example
self.exc_info = exc_info
def __str__(self):
return str(self.test)
class DebugRunner(DocTestRunner):
r"""Run doc tests but raise an exception as soon as there is a failure.
If an unexpected exception occurs, an UnexpectedException is raised.
It contains the test, the example, and the original exception:
>>> runner = DebugRunner(verbose=False)
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except UnexpectedException, failure:
... pass
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
We wrap the original exception to give the calling application
access to the test and example information.
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
If a failure or error occurs, the globals are left intact:
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 1}
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... >>> raise KeyError
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
Traceback (most recent call last):
...
UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 2}
But the globals are cleared if there is no error:
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
TestResults(failed=0, attempted=1)
>>> test.globs
{}
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
r = DocTestRunner.run(self, test, compileflags, out, False)
if clear_globs:
test.globs.clear()
return r
def report_unexpected_exception(self, out, test, example, exc_info):
raise UnexpectedException(test, example, exc_info)
def report_failure(self, out, test, example, got):
raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
master = None
def testmod(m=None, name=None, globs=None, verbose=None,
report=True, optionflags=0, extraglobs=None,
raise_on_error=False, exclude_empty=False):
"""m=None, name=None, globs=None, verbose=None, report=True,
optionflags=0, extraglobs=None, raise_on_error=False,
exclude_empty=False
Test examples in docstrings in functions and classes reachable
from module m (or the current module if m is not supplied), starting
with m.__doc__.
Also test examples reachable from dict m.__test__ if it exists and is
not None. m.__test__ maps names to functions, classes and strings;
function and class docstrings are tested even if the name is private;
strings are tested directly, as if they were docstrings.
Return (#failures, #tests).
See help(doctest) for an overview.
Optional keyword arg "name" gives the name of the module; by default
use m.__name__.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use m.__dict__. A copy of this
dict is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used. This is new in 2.4.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. This is new in 2.3. Possible values (see the
docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
SKIP
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
# If no module was given, then use __main__.
if m is None:
# DWA - m will still be None if this wasn't invoked from the command
# line, in which case the following TypeError is about as good an error
# as we should expect
m = sys.modules.get('__main__')
# Check that we were actually given a module.
if not inspect.ismodule(m):
raise TypeError("testmod: module required; %r" % (m,))
# If no name was given, then use the module's name.
if name is None:
name = m.__name__
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(exclude_empty=exclude_empty)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return TestResults(runner.failures, runner.tries)
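# Hedged usage sketch (editorial addition): run testmod() over the
# current module and derive an exit status.  The exit-code convention
# below is the caller's choice, not something testmod itself mandates.
def _example_testmod():
    results = testmod(verbose=False, optionflags=ELLIPSIS)
    return 1 if results.failed else 0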
def testfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False, parser=DocTestParser(),
encoding=None):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg "module_relative" specifies how filenames
should be interpreted:
- If "module_relative" is True (the default), then "filename"
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
"package" argument is specified, then it is relative to that
package. To ensure os-independence, "filename" should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If "module_relative" is False, then "filename" specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg "name" gives the name of the test; by default
use the file's basename.
Optional keyword argument "package" is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify "package" if "module_relative" is False.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. Possible values (see the docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
SKIP
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg "parser" specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Optional keyword arg "encoding" specifies an encoding that should
be used to convert the file to unicode.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
text, filename = _load_testfile(filename, package, module_relative)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if '__name__' not in globs:
globs['__name__'] = '__main__'
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
if encoding is not None:
text = text.decode(encoding)
# Read the file, convert it to a test, and run it.
test = parser.get_doctest(text, globs, name, filename, 0)
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return TestResults(runner.failures, runner.tries)
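# Sketch (editorial addition): testfile() on a hypothetical
# module-relative text file of examples; the path is illustrative only.
def _example_testfile():
    results = testfile('docs/examples.txt',
                       optionflags=NORMALIZE_WHITESPACE)
    return results.failed, results.attempted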
def run_docstring_examples(f, globs, verbose=False, name="NoName",
compileflags=None, optionflags=0):
"""
Test examples in the given object's docstring (`f`), using `globs`
as globals. Optional argument `name` is used in failure messages.
If the optional argument `verbose` is true, then generate output
even if there are no failures.
`compileflags` gives the set of flags that should be used by the
Python compiler when running the examples. If not specified, then
it will default to the set of future-import flags that apply to
`globs`.
Optional keyword arg `optionflags` specifies options for the
testing and output. See the documentation for `testmod` for more
information.
"""
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(verbose=verbose, recurse=False)
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(f, name, globs=globs):
runner.run(test, compileflags=compileflags)
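# Minimal sketch (editorial addition): checking only one function's
# docstring examples, without scanning the rest of its module.
def _example_run_docstring_examples():
    def add(a, b):
        """
        >>> add(2, 2)
        4
        """
        return a + b
    run_docstring_examples(add, {'add': add}, name='add')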
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility. It's not
# actually used in any way.
class Tester:
def __init__(self, mod=None, globs=None, verbose=None, optionflags=0):
warnings.warn("class Tester is deprecated; "
"use class doctest.DocTestRunner instead",
DeprecationWarning, stacklevel=2)
if mod is None and globs is None:
raise TypeError("Tester.__init__: must specify mod or globs")
if mod is not None and not inspect.ismodule(mod):
raise TypeError("Tester.__init__: mod must be a module; %r" %
(mod,))
if globs is None:
globs = mod.__dict__
self.globs = globs
self.verbose = verbose
self.optionflags = optionflags
self.testfinder = DocTestFinder()
self.testrunner = DocTestRunner(verbose=verbose,
optionflags=optionflags)
def runstring(self, s, name):
test = DocTestParser().get_doctest(s, self.globs, name, None, None)
if self.verbose:
print "Running string", name
(f,t) = self.testrunner.run(test)
if self.verbose:
print f, "of", t, "examples failed in string", name
return TestResults(f,t)
def rundoc(self, object, name=None, module=None):
f = t = 0
tests = self.testfinder.find(object, name, module=module,
globs=self.globs)
for test in tests:
(f2, t2) = self.testrunner.run(test)
(f,t) = (f+f2, t+t2)
return TestResults(f,t)
def rundict(self, d, name, module=None):
import types
m = types.ModuleType(name)
m.__dict__.update(d)
if module is None:
module = False
return self.rundoc(m, name, module)
def run__test__(self, d, name):
import types
m = types.ModuleType(name)
m.__test__ = d
return self.rundoc(m, name)
def summarize(self, verbose=None):
return self.testrunner.summarize(verbose)
def merge(self, other):
self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
"""Sets the unittest option flags.
The old flag is returned so that a runner could restore the old
value if it wished to:
>>> import doctest
>>> old = doctest._unittest_reportflags
>>> doctest.set_unittest_reportflags(REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE) == old
True
>>> doctest._unittest_reportflags == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
Only reporting flags can be set:
>>> doctest.set_unittest_reportflags(ELLIPSIS)
Traceback (most recent call last):
...
ValueError: ('Only reporting flags allowed', 8)
>>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
"""
global _unittest_reportflags
if (flags & REPORTING_FLAGS) != flags:
raise ValueError("Only reporting flags allowed", flags)
old = _unittest_reportflags
_unittest_reportflags = flags
return old
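# Sketch (editorial addition) of the save/restore pattern described in
# the docstring above: install new reporting flags for a unittest run,
# then restore the previous value.  'run_suite' is a hypothetical
# callable standing in for whatever runs the DocTestSuite.
def _example_reportflags(run_suite):
    old = set_unittest_reportflags(REPORT_NDIFF | REPORT_ONLY_FIRST_FAILURE)
    try:
        run_suite()
    finally:
        set_unittest_reportflags(old)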
class DocTestCase(unittest.TestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None):
unittest.TestCase.__init__(self)
self._dt_optionflags = optionflags
self._dt_checker = checker
self._dt_test = test
self._dt_setUp = setUp
self._dt_tearDown = tearDown
def setUp(self):
test = self._dt_test
if self._dt_setUp is not None:
self._dt_setUp(test)
def tearDown(self):
test = self._dt_test
if self._dt_tearDown is not None:
self._dt_tearDown(test)
test.globs.clear()
def runTest(self):
test = self._dt_test
old = sys.stdout
new = StringIO()
optionflags = self._dt_optionflags
if not (optionflags & REPORTING_FLAGS):
# The option flags don't include any reporting flags,
# so add the default reporting flags
optionflags |= _unittest_reportflags
runner = DocTestRunner(optionflags=optionflags,
checker=self._dt_checker, verbose=False)
try:
runner.DIVIDER = "-"*70
failures, tries = runner.run(
test, out=new.write, clear_globs=False)
finally:
sys.stdout = old
if failures:
raise self.failureException(self.format_failure(new.getvalue()))
def format_failure(self, err):
test = self._dt_test
if test.lineno is None:
lineno = 'unknown line number'
else:
lineno = '%s' % test.lineno
lname = '.'.join(test.name.split('.')[-1:])
return ('Failed doctest test for %s\n'
' File "%s", line %s, in %s\n\n%s'
% (test.name, test.filename, lineno, lname, err)
)
def debug(self):
r"""Run the test case without results and without catching exceptions
The unit test framework includes a debug method on test cases
and test suites to support post-mortem debugging. The test code
is run in such a way that errors are not caught. This way a
caller can catch the errors and initiate post-mortem debugging.
The DocTestCase provides a debug method that raises
UnexpectedException errors if there is an unexpected
exception:
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except UnexpectedException, failure:
... pass
The UnexpectedException contains the test, the example, and
the original exception:
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
"""
self.setUp()
runner = DebugRunner(optionflags=self._dt_optionflags,
checker=self._dt_checker, verbose=False)
runner.run(self._dt_test, clear_globs=False)
self.tearDown()
def id(self):
return self._dt_test.name
def __repr__(self):
name = self._dt_test.name.split('.')
return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
__str__ = __repr__
def shortDescription(self):
return "Doctest: " + self._dt_test.name
class SkipDocTestCase(DocTestCase):
    def __init__(self, module):
        self.module = module
        DocTestCase.__init__(self, None)
    def setUp(self):
        self.skipTest("DocTestSuite will not work with -O2 and above")
    def test_skip(self):
        pass
    def shortDescription(self):
        return "Skipping tests from %s" % self.module.__name__
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
**options):
"""
Convert doctest tests for a module to a unittest test suite.
This converts each documentation string in a module that
contains doctest tests to a unittest test case. If any of the
tests in a doc string fail, then the test case fails. An exception
is raised showing the name of the file containing the test and a
(sometimes approximate) line number.
The `module` argument provides the module to be tested. The argument
can be either a module or a module name.
If no argument is given, the calling module is used.
A number of options may be provided as keyword arguments:
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
"""
if test_finder is None:
test_finder = DocTestFinder()
module = _normalize_module(module)
tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
    if not tests and sys.flags.optimize >= 2:
        # Skip doctests when running with -O2
        suite = unittest.TestSuite()
        suite.addTest(SkipDocTestCase(module))
        return suite
elif not tests:
# Why do we want to do this? Because it reveals a bug that might
# otherwise be hidden.
raise ValueError(module, "has no tests")
tests.sort()
suite = unittest.TestSuite()
for test in tests:
if len(test.examples) == 0:
continue
if not test.filename:
filename = module.__file__
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
test.filename = filename
suite.addTest(DocTestCase(test, **options))
return suite
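# Sketch (editorial addition): the common load_tests hook for exposing
# a module's doctests to unittest discovery.  'mymodule' is a
# placeholder module name, not a real dependency.
def _example_load_tests(loader, tests, ignore):
    tests.addTests(DocTestSuite('mymodule', optionflags=ELLIPSIS))
    return tests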
class DocFileCase(DocTestCase):
def id(self):
return '_'.join(self._dt_test.name.split('.'))
def __repr__(self):
return self._dt_test.filename
__str__ = __repr__
def format_failure(self, err):
return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
% (self._dt_test.name, self._dt_test.filename, err)
)
def DocFileTest(path, module_relative=True, package=None,
globs=None, parser=DocTestParser(),
encoding=None, **options):
if globs is None:
globs = {}
else:
globs = globs.copy()
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path.
doc, path = _load_testfile(path, package, module_relative)
if "__file__" not in globs:
globs["__file__"] = path
# Find the file and read it.
name = os.path.basename(path)
# If an encoding is specified, use it to convert the file to unicode
if encoding is not None:
doc = doc.decode(encoding)
# Convert it to a test, and wrap it in a DocFileCase.
test = parser.get_doctest(doc, globs, name, path, 0)
return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
"""A unittest suite for one or more doctest files.
The path to each doctest file is given as a string; the
interpretation of that string depends on the keyword argument
"module_relative".
A number of options may be provided as keyword arguments:
module_relative
If "module_relative" is True, then the given file paths are
interpreted as os-independent module-relative paths. By
default, these paths are relative to the calling module's
directory; but if the "package" argument is specified, then
they are relative to that package. To ensure os-independence,
"filename" should use "/" characters to separate path
segments, and may not be an absolute path (i.e., it may not
begin with "/").
If "module_relative" is False, then the given file paths are
interpreted as os-specific paths. These paths may be absolute
or relative (to the current working directory).
package
A Python package or the name of a Python package whose directory
should be used as the base directory for module relative paths.
If "package" is not specified, then the calling module's
directory is used as the base directory for module relative
filenames. It is an error to specify "package" if
"module_relative" is False.
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
parser
A DocTestParser (or subclass) that should be used to extract
tests from the files.
encoding
An encoding that will be used to convert the files to unicode.
"""
suite = unittest.TestSuite()
# We do this here so that _normalize_module is called at the right
# level. If it were called in DocFileTest, then this function
# would be the caller and we might guess the package incorrectly.
if kw.get('module_relative', True):
kw['package'] = _normalize_module(kw.get('package'))
for path in paths:
suite.addTest(DocFileTest(path, **kw))
return suite
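# Sketch (editorial addition): bundling two hypothetical documentation
# files into a single suite; the paths are module-relative, matching
# the default behavior documented above.
def _example_docfile_suite():
    return DocFileSuite('README.txt', 'docs/examples.txt',
                        optionflags=ELLIPSIS)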
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
r"""Extract script from text with examples.
Converts text with examples to a Python script. Example input is
converted to regular code. Example output and all other words
are converted to comments:
>>> text = '''
... Here are examples of simple math.
...
... Python has super accurate integer addition
...
... >>> 2 + 2
... 5
...
... And very friendly error messages:
...
... >>> 1/0
... To Infinity
... And
... Beyond
...
... You can use logic if you want:
...
... >>> if 0:
... ... blah
... ... blah
... ...
...
... Ho hum
... '''
>>> print script_from_examples(text)
# Here are examples of simple math.
#
# Python has super accurate integer addition
#
2 + 2
# Expected:
## 5
#
# And very friendly error messages:
#
1/0
# Expected:
## To Infinity
## And
## Beyond
#
# You can use logic if you want:
#
if 0:
blah
blah
#
# Ho hum
<BLANKLINE>
"""
output = []
for piece in DocTestParser().parse(s):
if isinstance(piece, Example):
# Add the example's source code (strip trailing NL)
output.append(piece.source[:-1])
# Add the expected output:
want = piece.want
if want:
output.append('# Expected:')
output += ['## '+l for l in want.split('\n')[:-1]]
else:
# Add non-example text.
output += [_comment_line(l)
for l in piece.split('\n')[:-1]]
# Trim junk on both ends.
while output and output[-1] == '#':
output.pop()
while output and output[0] == '#':
output.pop(0)
# Combine the output, and return it.
# Add a courtesy newline to prevent exec from choking (see bug #1172785)
return '\n'.join(output) + '\n'
def testsource(module, name):
"""Extract the test sources from a doctest docstring as a script.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the doc string with tests to be debugged.
"""
module = _normalize_module(module)
tests = DocTestFinder().find(module)
test = [t for t in tests if t.name == name]
if not test:
raise ValueError(name, "not found in tests")
test = test[0]
testsrc = script_from_examples(test.docstring)
return testsrc
def debug_src(src, pm=False, globs=None):
"""Debug a single doctest docstring, in argument `src`'"""
testsrc = script_from_examples(src)
debug_script(testsrc, pm, globs)
def debug_script(src, pm=False, globs=None):
"Debug a test script. `src` is the script, as a string."
import pdb
    # Note that tempfile.NamedTemporaryFile() cannot be used.  As the
    # docs say, a file so created cannot be opened by name a second time
    # on modern Windows boxes, and execfile() needs to open it.
srcfilename = tempfile.mktemp(".py", "doctestdebug")
f = open(srcfilename, 'w')
f.write(src)
f.close()
try:
if globs:
globs = globs.copy()
else:
globs = {}
if pm:
try:
execfile(srcfilename, globs, globs)
except:
print sys.exc_info()[1]
pdb.post_mortem(sys.exc_info()[2])
else:
# Note that %r is vital here. '%s' instead can, e.g., cause
# backslashes to get treated as metacharacters on Windows.
pdb.run("execfile(%r)" % srcfilename, globs, globs)
finally:
os.remove(srcfilename)
def debug(module, name, pm=False):
"""Debug a single doctest docstring.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the docstring with tests to be debugged.
"""
module = _normalize_module(module)
testsrc = testsource(module, name)
debug_script(testsrc, pm, module.__dict__)
######################################################################
## 10. Example Usage
######################################################################
class _TestClass:
"""
A pointless class, for sanity-checking of docstring testing.
Methods:
square()
get()
>>> _TestClass(13).get() + _TestClass(-12).get()
1
>>> hex(_TestClass(13).square().get())
'0xa9'
"""
def __init__(self, val):
"""val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print t.get()
123
"""
self.val = val
def square(self):
"""square() -> square TestClass's associated value
>>> _TestClass(13).square().get()
169
"""
self.val = self.val ** 2
return self
def get(self):
"""get() -> return TestClass's associated value.
>>> x = _TestClass(-42)
>>> print x.get()
-42
"""
return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print 'foo\n\nbar\n'
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print range(1000) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print range(30) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
testfiles = [arg for arg in sys.argv[1:] if arg and arg[0] != '-']
if not testfiles:
name = os.path.basename(sys.argv[0])
if '__loader__' in globals(): # python -m
name, _ = os.path.splitext(name)
print("usage: {0} [-v] file ...".format(name))
return 2
for filename in testfiles:
if filename.endswith(".py"):
# It is a module -- insert its dir into sys.path and try to
# import it. If it is part of a package, that possibly
# won't work because of package imports.
dirname, filename = os.path.split(filename)
sys.path.insert(0, dirname)
m = __import__(filename[:-3])
del sys.path[0]
failures, _ = testmod(m)
else:
failures, _ = testfile(filename, module_relative=False)
if failures:
return 1
return 0
if __name__ == "__main__":
sys.exit(_test())
|
Tesora/tesora-horizon
|
refs/heads/master
|
openstack_dashboard/enabled/_1410_network_panel_group.py
|
43
|
from django.utils.translation import ugettext_lazy as _
# The slug of the panel group to be added to HORIZON_CONFIG. Required.
PANEL_GROUP = 'network'
# The display name of the PANEL_GROUP. Required.
PANEL_GROUP_NAME = _('Network')
# The slug of the dashboard the PANEL_GROUP associated with. Required.
PANEL_GROUP_DASHBOARD = 'project'
|
ftomassetti/intellij-community
|
refs/heads/master
|
python/testData/refactoring/introduceVariable/dontSuggestBuiltinTypeNames.py
|
166
|
"foo <caret>bar"
|
Hybrid-Cloud/Hybrid-Cloud-Patches-For-Tricircle
|
refs/heads/master
|
hybrid-cloud/neutron/agent/l3_agent.py
|
1
|
# Copyright 2012 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
import netaddr
import datetime
import eventlet
eventlet.monkey_patch()
import os
from oslo.config import cfg
from oslo import messaging
import Queue
import json
import boto.ec2
from neutron.agent.common import config
from neutron.agent import l3_ha_agent
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
from neutron.agent.linux import ra
from neutron.agent import rpc as agent_rpc
from neutron.common import config as common_config
from neutron.common import constants as l3_constants
from neutron.common import ipv6_utils
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils as common_utils
from neutron import context
from neutron import manager
from neutron.openstack.common import excutils
from neutron.openstack.common.gettextutils import _LW
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common import periodic_task
from neutron.openstack.common import processutils
from neutron.openstack.common import service
from neutron.openstack.common import timeutils
from neutron import service as neutron_service
from neutron.services.firewall.agents.l3reference import firewall_l3_agent
LOG = logging.getLogger(__name__)
NS_PREFIX = 'qrouter-'
INTERNAL_DEV_PREFIX = 'qr-'
EXTERNAL_DEV_PREFIX = 'qg-'
SNAT_INT_DEV_PREFIX = 'sg-'
FIP_NS_PREFIX = 'fip-'
SNAT_NS_PREFIX = 'snat-'
FIP_2_ROUTER_DEV_PREFIX = 'fpr-'
ROUTER_2_FIP_DEV_PREFIX = 'rfp-'
FIP_EXT_DEV_PREFIX = 'fg-'
FIP_LL_SUBNET = '169.254.30.0/23'
# Route Table index for FIPs
FIP_RT_TBL = 16
# Rule priority range for FIPs
FIP_PR_START = 32768
FIP_PR_END = FIP_PR_START + 40000
RPC_LOOP_INTERVAL = 1
FLOATING_IP_CIDR_SUFFIX = '/32'
# Lower value is higher priority
PRIORITY_RPC = 0
PRIORITY_SYNC_ROUTERS_TASK = 1
DELETE_ROUTER = 1
# Route table index range for VPN extra route updates
VPN_RT_TBL_START = 20
VPN_RT_TBL_END = VPN_RT_TBL_START + 200
# Rule priority range for VPN extra routes
VPN_PR_START = 72769
VPN_PR_END = VPN_PR_START + 40000
class L3PluginApi(n_rpc.RpcProxy):
"""Agent side of the l3 agent RPC API.
API version history:
1.0 - Initial version.
1.1 - Floating IP operational status updates
1.2 - DVR support: new L3 plugin methods added.
- get_ports_by_subnet
- get_agent_gateway_port
Needed by the agent when operating in DVR/DVR_SNAT mode
1.3 - Get the list of activated services
"""
BASE_RPC_API_VERSION = '1.0'
def __init__(self, topic, host):
super(L3PluginApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.host = host
def get_routers(self, context, router_ids=None):
"""Make a remote process call to retrieve the sync data for routers."""
return self.call(context,
self.make_msg('sync_routers', host=self.host,
router_ids=router_ids))
def get_routers_for_ngfw(self, context, router_ids=None):
"""Make a remote process call to retrieve the sync data for routers."""
return self.call(context,
self.make_msg('sync_routers_for_ngfw', host=self.host,
router_ids=router_ids))
def get_external_network_id(self, context):
"""Make a remote process call to retrieve the external network id.
@raise n_rpc.RemoteError: with TooManyExternalNetworks as
exc_type if there are more than one
external network
"""
return self.call(context,
self.make_msg('get_external_network_id',
host=self.host))
def update_floatingip_statuses(self, context, router_id, fip_statuses):
"""Call the plugin update floating IPs's operational status."""
return self.call(context,
self.make_msg('update_floatingip_statuses',
router_id=router_id,
fip_statuses=fip_statuses),
version='1.1')
def get_ports_by_subnet(self, context, subnet_id):
"""Retrieve ports by subnet id."""
return self.call(context,
self.make_msg('get_ports_by_subnet', host=self.host,
subnet_id=subnet_id),
topic=self.topic,
version='1.2')
def get_agent_gateway_port(self, context, fip_net):
"""Get or create an agent_gateway_port."""
return self.call(context,
self.make_msg('get_agent_gateway_port',
network_id=fip_net, host=self.host),
topic=self.topic,
version='1.2')
def get_service_plugin_list(self, context):
"""Make a call to get the list of activated services."""
return self.call(context,
self.make_msg('get_service_plugin_list'),
topic=self.topic,
version='1.3')
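# Sketch (editorial addition): the agent's typical use of this proxy
# during a sync pass; 'host' and 'ctx' come from the agent's own state.
def _example_plugin_rpc(host, ctx):
    api = L3PluginApi(topics.L3PLUGIN, host)
    return api.get_routers(ctx, router_ids=None)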
class LinkLocalAddressPair(netaddr.IPNetwork):
def __init__(self, addr):
super(LinkLocalAddressPair, self).__init__(addr)
def get_pair(self):
"""Builds an address pair from the first and last addresses. """
return (netaddr.IPNetwork("%s/%s" % (self.network, self.prefixlen)),
netaddr.IPNetwork("%s/%s" % (self.broadcast, self.prefixlen)))
class LinkLocalAllocator(object):
"""Manages allocation of link local IP addresses.
These link local addresses are used for routing inside the fip namespaces.
The associations need to persist across agent restarts to maintain
consistency. Without this, there is disruption in network connectivity
    as the agent rewires the connections with the new IP address associations.
Persisting these in the database is unnecessary and would degrade
performance.
"""
def __init__(self, state_file, subnet):
"""Read the file with previous allocations recorded.
See the note in the allocate method for more detail.
"""
self.state_file = state_file
subnet = netaddr.IPNetwork(subnet)
self.allocations = {}
self.remembered = {}
for line in self._read():
key, cidr = line.strip().split(',')
self.remembered[key] = LinkLocalAddressPair(cidr)
self.pool = set(LinkLocalAddressPair(s) for s in subnet.subnet(31))
self.pool.difference_update(self.remembered.values())
def allocate(self, key):
"""Try to allocate a link local address pair.
I expect this to work in all cases because I expect the pool size to be
large enough for any situation. Nonetheless, there is some defensive
programming in here.
Since the allocations are persisted, there is the chance to leak
allocations which should have been released but were not. This leak
could eventually exhaust the pool.
So, if a new allocation is needed, the code first checks to see if
there are any remembered allocations for the key. If not, it checks
the free pool. If the free pool is empty then it dumps the remembered
        allocations to free the pool. This final desperate step will not
happen often in practice.
"""
if key in self.remembered:
self.allocations[key] = self.remembered.pop(key)
return self.allocations[key]
if not self.pool:
            # Desperate times. Try to get more in the pool.
self.pool.update(self.remembered.values())
self.remembered.clear()
if not self.pool:
# More than 256 routers on a compute node!
raise RuntimeError(_("Cannot allocate link local address"))
self.allocations[key] = self.pool.pop()
self._write_allocations()
return self.allocations[key]
def release(self, key):
self.pool.add(self.allocations.pop(key))
self._write_allocations()
def _write_allocations(self):
current = ["%s,%s\n" % (k, v) for k, v in self.allocations.items()]
remembered = ["%s,%s\n" % (k, v) for k, v in self.remembered.items()]
current.extend(remembered)
self._write(current)
def _write(self, lines):
with open(self.state_file, "w") as f:
f.writelines(lines)
def _read(self):
if not os.path.exists(self.state_file):
return []
with open(self.state_file) as f:
return f.readlines()
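# Illustrative sketch (editorial addition) of the allocator lifecycle;
# '/tmp/ll-demo' is a throwaway state file used only for this example.
def _example_link_local_allocator():
    allocator = LinkLocalAllocator('/tmp/ll-demo', FIP_LL_SUBNET)
    # Each allocation is a /31 pair: one end for the router namespace,
    # one for the FIP namespace.
    rtr_addr, fip_addr = allocator.allocate('router-1').get_pair()
    allocator.release('router-1')  # returns the pair to the free pool
    return rtr_addr, fip_addr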
class RouterInfo(l3_ha_agent.RouterMixin):
def __init__(self, router_id, root_helper, use_namespaces, router,
use_ipv6=False):
self.router_id = router_id
self.ex_gw_port = None
self._snat_enabled = None
self._snat_action = None
self.internal_ports = []
self.snat_ports = []
self.floating_ips = set()
self.floating_ips_dict = {}
self.root_helper = root_helper
self.use_namespaces = use_namespaces
# Invoke the setter for establishing initial SNAT action
self.router = router
self.ns_name = NS_PREFIX + router_id if use_namespaces else None
self.iptables_manager = iptables_manager.IptablesManager(
root_helper=root_helper,
use_ipv6=use_ipv6,
namespace=self.ns_name)
self.snat_iptables_manager = None
self.routes = []
# DVR Data
# Linklocal subnet for router and floating IP namespace link
self.rtr_fip_subnet = None
self.extra_route_priorities = set(range(VPN_PR_START, VPN_PR_END))
self.extra_route_rt_tables = set(range(VPN_RT_TBL_START, VPN_RT_TBL_END))
self.extra_route_map = {}
super(RouterInfo, self).__init__()
@property
def router(self):
return self._router
@router.setter
def router(self, value):
self._router = value
if not self._router:
return
# enable_snat by default if it wasn't specified by plugin
self._snat_enabled = self._router.get('enable_snat', True)
# Set a SNAT action for the router
if self._router.get('gw_port'):
self._snat_action = ('add_rules' if self._snat_enabled
else 'remove_rules')
elif self.ex_gw_port:
# Gateway port was removed, remove rules
self._snat_action = 'remove_rules'
def perform_snat_action(self, snat_callback, *args):
# Process SNAT rules for attached subnets
if self._snat_action:
snat_callback(self, self._router.get('gw_port'),
*args, action=self._snat_action)
self._snat_action = None
class RouterUpdate(object):
"""Encapsulates a router update
An instance of this object carries the information necessary to prioritize
and process a request to update a router.
"""
def __init__(self, router_id, priority,
action=None, router=None, timestamp=None):
self.priority = priority
self.timestamp = timestamp
if not timestamp:
self.timestamp = timeutils.utcnow()
self.id = router_id
self.action = action
self.router = router
def __lt__(self, other):
"""Implements priority among updates
Lower numerical priority always gets precedence. When comparing two
        updates of the same priority, the one with the earlier timestamp
        gets precedence. In the unlikely event that the timestamps are also
        equal, it falls back to a simple comparison of ids, meaning the
        precedence is essentially arbitrary.
"""
if self.priority != other.priority:
return self.priority < other.priority
if self.timestamp != other.timestamp:
return self.timestamp < other.timestamp
return self.id < other.id
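# Sketch (editorial addition) of the ordering __lt__ defines: an RPC
# update always sorts ahead of a full-sync update, regardless of the
# order in which they were created.
def _example_update_ordering():
    sync_update = RouterUpdate('router-a', PRIORITY_SYNC_ROUTERS_TASK)
    rpc_update = RouterUpdate('router-b', PRIORITY_RPC)
    return sorted([sync_update, rpc_update])[0] is rpc_update  # True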
class ExclusiveRouterProcessor(object):
"""Manager for access to a router for processing
This class controls access to a router in a non-blocking way. The first
instance to be created for a given router_id is granted exclusive access to
the router.
Other instances may be created for the same router_id while the first
instance has exclusive access. If that happens then it doesn't block and
wait for access. Instead, it signals to the master instance that an update
came in with the timestamp.
This way, a thread will not block to wait for access to a router. Instead
it effectively signals to the thread that is working on the router that
something has changed since it started working on it. That thread will
simply finish its current iteration and then repeat.
    This class keeps track of the last time that router data was fetched and
    processed. The timestamp that it keeps must be no later than the time
    the data used to process the router was fetched from the database, but
    as close to it as possible. The timestamp should not be recorded,
    however, until the router has been processed using the fetched data.
"""
_masters = {}
_router_timestamps = {}
def __init__(self, router_id):
self._router_id = router_id
if router_id not in self._masters:
self._masters[router_id] = self
self._queue = []
self._master = self._masters[router_id]
def _i_am_master(self):
return self == self._master
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self._i_am_master():
del self._masters[self._router_id]
def _get_router_data_timestamp(self):
return self._router_timestamps.get(self._router_id,
datetime.datetime.min)
def fetched_and_processed(self, timestamp):
"""Records the data timestamp after it is used to update the router"""
new_timestamp = max(timestamp, self._get_router_data_timestamp())
self._router_timestamps[self._router_id] = new_timestamp
def queue_update(self, update):
"""Queues an update from a worker
This is the queue used to keep new updates that come in while a router
is being processed. These updates have already bubbled to the front of
the RouterProcessingQueue.
"""
self._master._queue.append(update)
def updates(self):
"""Processes the router until updates stop coming
Only the master instance will process the router. However, updates may
come in from other workers while it is in progress. This method loops
until they stop coming.
"""
if self._i_am_master():
while self._queue:
# Remove the update from the queue even if it is old.
update = self._queue.pop(0)
# Process the update only if it is fresh.
if self._get_router_data_timestamp() < update.timestamp:
yield update
class RouterProcessingQueue(object):
"""Manager of the queue of routers to process."""
def __init__(self):
self._queue = Queue.PriorityQueue()
def add(self, update):
self._queue.put(update)
def each_update_to_next_router(self):
"""Grabs the next router from the queue and processes
This method uses a for loop to process the router repeatedly until
updates stop bubbling to the front of the queue.
"""
next_update = self._queue.get()
with ExclusiveRouterProcessor(next_update.id) as rp:
# Queue the update whether this worker is the master or not.
rp.queue_update(next_update)
# Here, if the current worker is not the master, the call to
# rp.updates() will not yield and so this will essentially be a
# noop.
for update in rp.updates():
yield (rp, update)
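# Hedged sketch (editorial addition) of how a worker drains the queue:
# updates come out in priority order and are handed to the exclusive
# processor one router at a time.  'handle_update' is a hypothetical
# callback standing in for the agent's real processing routine.
def _example_drain_queue(queue, handle_update):
    for rp, update in queue.each_update_to_next_router():
        handle_update(update)
        rp.fetched_and_processed(update.timestamp)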
class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
l3_ha_agent.AgentMixin,
manager.Manager):
"""Manager for L3NatAgent
API version history:
1.0 initial Version
1.1 changed the type of the routers parameter
to the routers_updated method.
It was previously a list of routers in dict format.
It is now a list of router IDs only.
Per rpc versioning rules, it is backwards compatible.
1.2 - DVR support: new L3 agent methods added.
- add_arp_entry
- del_arp_entry
Needed by the L3 service when dealing with DVR
"""
RPC_API_VERSION = '1.2'
OPTS = [
cfg.StrOpt('agent_mode', default='legacy',
help=_("The working mode for the agent. Allowed modes are: "
"'legacy' - this preserves the existing behavior "
"where the L3 agent is deployed on a centralized "
"networking node to provide L3 services like DNAT, "
"and SNAT. Use this mode if you do not want to "
"adopt DVR. 'dvr' - this mode enables DVR "
"functionality and must be used for an L3 agent "
"that runs on a compute host. 'dvr_snat' - this "
"enables centralized SNAT support in conjunction "
"with DVR. This mode must be used for an L3 agent "
"running on a centralized node (or in single-host "
"deployments, e.g. devstack)")),
cfg.StrOpt('external_network_bridge', default='br-ex',
help=_("Name of bridge used for external network "
"traffic.")),
cfg.IntOpt('metadata_port',
default=9697,
help=_("TCP Port used by Neutron metadata namespace "
"proxy.")),
cfg.IntOpt('send_arp_for_ha',
default=3,
help=_("Send this many gratuitous ARPs for HA setup, if "
"less than or equal to 0, the feature is disabled")),
cfg.StrOpt('router_id', default='',
help=_("If namespaces is disabled, the l3 agent can only"
" configure a router that has the matching router "
"ID.")),
cfg.BoolOpt('handle_internal_only_routers',
default=True,
help=_("Agent should implement routers with no gateway")),
cfg.StrOpt('gateway_external_network_id', default='',
help=_("UUID of external network for routers implemented "
"by the agents.")),
cfg.BoolOpt('enable_metadata_proxy', default=True,
help=_("Allow running metadata proxy.")),
cfg.BoolOpt('router_delete_namespaces', default=False,
help=_("Delete namespace after removing a router.")),
cfg.StrOpt('metadata_proxy_socket',
default='$state_path/metadata_proxy',
help=_('Location of Metadata Proxy UNIX domain '
'socket')),
cfg.BoolOpt('fip_centralized',
                    default=False,
                    help=_('Whether the floating IP mode is centralized '
                           'or distributed in dvr mode. Allowed values '
                           'are: true or false.')),
]
    PROVIDER_OPTS = [
        cfg.StrOpt('provider_name',
                   default='',
                   help=_('the provider type of AZ: aws or vcloud')),
        cfg.StrOpt('region',
                   default='',
                   help=_('the region for connection to EC2 in aws provider')),
        cfg.StrOpt('access_key_id',
                   default='',
                   help=_('the access key id for connection to EC2 in aws provider')),
        cfg.StrOpt('secret_key',
                   default='',
                   help=_('the secret key for connection to EC2 in aws provider')),
        cfg.StrOpt('subnet_api_cidr',
                   default='',
                   help=_('the ip cidr of the api subnet in aws provider')),
        cfg.StrOpt('network_interface_id',
                   default='',
                   help=_('the api subnet interface id of the network node in aws provider')),
        cfg.StrOpt('gateway_ip',
                   default='',
                   help=_('the ip address of the eth device on the api subnet in aws provider')),
        cfg.ListOpt('exclued_private_ips',
                    default=[],
                    help=_('private ips already in use on the api subnet that '
                           'have no elastic ip in aws provider')),
        cfg.StrOpt('vcloud_config_file',
                   default='/etc/neutron/vcloud.json',
                   help=_('the config file for vcloud')),
    ]
def __init__(self, host, conf=None):
if conf:
self.conf = conf
else:
self.conf = cfg.CONF
self.root_helper = config.get_root_helper(self.conf)
self.router_info = {}
self.relay_cidr = ''
self._check_config_params()
try:
self.driver = importutils.import_object(
self.conf.interface_driver,
self.conf
)
except Exception:
msg = _("Error importing interface driver "
"'%s'") % self.conf.interface_driver
LOG.error(msg)
raise SystemExit(1)
self.context = context.get_admin_context_without_session()
self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host)
self.fullsync = True
self.sync_progress = False
self.provider_name = self.conf.provider_opts.provider_name
self.private_ip_map = {}
self.elastic_ip_map = {}
if self.provider_name == 'aws':
self.region = self.conf.provider_opts.region
self.access_key_id = self.conf.provider_opts.access_key_id
self.secret_key = self.conf.provider_opts.secret_key
self.subnet_api_cidr = self.conf.provider_opts.subnet_api_cidr
self.gateway_ip = self.conf.provider_opts.gateway_ip
self.network_interface_id = self.conf.provider_opts.network_interface_id
self.exclued_private_ips = self.conf.provider_opts.exclued_private_ips
self.ec2_conn = boto.ec2.connect_to_region(
self.region,
aws_access_key_id = self.access_key_id,
aws_secret_access_key = self.secret_key)
for private_ip in self.exclued_private_ips:
self.elastic_ip_map[private_ip] = ''
            addresses = self.ec2_conn.get_all_addresses()
for address in addresses:
private_ip = address.private_ip_address
elastic_ip = address.public_ip
if private_ip:
self.private_ip_map[elastic_ip] = private_ip
self.elastic_ip_map[private_ip] = elastic_ip
self.available_private_ips = set(self._allocate_pools_for_cidr(self.subnet_api_cidr))
            self.available_private_ips -= set(self.elastic_ip_map)
elif self.provider_name == 'vcloud':
            try:
                with open(self.conf.provider_opts.vcloud_config_file) as f:
                    vcloud_config = json.load(f)
            except ValueError:
                LOG.error(_("Failed to load vcloud json config file %s."),
                          self.conf.provider_opts.vcloud_config_file)
                return
self.auxiliary_cidr = vcloud_config['auxiliary_cidr']
self.gateway_ip = vcloud_config['auxiliary_gateway_ip']
ip_addr = '%s/%s' % (self.gateway_ip, self.auxiliary_cidr.split('/')[1])
ip_cmd = ['ip', 'address', 'add', ip_addr, 'dev', 'external_api']
ip_wrapr = ip_lib.IPWrapper(self.root_helper)
ip_wrapr.netns.execute(ip_cmd, check_exit_code=False)
iptable_cmd = ['iptables', '-D', 'INPUT', '-d', self.auxiliary_cidr, '-i', 'external_api', '-j', 'ACCEPT']
ip_wrapr.netns.execute(iptable_cmd, check_exit_code=False)
iptable_cmd = ['iptables', '-I', 'INPUT', '5', '-d', self.auxiliary_cidr, '-i', 'external_api', '-j', 'ACCEPT']
ip_wrapr.netns.execute(iptable_cmd)
for ip_map in vcloud_config['ip_maps']:
elastic_ip = ip_map['elastic_ip']
private_ip = ip_map['private_ip']
self.private_ip_map[elastic_ip] = private_ip
self.elastic_ip_map[private_ip] = elastic_ip
self.available_private_ips = set()
# Get the list of service plugins from Neutron Server
# This is the first place where we contact neutron-server on startup
# so retry in case its not ready to respond.
retry_count = 5
while True:
retry_count = retry_count - 1
try:
self.neutron_service_plugins = (
self.plugin_rpc.get_service_plugin_list(self.context))
except n_rpc.RemoteError as e:
with excutils.save_and_reraise_exception() as ctx:
ctx.reraise = False
                    LOG.warning(_LW('l3-agent cannot check service plugins '
                                    'enabled on the neutron server at '
                                    'startup due to an RPC error. This '
                                    'happens when the server does not '
                                    'support this RPC API. If the error is '
                                    'UnsupportedVersion you can ignore this '
                                    'warning. Detail message: %s'), e)
self.neutron_service_plugins = None
except messaging.MessagingTimeout as e:
with excutils.save_and_reraise_exception() as ctx:
if retry_count > 0:
ctx.reraise = False
LOG.warning(_LW('l3-agent cannot check service '
'plugins enabled on the neutron '
'server. Retrying. '
'Detail message: %s'), e)
continue
break
self._clean_stale_namespaces = self.conf.use_namespaces
# dvr data
self.agent_gateway_port = {}
self.local_subnets = LinkLocalAllocator(
os.path.join(self.conf.state_path, 'fip-linklocal-networks'),
FIP_LL_SUBNET)
self.fip_priorities = set(range(FIP_PR_START, FIP_PR_END))
self._queue = RouterProcessingQueue()
super(L3NATAgent, self).__init__(conf=self.conf)
self.target_ex_net_id = None
self.use_ipv6 = ipv6_utils.is_enabled()
def _check_config_params(self):
"""Check items in configuration files.
Check for required and invalid configuration items.
The actual values are not verified for correctness.
"""
if not self.conf.interface_driver:
msg = _('An interface driver must be specified')
LOG.error(msg)
raise SystemExit(1)
if not self.conf.use_namespaces and not self.conf.router_id:
msg = _('Router id is required if not using namespaces.')
LOG.error(msg)
raise SystemExit(1)
def _list_namespaces(self):
"""Get a set of all router namespaces on host
The argument routers is the list of routers that are recorded in
the database as being hosted on this node.
"""
try:
root_ip = ip_lib.IPWrapper(self.root_helper)
host_namespaces = root_ip.get_namespaces(self.root_helper)
return set(ns for ns in host_namespaces
if (ns.startswith(NS_PREFIX)
or ns.startswith(SNAT_NS_PREFIX)))
except RuntimeError:
LOG.exception(_('RuntimeError in obtaining router list '
'for namespace cleanup.'))
return set()
def _cleanup_namespaces(self, router_namespaces, router_ids):
"""Destroy stale router namespaces on host when L3 agent restarts
This routine is called when self._clean_stale_namespaces is True.
The argument router_namespaces is the list of all routers namespaces
The argument router_ids is the list of ids for known routers.
"""
ns_to_ignore = set(NS_PREFIX + id for id in router_ids)
ns_to_ignore.update(SNAT_NS_PREFIX + id for id in router_ids)
ns_to_destroy = router_namespaces - ns_to_ignore
self._destroy_stale_router_namespaces(ns_to_destroy)
def _destroy_stale_router_namespaces(self, router_namespaces):
"""Destroys the stale router namespaces
        The argument router_namespaces is a list of stale router namespaces.
As some stale router namespaces may not be able to be deleted, only
one attempt will be made to delete them.
"""
for ns in router_namespaces:
ra.disable_ipv6_ra(ns[len(NS_PREFIX):], ns, self.root_helper)
try:
self._destroy_namespace(ns)
except RuntimeError:
LOG.exception(_('Failed to destroy stale router namespace '
'%s'), ns)
self._clean_stale_namespaces = False
def _destroy_namespace(self, ns):
if ns.startswith(NS_PREFIX):
if self.conf.enable_metadata_proxy:
self._destroy_metadata_proxy(ns[len(NS_PREFIX):], ns)
self._destroy_router_namespace(ns)
elif ns.startswith(FIP_NS_PREFIX):
self._destroy_fip_namespace(ns)
elif ns.startswith(SNAT_NS_PREFIX):
self._destroy_snat_namespace(ns)
def _delete_namespace(self, ns_ip, ns):
try:
ns_ip.netns.delete(ns)
except RuntimeError:
msg = _('Failed trying to delete namespace: %s') % ns
LOG.exception(msg)
def _destroy_snat_namespace(self, ns):
ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=ns)
# delete internal interfaces
for d in ns_ip.get_devices(exclude_loopback=True):
if d.name.startswith(SNAT_INT_DEV_PREFIX):
LOG.debug('Unplugging DVR device %s', d.name)
self.driver.unplug(d.name, namespace=ns,
prefix=SNAT_INT_DEV_PREFIX)
# TODO(mrsmith): delete ext-gw-port
LOG.debug('DVR: destroy snat ns: %s', ns)
if self.conf.router_delete_namespaces:
self._delete_namespace(ns_ip, ns)
def _destroy_fip_namespace(self, ns):
ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=ns)
for d in ns_ip.get_devices(exclude_loopback=True):
if d.name.startswith(FIP_2_ROUTER_DEV_PREFIX):
# internal link between IRs and FIP NS
ns_ip.del_veth(d.name)
elif d.name.startswith(FIP_EXT_DEV_PREFIX):
# single port from FIP NS to br-ext
# TODO(carl) Where does the port get deleted?
LOG.debug('DVR: unplug: %s', d.name)
self.driver.unplug(d.name,
bridge=self.conf.external_network_bridge,
namespace=ns,
prefix=FIP_EXT_DEV_PREFIX)
LOG.debug('DVR: destroy fip ns: %s', ns)
# TODO(mrsmith): add LOG warn if fip count != 0
if self.conf.router_delete_namespaces:
self._delete_namespace(ns_ip, ns)
def _destroy_router_namespace(self, ns):
ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=ns)
for d in ns_ip.get_devices(exclude_loopback=True):
if d.name.startswith(INTERNAL_DEV_PREFIX):
# device is on default bridge
self.driver.unplug(d.name, namespace=ns,
prefix=INTERNAL_DEV_PREFIX)
elif d.name.startswith(ROUTER_2_FIP_DEV_PREFIX):
ns_ip.del_veth(d.name)
elif d.name.startswith(EXTERNAL_DEV_PREFIX):
self.driver.unplug(d.name,
bridge=self.conf.external_network_bridge,
namespace=ns,
prefix=EXTERNAL_DEV_PREFIX)
if self.conf.router_delete_namespaces:
self._delete_namespace(ns_ip, ns)
def _create_namespace(self, name):
ip_wrapper_root = ip_lib.IPWrapper(self.root_helper)
ip_wrapper = ip_wrapper_root.ensure_namespace(name)
ip_wrapper.netns.execute(['sysctl', '-w', 'net.ipv4.ip_forward=1'])
if self.use_ipv6:
ip_wrapper.netns.execute(['sysctl', '-w',
'net.ipv6.conf.all.forwarding=1'])
def _create_router_namespace(self, ri):
self._create_namespace(ri.ns_name)
def _fetch_external_net_id(self, force=False):
"""Find UUID of single external network for this agent."""
if self.conf.gateway_external_network_id:
return self.conf.gateway_external_network_id
# L3 agent doesn't use external_network_bridge to handle external
# networks, so bridge_mappings with provider networks will be used
# and the L3 agent is able to handle any external networks.
if not self.conf.external_network_bridge:
return
if not force and self.target_ex_net_id:
return self.target_ex_net_id
try:
self.target_ex_net_id = self.plugin_rpc.get_external_network_id(
self.context)
return self.target_ex_net_id
except n_rpc.RemoteError as e:
with excutils.save_and_reraise_exception() as ctx:
if e.exc_type == 'TooManyExternalNetworks':
ctx.reraise = False
msg = _(
"The 'gateway_external_network_id' option must be "
"configured for this agent as Neutron has more than "
"one external network.")
raise Exception(msg)
def _router_added(self, router_id, router):
ri = RouterInfo(router_id, self.root_helper,
self.conf.use_namespaces, router,
use_ipv6=self.use_ipv6)
self.router_info[router_id] = ri
if self.conf.use_namespaces:
self._create_router_namespace(ri)
for c, r in self.metadata_filter_rules():
ri.iptables_manager.ipv4['filter'].add_rule(c, r)
        # Set iptables rules for the public cloud internal_relay_network
for c, r in self.public_cloud_filter_rules():
ri.iptables_manager.ipv4['raw'].add_rule(c, r)
for c, r in self.metadata_nat_rules():
ri.iptables_manager.ipv4['nat'].add_rule(c, r)
ri.iptables_manager.apply()
self.process_router_add(ri)
if ri.is_ha:
self.process_ha_router_added(ri)
if self.conf.enable_metadata_proxy:
if ri.is_ha:
self._add_keepalived_notifiers(ri)
else:
self._spawn_metadata_proxy(ri.router_id, ri.ns_name)
def _router_removed(self, router_id):
ri = self.router_info.get(router_id)
if ri is None:
LOG.warn(_("Info for router %s were not found. "
"Skipping router removal"), router_id)
return
if ri.is_ha:
self.process_ha_router_removed(ri)
ri.router['gw_port'] = None
ri.router[l3_constants.INTERFACE_KEY] = []
ri.router[l3_constants.FLOATINGIP_KEY] = []
self.process_router(ri)
for c, r in self.metadata_filter_rules():
ri.iptables_manager.ipv4['filter'].remove_rule(c, r)
for c, r in self.metadata_nat_rules():
ri.iptables_manager.ipv4['nat'].remove_rule(c, r)
ri.iptables_manager.apply()
if self.conf.enable_metadata_proxy:
self._destroy_metadata_proxy(ri.router_id, ri.ns_name)
del self.router_info[router_id]
self._destroy_router_namespace(ri.ns_name)
def _get_metadata_proxy_callback(self, router_id):
def callback(pid_file):
metadata_proxy_socket = cfg.CONF.metadata_proxy_socket
proxy_cmd = ['neutron-ns-metadata-proxy',
'--pid_file=%s' % pid_file,
'--metadata_proxy_socket=%s' % metadata_proxy_socket,
'--router_id=%s' % router_id,
'--state_path=%s' % self.conf.state_path,
'--metadata_port=%s' % self.conf.metadata_port]
            # Do not print logs for VRM when debug is False
if cfg.CONF.debug:
proxy_cmd.extend(config.get_log_args(
cfg.CONF, 'neutron-ns-metadata-proxy-%s.log' %
router_id))
return proxy_cmd
return callback
def _get_metadata_proxy_process_manager(self, router_id, ns_name):
return external_process.ProcessManager(
self.conf,
router_id,
self.root_helper,
ns_name)
def _spawn_metadata_proxy(self, router_id, ns_name):
callback = self._get_metadata_proxy_callback(router_id)
pm = self._get_metadata_proxy_process_manager(router_id, ns_name)
pm.enable(callback)
def _destroy_metadata_proxy(self, router_id, ns_name):
pm = self._get_metadata_proxy_process_manager(router_id, ns_name)
pm.disable()
def _set_subnet_arp_info(self, ri, port):
"""Set ARP info retrieved from Plugin for existing ports."""
if 'id' not in port['subnet'] or not ri.router['distributed']:
return
subnet_id = port['subnet']['id']
subnet_ports = (
self.plugin_rpc.get_ports_by_subnet(self.context,
subnet_id))
for p in subnet_ports:
if (p['device_owner'] not in (
l3_constants.DEVICE_OWNER_ROUTER_INTF,
l3_constants.DEVICE_OWNER_DVR_INTERFACE)):
for fixed_ip in p['fixed_ips']:
self._update_arp_entry(ri, fixed_ip['ip_address'],
p['mac_address'],
subnet_id, 'add')
def _set_subnet_info(self, port):
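        # Derive the port's ip_cidr from its first fixed IP and the
        # subnet's prefix length.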
ips = port['fixed_ips']
if not ips:
raise Exception(_("Router port %s has no IP address") % port['id'])
if len(ips) > 1:
LOG.error(_("Ignoring multiple IPs on router port %s"),
port['id'])
prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen
port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen)
def _get_existing_devices(self, ns_name):
ip_wrapper = ip_lib.IPWrapper(root_helper=self.root_helper,
namespace=ns_name)
ip_devs = ip_wrapper.get_devices(exclude_loopback=True)
return [ip_dev.name for ip_dev in ip_devs]
@periodic_task.periodic_task
def check_radvd_task(self, context):
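        # Periodically verify that the radvd process is alive for every
        # router this agent knows about.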
routers = self.plugin_rpc.get_routers(self.context)
for r in routers:
if r['id'] in self.router_info:
ri = self.router_info[r['id']]
ri.router = r
ra.check_processes(ri.router_id,
ri.ns_name,
ri.router.get(l3_constants.INTERFACE_KEY, []),
self.get_internal_device_name,
self.root_helper)
@common_utils.exception_logger()
def process_router(self, ri):
# TODO(mrsmith) - we shouldn't need to check here
if 'distributed' not in ri.router:
ri.router['distributed'] = False
ri.iptables_manager.defer_apply_on()
ex_gw_port = self._get_ex_gw_port(ri)
internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
snat_ports = ri.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, [])
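        # Diff the cached internal ports against the current router state
        # to find ports to plug and unplug.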
existing_port_ids = set([p['id'] for p in ri.internal_ports])
current_port_ids = set([p['id'] for p in internal_ports
if p['admin_state_up']])
new_ports = [p for p in internal_ports if
p['id'] in current_port_ids and
p['id'] not in existing_port_ids]
old_ports = [p for p in ri.internal_ports if
p['id'] not in current_port_ids]
new_ipv6_port = False
old_ipv6_port = False
for p in new_ports:
self._set_subnet_info(p)
self.internal_network_added(ri, p)
ri.internal_ports.append(p)
self._set_subnet_arp_info(ri, p)
if (not new_ipv6_port and
netaddr.IPNetwork(p['subnet']['cidr']).version == 6):
new_ipv6_port = True
for p in old_ports:
self.internal_network_removed(ri, p)
ri.internal_ports.remove(p)
if (not old_ipv6_port and
netaddr.IPNetwork(p['subnet']['cidr']).version == 6):
old_ipv6_port = True
if new_ipv6_port or old_ipv6_port:
ra.enable_ipv6_ra(ri.router_id,
ri.ns_name,
internal_ports,
self.get_internal_device_name,
self.root_helper)
existing_devices = self._get_existing_devices(ri.ns_name)
current_internal_devs = set([n for n in existing_devices
if n.startswith(INTERNAL_DEV_PREFIX)])
current_port_devs = set([self.get_internal_device_name(id) for
id in current_port_ids])
stale_devs = current_internal_devs - current_port_devs
for stale_dev in stale_devs:
LOG.debug(_('Deleting stale internal router device: %s'),
stale_dev)
self.driver.unplug(stale_dev,
namespace=ri.ns_name,
prefix=INTERNAL_DEV_PREFIX)
# TODO(salv-orlando): RouterInfo would be a better place for
# this logic too
ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or
ri.ex_gw_port and ri.ex_gw_port['id'])
interface_name = None
if ex_gw_port_id:
interface_name = self.get_external_device_name(ex_gw_port_id)
ext_port_exists = self._check_external_port_exists(
ri.router_id, ex_gw_port_id)
if ex_gw_port:
def _gateway_ports_equal(port1, port2):
def _get_filtered_dict(d, ignore):
return dict((k, v) for k, v in d.iteritems()
if k not in ignore)
keys_to_ignore = set(['binding:host_id'])
port1_filtered = _get_filtered_dict(port1, keys_to_ignore)
port2_filtered = _get_filtered_dict(port2, keys_to_ignore)
return port1_filtered == port2_filtered
self._set_subnet_info(ex_gw_port)
if ri.router['distributed']:
self._del_external_port_from_qrouter_if_exists(ri.ns_name, interface_name)
# For DVR we need to verify the port actually exists on this
# host since the ri.ex_gw_port may exist in the cache, but
# SNAT data is routed to the remote SNAT host until the
# host is changed.
if (not ri.ex_gw_port or (ri.router['distributed'] and
not ext_port_exists)):
self.external_gateway_added(ri, ex_gw_port, interface_name)
elif not _gateway_ports_equal(ex_gw_port, ri.ex_gw_port) or \
(ri.router['distributed'] and self.get_gw_port_host(ri.router) is None and ext_port_exists):
self.external_gateway_updated(ri, ex_gw_port, interface_name, ext_port_exists)
if (ri.router['distributed'] and self.conf.agent_mode.startswith('dvr_snat')
and self.get_gw_port_host(ri.router) == self.host):
ns_name = self.get_snat_ns_name(ri.router['id'])
existing_devices += self._get_existing_devices(ns_name)
elif not ex_gw_port and ri.ex_gw_port:
self.external_gateway_removed(ri, ri.ex_gw_port, interface_name, ext_port_exists)
stale_devs = [dev for dev in existing_devices
if dev.startswith(EXTERNAL_DEV_PREFIX)
and dev != interface_name]
for stale_dev in stale_devs:
LOG.debug(_('Deleting stale external router device: %s'),
stale_dev)
if (ri.router['distributed'] and self.conf.agent_mode.startswith('dvr_snat')
and self.get_gw_port_host(ri.router) == self.host):
ns_name = self.get_snat_ns_name(ri.router['id'])
else:
ns_name = ri.ns_name
self.driver.unplug(stale_dev,
bridge=self.conf.external_network_bridge,
namespace=ns_name,
prefix=EXTERNAL_DEV_PREFIX)
# Process static routes for router
try:
self.routes_updated(ri)
        except Exception as e:
            LOG.error("Updating routes failed: %s", e)
# Process SNAT rules for external gateway
if (not ri.router['distributed'] or
ex_gw_port and self.get_gw_port_host(ri.router) == self.host):
# Get IPv4 only internal CIDRs
internal_cidrs = [p['ip_cidr'] for p in ri.internal_ports
if netaddr.IPNetwork(p['ip_cidr']).version == 4]
ri.perform_snat_action(self._handle_router_snat_rules,
internal_cidrs, interface_name)
# Process SNAT/DNAT rules for floating IPs
fip_statuses = {}
try:
            if ex_gw_port and (not self.conf.fip_centralized or
                               self.conf.agent_mode != 'dvr'):
existing_floating_ips = ri.floating_ips
floating_ips = self.get_floating_ips(ri)
if ri.router['distributed'] and not self.conf.fip_centralized:
self.create_dvr_fip_interfaces(ri, ex_gw_port,
floating_ips)
self.process_router_floating_ip_nat_rules(ri)
ri.iptables_manager.defer_apply_off()
# Once NAT rules for floating IPs are safely in place
# configure their addresses on the external gateway port
fip_statuses = self.process_router_floating_ip_addresses(
ri, ex_gw_port)
except Exception:
# TODO(salv-orlando): Less broad catching
# All floating IPs must be put in error state
for fip in ri.router.get(l3_constants.FLOATINGIP_KEY, []):
fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR
        if ex_gw_port and (not self.conf.fip_centralized or
                           self.conf.agent_mode != 'dvr'):
# Identify floating IPs which were disabled
ri.floating_ips = set(fip_statuses.keys())
for fip_id in existing_floating_ips - ri.floating_ips:
fip_statuses[fip_id] = l3_constants.FLOATINGIP_STATUS_DOWN
# Update floating IP status on the neutron server
self.plugin_rpc.update_floatingip_statuses(
self.context, ri.router_id, fip_statuses)
# Update ex_gw_port and enable_snat on the router info cache
ri.ex_gw_port = ex_gw_port
ri.snat_ports = snat_ports
ri.enable_snat = ri.router.get('enable_snat')
if ri.is_ha:
if ri.ha_port:
ri.spawn_keepalived()
else:
ri.disable_keepalived()
def _handle_router_snat_rules(self, ri, ex_gw_port, internal_cidrs,
interface_name, action):
# Remove all the rules
# This is safe because if use_namespaces is set as False
# then the agent can only configure one router, otherwise
# each router's SNAT rules will be in their own namespace
if not ri.router['distributed']:
iptables_manager = ri.iptables_manager
elif ri.snat_iptables_manager:
iptables_manager = ri.snat_iptables_manager
else:
LOG.debug("DVR router: no snat rules to be handled")
return
iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
iptables_manager.ipv4['nat'].empty_chain('snat')
        if not ri.router['distributed'] or self.conf.fip_centralized:
# Add back the jump to float-snat
iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
# And add them back if the action is add_rules
if action == 'add_rules' and ex_gw_port:
# ex_gw_port should not be None in this case
# NAT rules are added only if ex_gw_port has an IPv4 address
for ip_addr in ex_gw_port['fixed_ips']:
ex_gw_ip = ip_addr['ip_address']
if self.provider_name in ['aws','vcloud']:
ex_gw_ip = self.private_ip_map.get(ex_gw_ip)
if netaddr.IPAddress(ex_gw_ip).version == 4:
rules = self.external_gateway_nat_rules(ex_gw_ip,
internal_cidrs,
interface_name)
for rule in rules:
iptables_manager.ipv4['nat'].add_rule(*rule)
break
iptables_manager.apply()
def _handle_router_fip_nat_rules(self, ri, interface_name, action):
"""Configures NAT rules for Floating IPs for DVR.
Remove all the rules. This is safe because if
use_namespaces is set as False then the agent can
only configure one router, otherwise each router's
NAT rules will be in their own namespace.
"""
ri.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
ri.iptables_manager.ipv4['nat'].empty_chain('snat')
# Add back the jump to float-snat
ri.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
# And add them back if the action is add_rules
if action == 'add_rules' and interface_name:
rule = ('POSTROUTING', '! -i %(interface_name)s '
'! -o %(interface_name)s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' %
{'interface_name': interface_name})
ri.iptables_manager.ipv4['nat'].add_rule(*rule)
ri.iptables_manager.apply()
def process_router_floating_ip_nat_rules(self, ri):
"""Configure NAT rules for the router's floating IPs.
Configures iptables rules for the floating ips of the given router
"""
# Clear out all iptables rules for floating ips
if self.conf.fip_centralized and ri.snat_iptables_manager:
iptables_manager = ri.snat_iptables_manager
else:
iptables_manager = ri.iptables_manager
iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip')
floating_ips = self.get_floating_ips(ri)
# Loop once to ensure that floating ips are configured.
for fip in floating_ips:
# Rebuild iptables rules for the floating ip.
fixed = fip['fixed_ip_address']
fip_ip = fip['floating_ip_address']
for chain, rule in self.floating_forward_rules(fip_ip, fixed):
if ri.router['distributed'] and self.conf.fip_centralized:
iptables_manager.ipv4['nat'].add_rule(chain, rule,
top=True, tag='floating_ip')
else:
iptables_manager.ipv4['nat'].add_rule(chain, rule, tag='floating_ip')
iptables_manager.apply()
def create_dvr_fip_interfaces(self, ri, ex_gw_port, floating_ips):
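        # Lazily create the agent gateway port and the router-to-FIP
        # namespace link the first time a floating IP needs them.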
        if not floating_ips:
return
fip_net_id = floating_ips[0].get('floating_network_id', None)
if self.agent_gateway_port.get(fip_net_id, None) is None:
self._create_agent_gateway_port(ri, fip_net_id)
if self.agent_gateway_port.get(fip_net_id, None):
self.create_rtr_2_fip_link(ri, fip_net_id)
def _get_external_device_interface_name(self, ri, ex_gw_port,
floating_ips):
if ri.router['distributed'] and not self.conf.fip_centralized:
if self.agent_gateway_port.get(ex_gw_port.get('network_id', None), None):
return self.get_rtr_int_device_name(ri.router_id)
else:
return self.get_external_device_name(ex_gw_port['id'])
def _add_floating_ip(self, ri, fip, interface_name, device):
fip_ip = fip['floating_ip_address']
ip_cidr = str(fip_ip) + FLOATING_IP_CIDR_SUFFIX
if self.provider_name == 'aws':
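            # On AWS a floating IP is realized by associating an elastic IP
            # with a secondary private address on the agent's interface.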
private_ip = fip_ip
elastic_ip = self.elastic_ip_map.get(private_ip)
addresses = self.ec2_conn.get_all_addresses(addresses = [elastic_ip])
if not addresses:
LOG.error(_('elastic_ip %s does not exist in aws'), elastic_ip)
return
allocation_id = addresses[0].allocation_id
association_id = addresses[0].association_id
if not association_id:
if not self.ec2_conn.assign_private_ip_addresses(network_interface_id = self.network_interface_id,
private_ip_addresses = [private_ip]):
LOG.error(_('assign_private_ip_addresses %s failed'), private_ip)
return
if not self.ec2_conn.associate_address(network_interface_id = self.network_interface_id,
allocation_id = allocation_id,
private_ip_address = private_ip):
LOG.error(_('associate_address %(elastic_ip)s to %(private_ip)s failed'),
{"elastic_ip": elastic_ip, "private_ip": private_ip})
return
elif self.provider_name == 'vcloud':
            # The floating IP has already been converted to an auxiliary IP
            # and the edge has been configured, so nothing to do here.
pass
if ri.is_ha:
self._add_vip(ri, ip_cidr, interface_name)
else:
net = netaddr.IPNetwork(ip_cidr)
try:
device.addr.add(net.version, ip_cidr, str(net.broadcast))
except (processutils.UnknownArgumentError,
processutils.ProcessExecutionError):
# any exception occurred here should cause the floating IP
# to be set in error state
LOG.warn(_("Unable to configure IP address for "
"floating IP: %s"), fip['id'])
return l3_constants.FLOATINGIP_STATUS_ERROR
if ri.router['distributed'] and not self.conf.fip_centralized:
# Special Handling for DVR - update FIP namespace
# and ri.namespace to handle DVR based FIP
self.floating_ip_added_dist(ri, fip)
elif ri.router['distributed'] and self.conf.fip_centralized:
self._send_gratuitous_arp_packet(
self.get_snat_ns_name(ri.router['id']), interface_name, fip_ip)
else:
# As GARP is processed in a distinct thread the call below
# won't raise an exception to be handled.
self._send_gratuitous_arp_packet(
ri.ns_name, interface_name, fip_ip)
return l3_constants.FLOATINGIP_STATUS_ACTIVE
def _remove_floating_ip(self, ri, device, ip_cidr):
if self.provider_name == 'aws':
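            # Undo the elastic IP association and return the private
            # address to the pool.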
private_ip = ip_cidr.split('/')[0]
elastic_ip = self.elastic_ip_map.get(private_ip)
addresses = self.ec2_conn.get_all_addresses(addresses = [elastic_ip])
if not addresses:
LOG.error(_('elastic_ip %s does not exist in aws'), elastic_ip)
return
association_id = addresses[0].association_id
if association_id:
self.ec2_conn.disassociate_address(association_id = association_id)
self.ec2_conn.unassign_private_ip_addresses(self.network_interface_id, private_ip)
self.reclaim_private_ip(elastic_ip)
elif self.provider_name == 'vcloud':
            # The floating IP has already been converted to an auxiliary IP
            # and the edge has been configured, so nothing to do here.
pass
if ri.is_ha:
self._remove_vip(ri, ip_cidr)
else:
net = netaddr.IPNetwork(ip_cidr)
device.addr.delete(net.version, ip_cidr)
if not self.conf.fip_centralized:
self.driver.delete_conntrack_state(root_helper=self.root_helper,
namespace=ri.ns_name,
ip=ip_cidr)
if ri.router['distributed'] and not self.conf.fip_centralized:
self.floating_ip_removed_dist(ri, ip_cidr)
def process_router_floating_ip_addresses(self, ri, ex_gw_port):
"""Configure IP addresses on router's external gateway interface.
Ensures addresses for existing floating IPs and cleans up
those that should not longer be configured.
"""
fip_statuses = {}
floating_ips = self.get_floating_ips(ri)
interface_name = self._get_external_device_interface_name(
ri, ex_gw_port, floating_ips)
if interface_name is None:
return fip_statuses
if ri.router['distributed'] and self.conf.fip_centralized:
ns_name = self.get_snat_ns_name(ri.router['id'])
else:
ns_name = ri.ns_name
if not ip_lib.device_exists(interface_name, self.root_helper,
namespace=ns_name):
return fip_statuses
device = ip_lib.IPDevice(interface_name, self.root_helper,
namespace=ns_name)
existing_cidrs = set([addr['cidr'] for addr in device.addr.list()])
new_cidrs = set()
# Loop once to ensure that floating ips are configured.
for fip in floating_ips:
fip_ip = fip['floating_ip_address']
ip_cidr = str(fip_ip) + FLOATING_IP_CIDR_SUFFIX
new_cidrs.add(ip_cidr)
fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ACTIVE
if ip_cidr not in existing_cidrs:
fip_statuses[fip['id']] = self._add_floating_ip(
ri, fip, interface_name, device)
fips_to_remove = (
ip_cidr for ip_cidr in existing_cidrs - new_cidrs if
ip_cidr.endswith(FLOATING_IP_CIDR_SUFFIX))
for ip_cidr in fips_to_remove:
self._remove_floating_ip(ri, device, ip_cidr)
return fip_statuses
def _get_ex_gw_port(self, ri):
return ri.router.get('gw_port')
def _arping(self, ns_name, interface_name, ip_address, distributed=False):
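        # For DVR the address must be configured on the device before
        # arping and removed again afterwards.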
if distributed:
device = ip_lib.IPDevice(interface_name, self.root_helper,
namespace=ns_name)
ip_cidr = str(ip_address) + FLOATING_IP_CIDR_SUFFIX
net = netaddr.IPNetwork(ip_cidr)
device.addr.add(net.version, ip_cidr, str(net.broadcast))
arping_cmd = ['arping', '-A',
'-I', interface_name,
'-c', self.conf.send_arp_for_ha,
ip_address]
try:
ip_wrapper = ip_lib.IPWrapper(self.root_helper,
namespace=ns_name)
ip_wrapper.netns.execute(arping_cmd, check_exit_code=True)
except Exception as e:
LOG.error(_("Failed sending gratuitous ARP: %s"), str(e))
if distributed:
device.addr.delete(net.version, ip_cidr)
def _send_gratuitous_arp_packet(self, ns_name, interface_name, ip_address,
distributed=False):
if self.conf.send_arp_for_ha > 0:
eventlet.spawn_n(self._arping, ns_name, interface_name, ip_address,
distributed)
def get_internal_port(self, ri, subnet_id):
"""Return internal router port based on subnet_id."""
router_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
for port in router_ports:
fips = port['fixed_ips']
for f in fips:
if f['subnet_id'] == subnet_id:
return port
def _del_external_port_from_qrouter_if_exists(self, ns_name, interface_name):
if ip_lib.device_exists(interface_name,
root_helper=self.root_helper,
namespace=ns_name):
self.driver.unplug(interface_name,
bridge=self.conf.external_network_bridge,
namespace=ns_name,
prefix=EXTERNAL_DEV_PREFIX)
def _check_external_port_exists(self, router_id, port_id):
"""Return True if external gateway port is present."""
if self.conf.agent_mode == 'dvr_snat':
return (ip_lib.device_exists(
self.get_external_device_name(port_id),
root_helper=self.root_helper,
namespace=self.get_snat_ns_name(router_id)))
return True
def get_internal_device_name(self, port_id):
return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_external_device_name(self, port_id):
return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_fip_ext_device_name(self, port_id):
return (FIP_EXT_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_rtr_int_device_name(self, router_id):
return (ROUTER_2_FIP_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN]
def get_fip_int_device_name(self, router_id):
return (FIP_2_ROUTER_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN]
def get_snat_int_device_name(self, port_id):
return (SNAT_INT_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_fip_ns_name(self, ext_net_id):
return (FIP_NS_PREFIX + ext_net_id)
def get_snat_ns_name(self, router_id):
return (SNAT_NS_PREFIX + router_id)
def get_snat_interfaces(self, ri):
return ri.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, [])
def get_gw_port_host(self, router):
host = router.get('gw_port_host')
if not host:
LOG.debug("gw_port_host missing from router: %s",
router['id'])
return host
def get_floating_ips(self, ri):
"""Filter Floating IPs to be hosted on this agent."""
floating_ips = ri.router.get(l3_constants.FLOATINGIP_KEY, [])
if ri.router['distributed'] and not self.conf.fip_centralized:
floating_ips = [i for i in floating_ips if i['host'] == self.host]
if self.provider_name in ['aws', 'vcloud']:
for fip in floating_ips:
fip_ip = fip['floating_ip_address']
                # fip_ip holds a private IP once the conversion has been
                # done; skip it in that case. Otherwise provision a private
                # IP, reusing the previous mapping where possible.
                if self.elastic_ip_map.get(fip_ip):
                    continue
                self.provision_private_ip(fip_ip)
                private_ip = self.private_ip_map[fip_ip]
                fip['floating_ip_address'] = private_ip
return floating_ips
def _map_internal_interfaces(self, ri, int_port, snat_ports):
"""Return the SNAT port for the given internal interface port."""
fixed_ip = int_port['fixed_ips'][0]
subnet_id = fixed_ip['subnet_id']
match_port = [p for p in snat_ports if
p['fixed_ips'][0]['subnet_id'] == subnet_id]
if match_port:
return match_port[0]
else:
            LOG.error(_('DVR: no matching SNAT port found!'))
def _create_dvr_gateway(self, ri, ex_gw_port, gw_interface_name,
snat_ports):
"""Create SNAT namespace."""
snat_ns_name = self.get_snat_ns_name(ri.router['id'])
self._create_namespace(snat_ns_name)
# connect snat_ports to br_int from SNAT namespace
for port in snat_ports:
# create interface_name
self._set_subnet_info(port)
interface_name = self.get_snat_int_device_name(port['id'])
self._internal_network_added(snat_ns_name, port['network_id'],
port['id'], port['ip_cidr'],
port['mac_address'], interface_name,
SNAT_INT_DEV_PREFIX)
preserve_ips = []
if ri.router['distributed'] and self.conf.fip_centralized:
floating_ips = self.get_floating_ips(ri)
preserve_ips = [ip['floating_ip_address'] + FLOATING_IP_CIDR_SUFFIX
for ip in floating_ips]
self._external_gateway_added(ri, ex_gw_port, gw_interface_name,
snat_ns_name, preserve_ips=preserve_ips)
ri.snat_iptables_manager = iptables_manager.IptablesManager(
root_helper=self.root_helper,
namespace=snat_ns_name,
use_ipv6=self.use_ipv6)
# kicks the FW Agent to add rules for the snat namespace
self.process_router_add(ri)
def external_gateway_added(self, ri, ex_gw_port, interface_name):
if ri.router['distributed']:
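            # DVR routers must not emit ICMP redirects from the qrouter
            # namespace; traffic is steered via SNAT redirection instead.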
ip_wrapr = ip_lib.IPWrapper(self.root_helper, namespace=ri.ns_name)
ip_wrapr.netns.execute(['sysctl', '-w',
'net.ipv4.conf.all.send_redirects=0'])
snat_ports = self.get_snat_interfaces(ri)
for p in ri.internal_ports:
gateway = self._map_internal_interfaces(ri, p, snat_ports)
id_name = self.get_internal_device_name(p['id'])
if gateway:
self._snat_redirect_add(ri, gateway['fixed_ips'][0]
['ip_address'], p, id_name)
if (self.conf.agent_mode == 'dvr_snat' and
self.get_gw_port_host(ri.router) == self.host):
self._create_dvr_gateway(ri, ex_gw_port, interface_name,
snat_ports)
for port in snat_ports:
for ip in port['fixed_ips']:
self._update_arp_entry(ri, ip['ip_address'],
port['mac_address'],
ip['subnet_id'], 'add')
return
# Compute a list of addresses this router is supposed to have.
# This avoids unnecessarily removing those addresses and
# causing a momentarily network outage.
floating_ips = self.get_floating_ips(ri)
preserve_ips = [ip['floating_ip_address'] + FLOATING_IP_CIDR_SUFFIX
for ip in floating_ips]
self._external_gateway_added(ri, ex_gw_port, interface_name,
ri.ns_name, preserve_ips)
if ri.is_ha:
self._ha_external_gateway_added(ri, ex_gw_port, interface_name)
def external_gateway_updated(self, ri, ex_gw_port, interface_name, ext_port_exists):
preserve_ips = []
if ri.router['distributed']:
if (self.conf.agent_mode == 'dvr_snat' and
self.get_gw_port_host(ri.router) == self.host):
ns_name = self.get_snat_ns_name(ri.router['id'])
if self.conf.fip_centralized:
floating_ips = self.get_floating_ips(ri)
preserve_ips = [ip['floating_ip_address'] + FLOATING_IP_CIDR_SUFFIX
for ip in floating_ips]
elif self.get_gw_port_host(ri.router) is None and (
ext_port_exists):
# snat move case, remove from this node
self.external_gateway_removed(ri, ex_gw_port, interface_name, ext_port_exists)
return
else:
# no centralized SNAT gateway for this node/agent
LOG.debug("not hosting snat for router: %s", ri.router['id'])
return
else:
ns_name = ri.ns_name
floating_ips = self.get_floating_ips(ri)
preserve_ips = [ip['floating_ip_address'] + FLOATING_IP_CIDR_SUFFIX
for ip in floating_ips]
self._external_gateway_added(ri, ex_gw_port, interface_name,
ns_name, preserve_ips)
if ri.is_ha:
self._ha_external_gateway_updated(ri, ex_gw_port, interface_name)
def _external_gateway_added(self, ri, ex_gw_port, interface_name,
ns_name, preserve_ips):
if self.provider_name == 'aws':
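            # Map the gateway's elastic IP to a private address on the
            # agent's interface before plugging the device.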
elastic_ip = ex_gw_port['ip_cidr'].split('/')[0]
addresses = self.ec2_conn.get_all_addresses(addresses = [elastic_ip])
if not addresses:
LOG.error(_('elastic_ip %s does not exist in aws'), elastic_ip)
return
allocation_id = addresses[0].allocation_id
association_id = addresses[0].association_id
if elastic_ip not in self.private_ip_map:
if not association_id:
self.provision_private_ip(elastic_ip)
else:
                    LOG.error(_('ip %s is associated with a private ip in '
                                'aws but missing from private_ip_map'),
                              elastic_ip)
return
private_ip = self.private_ip_map[elastic_ip]
private_gw_port = {}
private_gw_port['ip_cidr'] = '%s/%s' % (private_ip, self.subnet_api_cidr.split('/')[1])
if not association_id:
if not self.ec2_conn.assign_private_ip_addresses(network_interface_id = self.network_interface_id,
private_ip_addresses = [private_ip]):
LOG.error(_('assign_private_ip_addresses %s failed'), private_ip)
return
if not self.ec2_conn.associate_address(network_interface_id = self.network_interface_id,
allocation_id = allocation_id,
private_ip_address = private_ip):
LOG.error(_('associate_address %(elastic_ip)s to %(private_ip)s failed'),
{"elastic_ip": elastic_ip, "private_ip": private_ip})
return
elif self.provider_name == 'vcloud':
elastic_ip = ex_gw_port['ip_cidr'].split('/')[0]
if elastic_ip not in self.private_ip_map:
self.provision_private_ip(elastic_ip)
private_ip = self.private_ip_map[elastic_ip]
private_gw_port = {}
private_gw_port['ip_cidr'] = '%s/%s' % (private_ip, self.auxiliary_cidr.split('/')[1])
if not ip_lib.device_exists(interface_name,
root_helper=self.root_helper,
namespace=ns_name):
self.driver.plug(ex_gw_port['network_id'],
ex_gw_port['id'], interface_name,
ex_gw_port['mac_address'],
bridge=self.conf.external_network_bridge,
namespace=ns_name,
prefix=EXTERNAL_DEV_PREFIX)
if self.provider_name in ['aws', 'vcloud']:
if not ri.is_ha:
self.driver.init_l3(
interface_name, [private_gw_port['ip_cidr']], namespace=ns_name,
gateway=self.gateway_ip,
extra_subnets=ex_gw_port.get('extra_subnets', []),
preserve_ips=preserve_ips)
ip_address = private_gw_port['ip_cidr'].split('/')[0]
self._send_gratuitous_arp_packet(ns_name,
interface_name, ip_address)
else:
if not ri.is_ha:
self.driver.init_l3(
interface_name, [ex_gw_port['ip_cidr']], namespace=ns_name,
gateway=ex_gw_port['subnet'].get('gateway_ip'),
extra_subnets=ex_gw_port.get('extra_subnets', []),
preserve_ips=preserve_ips)
ip_address = ex_gw_port['ip_cidr'].split('/')[0]
self._send_gratuitous_arp_packet(ns_name,
interface_name, ip_address)
def agent_gateway_added(self, ns_name, ex_gw_port,
interface_name):
"""Add Floating IP gateway port to FIP namespace."""
if not ip_lib.device_exists(interface_name,
root_helper=self.root_helper,
namespace=ns_name):
self.driver.plug(ex_gw_port['network_id'],
ex_gw_port['id'], interface_name,
ex_gw_port['mac_address'],
bridge=self.conf.external_network_bridge,
namespace=ns_name,
prefix=FIP_EXT_DEV_PREFIX)
self.driver.init_l3(interface_name, [ex_gw_port['ip_cidr']],
namespace=ns_name)
ip_address = ex_gw_port['ip_cidr'].split('/')[0]
self._send_gratuitous_arp_packet(ns_name, interface_name, ip_address)
gw_ip = ex_gw_port['subnet']['gateway_ip']
if gw_ip:
ipd = ip_lib.IPDevice(interface_name, self.root_helper,
namespace=ns_name)
ipd.route.add_gateway(gw_ip)
cmd = ['sysctl', '-w', 'net.ipv4.conf.%s.proxy_arp=1' % interface_name]
ip_wrapper = ip_lib.IPWrapper(self.root_helper, namespace=ns_name)
ip_wrapper.netns.execute(cmd, check_exit_code=False)
def internal_ns_interface_added(self, ip_cidr,
interface_name, ns_name):
ip_wrapper = ip_lib.IPWrapper(self.root_helper, namespace=ns_name)
ip_wrapper.netns.execute(['ip', 'addr', 'add',
ip_cidr, 'dev', interface_name])
def external_gateway_removed(self, ri, ex_gw_port, interface_name, ext_port_exists):
if ri.router['distributed']:
            self.process_router_floating_ip_nat_rules(ri)
            self.process_router_floating_ip_addresses(ri, ex_gw_port)
for p in ri.internal_ports:
internal_interface = self.get_internal_device_name(p['id'])
self._snat_redirect_remove(ri, p, internal_interface)
if self.conf.agent_mode == 'dvr_snat' and ext_port_exists:
ns_name = self.get_snat_ns_name(ri.router['id'])
else:
# not hosting agent - no work to do
LOG.debug('DVR: CSNAT not hosted: %s', ex_gw_port)
return
else:
ns_name = ri.ns_name
if ri.is_ha:
self._ha_external_gateway_removed(ri, interface_name)
self.driver.unplug(interface_name,
bridge=self.conf.external_network_bridge,
namespace=ns_name,
prefix=EXTERNAL_DEV_PREFIX)
if ri.router['distributed']:
self._destroy_snat_namespace(ns_name)
if self.provider_name == 'aws':
for ip_addr in ex_gw_port['fixed_ips']:
ex_gw_elastic_ip = ip_addr['ip_address']
ex_gw_private_ip = self.private_ip_map.get(ex_gw_elastic_ip, None)
if not ex_gw_private_ip:
                    LOG.error(_('elastic_ip %s is not assigned to a '
                                'private ip'), ex_gw_elastic_ip)
return
addresses = self.ec2_conn.get_all_addresses(addresses = [ex_gw_elastic_ip])
if not addresses:
LOG.debug(_('elastic_ip %s does not exist'), ex_gw_elastic_ip)
continue
association_id = addresses[0].association_id
if association_id:
self.ec2_conn.disassociate_address(association_id = association_id)
self.ec2_conn.unassign_private_ip_addresses(self.network_interface_id, ex_gw_private_ip)
self.reclaim_private_ip(ex_gw_elastic_ip)
elif self.provider_name == 'vcloud':
            # vCloud relies on pre-configured edge settings, so there is
            # nothing to do here.
pass
def metadata_filter_rules(self):
rules = []
if self.conf.enable_metadata_proxy:
rules.append(('INPUT', '-s 0.0.0.0/0 -d 127.0.0.1 '
'-p tcp -m tcp --dport %s '
'-j ACCEPT' % self.conf.metadata_port))
return rules
def public_cloud_filter_rules(self):
rules = []
if self.relay_cidr:
rules.append(('PREROUTING', '-i qr+ -s 0.0.0.0/0 -d %s '
'-j DROP' % self.relay_cidr))
return rules
def metadata_nat_rules(self):
rules = []
if self.conf.enable_metadata_proxy:
rules.append(('PREROUTING', '-s 0.0.0.0/0 -d 169.254.169.254/32 '
'-p tcp -m tcp --dport 80 -j REDIRECT '
'--to-port %s' % self.conf.metadata_port))
return rules
def external_gateway_nat_rules(self, ex_gw_ip, internal_cidrs,
interface_name):
rules = [('POSTROUTING', '! -i %(interface_name)s '
'! -o %(interface_name)s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' %
{'interface_name': interface_name})]
for cidr in internal_cidrs:
rules.extend(self.internal_network_nat_rules(ex_gw_ip, cidr))
return rules
def _snat_redirect_add(self, ri, gateway, sn_port, sn_int):
"""Adds rules and routes for SNAT redirection."""
try:
snat_idx = netaddr.IPNetwork(sn_port['ip_cidr']).value
ns_ipr = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name)
ns_ipd = ip_lib.IPDevice(sn_int, self.root_helper,
namespace=ri.ns_name)
ns_ipd.route.add_gateway(gateway, table=snat_idx)
ns_ipr.add_rule_from(sn_port['ip_cidr'], snat_idx, snat_idx)
            ns_ipwrapr = ip_lib.IPWrapper(self.root_helper,
                                          namespace=ri.ns_name)
            ns_ipwrapr.netns.execute(['sysctl', '-w', 'net.ipv4.conf.%s.'
                                      'send_redirects=0' % sn_int])
except Exception:
LOG.exception(_('DVR: error adding redirection logic'))
def _snat_redirect_remove(self, ri, sn_port, sn_int):
"""Removes rules and routes for SNAT redirection."""
try:
snat_idx = netaddr.IPNetwork(sn_port['ip_cidr']).value
ns_ipr = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name)
ns_ipd = ip_lib.IPDevice(sn_int, self.root_helper,
namespace=ri.ns_name)
ns_ipd.route.delete_gateway(table=snat_idx)
ns_ipr.delete_rule_priority(snat_idx)
except Exception:
            LOG.exception(_('DVR: snat redirect removal failed'))
def _internal_network_added(self, ns_name, network_id, port_id,
internal_cidr, mac_address,
interface_name, prefix, is_ha=False):
if not ip_lib.device_exists(interface_name,
root_helper=self.root_helper,
namespace=ns_name):
self.driver.plug(network_id, port_id, interface_name, mac_address,
namespace=ns_name,
prefix=prefix)
if not is_ha:
self.driver.init_l3(interface_name, [internal_cidr],
namespace=ns_name)
ip_address = internal_cidr.split('/')[0]
self._send_gratuitous_arp_packet(ns_name, interface_name,
ip_address)
def internal_network_added(self, ri, port):
network_id = port['network_id']
port_id = port['id']
internal_cidr = port['ip_cidr']
mac_address = port['mac_address']
interface_name = self.get_internal_device_name(port_id)
self._internal_network_added(ri.ns_name, network_id, port_id,
internal_cidr, mac_address,
interface_name, INTERNAL_DEV_PREFIX,
ri.is_ha)
if ri.is_ha:
self._add_vip(ri, internal_cidr, interface_name)
ex_gw_port = self._get_ex_gw_port(ri)
if ri.router['distributed'] and ex_gw_port:
snat_ports = self.get_snat_interfaces(ri)
sn_port = self._map_internal_interfaces(ri, port, snat_ports)
if sn_port:
self._snat_redirect_add(ri, sn_port['fixed_ips'][0]
['ip_address'], port, interface_name)
if (self.conf.agent_mode == 'dvr_snat' and
self.get_gw_port_host(ri.router) == self.host):
ns_name = self.get_snat_ns_name(ri.router['id'])
self._set_subnet_info(sn_port)
interface_name = (
self.get_snat_int_device_name(sn_port['id']))
self._internal_network_added(ns_name,
sn_port['network_id'],
sn_port['id'],
sn_port['ip_cidr'],
sn_port['mac_address'],
interface_name,
SNAT_INT_DEV_PREFIX)
def internal_network_removed(self, ri, port):
port_id = port['id']
interface_name = self.get_internal_device_name(port_id)
if ri.router['distributed'] and ri.ex_gw_port:
# DVR handling code for SNAT
self._snat_redirect_remove(ri, port, interface_name)
if self.conf.agent_mode == 'dvr_snat' and (
ri.ex_gw_port['binding:host_id'] == self.host):
snat_port = self._map_internal_interfaces(ri, port,
ri.snat_ports)
if snat_port:
snat_interface = (
self.get_snat_int_device_name(snat_port['id'])
)
ns_name = self.get_snat_ns_name(ri.router['id'])
prefix = SNAT_INT_DEV_PREFIX
if ip_lib.device_exists(snat_interface,
root_helper=self.root_helper,
namespace=ns_name):
self.driver.unplug(snat_interface, namespace=ns_name,
prefix=prefix)
if ip_lib.device_exists(interface_name,
root_helper=self.root_helper,
namespace=ri.ns_name):
if ri.is_ha:
self._clear_vips(ri, interface_name)
self.driver.unplug(interface_name, namespace=ri.ns_name,
prefix=INTERNAL_DEV_PREFIX)
def internal_network_nat_rules(self, ex_gw_ip, internal_cidr):
rules = [('snat', '-s %s -j SNAT --to-source %s' %
(internal_cidr, ex_gw_ip))]
return rules
def _create_agent_gateway_port(self, ri, network_id):
"""Create Floating IP gateway port.
Request port creation from Plugin then creates
Floating IP namespace and adds gateway port.
"""
self.agent_gateway_port[network_id] = (
self.plugin_rpc.get_agent_gateway_port(
self.context, network_id))
if 'subnet' not in self.agent_gateway_port[network_id]:
LOG.error(_('Missing subnet/agent_gateway_port'))
return
self._set_subnet_info(self.agent_gateway_port[network_id])
# add fip-namespace and agent_gateway_port
fip_ns_name = (
self.get_fip_ns_name(str(network_id)))
self._create_namespace(fip_ns_name)
ri.fip_iptables_manager = iptables_manager.IptablesManager(
root_helper=self.root_helper, namespace=fip_ns_name,
use_ipv6=self.use_ipv6)
# no connection tracking needed in fip namespace
ri.fip_iptables_manager.ipv4['raw'].add_rule('PREROUTING',
'-j CT --notrack')
ri.fip_iptables_manager.apply()
interface_name = (
self.get_fip_ext_device_name(self.agent_gateway_port[network_id]['id']))
self.agent_gateway_added(fip_ns_name, self.agent_gateway_port[network_id],
interface_name)
def create_rtr_2_fip_link(self, ri, network_id):
"""Create interface between router and Floating IP namespace."""
rtr_2_fip_name = self.get_rtr_int_device_name(ri.router_id)
fip_2_rtr_name = self.get_fip_int_device_name(ri.router_id)
fip_ns_name = self.get_fip_ns_name(str(network_id))
# add link local IP to interface
if ri.rtr_fip_subnet is None:
ri.rtr_fip_subnet = self.local_subnets.allocate(ri.router_id)
rtr_2_fip, fip_2_rtr = ri.rtr_fip_subnet.get_pair()
ip_wrapper = ip_lib.IPWrapper(self.root_helper,
namespace=ri.ns_name)
        if (not ip_lib.device_exists(fip_2_rtr_name,
                                     root_helper=self.root_helper,
                                     namespace=fip_ns_name) and
                not ip_lib.device_exists(rtr_2_fip_name,
                                         root_helper=self.root_helper,
                                         namespace=ri.ns_name)):
int_dev = ip_wrapper.add_veth(rtr_2_fip_name,
fip_2_rtr_name, fip_ns_name)
self.internal_ns_interface_added(str(rtr_2_fip),
rtr_2_fip_name, ri.ns_name)
self.internal_ns_interface_added(str(fip_2_rtr),
fip_2_rtr_name, fip_ns_name)
int_dev[0].link.set_up()
int_dev[1].link.set_up()
# add default route for the link local interface
device = ip_lib.IPDevice(rtr_2_fip_name, self.root_helper,
namespace=ri.ns_name)
device.route.add_gateway(str(fip_2_rtr.ip), table=FIP_RT_TBL)
#setup the NAT rules and chains
self._handle_router_fip_nat_rules(ri, rtr_2_fip_name, 'add_rules')
# kicks the FW Agent to add rules for the IR namespace if configured
self.process_router_add(ri)
def floating_ip_added_dist(self, ri, fip):
"""Add floating IP to FIP namespace."""
floating_ip = fip['floating_ip_address']
fixed_ip = fip['fixed_ip_address']
rule_pr = self.fip_priorities.pop()
ri.floating_ips_dict[floating_ip] = rule_pr
fip_2_rtr_name = self.get_fip_int_device_name(ri.router_id)
ip_rule = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name)
ip_rule.add_rule_from(fixed_ip, FIP_RT_TBL, rule_pr)
#Add routing rule in fip namespace
fip_cidr = str(floating_ip) + FLOATING_IP_CIDR_SUFFIX
fip_ns_name = self.get_fip_ns_name(str(fip['floating_network_id']))
rtr_2_fip, _ = ri.rtr_fip_subnet.get_pair()
device = ip_lib.IPDevice(fip_2_rtr_name, self.root_helper,
namespace=fip_ns_name)
device.route.add_route(fip_cidr, str(rtr_2_fip.ip))
interface_name = (
self.get_fip_ext_device_name(self.agent_gateway_port.get(fip['floating_network_id'], None)['id']))
self._send_gratuitous_arp_packet(fip_ns_name,
interface_name, floating_ip,
distributed=True)
def floating_ip_removed_dist(self, ri, fip_cidr):
"""Remove floating IP from FIP namespace."""
floating_ip = fip_cidr.split('/')[0]
rtr_2_fip_name = self.get_rtr_int_device_name(ri.router_id)
fip_2_rtr_name = self.get_fip_int_device_name(ri.router_id)
rtr_2_fip, fip_2_rtr = ri.rtr_fip_subnet.get_pair()
fip_ns_name = self.get_fip_ns_name(str(ri.ex_gw_port['network_id']))
ip_rule_rtr = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name)
if floating_ip in ri.floating_ips_dict:
rule_pr = ri.floating_ips_dict[floating_ip]
ip_rule_rtr.delete_rule_priority(rule_pr)
            self.fip_priorities.add(rule_pr)
#TODO(rajeev): Handle else case - exception/log?
else:
rule_pr = None
device = ip_lib.IPDevice(fip_2_rtr_name, self.root_helper,
namespace=fip_ns_name)
device.route.delete_route(fip_cidr, str(rtr_2_fip.ip))
# check if this is the last FIP for this router
cidr_count = self._get_fip_device_cidr_count(rtr_2_fip_name,
ri.ns_name)
if cidr_count == 0:
#remove default route entry
device = ip_lib.IPDevice(rtr_2_fip_name, self.root_helper,
namespace=ri.ns_name)
ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=fip_ns_name)
device.route.delete_gateway(str(fip_2_rtr.ip), table=FIP_RT_TBL)
self.local_subnets.release(ri.router_id)
ri.rtr_fip_subnet = None
ns_ip.del_veth(fip_2_rtr_name)
# clean up fip-namespace if this is the last FIP
if not self._check_fip_devices(fip_ns_name):
self._destroy_fip_namespace(fip_ns_name)
del self.agent_gateway_port[ri.ex_gw_port['network_id']]
def _get_fip_device_cidr_count(self, interface_name, ns_name):
device = ip_lib.IPDevice(interface_name, self.root_helper,
namespace=ns_name)
existing_cidrs = [addr['cidr'] for addr in device.addr.list()]
fip_cidrs = [c for c in existing_cidrs if c.endswith(
FLOATING_IP_CIDR_SUFFIX)]
return len(fip_cidrs)
def _check_fip_devices(self, ns):
ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=ns)
        for d in ns_ip.get_devices(exclude_loopback=True):
            if d.name.startswith(FIP_2_ROUTER_DEV_PREFIX):
                return True
        return False
def floating_forward_rules(self, floating_ip, fixed_ip):
return [('PREROUTING', '-d %s -j DNAT --to %s' %
(floating_ip, fixed_ip)),
('OUTPUT', '-d %s -j DNAT --to %s' %
(floating_ip, fixed_ip)),
('float-snat', '-s %s -j SNAT --to %s' %
(fixed_ip, floating_ip))]
def router_deleted(self, context, router_id):
"""Deal with router deletion RPC message."""
LOG.debug(_('Got router deleted notification for %s'), router_id)
update = RouterUpdate(router_id, PRIORITY_RPC, action=DELETE_ROUTER)
self._queue.add(update)
def _update_arp_entry(self, ri, ip, mac, subnet_id, operation):
"""Add or delete arp entry into router namespace for the subnet."""
port = self.get_internal_port(ri, subnet_id)
# update arp entry only if the subnet is attached to the router
if port:
ip_cidr = str(ip) + '/32'
try:
# TODO(mrsmith): optimize the calls below for bulk calls
net = netaddr.IPNetwork(ip_cidr)
interface_name = self.get_internal_device_name(port['id'])
device = ip_lib.IPDevice(interface_name, self.root_helper,
namespace=ri.ns_name)
if operation == 'add':
device.neigh.add(net.version, ip, mac)
elif operation == 'delete':
device.neigh.delete(net.version, ip, mac)
except Exception:
LOG.exception(_("DVR: Failed updating arp entry"))
self.fullsync = True
def add_arp_entry(self, context, payload):
"""Add arp entry into router namespace. Called from RPC."""
arp_table = payload['arp_table']
router_id = payload['router_id']
ip = arp_table['ip_address']
mac = arp_table['mac_address']
subnet_id = arp_table['subnet_id']
ri = self.router_info.get(router_id)
if ri:
self._update_arp_entry(ri, ip, mac, subnet_id, 'add')
def del_arp_entry(self, context, payload):
"""Delete arp entry from router namespace. Called from RPC."""
arp_table = payload['arp_table']
router_id = payload['router_id']
ip = arp_table['ip_address']
mac = arp_table['mac_address']
subnet_id = arp_table['subnet_id']
ri = self.router_info.get(router_id)
if ri:
self._update_arp_entry(ri, ip, mac, subnet_id, 'delete')
def routers_updated(self, context, routers):
"""Deal with routers modification and creation RPC message."""
LOG.debug(_('Got routers updated notification :%s'), routers)
if routers:
# This is needed for backward compatibility
if isinstance(routers[0], dict):
routers = [router['id'] for router in routers]
for id in routers:
update = RouterUpdate(id, PRIORITY_RPC)
self._queue.add(update)
def router_removed_from_agent(self, context, payload):
LOG.debug(_('Got router removed from agent :%r'), payload)
router_id = payload['router_id']
update = RouterUpdate(router_id, PRIORITY_RPC, action=DELETE_ROUTER)
self._queue.add(update)
def router_added_to_agent(self, context, payload):
LOG.debug(_('Got router added to agent :%r'), payload)
self.routers_updated(context, payload)
def _process_routers(self, routers, all_routers=False):
pool = eventlet.GreenPool()
if (self.conf.external_network_bridge and
not ip_lib.device_exists(self.conf.external_network_bridge)):
LOG.error(_("The external network bridge '%s' does not exist"),
self.conf.external_network_bridge)
return
target_ex_net_id = self._fetch_external_net_id()
# if routers are all the routers we have (They are from router sync on
# starting or when error occurs during running), we seek the
# routers which should be removed.
# If routers are from server side notification, we seek them
# from subset of incoming routers and ones we have now.
if all_routers:
prev_router_ids = set(self.router_info)
else:
prev_router_ids = set(self.router_info) & set(
[router['id'] for router in routers])
cur_router_ids = set()
for r in routers:
# If namespaces are disabled, only process the router associated
# with the configured agent id.
if (not self.conf.use_namespaces and
r['id'] != self.conf.router_id):
continue
ex_net_id = (r['external_gateway_info'] or {}).get('network_id')
if not ex_net_id and not self.conf.handle_internal_only_routers:
continue
if (target_ex_net_id and ex_net_id and
ex_net_id != target_ex_net_id):
# Double check that our single external_net_id has not changed
# by forcing a check by RPC.
if (ex_net_id != self._fetch_external_net_id(force=True)):
continue
cur_router_ids.add(r['id'])
if r['id'] not in self.router_info:
self._router_added(r['id'], r)
ri = self.router_info[r['id']]
ri.router = r
pool.spawn_n(self.process_router, ri)
# identify and remove routers that no longer exist
for router_id in prev_router_ids - cur_router_ids:
pool.spawn_n(self._router_removed, router_id)
pool.waitall()
def _process_router_update(self):
for rp, update in self._queue.each_update_to_next_router():
LOG.debug("Starting router update for %s", update.id)
router = update.router
if update.action != DELETE_ROUTER and not router:
try:
update.timestamp = timeutils.utcnow()
routers = self.plugin_rpc.get_routers(self.context,
[update.id])
except Exception:
msg = _("Failed to fetch router information for '%s'")
LOG.exception(msg, update.id)
self.fullsync = True
continue
if routers:
router = routers[0]
if not router:
self._router_removed(update.id)
continue
self._process_routers([router])
LOG.debug("Finished a router update for %s", update.id)
rp.fetched_and_processed(update.timestamp)
def _process_routers_loop(self):
LOG.debug("Starting _process_routers_loop")
pool = eventlet.GreenPool(size=8)
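        # Each green thread blocks on the update queue, so the pool size
        # bounds how many routers are processed concurrently.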
while True:
pool.spawn_n(self._process_router_update)
def _router_ids(self):
if not self.conf.use_namespaces:
return [self.conf.router_id]
@periodic_task.periodic_task
def periodic_sync_routers_task(self, context):
self._sync_routers_task(context)
def _sync_routers_task(self, context):
if self.services_sync:
super(L3NATAgent, self).process_services_sync(context)
LOG.debug(_("Starting _sync_routers_task - fullsync:%s"),
self.fullsync)
if not self.fullsync:
return
# Capture a picture of namespaces *before* fetching the full list from
# the database. This is important to correctly identify stale ones.
namespaces = set()
if self._clean_stale_namespaces:
namespaces = self._list_namespaces()
prev_router_ids = set(self.router_info)
try:
router_ids = self._router_ids()
timestamp = timeutils.utcnow()
routers = self.plugin_rpc.get_routers(
context, router_ids)
LOG.debug(_('Processing :%r'), routers)
for r in routers:
update = RouterUpdate(r['id'],
PRIORITY_SYNC_ROUTERS_TASK,
router=r,
timestamp=timestamp)
self._queue.add(update)
self.fullsync = False
LOG.debug(_("_sync_routers_task successfully completed"))
except n_rpc.RPCException:
LOG.exception(_("Failed synchronizing routers due to RPC error"))
self.fullsync = True
except Exception:
LOG.exception(_("Failed synchronizing routers"))
self.fullsync = True
else:
# Resync is not necessary for the cleanup of stale namespaces
curr_router_ids = set([r['id'] for r in routers])
# Two kinds of stale routers: Routers for which info is cached in
# self.router_info and the others. First, handle the former.
for router_id in prev_router_ids - curr_router_ids:
update = RouterUpdate(router_id,
PRIORITY_SYNC_ROUTERS_TASK,
timestamp=timestamp,
action=DELETE_ROUTER)
self._queue.add(update)
# Next, one effort to clean out namespaces for which we don't have
# a record. (i.e. _clean_stale_namespaces=False after one pass)
if self._clean_stale_namespaces:
ids_to_keep = curr_router_ids | prev_router_ids
self._cleanup_namespaces(namespaces, ids_to_keep)
def after_start(self):
eventlet.spawn_n(self._process_routers_loop)
eventlet.spawn_n(self._sync_routers_task, self.context)
LOG.info(_("L3 agent started"))
def _update_routing_table(self, ri, operation, route):
cmd = ['ip', 'route', operation, 'to', route['destination'],
'via', route['nexthop']]
ns_name = ri.ns_name
if ri.router['distributed']:
ns_name = self.get_snat_ns_name(ri.router['id'])
ip_wrapper = ip_lib.IPWrapper(self.root_helper,
namespace=ns_name)
ip_wrapper.netns.execute(cmd, check_exit_code=False)
def get_subnet_by_nexthop(self, nexthop, subnets):
"""
if the nexthop is an ip of the subnets, return the subnet
else return None
:param nexthop: A Ipv4 Ip
:param subnets: The router interface subnets
:return: subnet
"""
nexthop_ip = netaddr.IPNetwork(nexthop)
for subnet in subnets:
ip = netaddr.IPNetwork(subnet)
if (int(ip.ip) & int(ip.netmask)) == (int(nexthop_ip.ip) & int(ip.netmask)):
return subnet
return None
def get_extra_route_map_by_subnet(self, ri, subnet):
"""
This will build a map , the key is the router sbunet, and the value is
{rouer_table(20-220), rule priority, counter}
if the subnet has been mapped, just return directly.
else mapping it.
:param subnet:
:return:
"""
if subnet in ri.extra_route_map.keys():
return ri.extra_route_map[subnet]
ri.extra_route_map[subnet] = {
'rt_table': ri.extra_route_rt_tables.pop(),
'priority': ri.extra_route_priorities.pop(),
'count': 0
}
return ri.extra_route_map[subnet]
def _add_routes(self, ri, route, subnet, rt_map):
"""
if the subnet is the first add in. config the rule
else just config the route table
:param ri:
:param route:
:param subnet:
:param rt_map:
:return:
"""
LOG.debug("Enter _add_route function the route is %s, the subnet is %s, the rt_map is %s"
% (route, subnet, rt_map))
rule_pr = None
route_table = None
if rt_map:
rule_pr = rt_map['priority']
route_table = rt_map['rt_table']
if rt_map['count'] == 1:
ip_rule = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name)
ip_rule.add_rule_from(subnet, route_table, rule_pr)
cmd = ['ip', 'route', 'replace', 'to', route['destination'],
'via', route['nexthop'], 'table', route_table]
ip_wrapper = ip_lib.IPWrapper(self.root_helper, namespace=ri.ns_name)
ip_wrapper.netns.execute(cmd, check_exit_code=False)
def _del_routes(self, ri, route, subnet, rt_map):
"""
remove the rule and the route entry
:param ri:
:param route:
:param subnet:
:param rt_map:
:return:
"""
LOG.debug("Enter _del_routes function: the route is %s, the subnet is %s, the rt_map is %s"
% (route, subnet, rt_map))
rule_pr = rt_map['priority']
route_table = rt_map['rt_table']
count = rt_map['count']
cmd = ['ip', 'route', 'delete', 'to', route['destination'],
'via', route['nexthop'], 'table', route_table]
try:
ip_wrapper = ip_lib.IPWrapper(self.root_helper, namespace=ri.ns_name)
ip_wrapper.netns.execute(cmd, check_exit_code=False)
        except Exception:
            LOG.debug("Executing ip route delete failed")
if count == 0:
ip_rule = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name)
ip_rule.delete_rule_priority(rule_pr)
ri.extra_route_priorities.add(rule_pr)
ri.extra_route_rt_tables.add(route_table)
def clear_vpn_routes(self, ri):
ip_rule_cmd = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name)
ip_wrapper = ip_lib.IPWrapper(self.root_helper, namespace=ri.ns_name)
rule_list = self.get_ip_rule_list(ri.ns_name)
for rule in rule_list:
priority = rule.get('priority', None)
if priority:
ip_rule_cmd.delete_rule_priority(priority)
tables = rule.get('tables', [])
for table in tables:
nexthop = table.get('nexthop', None)
destination = table.get('destination', None)
tb_num = table.get("tb_number", None)
cmd = ['ip', 'route', 'delete', 'to', destination,
'via', nexthop, 'table', tb_num]
ip_wrapper.netns.execute(cmd, check_exit_code=False)
LOG.debug("clear vpn routes success the rule list is %s" % rule_list)
def route_table_has_nexthop(self, ns_name, nexthop):
LOG.debug("Enter route_table_has_nexthop function the nexthop is %s" % nexthop)
rule_list = self.get_ip_rule_list(ns_name)
for rule in rule_list:
tables = rule.get('tables', [])
if not tables and self.get_subnet_by_nexthop(nexthop, [rule.get('cidr')]):
return rule.get('cidr')
for table in tables:
if nexthop == table.get('nexthop', None):
LOG.debug("Finished handle route_table_has_nexthop the nexthop is %s, \
and the sunbet is %s" %(nexthop, rule.get('cidr')))
return rule.get('cidr')
return None
def get_ip_routes(self, table, ns_name):
ip_wrapper = ip_lib.IPWrapper(self.root_helper, namespace=ns_name)
list_cmd = ['ip', 'route', 'show', 'table', table]
retval = []
route_list = ip_wrapper.netns.execute(list_cmd, check_exit_code=False).split('\n')
if route_list:
for route in route_list:
route_inst = str(route).strip().split()
if route_inst:
retval.append({
'destination': route_inst[0],
'action': route_inst[1],
'nexthop': route_inst[2],
'tb_number': table
})
return retval
def get_ip_rule_list(self, ns_name):
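        # Parse `ip rule list` output, keeping only rules in the VPN
        # priority range together with their route table entries.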
args = ['list']
ip_rule_cmd = ip_lib.IpRule(self.root_helper, namespace=ns_name)
ip_rule_list = ip_rule_cmd._as_root("", 'rule', tuple(args)).split('\n')
retval = []
for ip_rule in ip_rule_list:
if not ip_rule:
continue
inst = {}
parts = str(ip_rule).strip().split(':\t')
if parts:
inst['priority'] = int(parts[0])
if inst['priority'] in range(VPN_PR_START, VPN_PR_END + 1):
rules = parts[1].strip().split()
if rules:
inst['action'] = rules[0]
inst['cidr'] = rules[1]
inst['tables'] = self.get_ip_routes(rules[3], ns_name=ns_name)
retval.append(inst)
return retval
def routes_updated(self, ri):
LOG.debug("Enter routes updated function")
new_routes = ri.router['routes']
if ri.is_ha:
self._process_virtual_routes(ri, new_routes)
return
old_routes = ri.routes
if not old_routes:
try:
self.clear_vpn_routes(ri)
            except Exception as e:
                LOG.error("Clearing VPN routes failed: %s", e)
adds, removes = common_utils.diff_list_of_dict(old_routes,
new_routes)
interfaces = ri.router.get(l3_constants.INTERFACE_KEY, [])
subnets = [
interface['subnet']['cidr'] for interface in interfaces
]
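        # Routes whose nexthop sits on a router subnet are policy-routed
        # through a per-subnet table; all other routes use the main table.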
for route in adds:
LOG.debug(_("Added route entry is '%s'"), route)
nexthop = route['nexthop']
subnet = self.get_subnet_by_nexthop(nexthop, subnets)
            # If the nexthop is not on a router subnet, handle the route
            # normally via the main routing table.
if not subnet:
for del_route in removes:
if route['destination'] == del_route['destination']:
removes.remove(del_route)
self._update_routing_table(ri, 'replace', route)
else:
route_map = self.get_extra_route_map_by_subnet(ri, subnet)
route_map['count'] += 1
for del_route in removes:
if (subnet == self.get_subnet_by_nexthop(del_route['nexthop'], subnets)) and \
del_route['destination'] == route['destination']:
removes.remove(del_route)
self._add_routes(ri, route, subnet, route_map)
for route in removes:
LOG.debug(_("Removed route entry is '%s'"), route)
nexthop = route['nexthop']
subnet = self.get_subnet_by_nexthop(nexthop, subnets)
            # If the nexthop is not on a router subnet, handle the route
            # normally via the main routing table.
if not subnet:
sub = self.route_table_has_nexthop(ri.ns_name, nexthop)
if sub:
route_map = self.get_extra_route_map_by_subnet(ri, sub)
route_map['count'] -= 1
self._del_routes(ri, route, sub, route_map)
else:
self._update_routing_table(ri, 'delete', route)
else:
route_map = self.get_extra_route_map_by_subnet(ri, subnet)
route_map['count'] -= 1
self._del_routes(ri, route, subnet, route_map)
ri.routes = new_routes
def _allocate_pools_for_cidr(self, cidr):
"""Create IP allocation pools for a given cidr"""
pools = []
if not cidr:
return pools
net = netaddr.IPNetwork(cidr)
pools = [str(netaddr.IPAddress(x)) for x in range(net.first + 1, net.last)]
return pools
def provision_private_ip(self, elastic_ip):
        '''Provision a private IP for the given elastic IP.

        :param elastic_ip: the elastic IP to map to a private IP
        '''
private_ip = self.private_ip_map.get(elastic_ip)
if not private_ip:
if not self.available_private_ips:
LOG.error(_("No private ip available for elastic_ip=%s"), elastic_ip)
return
private_ip = self.available_private_ips.pop()
self.private_ip_map[elastic_ip] = private_ip
self.elastic_ip_map[private_ip] = elastic_ip
LOG.info(_("elastic_ip %s mapping to private ip %s."), elastic_ip, private_ip)
def reclaim_private_ip(self, elastic_ip):
        '''Reclaim the private IP mapped to the given elastic IP.

        :param elastic_ip: the elastic IP whose private IP is released
        '''
private_ip = self.private_ip_map.pop(elastic_ip, None)
if not private_ip:
LOG.info(_("elastic_ip %s not assign to private ip."), elastic_ip)
return
self.available_private_ips.add(private_ip)
self.elastic_ip_map.pop(private_ip, None)
LOG.info(_("elastic_ip %s cancel mapping to private ip %s."), elastic_ip, private_ip)
class L3NATAgentWithStateReport(L3NATAgent):
def __init__(self, host, conf=None):
super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
self.agent_state = {
'binary': 'neutron-l3-agent',
'host': host,
'topic': topics.L3_AGENT,
'configurations': {
'agent_mode': self.conf.agent_mode,
'use_namespaces': self.conf.use_namespaces,
'router_id': self.conf.router_id,
'handle_internal_only_routers':
self.conf.handle_internal_only_routers,
'external_network_bridge': self.conf.external_network_bridge,
'gateway_external_network_id':
self.conf.gateway_external_network_id,
'interface_driver': self.conf.interface_driver},
'start_flag': True,
'agent_type': l3_constants.AGENT_TYPE_L3}
report_interval = cfg.CONF.AGENT.report_interval
self.use_call = True
if report_interval:
self.heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
self.heartbeat.start(interval=report_interval)
def _report_state(self):
LOG.debug(_("Report state task started"))
num_ex_gw_ports = 0
num_interfaces = 0
num_floating_ips = 0
router_infos = self.router_info.values()
num_routers = len(router_infos)
for ri in router_infos:
ex_gw_port = self._get_ex_gw_port(ri)
if ex_gw_port:
num_ex_gw_ports += 1
num_interfaces += len(ri.router.get(l3_constants.INTERFACE_KEY,
[]))
num_floating_ips += len(ri.router.get(l3_constants.FLOATINGIP_KEY,
[]))
configurations = self.agent_state['configurations']
configurations['routers'] = num_routers
configurations['ex_gw_ports'] = num_ex_gw_ports
configurations['interfaces'] = num_interfaces
configurations['floating_ips'] = num_floating_ips
try:
result = self.state_rpc.report_state(self.context, self.agent_state,
self.use_call)
if self.agent_state.pop('start_flag', None) and result:
self.relay_cidr = result.get('isolate_relay_cidr', '')
LOG.debug("Get isolate_relay_cidr success is :%s" % self.relay_cidr)
self.use_call = False
LOG.debug(_("Report state task successfully completed"))
except AttributeError:
# This means the server does not support report_state
LOG.warn(_("Neutron server does not support state report."
" State report for this agent will be disabled."))
self.heartbeat.stop()
return
except Exception:
LOG.exception(_("Failed reporting state!"))
def agent_updated(self, context, payload):
"""Handle the agent_updated notification event."""
self.fullsync = True
LOG.info(_("agent_updated by server side %s!"), payload)
def _register_opts(conf):
conf.register_opts(L3NATAgent.OPTS)
conf.register_opts(L3NATAgent.PROVIDER_OPTS, 'provider_opts')
conf.register_opts(l3_ha_agent.OPTS)
config.register_interface_driver_opts_helper(conf)
config.register_use_namespaces_opts_helper(conf)
config.register_agent_state_opts_helper(conf)
config.register_root_helper(conf)
conf.register_opts(interface.OPTS)
conf.register_opts(external_process.OPTS)
def main(manager='neutron.agent.l3_agent.L3NATAgentWithStateReport'):
_register_opts(cfg.CONF)
common_config.init(sys.argv[1:])
config.setup_logging()
server = neutron_service.Service.create(
binary='neutron-l3-agent',
topic=topics.L3_AGENT,
report_interval=cfg.CONF.AGENT.report_interval,
manager=manager)
service.launch(server).wait()
|
xliux/chess
|
refs/heads/master
|
third_party/gtest-1.7.0/test/gtest_test_utils.py
|
1100
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except ImportError:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# The environment variable for specifying the path to the premature-exit file.
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets/unsets an environment variable to a given value."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable-msg=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
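# For example, a hypothetical invocation
#   ./foo_test.py --build_dir=/tmp/out --gtest_filter=Foo.*
# stores '/tmp/out' in _flag_map['build_dir'] and strips that argument from
# argv, while --gtest_filter is left alone for the test binary to consume.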
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
"""Returns a directory for temporary files."""
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary. Please make sure to provide path\n'
'to the binary via the --build_dir flag or the BUILD_DIR\n'
'environment variable.')
print >> sys.stderr, message
sys.exit(1)
return path
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
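# Example: GetExitStatus(os.system('exit 7')) returns 7 on both Windows and
# Unix; on Unix, -1 is returned instead if the command was killed by a signal.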
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
Returns:
An object that represents outcome of the executed process. It has the
following attributes:
terminated_by_signal True iff the child process has been terminated
by a signal.
signal Signal that terminated the child process.
exited True iff the child process exited normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
# The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
# communicate() returns a tuple whose first element is the child's
# output.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
def _ReplaceEnvDict(dest, src):
# Changes made by os.environ.clear are not inheritable by child
# processes until Python 2.6. To produce inheritable changes we have
# to delete environment items with the del statement.
for key in dest.keys():
del dest[key]
dest.update(src)
# When 'env' is not None, backup the environment variables and replace
# them with the passed 'env'. When 'env' is None, we simply use the
# current 'os.environ' for compatibility with the subprocess.Popen
# semantics used above.
if env is not None:
old_environ = os.environ.copy()
_ReplaceEnvDict(os.environ, env)
try:
if working_dir is not None:
os.chdir(working_dir)
if capture_stderr:
p = popen2.Popen4(command)
else:
p = popen2.Popen3(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Restore the old environment variables
# if they were replaced.
if env is not None:
_ReplaceEnvDict(os.environ, old_environ)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
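# Typical usage, as a sketch (the binary name here is hypothetical):
#   p = Subprocess([GetTestExecutablePath('foo_test'), '--gtest_list_tests'])
#   if p.exited and p.exit_code == 0:
#     print p.output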
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
# TODO(vladl@google.com): Move this into Subprocess when we implement
# passing environment into it as a parameter.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
|
hn5092/hadoop-common
|
refs/heads/HADOOP-3628
|
src/examples/python/WordCount.py
|
123
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from org.apache.hadoop.fs import Path
from org.apache.hadoop.io import *
from org.apache.hadoop.mapred import *
import sys
import getopt
class WordCountMap(Mapper, MapReduceBase):
one = IntWritable(1)
def map(self, key, value, output, reporter):
for w in value.toString().split():
output.collect(Text(w), self.one)
class Summer(Reducer, MapReduceBase):
def reduce(self, key, values, output, reporter):
sum = 0
while values.hasNext():
sum += values.next().get()
output.collect(key, IntWritable(sum))
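# For an input line "to be or not to be", WordCountMap emits (to,1) (be,1)
# (or,1) (not,1) (to,1) (be,1); Summer then folds the values per key,
# producing e.g. (be, 2) and (to, 2).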
def printUsage(code):
print "wordcount [-m <maps>] [-r <reduces>] <input> <output>"
sys.exit(code)
def main(args):
conf = JobConf(WordCountMap)
conf.setJobName("wordcount")
conf.setOutputKeyClass(Text)
conf.setOutputValueClass(IntWritable)
conf.setMapperClass(WordCountMap)
conf.setCombinerClass(Summer)
conf.setReducerClass(Summer)
try:
flags, other_args = getopt.getopt(args[1:], "m:r:")
except getopt.GetoptError:
printUsage(1)
if len(other_args) != 2:
printUsage(1)
for f,v in flags:
if f == "-m":
conf.setNumMapTasks(int(v))
elif f == "-r":
conf.setNumReduceTasks(int(v))
conf.setInputPath(Path(other_args[0]))
conf.setOutputPath(Path(other_args[1]))
JobClient.runJob(conf)
if __name__ == "__main__":
main(sys.argv)
|
YingHsuan/termite_data_server
|
refs/heads/master
|
web2py/applications-original/admin/languages/pt.py
|
19
|
# coding: utf8
{
'!langcode!': 'pt',
'!langname!': 'Português',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" é uma expressão opcional como "campo1=\'novo_valor\'". Não é permitido atualizar ou apagar resultados de um JOIN',
'%Y-%m-%d': '%d/%m/%Y',
'%Y-%m-%d %H:%M:%S': '%d/%m/%Y %H:%M:%S',
'%s %%{row} deleted': '%s registros apagados',
'%s %%{row} updated': '%s registros atualizados',
'(requires internet access)': '(requer acesso a internet)',
'(something like "it-it")': '(algo como "it-it")',
'@markmin\x01Searching: **%s** %%{file}': 'Searching: **%s** files',
'A new version of web2py is available': 'Está disponível uma nova versão do web2py',
'A new version of web2py is available: %s': 'Está disponível uma nova versão do web2py: %s',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'ATENÇÃO: o login requer uma conexão segura (HTTPS) ou execução a partir de localhost.',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'ATENÇÃO: OS TESTES NÃO SÃO THREAD SAFE, NÃO EFETUE MÚLTIPLOS TESTES AO MESMO TEMPO.',
'ATTENTION: you cannot edit the running application!': 'ATENÇÃO: Não pode modificar a aplicação em execução!',
'About': 'sobre',
'About application': 'Sobre a aplicação',
'Additional code for your application': 'Additional code for your application',
'Admin is disabled because insecure channel': 'Admin desabilitado pois o canal não é seguro',
'Admin is disabled because unsecure channel': 'Admin desabilitado pois o canal não é seguro',
'Admin language': 'Linguagem do Admin',
'Administrator Password:': 'Senha de administrador:',
'Application name:': 'Nome da aplicação:',
'Are you sure you want to delete file "%s"?': 'Tem certeza que deseja apagar o arquivo "%s"?',
'Are you sure you want to delete plugin "%s"?': 'Tem certeza que deseja apagar o plugin "%s"?',
'Are you sure you want to uninstall application "%s"': 'Tem certeza que deseja desinstalar a aplicação "%s"?',
'Are you sure you want to uninstall application "%s"?': 'Tem certeza que deseja desinstalar a aplicação "%s"?',
'Are you sure you want to upgrade web2py now?': 'Tem certeza que deseja atualizar o web2py agora?',
'Available databases and tables': 'Bancos de dados e tabelas disponíveis',
'Cannot be empty': 'Não pode ser vazio',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': 'Não é possível compilar: existem erros em sua aplicação. Depure, corrija os erros e tente novamente',
'Cannot compile: there are errors in your app:': 'Não é possível compilar: Existem erros em sua aplicação',
'Change Password': 'Trocar Senha',
'Change admin password': 'mudar senha de administrador',
'Check for upgrades': 'checar por atualizações',
'Check to delete': 'Marque para apagar',
'Checking for upgrades...': 'Buscando atualizações...',
'Clean': 'limpar',
'Click row to expand traceback': 'Clique em uma coluna para expandir o log do erro',
'Client IP': 'IP do cliente',
'Compile': 'compilar',
'Controllers': 'Controladores',
'Count': 'Contagem',
'Create': 'criar',
'Create new application using the Wizard': 'Criar nova aplicação utilizando o assistente',
'Create new simple application': 'Crie uma nova aplicação',
'Current request': 'Requisição atual',
'Current response': 'Resposta atual',
'Current session': 'Sessão atual',
'DESIGN': 'Projeto',
'Date and Time': 'Data e Hora',
'Delete': 'Apague',
'Delete:': 'Apague:',
'Deploy': 'publicar',
'Deploy on Google App Engine': 'Publicar no Google App Engine',
'Description': 'Descrição',
'Design for': 'Projeto de',
'Detailed traceback description': 'Detailed traceback description',
'E-mail': 'E-mail',
'EDIT': 'EDITAR',
'Edit': 'editar',
'Edit Profile': 'Editar Perfil',
'Edit application': 'Editar aplicação',
'Edit current record': 'Editar o registro atual',
'Editing Language file': 'Editando arquivo de linguagem',
'Editing file': 'Editando arquivo',
'Editing file "%s"': 'Editando arquivo "%s"',
'Enterprise Web Framework': 'Framework web empresarial',
'Error': 'Erro',
'Error logs for "%(app)s"': 'Logs de erro para "%(app)s"',
'Error snapshot': 'Error snapshot',
'Error ticket': 'Error ticket',
'Errors': 'erros',
'Exception instance attributes': 'Atributos da instância de exceção',
'File': 'Arquivo',
'First name': 'Nome',
'Frames': 'Frames',
'Functions with no doctests will result in [passed] tests.': 'Funções sem doctests resultarão em testes [aceitos].',
'Group ID': 'ID do Grupo',
'Hello World': 'Olá Mundo',
'Help': 'ajuda',
'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\nA green title indicates that all tests (if defined) passed. In this case test results are not shown.': 'Se o relatório acima contém um número de ticket, isso indica uma falha na execução do controlador, antes de tentar executar os doctests. Isto acontece geralmente por erro de indentação ou erro fora do código da função.\nO título em verde indica que os testes (se definidos) passaram. Neste caso os resultados dos testes não são mostrados.',
'Import/Export': 'Importar/Exportar',
'Install': 'instalar',
'Installed applications': 'Aplicações instaladas',
'Internal State': 'Estado Interno',
'Invalid Query': 'Consulta inválida',
'Invalid action': 'Ação inválida',
'Invalid email': 'E-mail inválido',
'Language files (static strings) updated': 'Arquivos de linguagem (textos estáticos) atualizados',
'Languages': 'Linguagens',
'Last name': 'Sobrenome',
'Last saved on:': 'Salvo em:',
'License for': 'Licença para',
'Login': 'Entrar',
'Login to the Administrative Interface': 'Entrar na interface administrativa',
'Logout': 'finalizar sessão',
'Lost Password': 'Senha perdida',
'Models': 'Modelos',
'Modules': 'Módulos',
'NO': 'NÃO',
'Name': 'Nome',
'New Record': 'Novo registro',
'New application wizard': 'Assistente para novas aplicações ',
'New simple application': 'Nova aplicação básica',
'No databases in this application': 'Não existem bancos de dados nesta aplicação',
'Origin': 'Origem',
'Original/Translation': 'Original/Tradução',
'Overwrite installed app': 'sobrescrever aplicação instalada',
'PAM authenticated user, cannot change password here': 'usuário autenticado por PAM, não pode alterar a senha por aqui',
'Pack all': 'criar pacote',
'Pack compiled': 'criar pacote compilado',
'Password': 'Senha',
'Peeking at file': 'Visualizando arquivo',
'Plugin "%s" in application': 'Plugin "%s" na aplicação',
'Plugins': 'Plugins',
'Powered by': 'Este site utiliza',
'Query:': 'Consulta:',
'Record ID': 'ID do Registro',
'Register': 'Registrar-se',
'Registration key': 'Chave de registro',
'Remove compiled': 'eliminar compilados',
'Resolve Conflict file': 'Arquivo de resolução de conflito',
'Role': 'Papel',
'Rows in table': 'Registros na tabela',
'Rows selected': 'Registros selecionados',
'Saved file hash:': 'Hash do arquivo salvo:',
'Site': 'site',
'Start wizard': 'iniciar assistente',
'Static files': 'Arquivos estáticos',
'Sure you want to delete this object?': 'Tem certeza que deseja apagar este objeto?',
'TM': 'MR',
'Table name': 'Nome da tabela',
'Testing application': 'Testando a aplicação',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'A "consulta" é uma condição como "db.tabela.campo1==\'valor\'". Algo como "db.tabela1.campo1==db.tabela2.campo2" resulta em um JOIN SQL.',
'The application logic, each URL path is mapped in one exposed function in the controller': 'The application logic, each URL path is mapped in one exposed function in the controller',
'The data representation, define database tables and sets': 'The data representation, define database tables and sets',
'The presentations layer, views are also known as templates': 'The presentations layer, views are also known as templates',
'There are no controllers': 'Não existem controladores',
'There are no models': 'Não existem modelos',
'There are no modules': 'Não existem módulos',
'There are no plugins': 'There are no plugins',
'There are no static files': 'Não existem arquivos estáticos',
'There are no translators, only default language is supported': 'Não há traduções, somente a linguagem padrão é suportada',
'There are no views': 'Não existem visões',
'These files are served without processing, your images go here': 'These files are served without processing, your images go here',
'This is the %(filename)s template': 'Este é o template %(filename)s',
'Ticket': 'Ticket',
'Ticket ID': 'Ticket ID',
'Timestamp': 'Data Atual',
'To create a plugin, name a file/folder plugin_[name]': 'Para criar um plugin, nomeio um arquivo/pasta como plugin_[nome]',
'Traceback': 'Traceback',
'Translation strings for the application': 'Translation strings for the application',
'Unable to check for upgrades': 'Não é possível checar as atualizações',
'Unable to download': 'Não é possível efetuar o download',
'Unable to download app': 'Não é possível baixar a aplicação',
'Unable to download app because:': 'Não é possível baixar a aplicação porque:',
'Unable to download because': 'Não é possível baixar porque',
'Uninstall': 'desinstalar',
'Update:': 'Atualizar:',
'Upload & install packed application': 'Faça upload e instale uma aplicação empacotada',
'Upload a package:': 'Faça upload de um pacote:',
'Upload existing application': 'Faça upload de uma aplicação existente',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) para AND, (...)|(...) para OR, y ~(...) para NOT, para criar consultas mais complexas.',
'Use an url:': 'Use uma url:',
'User ID': 'ID do Usuario',
'Version': 'Versão',
'Views': 'Visões',
'Welcome to web2py': 'Bem-vindo ao web2py',
'YES': 'SIM',
'additional code for your application': 'código adicional para sua aplicação',
'admin disabled because no admin password': 'admin desabilitado por falta de senha definida',
'admin disabled because not supported on google app engine': 'admin desabilitado, não é suportado no GAE',
'admin disabled because unable to access password file': 'admin desabilitado, não foi possível ler o arquivo de senha',
'administrative interface': 'interface administrativa',
'and rename it (required):': 'e renomeie (requerido):',
'and rename it:': ' e renomeie:',
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'admin desabilitado, canal inseguro',
'application "%s" uninstalled': 'aplicação "%s" desinstalada',
'application compiled': 'aplicação compilada',
'application is compiled and cannot be designed': 'A aplicação está compilada e não pode ser modificada',
'arguments': 'argumentos',
'back': 'voltar',
'browse': 'buscar',
'cache': 'cache',
'cache, errors and sessions cleaned': 'cache, erros e sessões eliminadas',
'cannot create file': 'Não é possível criar o arquivo',
'cannot upload file "%(filename)s"': 'não é possível fazer upload do arquivo "%(filename)s"',
'check all': 'marcar todos',
'click here for online examples': 'clique para ver exemplos online',
'click here for the administrative interface': 'Clique aqui para acessar a interface administrativa',
'click to check for upgrades': 'clique aqui para checar por atualizações',
'click to open': 'clique para abrir',
'code': 'código',
'collapse/expand all': 'collapse/expand all',
'commit (mercurial)': 'commit (mercurial)',
'compiled application removed': 'aplicação compilada removida',
'controllers': 'controladores',
'create file with filename:': 'criar um arquivo com o nome:',
'create new application:': 'nome da nova aplicação:',
'created by': 'criado por',
'crontab': 'crontab',
'currently running': 'Executando',
'currently saved or': 'Atualmente salvo ou',
'customize me!': 'Modifique-me',
'data uploaded': 'Dados enviados',
'database': 'banco de dados',
'database %s select': 'Seleção no banco de dados %s',
'database administration': 'administração de banco de dados',
'db': 'db',
'defines tables': 'define as tabelas',
'delete': 'apagar',
'delete all checked': 'apagar marcados',
'delete plugin': 'apagar plugin',
'design': 'modificar',
'direction: ltr': 'direção: ltr',
'done!': 'feito!',
'download layouts': 'download layouts',
'download plugins': 'download plugins',
'edit controller': 'editar controlador',
'edit views:': 'editar visões:',
'export as csv file': 'exportar como arquivo CSV',
'exposes': 'expõe',
'extends': 'estende',
'failed to reload module': 'Falha ao recarregar o módulo',
'failed to reload module because:': 'falha ao recarregar o módulo por:',
'file "%(filename)s" created': 'arquivo "%(filename)s" criado',
'file "%(filename)s" deleted': 'arquivo "%(filename)s" apagado',
'file "%(filename)s" uploaded': 'arquivo "%(filename)s" enviado',
'file "%(filename)s" was not deleted': 'arquivo "%(filename)s" não foi apagado',
'file "%s" of %s restored': 'arquivo "%s" de %s restaurado',
'file changed on disk': 'arquivo modificado no disco',
'file does not exist': 'arquivo não existe',
'file saved on %(time)s': 'arquivo salvo em %(time)s',
'file saved on %s': 'arquivo salvo em %s',
'filter': 'filter',
'htmledit': 'htmledit',
'includes': 'inclui',
'insert new': 'inserir novo',
'insert new %s': 'inserir novo %s',
'inspect attributes': 'inspect attributes',
'internal error': 'erro interno',
'invalid password': 'senha inválida',
'invalid request': 'solicitação inválida',
'invalid ticket': 'ticket inválido',
'language file "%(filename)s" created/updated': 'arquivo de linguagem "%(filename)s" criado/atualizado',
'languages': 'linguagens',
'languages updated': 'linguagens atualizadas',
'loading...': 'carregando...',
'locals': 'locals',
'login': 'início de sessão',
'manage': 'gerenciar',
'merge': 'juntar',
'models': 'modelos',
'modules': 'módulos',
'new application "%s" created': 'nova aplicação "%s" criada',
'new plugin installed': 'novo plugin instalado',
'new record inserted': 'novo registro inserido',
'next 100 rows': 'próximos 100 registros',
'no match': 'não encontrado',
'or import from csv file': 'ou importar de um arquivo CSV',
'or provide app url:': 'ou forneça a url de uma aplicação:',
'or provide application url:': 'ou forneça a url de uma aplicação:',
'pack plugin': 'empacotar plugin',
'password changed': 'senha alterada',
'plugin "%(plugin)s" deleted': 'plugin "%(plugin)s" eliminado',
'plugins': 'plugins',
'previous 100 rows': '100 registros anteriores',
'record': 'registro',
'record does not exist': 'o registro não existe',
'record id': 'id do registro',
'request': 'request',
'response': 'response',
'restore': 'restaurar',
'revert': 'reverter',
'save': 'salvar',
'selected': 'selecionado(s)',
'session': 'session',
'session expired': 'sessão expirada',
'shell': 'Terminal',
'some files could not be removed': 'alguns arquivos não puderam ser removidos',
'state': 'estado',
'static': 'estáticos',
'submit': 'enviar',
'table': 'tabela',
'test': 'testar',
'the application logic, each URL path is mapped in one exposed function in the controller': 'A lógica da aplicação, cada URL é mapeada para uma função exposta pelo controlador',
'the data representation, define database tables and sets': 'A representação dos dados, define tabelas e estruturas de dados',
'the presentations layer, views are also known as templates': 'A camada de apresentação, As visões também são chamadas de templates',
'these files are served without processing, your images go here': 'Estes arquivos são servidos sem processamento, suas imagens ficam aqui',
'to previous version.': 'para a versão anterior.',
'translation strings for the application': 'textos traduzidos para a aplicação',
'try': 'tente',
'try something like': 'tente algo como',
'unable to create application "%s"': 'não é possível criar a aplicação "%s"',
'unable to delete file "%(filename)s"': 'não é possível criar o arquico "%(filename)s"',
'unable to delete file plugin "%(plugin)s"': 'não é possível criar o plugin "%(plugin)s"',
'unable to parse csv file': 'não é possível analisar o arquivo CSV',
'unable to uninstall "%s"': 'não é possível instalar "%s"',
'unable to upgrade because "%s"': 'não é possível atualizar porque "%s"',
'uncheck all': 'desmarcar todos',
'update': 'atualizar',
'update all languages': 'atualizar todas as linguagens',
'upgrade web2py now': 'atualize o web2py agora',
'upload': 'upload',
'upload application:': 'Fazer upload de uma aplicação:',
'upload file:': 'Enviar arquivo:',
'upload plugin file:': 'Enviar arquivo de plugin:',
'variables': 'variáveis',
'versioning': 'versionamento',
'view': 'visão',
'views': 'visões',
'web2py Recent Tweets': 'Tweets Recentes de @web2py',
'web2py is up to date': 'web2py está atualizado',
'web2py upgraded; please restart it': 'web2py atualizado; favor reiniciar',
}
|
chauhanhardik/populo_2
|
refs/heads/master
|
openedx/core/djangoapps/credit/serializers.py
|
37
|
""" Credit API Serializers """
from rest_framework import serializers
from openedx.core.djangoapps.credit.models import CreditCourse
class CreditCourseSerializer(serializers.ModelSerializer):
""" CreditCourse Serializer """
class Meta(object): # pylint: disable=missing-docstring
model = CreditCourse
exclude = ('id',)
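# A minimal usage sketch (assumes an existing CreditCourse instance `course`):
#   serializer = CreditCourseSerializer(course)
#   serializer.data  # -> dict of all CreditCourse fields except the excluded 'id'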
|
aksaxena80/test
|
refs/heads/master
|
tensorflow/python/kernel_tests/reshape_op_test.py
|
5
|
"""Tests for tensorflow.ops.reshape_op."""
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
from tensorflow.python.kernel_tests import gradient_checker as gc
class ReshapeTest(tf.test.TestCase):
def _testReshape(self, x, y, use_gpu=False):
with self.test_session(use_gpu=use_gpu):
np_ans = x.reshape(y)
tf_ans = tf.reshape(x, y)
out = tf_ans.eval()
self.assertEqual(tf_ans.get_shape(), out.shape)
self.assertShapeEqual(np_ans, tf_ans)
def _testBothReshape(self, x, y):
self._testReshape(x, y, False)
self._testReshape(x, y, True)
def testFloatBasic(self):
x = np.arange(1., 7.).reshape([1, 6]).astype(np.float32)
self._testBothReshape(x, [2, 3])
def testDoubleBasic(self):
x = np.arange(1., 7.).reshape([1, 6]).astype(np.float64)
self._testBothReshape(x, [2, 3])
def testInt32Basic(self):
x = np.arange(1., 7.).reshape([1, 6]).astype(np.int32)
self._testBothReshape(x, [2, 3])
def testSComplexBasic(self):
x = np.arange(1., 7.).reshape([1, 6]).astype(np.complex64)
self._testBothReshape(x, [2, 3])
def testFloatReshapeThreeDimensions(self):
x = np.arange(1., 28.).reshape([1, 27]).astype(np.float32)
self._testBothReshape(x, [3, 3, 3])
def testFloatUnspecifiedDimOnly(self):
x = np.arange(1., 7.).reshape([6]).astype(np.float32)
self._testBothReshape(x, [-1])
def testFloatUnspecifiedDimBegin(self):
x = np.arange(1., 7.).reshape([6]).astype(np.float32)
self._testBothReshape(x, [-1, 2])
def testFloatUnspecifiedDimEnd(self):
x = np.arange(1., 7.).reshape([6]).astype(np.float32)
self._testBothReshape(x, [3, -1])
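# With 6 elements, the single -1 dimension above is inferred from the other
# sizes: [-1] -> (6,), [-1, 2] -> (3, 2) and [3, -1] -> (3, 2).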
# TODO(vrv): Add tests for failure conditions once python test_util
# reports errors.
def testFloatReshapeGradThreeDimensions(self):
x = np.arange(1., 25.).reshape([1, 24]).astype(np.float32)
s = list(np.shape(x))
with self.test_session():
input_tensor = tf.constant(x, shape=[2, 3, 4])
reshape_out = tf.reshape(input_tensor, [1, 8, 3])
err = gc.ComputeGradientError(input_tensor, s,
reshape_out, s, x_init_value=x)
print("Reshape gradient error = " % err)
self.assertLess(err, 1e-3)
def testFloatEmpty(self):
x = np.empty((0, 0, 0, 0), dtype=np.float32)
self._testBothReshape(x, [1, 2, 3, 0])
self._testBothReshape(x, [1, 0, 0, 4])
self._testBothReshape(x, [0, 0, 0, 0])
self._testBothReshape(x, [1, 2, 0])
self._testBothReshape(x, [0, 0, 0])
self._testBothReshape(x, [1, -1, 5])
def testErrors(self):
x = tf.constant(0.0, shape=[1, 0, 3])
with self.assertRaisesRegexp(
ValueError, "cannot infer the missing input size"):
tf.reshape(x, [0, -1, 5])
y = tf.constant(0.0, shape=[23, 29, 31])
with self.assertRaisesRegexp(ValueError, "isn't divisible by 17"):
tf.reshape(y, [17, -1])
def testPartialShapes(self):
x = tf.placeholder(tf.float32)
# Unknown input shape, partial new shape.
y = tf.reshape(x, [1, 1, -1, 1])
self.assertEqual([1, 1, None, 1], y.get_shape().as_list())
# Unknown input shape, unknown new shape.
y = tf.reshape(x, tf.placeholder(tf.int32))
self.assertEqual(None, y.get_shape().ndims)
# Unknown input shape, known rank for new shape.
y = tf.reshape(x, tf.placeholder(tf.int32, shape=(3,)))
self.assertEqual([None, None, None], y.get_shape().as_list())
if __name__ == "__main__":
tf.test.main()
|
ppokrovsky/pyvdp
|
refs/heads/master
|
pyvdp/pav/__init__.py
|
1
|
from .models import CardValidationModel
|
elipapa/kubernetes
|
refs/heads/master
|
translations/extract.py
|
23
|
#!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract strings from command files and externalize into translation files.
Expects to be run from the root directory of the repository.
Usage:
extract.py pkg/kubectl/cmd/apply.go
"""
import fileinput
import sys
import re
class MatchHandler(object):
""" Simple holder for a regular expression and a function
to run if that regular expression matches a line.
The function should expect (re.match, file, linenumber) as parameters
"""
def __init__(self, regex, replace_fn):
self.regex = re.compile(regex)
self.replace_fn = replace_fn
# global holding all strings discovered
STRINGS = []
def short_replace(match, file, line_number):
"""Replace a Short: ... cobra command description with an internationalization
"""
sys.stdout.write('{}i18n.T({}),\n'.format(match.group(1), match.group(2)))
STRINGS.append((match.group(2), file, line_number))
SHORT_MATCH = MatchHandler(r'(\s+Short:\s+)("[^"]+"),', short_replace)
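# For example, a (hypothetical) line such as
#   Short:   "Apply a configuration to a resource",
# is rewritten in place to
#   Short:   i18n.T("Apply a configuration to a resource"),
# and the quoted string is appended to STRINGS for the .po files below.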
def import_replace(match, file, line_number):
"""Add an extra import for the i18n library.
Doesn't try to be smart and detect if it's already present, assumes a
gofmt round will fix things.
"""
sys.stdout.write('{}\n"k8s.io/kubernetes/pkg/util/i18n"\n'.format(match.group(1)))
IMPORT_MATCH = MatchHandler('(.*"k8s.io/kubernetes/pkg/kubectl/cmd/util")', import_replace)
def replace(filename, matchers):
"""Given a file and a set of matchers, run those matchers
across the file and replace it with the results.
"""
# Run all the matchers
line_number = 0
for line in fileinput.input(filename, inplace=True):
line_number += 1
matched = False
for matcher in matchers:
match = matcher.regex.match(line)
if match:
matcher.replace_fn(match, filename, line_number)
matched = True
break
if not matched:
sys.stdout.write(line)
sys.stdout.flush()
# gofmt the file again
from subprocess import call
call(["gofmt", "-s", "-w", filename])
# update the translation files
translation_files = [
"translations/kubectl/default/LC_MESSAGES/k8s.po",
"translations/kubectl/en_US/LC_MESSAGES/k8s.po",
]
for translation_filename in translation_files:
with open(translation_filename, "a") as tfile:
for translation_string in STRINGS:
msg_string = translation_string[0]
tfile.write('\n')
tfile.write('# https://github.com/kubernetes/kubernetes/blob/master/{}#L{}\n'.format(translation_string[1], translation_string[2]))
tfile.write('msgctxt {}\n'.format(msg_string))
tfile.write('msgid {}\n'.format(msg_string))
tfile.write('msgstr {}\n'.format(msg_string))
replace(sys.argv[1], [SHORT_MATCH, IMPORT_MATCH])
|
bulislaw/mbed-os
|
refs/heads/master
|
tools/test/config_test/test02/test_data.py
|
38
|
# This is similar to test1, but this time B2 also inherits from B1, which allows it to override its config data.
# B2 also overrides base1_2, like D1.
# The order of inheritance in F is also reversed ([D1, B2] instead of [B2, D1])
# Since the last override of base1_2 in inheritance order is in B2, base1_2 must now
# have the value that was overridden in B2, not in D1.
# This test also shows that multiple inheritance works for a simple diamond shaped inheritance pattern
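# Sketch of the inheritance graph described above (diamond shape):
#
#        B1
#       /  \
#     D1    B2
#       \  /
#        F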
expected_results = {
"f": {
"desc": "test multiple target inheritance (diamond shape)",
"target.base1_1": "v_base1_1_f",
"target.base1_2": "v_base1_2_b2",
"target.base1_3": "v_base1_3_b1",
"target.derived1": "v_derived1_d1",
"target.derived2": "v_derived2_f",
"target.base2_1": "v_base2_1_f",
"target.base2_2": "v_base2_2_b2",
"target.f1_1": "v_f1_1_f_override",
"target.f1_2": "v_f1_2_f"
},
"b2": {
"desc": "another single inheritance test",
"target.base1_1": "v_base1_1_b1",
"target.base1_2": "v_base1_2_b2",
"target.base1_3": "v_base1_3_b1",
"target.base2_1": "v_base2_1_b2",
"target.base2_2": "v_base2_2_b2"
}
}
|
jjmachan/activityPointsApp
|
refs/heads/master
|
activitypoints/lib/python3.5/site-packages/pip/_vendor/requests/certs.py
|
516
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
requests.certs
~~~~~~~~~~~~~~
This module returns the preferred default CA certificate bundle.
If you are packaging Requests, e.g., for a Linux distribution or a managed
environment, you can change the definition of where() to return a separately
packaged CA bundle.
"""
import os.path
try:
from certifi import where
except ImportError:
def where():
"""Return the preferred certificate bundle."""
# vendored bundle inside Requests
return os.path.join(os.path.dirname(__file__), 'cacert.pem')
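# Either way, callers obtain the bundle path the same way, e.g.:
#   from pip._vendor.requests.certs import where
#   print(where())  # certifi's bundle if available, else the vendored cacert.pem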
if __name__ == '__main__':
print(where())
|
AndrewPeelMV/Blender2.78c
|
refs/heads/master
|
2.78/scripts/startup/bl_ui/properties_material.py
|
4
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import Menu, Panel, UIList
from rna_prop_ui import PropertyPanel
from bpy.app.translations import pgettext_iface as iface_
def active_node_mat(mat):
# TODO, 2.4x has a pipeline section, for 2.5 we need to communicate
# which settings from node-materials are used
if mat is not None:
mat_node = mat.active_node_material
if mat_node:
return mat_node
else:
return mat
return None
def check_material(mat):
if mat is not None:
if mat.use_nodes:
if mat.active_node_material is not None:
return True
return False
return True
return False
def simple_material(mat):
if (mat is not None) and (not mat.use_nodes):
return True
return False
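# Taken together: simple_material() is True for a plain (non-node) material,
# active_node_mat() resolves a node material to its active node material, and
# check_material() is True whenever there is something usable to edit.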
class MATERIAL_MT_sss_presets(Menu):
bl_label = "SSS Presets"
preset_subdir = "sss"
preset_operator = "script.execute_preset"
draw = Menu.draw_preset
class MATERIAL_MT_specials(Menu):
bl_label = "Material Specials"
def draw(self, context):
layout = self.layout
layout.operator("object.material_slot_copy", icon='COPY_ID')
layout.operator("material.copy", icon='COPYDOWN')
layout.operator("material.paste", icon='PASTEDOWN')
class MATERIAL_UL_matslots(UIList):
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
# assert(isinstance(item, bpy.types.MaterialSlot))
# ob = data
slot = item
ma = slot.material
if self.layout_type in {'DEFAULT', 'COMPACT'}:
if ma:
layout.prop(ma, "name", text="", emboss=False, icon_value=icon)
else:
layout.label(text="", icon_value=icon)
if ma and not context.scene.render.use_shading_nodes:
manode = ma.active_node_material
if manode:
layout.label(text=iface_("Node %s") % manode.name, translate=False, icon_value=layout.icon(manode))
elif ma.use_nodes:
layout.label(text="Node <none>")
elif self.layout_type == 'GRID':
layout.alignment = 'CENTER'
layout.label(text="", icon_value=icon)
class MaterialButtonsPanel:
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "material"
# COMPAT_ENGINES must be defined in each subclass, external engines can add themselves here
@classmethod
def poll(cls, context):
return context.material and (context.scene.render.engine in cls.COMPAT_ENGINES)
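# Per the note above, a hypothetical external engine could opt panels in with:
#   MATERIAL_PT_diffuse.COMPAT_ENGINES.add('MY_EXTERNAL_ENGINE')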
class MATERIAL_PT_context_material(MaterialButtonsPanel, Panel):
bl_label = ""
bl_options = {'HIDE_HEADER'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
@classmethod
def poll(cls, context):
# An exception, don't call the parent poll func because
# this manages materials for all engine types
engine = context.scene.render.engine
return (context.material or context.object) and (engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
mat = context.material
ob = context.object
slot = context.material_slot
space = context.space_data
if ob:
is_sortable = (len(ob.material_slots) > 1)
rows = 1
if is_sortable:
rows = 4
row = layout.row()
row.template_list("MATERIAL_UL_matslots", "", ob, "material_slots", ob, "active_material_index", rows=rows)
col = row.column(align=True)
col.operator("object.material_slot_add", icon='ZOOMIN', text="")
col.operator("object.material_slot_remove", icon='ZOOMOUT', text="")
col.menu("MATERIAL_MT_specials", icon='DOWNARROW_HLT', text="")
if is_sortable:
col.separator()
col.operator("object.material_slot_move", icon='TRIA_UP', text="").direction = 'UP'
col.operator("object.material_slot_move", icon='TRIA_DOWN', text="").direction = 'DOWN'
if ob.mode == 'EDIT':
row = layout.row(align=True)
row.operator("object.material_slot_assign", text="Assign")
row.operator("object.material_slot_select", text="Select")
row.operator("object.material_slot_deselect", text="Deselect")
split = layout.split(percentage=0.65)
if ob:
split.template_ID(ob, "active_material", new="material.new")
row = split.row()
if mat:
row.prop(mat, "use_nodes", icon='NODETREE', text="")
if slot:
row.prop(slot, "link", text="")
else:
row.label()
elif mat:
split.template_ID(space, "pin_id")
split.separator()
if mat:
layout.prop(mat, "type", expand=True)
if mat.use_nodes:
row = layout.row()
row.label(text="", icon='NODETREE')
if mat.active_node_material:
row.prop(mat.active_node_material, "name", text="")
else:
row.label(text="No material node selected")
class MATERIAL_PT_preview(MaterialButtonsPanel, Panel):
bl_label = "Preview"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
def draw(self, context):
self.layout.template_preview(context.material)
class MATERIAL_PT_pipeline(MaterialButtonsPanel, Panel):
bl_label = "Render Pipeline Options"
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
@classmethod
def poll(cls, context):
mat = context.material
engine = context.scene.render.engine
return mat and (not simple_material(mat)) and (mat.type in {'SURFACE', 'WIRE', 'VOLUME'}) and (engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
mat = context.material
mat_type = mat.type in {'SURFACE', 'WIRE'}
row = layout.row()
row.active = mat_type
row.prop(mat, "use_transparency")
sub = row.column()
sub.prop(mat, "offset_z")
sub.active = mat_type and mat.use_transparency and mat.transparency_method == 'Z_TRANSPARENCY'
row = layout.row()
row.active = mat.use_transparency or not mat_type
row.prop(mat, "transparency_method", expand=True)
layout.separator()
split = layout.split()
col = split.column()
col.prop(mat, "use_raytrace")
col.prop(mat, "use_full_oversampling")
sub = col.column()
sub.active = mat_type
sub.prop(mat, "use_sky")
sub.prop(mat, "invert_z")
col.prop(mat, "pass_index")
col = split.column()
col.active = mat_type
col.prop(mat, "use_cast_shadows", text="Cast")
col.prop(mat, "use_cast_shadows_only", text="Cast Only")
col.prop(mat, "use_cast_buffer_shadows")
sub = col.column()
sub.active = mat.use_cast_buffer_shadows
sub.prop(mat, "shadow_cast_alpha", text="Casting Alpha")
col.prop(mat, "use_cast_approximate")
class MATERIAL_PT_diffuse(MaterialButtonsPanel, Panel):
bl_label = "Diffuse"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
@classmethod
def poll(cls, context):
mat = context.material
engine = context.scene.render.engine
return check_material(mat) and (mat.type in {'SURFACE', 'WIRE'}) and (engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
mat = active_node_mat(context.material)
split = layout.split()
col = split.column()
col.prop(mat, "diffuse_color", text="")
sub = col.column()
sub.active = (not mat.use_shadeless)
sub.prop(mat, "diffuse_intensity", text="Intensity")
col = split.column()
col.active = (not mat.use_shadeless)
col.prop(mat, "diffuse_shader", text="")
col.prop(mat, "use_diffuse_ramp", text="Ramp")
col = layout.column()
col.active = (not mat.use_shadeless)
if mat.diffuse_shader == 'OREN_NAYAR':
col.prop(mat, "roughness")
elif mat.diffuse_shader == 'MINNAERT':
col.prop(mat, "darkness")
elif mat.diffuse_shader == 'TOON':
row = col.row()
row.prop(mat, "diffuse_toon_size", text="Size")
row.prop(mat, "diffuse_toon_smooth", text="Smooth")
elif mat.diffuse_shader == 'FRESNEL':
row = col.row()
row.prop(mat, "diffuse_fresnel", text="Fresnel")
row.prop(mat, "diffuse_fresnel_factor", text="Factor")
if mat.use_diffuse_ramp:
col = layout.column()
col.active = (not mat.use_shadeless)
col.separator()
col.template_color_ramp(mat, "diffuse_ramp", expand=True)
col.separator()
row = col.row()
row.prop(mat, "diffuse_ramp_input", text="Input")
row.prop(mat, "diffuse_ramp_blend", text="Blend")
col.prop(mat, "diffuse_ramp_factor", text="Factor")
class MATERIAL_PT_specular(MaterialButtonsPanel, Panel):
bl_label = "Specular"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
@classmethod
def poll(cls, context):
mat = context.material
engine = context.scene.render.engine
return check_material(mat) and (mat.type in {'SURFACE', 'WIRE'}) and (engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
mat = active_node_mat(context.material)
layout.active = (not mat.use_shadeless)
split = layout.split()
col = split.column()
col.prop(mat, "specular_color", text="")
col.prop(mat, "specular_intensity", text="Intensity")
col = split.column()
col.prop(mat, "specular_shader", text="")
col.prop(mat, "use_specular_ramp", text="Ramp")
col = layout.column()
if mat.specular_shader in {'COOKTORR', 'PHONG'}:
col.prop(mat, "specular_hardness", text="Hardness")
elif mat.specular_shader == 'BLINN':
row = col.row()
row.prop(mat, "specular_hardness", text="Hardness")
row.prop(mat, "specular_ior", text="IOR")
elif mat.specular_shader == 'WARDISO':
col.prop(mat, "specular_slope", text="Slope")
elif mat.specular_shader == 'TOON':
row = col.row()
row.prop(mat, "specular_toon_size", text="Size")
row.prop(mat, "specular_toon_smooth", text="Smooth")
if mat.use_specular_ramp:
layout.separator()
layout.template_color_ramp(mat, "specular_ramp", expand=True)
layout.separator()
row = layout.row()
row.prop(mat, "specular_ramp_input", text="Input")
row.prop(mat, "specular_ramp_blend", text="Blend")
layout.prop(mat, "specular_ramp_factor", text="Factor")
class MATERIAL_PT_shading(MaterialButtonsPanel, Panel):
bl_label = "Shading"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
@classmethod
def poll(cls, context):
mat = context.material
engine = context.scene.render.engine
return check_material(mat) and (mat.type in {'SURFACE', 'WIRE'}) and (engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
mat = active_node_mat(context.material)
if mat.type in {'SURFACE', 'WIRE'}:
split = layout.split()
col = split.column()
sub = col.column()
sub.active = not mat.use_shadeless
sub.prop(mat, "emit")
sub.prop(mat, "ambient")
sub = col.column()
sub.prop(mat, "translucency")
col = split.column()
col.prop(mat, "use_shadeless")
sub = col.column()
sub.active = not mat.use_shadeless
sub.prop(mat, "use_tangent_shading")
sub.prop(mat, "use_cubic")
class MATERIAL_PT_transp(MaterialButtonsPanel, Panel):
bl_label = "Transparency"
COMPAT_ENGINES = {'BLENDER_RENDER'}
@classmethod
def poll(cls, context):
mat = context.material
engine = context.scene.render.engine
return check_material(mat) and (mat.type in {'SURFACE', 'WIRE'}) and (engine in cls.COMPAT_ENGINES)
def draw_header(self, context):
mat = context.material
if simple_material(mat):
self.layout.prop(mat, "use_transparency", text="")
def draw(self, context):
layout = self.layout
base_mat = context.material
mat = active_node_mat(context.material)
rayt = mat.raytrace_transparency
if simple_material(base_mat):
row = layout.row()
row.active = mat.use_transparency
row.prop(mat, "transparency_method", expand=True)
split = layout.split()
split.active = base_mat.use_transparency
col = split.column()
col.prop(mat, "alpha")
row = col.row()
row.active = (base_mat.transparency_method != 'MASK') and (not mat.use_shadeless)
row.prop(mat, "specular_alpha", text="Specular")
col = split.column()
col.active = (not mat.use_shadeless)
col.prop(rayt, "fresnel")
sub = col.column()
sub.active = (rayt.fresnel > 0.0)
sub.prop(rayt, "fresnel_factor", text="Blend")
if base_mat.transparency_method == 'RAYTRACE':
layout.separator()
split = layout.split()
split.active = base_mat.use_transparency
col = split.column()
col.prop(rayt, "ior")
col.prop(rayt, "filter")
col.prop(rayt, "falloff")
col.prop(rayt, "depth_max")
col.prop(rayt, "depth")
col = split.column()
col.label(text="Gloss:")
col.prop(rayt, "gloss_factor", text="Amount")
sub = col.column()
sub.active = rayt.gloss_factor < 1.0
sub.prop(rayt, "gloss_threshold", text="Threshold")
sub.prop(rayt, "gloss_samples", text="Samples")
class MATERIAL_PT_mirror(MaterialButtonsPanel, Panel):
bl_label = "Mirror"
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER'}
@classmethod
def poll(cls, context):
mat = context.material
engine = context.scene.render.engine
return check_material(mat) and (mat.type in {'SURFACE', 'WIRE'}) and (engine in cls.COMPAT_ENGINES)
def draw_header(self, context):
raym = active_node_mat(context.material).raytrace_mirror
self.layout.prop(raym, "use", text="")
def draw(self, context):
layout = self.layout
mat = active_node_mat(context.material)
raym = mat.raytrace_mirror
layout.active = raym.use
split = layout.split()
col = split.column()
col.prop(raym, "reflect_factor")
col.prop(mat, "mirror_color", text="")
col = split.column()
col.prop(raym, "fresnel")
sub = col.column()
sub.active = (raym.fresnel > 0.0)
sub.prop(raym, "fresnel_factor", text="Blend")
split = layout.split()
col = split.column()
col.separator()
col.prop(raym, "depth")
col.prop(raym, "distance", text="Max Dist")
col.separator()
sub = col.split(percentage=0.4)
sub.active = (raym.distance > 0.0)
sub.label(text="Fade To:")
sub.prop(raym, "fade_to", text="")
col = split.column()
col.label(text="Gloss:")
col.prop(raym, "gloss_factor", text="Amount")
sub = col.column()
sub.active = (raym.gloss_factor < 1.0)
sub.prop(raym, "gloss_threshold", text="Threshold")
sub.prop(raym, "gloss_samples", text="Samples")
sub.prop(raym, "gloss_anisotropic", text="Anisotropic")
class MATERIAL_PT_sss(MaterialButtonsPanel, Panel):
bl_label = "Subsurface Scattering"
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER'}
@classmethod
def poll(cls, context):
mat = context.material
engine = context.scene.render.engine
return check_material(mat) and (mat.type in {'SURFACE', 'WIRE'}) and (engine in cls.COMPAT_ENGINES)
def draw_header(self, context):
mat = active_node_mat(context.material)
sss = mat.subsurface_scattering
self.layout.active = (not mat.use_shadeless)
self.layout.prop(sss, "use", text="")
def draw(self, context):
layout = self.layout
mat = active_node_mat(context.material)
sss = mat.subsurface_scattering
layout.active = (sss.use) and (not mat.use_shadeless)
row = layout.row().split()
sub = row.row(align=True).split(align=True, percentage=0.75)
sub.menu("MATERIAL_MT_sss_presets", text=bpy.types.MATERIAL_MT_sss_presets.bl_label)
sub.operator("material.sss_preset_add", text="", icon='ZOOMIN')
sub.operator("material.sss_preset_add", text="", icon='ZOOMOUT').remove_active = True
split = layout.split()
col = split.column()
col.prop(sss, "ior")
col.prop(sss, "scale")
col.prop(sss, "color", text="")
col.prop(sss, "radius", text="RGB Radius", expand=True)
col = split.column()
sub = col.column(align=True)
sub.label(text="Blend:")
sub.prop(sss, "color_factor", text="Color")
sub.prop(sss, "texture_factor", text="Texture")
sub.label(text="Scattering Weight:")
sub.prop(sss, "front")
sub.prop(sss, "back")
col.separator()
col.prop(sss, "error_threshold", text="Error")
class MATERIAL_PT_halo(MaterialButtonsPanel, Panel):
bl_label = "Halo"
COMPAT_ENGINES = {'BLENDER_RENDER'}
@classmethod
def poll(cls, context):
mat = context.material
engine = context.scene.render.engine
return mat and (mat.type == 'HALO') and (engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
mat = context.material # don't use node material
halo = mat.halo
def number_but(layout, toggle, number, name, color):
row = layout.row(align=True)
row.prop(halo, toggle, text="")
sub = row.column(align=True)
sub.active = getattr(halo, toggle)
sub.prop(halo, number, text=name, translate=False)
if not color == "":
sub.prop(mat, color, text="")
split = layout.split()
col = split.column()
col.prop(mat, "alpha")
col.prop(mat, "diffuse_color", text="")
col.prop(halo, "seed")
col = split.column()
col.prop(halo, "size")
col.prop(halo, "hardness")
col.prop(halo, "add")
layout.label(text="Options:")
split = layout.split()
col = split.column()
col.prop(halo, "use_texture")
col.prop(halo, "use_vertex_normal")
col.prop(halo, "use_extreme_alpha")
col.prop(halo, "use_shaded")
col.prop(halo, "use_soft")
col = split.column()
number_but(col, "use_ring", "ring_count", iface_("Rings"), "mirror_color")
number_but(col, "use_lines", "line_count", iface_("Lines"), "specular_color")
number_but(col, "use_star", "star_tip_count", iface_("Star Tips"), "")
class MATERIAL_PT_flare(MaterialButtonsPanel, Panel):
bl_label = "Flare"
COMPAT_ENGINES = {'BLENDER_RENDER'}
@classmethod
def poll(cls, context):
mat = context.material
engine = context.scene.render.engine
return mat and (mat.type == 'HALO') and (engine in cls.COMPAT_ENGINES)
def draw_header(self, context):
halo = context.material.halo
self.layout.prop(halo, "use_flare_mode", text="")
def draw(self, context):
layout = self.layout
mat = context.material # don't use node material
halo = mat.halo
layout.active = halo.use_flare_mode
split = layout.split()
col = split.column()
col.prop(halo, "flare_size", text="Size")
col.prop(halo, "flare_boost", text="Boost")
col.prop(halo, "flare_seed", text="Seed")
col = split.column()
col.prop(halo, "flare_subflare_count", text="Subflares")
col.prop(halo, "flare_subflare_size", text="Subsize")
class MATERIAL_PT_game_settings(MaterialButtonsPanel, Panel):
bl_label = "Game Settings"
COMPAT_ENGINES = {'BLENDER_GAME'}
@classmethod
def poll(cls, context):
return context.material and (context.scene.render.engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
game = context.material.game_settings # don't use node material
row = layout.row()
row.prop(game, "use_backface_culling")
row.prop(game, "invisible")
row.prop(game, "text")
row = layout.row()
row.label(text="Alpha Blend:")
row.label(text="Face Orientation:")
row = layout.row()
row.prop(game, "alpha_blend", text="")
row.prop(game, "face_orientation", text="")
class MATERIAL_PT_physics(MaterialButtonsPanel, Panel):
bl_label = "Physics"
COMPAT_ENGINES = {'BLENDER_GAME'}
def draw_header(self, context):
game = context.material.game_settings
self.layout.prop(game, "physics", text="")
@classmethod
def poll(cls, context):
return context.material and (context.scene.render.engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
layout.active = context.material.game_settings.physics
phys = context.material.physics # don't use node material
split = layout.split()
row = split.row()
row.prop(phys, "friction")
row.prop(phys, "elasticity", slider=True)
row = layout.row()
row.label(text="Force Field:")
row = layout.row()
row.prop(phys, "fh_force")
row.prop(phys, "fh_damping", slider=True)
row = layout.row()
row.prop(phys, "fh_distance")
row.prop(phys, "use_fh_normal")
class MATERIAL_PT_strand(MaterialButtonsPanel, Panel):
bl_label = "Strand"
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER'}
@classmethod
def poll(cls, context):
mat = context.material
engine = context.scene.render.engine
return mat and (mat.type in {'SURFACE', 'WIRE', 'HALO'}) and (engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
mat = context.material # don't use node material
tan = mat.strand
split = layout.split()
col = split.column()
sub = col.column(align=True)
sub.label(text="Size:")
sub.prop(tan, "root_size", text="Root")
sub.prop(tan, "tip_size", text="Tip")
sub.prop(tan, "size_min", text="Minimum")
sub.prop(tan, "use_blender_units")
sub = col.column()
sub.active = (not mat.use_shadeless)
sub.prop(tan, "use_tangent_shading")
col.prop(tan, "shape")
col = split.column()
col.label(text="Shading:")
col.prop(tan, "width_fade")
ob = context.object
if ob and ob.type == 'MESH':
col.prop_search(tan, "uv_layer", ob.data, "uv_textures", text="")
else:
col.prop(tan, "uv_layer", text="")
col.separator()
sub = col.column()
sub.active = (not mat.use_shadeless)
sub.label("Surface diffuse:")
sub = col.column()
sub.prop(tan, "blend_distance", text="Distance")
class MATERIAL_PT_options(MaterialButtonsPanel, Panel):
bl_label = "Options"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
@classmethod
def poll(cls, context):
mat = context.material
engine = context.scene.render.engine
return check_material(mat) and (mat.type in {'SURFACE', 'WIRE'}) and (engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
base_mat = context.material
mat = active_node_mat(base_mat)
split = layout.split()
col = split.column()
if simple_material(base_mat):
col.prop(mat, "use_raytrace")
col.prop(mat, "use_full_oversampling")
col.prop(mat, "use_sky")
col.prop(mat, "use_mist")
if simple_material(base_mat):
col.prop(mat, "invert_z")
sub = col.row()
sub.prop(mat, "offset_z")
sub.active = mat.use_transparency and mat.transparency_method == 'Z_TRANSPARENCY'
sub = col.column(align=True)
sub.label(text="Light Group:")
sub.prop(mat, "light_group", text="")
row = sub.row(align=True)
row.active = bool(mat.light_group)
row.prop(mat, "use_light_group_exclusive", text="Exclusive")
row.prop(mat, "use_light_group_local", text="Local")
col = split.column()
col.prop(mat, "use_face_texture")
sub = col.column()
sub.active = mat.use_face_texture
sub.prop(mat, "use_face_texture_alpha")
col.separator()
col.prop(mat, "use_vertex_color_paint")
col.prop(mat, "use_vertex_color_light")
col.prop(mat, "use_object_color")
col.prop(mat, "use_uv_project")
if simple_material(base_mat):
col.prop(mat, "pass_index")
class MATERIAL_PT_shadow(MaterialButtonsPanel, Panel):
bl_label = "Shadow"
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
@classmethod
def poll(cls, context):
mat = context.material
engine = context.scene.render.engine
return check_material(mat) and (mat.type in {'SURFACE', 'WIRE'}) and (engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
base_mat = context.material
mat = active_node_mat(base_mat)
split = layout.split()
col = split.column()
col.prop(mat, "use_shadows", text="Receive")
col.prop(mat, "use_transparent_shadows", text="Receive Transparent")
col.prop(mat, "use_only_shadow", text="Shadows Only")
sub = col.column()
sub.active = mat.use_only_shadow
sub.prop(mat, "shadow_only_type", text="")
if not simple_material(base_mat):
col = split.column()
col.prop(mat, "use_ray_shadow_bias", text="Auto Ray Bias")
sub = col.column()
sub.active = (not mat.use_ray_shadow_bias)
sub.prop(mat, "shadow_ray_bias", text="Ray Bias")
if simple_material(base_mat):
col = split.column()
col.prop(mat, "use_cast_shadows", text="Cast")
col.prop(mat, "use_cast_shadows_only", text="Cast Only")
col.prop(mat, "use_cast_buffer_shadows")
sub = col.column()
sub.active = mat.use_cast_buffer_shadows
if simple_material(base_mat):
sub.prop(mat, "shadow_cast_alpha", text="Casting Alpha")
sub.prop(mat, "shadow_buffer_bias", text="Buffer Bias")
if simple_material(base_mat):
col.prop(mat, "use_cast_approximate")
class MATERIAL_PT_transp_game(MaterialButtonsPanel, Panel):
bl_label = "Transparency"
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_GAME'}
@classmethod
def poll(cls, context):
mat = context.material
engine = context.scene.render.engine
return check_material(mat) and (engine in cls.COMPAT_ENGINES)
def draw_header(self, context):
mat = context.material
if simple_material(mat):
self.layout.prop(mat, "use_transparency", text="")
def draw(self, context):
layout = self.layout
base_mat = context.material
mat = active_node_mat(base_mat)
layout.active = mat.use_transparency
if simple_material(base_mat):
row = layout.row()
row.prop(mat, "transparency_method", expand=True)
layout.prop(mat, "alpha")
layout.prop(mat, "specular_alpha", text="Specular")
class VolumeButtonsPanel:
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "material"
COMPAT_ENGINES = {'BLENDER_RENDER'}
@classmethod
def poll(cls, context):
mat = context.material
engine = context.scene.render.engine
return mat and (mat.type == 'VOLUME') and (engine in cls.COMPAT_ENGINES)
class MATERIAL_PT_volume_density(VolumeButtonsPanel, Panel):
bl_label = "Density"
COMPAT_ENGINES = {'BLENDER_RENDER'}
def draw(self, context):
layout = self.layout
vol = context.material.volume # don't use node material
row = layout.row()
row.prop(vol, "density")
row.prop(vol, "density_scale")
class MATERIAL_PT_volume_shading(VolumeButtonsPanel, Panel):
bl_label = "Shading"
COMPAT_ENGINES = {'BLENDER_RENDER'}
def draw(self, context):
layout = self.layout
vol = context.material.volume # don't use node material
split = layout.split()
col = split.column()
col.prop(vol, "scattering")
col.prop(vol, "asymmetry")
col.prop(vol, "transmission_color")
col = split.column()
sub = col.column(align=True)
sub.prop(vol, "emission")
sub.prop(vol, "emission_color", text="")
sub = col.column(align=True)
sub.prop(vol, "reflection")
sub.prop(vol, "reflection_color", text="")
class MATERIAL_PT_volume_lighting(VolumeButtonsPanel, Panel):
bl_label = "Lighting"
COMPAT_ENGINES = {'BLENDER_RENDER'}
def draw(self, context):
layout = self.layout
vol = context.material.volume # don't use node material
split = layout.split()
col = split.column()
col.prop(vol, "light_method", text="")
col = split.column()
if vol.light_method == 'SHADED':
col.prop(vol, "use_external_shadows")
col.prop(vol, "use_light_cache")
sub = col.column()
sub.active = vol.use_light_cache
sub.prop(vol, "cache_resolution")
elif vol.light_method in {'MULTIPLE_SCATTERING', 'SHADED_PLUS_MULTIPLE_SCATTERING'}:
sub = col.column()
sub.enabled = True
sub.active = False
sub.label("Light Cache Enabled")
col.prop(vol, "cache_resolution")
sub = col.column(align=True)
sub.prop(vol, "ms_diffusion")
sub.prop(vol, "ms_spread")
sub.prop(vol, "ms_intensity")
class MATERIAL_PT_volume_transp(VolumeButtonsPanel, Panel):
bl_label = "Transparency"
COMPAT_ENGINES = {'BLENDER_RENDER'}
@classmethod
def poll(cls, context):
mat = context.material
engine = context.scene.render.engine
return mat and simple_material(mat) and (mat.type == 'VOLUME') and (engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
mat = context.material # don't use node material
layout.prop(mat, "transparency_method", expand=True)
class MATERIAL_PT_volume_integration(VolumeButtonsPanel, Panel):
bl_label = "Integration"
COMPAT_ENGINES = {'BLENDER_RENDER'}
def draw(self, context):
layout = self.layout
vol = context.material.volume # don't use node material
split = layout.split()
col = split.column()
col.label(text="Step Calculation:")
col.prop(vol, "step_method", text="")
col = col.column(align=True)
col.prop(vol, "step_size")
col = split.column()
col.label()
col.prop(vol, "depth_threshold")
class MATERIAL_PT_volume_options(VolumeButtonsPanel, Panel):
bl_label = "Options"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
mat = context.material
engine = context.scene.render.engine
return check_material(mat) and (mat.type == 'VOLUME') and (engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
mat = active_node_mat(context.material)
split = layout.split()
col = split.column()
if simple_material(context.material):
col.prop(mat, "use_raytrace")
col.prop(mat, "use_full_oversampling")
col.prop(mat, "use_mist")
col = split.column()
col.label(text="Light Group:")
col.prop(mat, "light_group", text="")
row = col.row()
row.active = bool(mat.light_group)
row.prop(mat, "use_light_group_exclusive", text="Exclusive")
class MATERIAL_PT_custom_props(MaterialButtonsPanel, PropertyPanel, Panel):
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
_context_path = "material"
_property_type = bpy.types.Material
if __name__ == "__main__": # only for live edit.
bpy.utils.register_module(__name__)
|
ChanChiChoi/scikit-learn
|
refs/heads/master
|
sklearn/decomposition/base.py
|
313
|
"""Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# Kyle Kastner <kastnerkyle@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
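        # Sketch of the identity applied below (Woodbury), writing
        # C = components_, d = exp_var_diff, sigma2 = noise_variance_:
        #   inv(C.T @ diag(d) @ C + sigma2 * I)
        #     = I/sigma2 - C.T @ inv(inv(diag(d)) + C @ C.T / sigma2) @ C / sigma2**2
        # which the following lines compute term by term.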
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
    def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
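# A minimal usage sketch (illustrative only; assumes scikit-learn is
# installed and uses IncrementalPCA, a concrete subclass of _BasePCA):
#
#   >>> import numpy as np
#   >>> from sklearn.decomposition import IncrementalPCA
#   >>> X = np.random.RandomState(0).randn(50, 4)
#   >>> ipca = IncrementalPCA(n_components=2).fit(X)
#   >>> cov = ipca.get_covariance()    # (4, 4) generative covariance
#   >>> prec = ipca.get_precision()    # its inverse via the inversion lemma
#   >>> np.allclose(cov.dot(prec), np.eye(4))
#   True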
|
AzamYahya/shogun
|
refs/heads/develop
|
examples/undocumented/python_modular/classifier_svmocas_modular.py
|
17
|
#!/usr/bin/env python
traindat = '../data/fm_train_real.dat'
testdat = '../data/fm_test_real.dat'
label_traindat = '../data/label_train_twoclass.dat'
parameter_list = [[traindat,testdat,label_traindat,0.9,1e-5,1],[traindat,testdat,label_traindat,0.8,1e-5,1]]
def classifier_svmocas_modular (train_fname=traindat,test_fname=testdat,label_fname=label_traindat,C=0.9,epsilon=1e-5,num_threads=1):
from modshogun import RealFeatures, BinaryLabels
from modshogun import SVMOcas, CSVFile
feats_train=RealFeatures(CSVFile(train_fname))
feats_test=RealFeatures(CSVFile(test_fname))
labels=BinaryLabels(CSVFile(label_fname))
svm=SVMOcas(C, feats_train, labels)
svm.set_epsilon(epsilon)
svm.parallel.set_num_threads(num_threads)
svm.set_bias_enabled(False)
svm.train()
bias=svm.get_bias()
w=svm.get_w()
predictions = svm.apply(feats_test)
return predictions, svm, predictions.get_labels()
if __name__=='__main__':
print('SVMOcas')
classifier_svmocas_modular(*parameter_list[0])
|
roopali8/tempest
|
refs/heads/master
|
tempest/api/compute/security_groups/test_security_group_rules_negative.py
|
11
|
# Copyright 2013 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest.api.compute.security_groups import base
from tempest import config
from tempest import test
CONF = config.CONF
def not_existing_id():
if CONF.service_available.neutron:
return data_utils.rand_uuid()
else:
return data_utils.rand_int_id(start=999)
class SecurityGroupRulesNegativeTestJSON(base.BaseSecurityGroupsTest):
@classmethod
def setup_clients(cls):
super(SecurityGroupRulesNegativeTestJSON, cls).setup_clients()
cls.client = cls.security_groups_client
@test.attr(type=['negative'])
@test.idempotent_id('1d507e98-7951-469b-82c3-23f1e6b8c254')
@test.services('network')
def test_create_security_group_rule_with_non_existent_id(self):
        # Negative test: Creation of Security Group rule should FAIL
        # with a non-existent Parent group id
        # Adding rules to the non-existent Security Group id
parent_group_id = not_existing_id()
ip_protocol = 'tcp'
from_port = 22
to_port = 22
self.assertRaises(lib_exc.NotFound,
self.client.create_security_group_rule,
parent_group_id, ip_protocol, from_port, to_port)
@test.attr(type=['negative'])
@test.idempotent_id('2244d7e4-adb7-4ecb-9930-2d77e123ce4f')
@test.services('network')
def test_create_security_group_rule_with_invalid_id(self):
        # Negative test: Creation of Security Group rule should FAIL
        # with a Parent group id which is not an integer
        # Adding rules to the non-integer Security Group id
parent_group_id = data_utils.rand_name('non_int_id')
ip_protocol = 'tcp'
from_port = 22
to_port = 22
self.assertRaises(lib_exc.BadRequest,
self.client.create_security_group_rule,
parent_group_id, ip_protocol, from_port, to_port)
@test.attr(type=['negative'])
@test.idempotent_id('8bd56d02-3ffa-4d67-9933-b6b9a01d6089')
@test.services('network')
def test_create_security_group_rule_duplicate(self):
# Negative test: Create Security Group rule duplicate should fail
# Creating a Security Group to add rule to it
sg = self.create_security_group()
# Adding rules to the created Security Group
parent_group_id = sg['id']
ip_protocol = 'tcp'
from_port = 22
to_port = 22
rule = \
self.client.create_security_group_rule(parent_group_id,
ip_protocol,
from_port,
to_port)
self.addCleanup(self.client.delete_security_group_rule, rule['id'])
# Add the same rule to the group should fail
self.assertRaises(lib_exc.BadRequest,
self.client.create_security_group_rule,
parent_group_id, ip_protocol, from_port, to_port)
@test.attr(type=['negative'])
@test.idempotent_id('84c81249-9f6e-439c-9bbf-cbb0d2cddbdf')
@test.services('network')
def test_create_security_group_rule_with_invalid_ip_protocol(self):
# Negative test: Creation of Security Group rule should FAIL
# with invalid ip_protocol
# Creating a Security Group to add rule to it
sg = self.create_security_group()
# Adding rules to the created Security Group
parent_group_id = sg['id']
ip_protocol = data_utils.rand_name('999')
from_port = 22
to_port = 22
self.assertRaises(lib_exc.BadRequest,
self.client.create_security_group_rule,
parent_group_id, ip_protocol, from_port, to_port)
@test.attr(type=['negative'])
@test.idempotent_id('12bbc875-1045-4f7a-be46-751277baedb9')
@test.services('network')
def test_create_security_group_rule_with_invalid_from_port(self):
# Negative test: Creation of Security Group rule should FAIL
# with invalid from_port
# Creating a Security Group to add rule to it
sg = self.create_security_group()
# Adding rules to the created Security Group
parent_group_id = sg['id']
ip_protocol = 'tcp'
from_port = data_utils.rand_int_id(start=65536)
to_port = 22
self.assertRaises(lib_exc.BadRequest,
self.client.create_security_group_rule,
parent_group_id, ip_protocol, from_port, to_port)
@test.attr(type=['negative'])
@test.idempotent_id('ff88804d-144f-45d1-bf59-dd155838a43a')
@test.services('network')
def test_create_security_group_rule_with_invalid_to_port(self):
# Negative test: Creation of Security Group rule should FAIL
# with invalid to_port
# Creating a Security Group to add rule to it
sg = self.create_security_group()
# Adding rules to the created Security Group
parent_group_id = sg['id']
ip_protocol = 'tcp'
from_port = 22
to_port = data_utils.rand_int_id(start=65536)
self.assertRaises(lib_exc.BadRequest,
self.client.create_security_group_rule,
parent_group_id, ip_protocol, from_port, to_port)
@test.attr(type=['negative'])
@test.idempotent_id('00296fa9-0576-496a-ae15-fbab843189e0')
@test.services('network')
def test_create_security_group_rule_with_invalid_port_range(self):
# Negative test: Creation of Security Group rule should FAIL
# with invalid port range.
# Creating a Security Group to add rule to it.
sg = self.create_security_group()
# Adding a rule to the created Security Group
secgroup_id = sg['id']
ip_protocol = 'tcp'
from_port = 22
to_port = 21
self.assertRaises(lib_exc.BadRequest,
self.client.create_security_group_rule,
secgroup_id, ip_protocol, from_port, to_port)
@test.attr(type=['negative'])
@test.idempotent_id('56fddcca-dbb8-4494-a0db-96e9f869527c')
@test.services('network')
def test_delete_security_group_rule_with_non_existent_id(self):
        # Negative test: Deletion of Security Group rule should FAIL
        # with a non-existent id
non_existent_rule_id = not_existing_id()
self.assertRaises(lib_exc.NotFound,
self.client.delete_security_group_rule,
non_existent_rule_id)
|
nsnam/ns-3-dev-git
|
refs/heads/master
|
src/openflow/test/examples-to-run.py
|
4
|
#! /usr/bin/env python3
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
# A list of C++ examples to run in order to ensure that they remain
# buildable and runnable over time. Each tuple in the list contains
#
# (example_name, do_run, do_valgrind_run).
#
# See test.py for more information.
cpp_examples = [
("openflow-switch", "ENABLE_OPENFLOW == True", "True"),
]
# A list of Python examples to run in order to ensure that they remain
# runnable over time. Each tuple in the list contains
#
# (example_name, do_run).
#
# See test.py for more information.
python_examples = []
|
bevacqua/yowsup
|
refs/heads/master
|
src/Yowsup/Common/Http/warequest.py
|
18
|
'''
Copyright (c) <2012> Tarek Galal <tare2.galal@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR
A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import sys
if sys.version_info < (3, 0):
import httplib
from urllib import urlencode
else:
from http import client as httplib
from urllib.parse import urlencode
import hashlib
from .waresponseparser import ResponseParser
from Yowsup.Common.debugger import Debugger as WADebug
from Yowsup.Common.constants import Constants
from Yowsup.Common.utilities import Utilities
class WARequest(object):
OK = 200
#moved to Constants
def __init__(self):
WADebug.attach(self)
self.pvars = [];
self.port = 443;
self.type = "GET"
self.parser = None
self.params = []
self.headers = {}
self.sent = False
self.response = None
def setParsableVariables(self, pvars):
self.pvars = pvars;
def onResponse(self, name, value):
if name == "status":
self.status = value
elif name == "result":
self.result = value
def addParam(self,name,value):
self.params.append((name,value.encode('utf-8')))
def removeParam(self, name):
for i in range(0, len(self.params)):
if self.params[i][0] == name:
del self.params[i]
def addHeaderField(self, name, value):
self.headers[name] = value;
def clearParams(self):
self.params = []
def getUserAgent(self):
tokenData = Utilities.readToken()
if tokenData:
agent = tokenData["u"]
else:
agent = Constants.tokenData["u"]
return agent
def getToken(self, phone, token):
return hashlib.md5(token.format(phone=phone).encode()).hexdigest()
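    # Illustration (hypothetical token template):
    #   getToken("49123456789", "secret{phone}")
    # equals hashlib.md5(b"secret49123456789").hexdigest().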
def send(self, parser = None):
if self.type == "POST":
return self.sendPostRequest(parser)
return self.sendGetRequest(parser)
def setParser(self, parser):
if isinstance(parser, ResponseParser):
self.parser = parser
else:
self._d("Invalid parser")
def getConnectionParameters(self):
if not self.url:
return ("", "", self.port)
try:
url = self.url.split("://", 1)
url = url[0] if len(url) == 1 else url[1]
host, path = url.split('/', 1)
except ValueError:
host = url
path = ""
path = "/" + path
return (host, self.port, path)
def sendGetRequest(self, parser = None):
self.response = None
        params = self.params
parser = parser or self.parser or ResponseParser()
headers = dict(list({"User-Agent":self.getUserAgent(),
"Accept": parser.getMeta()
}.items()) + list(self.headers.items()));
host,port,path = self.getConnectionParameters()
self.response = WARequest.sendRequest(host, port, path, headers, params, "GET")
        if self.response.status != WARequest.OK:
            self._d("Request was not successful, status was %s" % self.response.status)
return {}
data = self.response.read()
self._d(data);
self.sent = True
return parser.parse(data.decode(), self.pvars)
def sendPostRequest(self, parser = None):
self.response = None
        params = self.params
parser = parser or self.parser or ResponseParser()
headers = dict(list({"User-Agent":self.getUserAgent(),
"Accept": parser.getMeta(),
"Content-Type":"application/x-www-form-urlencoded"
}.items()) + list(self.headers.items()));
host,port,path = self.getConnectionParameters()
self.response = WARequest.sendRequest(host, port, path, headers, params, "POST")
        if self.response.status != WARequest.OK:
            self._d("Request was not successful, status was %s" % self.response.status)
return {}
data = self.response.read()
self._d(data);
self.sent = True
return parser.parse(data.decode(), self.pvars)
@staticmethod
def sendRequest(host, port, path, headers, params, reqType="GET"):
params = urlencode(params);
path = path + "?"+ params if reqType == "GET" and params else path
if len(headers):
WADebug.stdDebug(headers)
if len(params):
WADebug.stdDebug(params)
WADebug.stdDebug("Opening connection to %s" % host);
conn = httplib.HTTPSConnection(host ,port) if port == 443 else httplib.HTTPConnection(host ,port)
WADebug.stdDebug("Sending %s request to %s" % (reqType, path))
conn.request(reqType, path, params, headers);
response = conn.getresponse()
return response
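# Minimal usage sketch (hypothetical host and parameter; in practice WARequest
# subclasses set self.url and the parameters before calling send()):
#
#   req = WARequest()
#   req.url = "example.net/v2/exist"
#   req.addParam("cc", "49")
#   result = req.send()   # dict parsed according to self.pvars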
|
ajgallegog/gem5_arm
|
refs/heads/master
|
ext/ply/example/GardenSnake/GardenSnake.py
|
166
|
# GardenSnake - a parser generator demonstration program
#
# This implements a modified version of a subset of Python:
# - only 'def', 'return' and 'if' statements
# - 'if' only has 'then' clause (no elif nor else)
# - single-quoted strings only, content in raw format
# - numbers are decimal.Decimal instances (not integers or floats)
# - no print statement; use the built-in 'print' function
# - only < > == + - / * implemented (and unary + -)
# - assignment and tuple assignment work
# - no generators of any sort
# - no ... well, no quite a lot
# Why? I'm thinking about a new indentation-based configuration
# language for a project and wanted to figure out how to do it. Once
# I got that working I needed a way to test it out. My original AST
# was dumb so I decided to target Python's AST and compile it into
# Python code. Plus, it's pretty cool that it only took a day or so
# from sitting down with Ply to having working code.
# This uses David Beazley's Ply from http://www.dabeaz.com/ply/
# This work is hereby released into the Public Domain. To view a copy of
# the public domain dedication, visit
# http://creativecommons.org/licenses/publicdomain/ or send a letter to
# Creative Commons, 543 Howard Street, 5th Floor, San Francisco,
# California, 94105, USA.
#
# Portions of this work are derived from Python's Grammar definition
# and may be covered under the Python copyright and license
#
# Andrew Dalke / Dalke Scientific Software, LLC
# 30 August 2006 / Cape Town, South Africa
# Changelog:
# 30 August - added link to CC license; removed the "swapcase" encoding
# Modifications for inclusion in PLY distribution
import sys
sys.path.insert(0,"../..")
from ply import *
##### Lexer ######
#import lex
import decimal
tokens = (
'DEF',
'IF',
'NAME',
'NUMBER', # Python decimals
'STRING', # single quoted strings only; syntax of raw strings
'LPAR',
'RPAR',
'COLON',
'EQ',
'ASSIGN',
'LT',
'GT',
'PLUS',
'MINUS',
'MULT',
'DIV',
'RETURN',
'WS',
'NEWLINE',
'COMMA',
'SEMICOLON',
'INDENT',
'DEDENT',
'ENDMARKER',
)
#t_NUMBER = r'\d+'
# taken from decimal.py but without the leading sign
def t_NUMBER(t):
r"""(\d+(\.\d*)?|\.\d+)([eE][-+]? \d+)?"""
t.value = decimal.Decimal(t.value)
return t
def t_STRING(t):
r"'([^\\']+|\\'|\\\\)*'" # I think this is right ...
t.value=t.value[1:-1].decode("string-escape") # .swapcase() # for fun
return t
t_COLON = r':'
t_EQ = r'=='
t_ASSIGN = r'='
t_LT = r'<'
t_GT = r'>'
t_PLUS = r'\+'
t_MINUS = r'-'
t_MULT = r'\*'
t_DIV = r'/'
t_COMMA = r','
t_SEMICOLON = r';'
# Ply nicely documented how to do this.
RESERVED = {
"def": "DEF",
"if": "IF",
"return": "RETURN",
}
def t_NAME(t):
r'[a-zA-Z_][a-zA-Z0-9_]*'
t.type = RESERVED.get(t.value, "NAME")
return t
# Putting this before t_WS lets it consume lines with only comments in
# them so the latter code never sees the WS part. Not consuming the
# newline. Needed for "if 1: #comment"
def t_comment(t):
r"[ ]*\043[^\n]*" # \043 is '#'
pass
# Whitespace
def t_WS(t):
r' [ ]+ '
if t.lexer.at_line_start and t.lexer.paren_count == 0:
return t
# Don't generate newline tokens when inside of parenthesis, eg
# a = (1,
# 2, 3)
def t_newline(t):
r'\n+'
t.lexer.lineno += len(t.value)
t.type = "NEWLINE"
if t.lexer.paren_count == 0:
return t
def t_LPAR(t):
r'\('
t.lexer.paren_count += 1
return t
def t_RPAR(t):
r'\)'
# check for underflow? should be the job of the parser
t.lexer.paren_count -= 1
return t
def t_error(t):
    raise SyntaxError("Unknown symbol %r" % (t.value[0],))
## I implemented INDENT / DEDENT generation as a post-processing filter
# The original lex token stream contains WS and NEWLINE characters.
# WS will only occur before any other tokens on a line.
# I have three filters. One tags tokens by adding two attributes.
# "must_indent" is True if the token must be indented from the
# previous code. The other is "at_line_start" which is True for WS
# and the first non-WS/non-NEWLINE on a line. It flags the check to
# see if the new line has changed indentation level.
# Python's syntax has three INDENT states
# 0) no colon hence no need to indent
# 1) "if 1: go()" - simple statements have a COLON but no need for an indent
# 2) "if 1:\n go()" - complex statements have a COLON NEWLINE and must indent
NO_INDENT = 0
MAY_INDENT = 1
MUST_INDENT = 2
# only care about whitespace at the start of a line
def track_tokens_filter(lexer, tokens):
lexer.at_line_start = at_line_start = True
indent = NO_INDENT
saw_colon = False
for token in tokens:
token.at_line_start = at_line_start
if token.type == "COLON":
at_line_start = False
indent = MAY_INDENT
token.must_indent = False
elif token.type == "NEWLINE":
at_line_start = True
if indent == MAY_INDENT:
indent = MUST_INDENT
token.must_indent = False
elif token.type == "WS":
assert token.at_line_start == True
at_line_start = True
token.must_indent = False
else:
# A real token; only indent after COLON NEWLINE
if indent == MUST_INDENT:
token.must_indent = True
else:
token.must_indent = False
at_line_start = False
indent = NO_INDENT
yield token
lexer.at_line_start = at_line_start
def _new_token(type, lineno):
tok = lex.LexToken()
tok.type = type
tok.value = None
tok.lineno = lineno
return tok
# Synthesize a DEDENT tag
def DEDENT(lineno):
return _new_token("DEDENT", lineno)
# Synthesize an INDENT tag
def INDENT(lineno):
return _new_token("INDENT", lineno)
# Track the indentation level and emit the right INDENT / DEDENT events.
def indentation_filter(tokens):
# A stack of indentation levels; will never pop item 0
levels = [0]
token = None
depth = 0
prev_was_ws = False
for token in tokens:
## if 1:
## print "Process", token,
## if token.at_line_start:
## print "at_line_start",
## if token.must_indent:
## print "must_indent",
## print
# WS only occurs at the start of the line
# There may be WS followed by NEWLINE so
# only track the depth here. Don't indent/dedent
# until there's something real.
if token.type == "WS":
assert depth == 0
depth = len(token.value)
prev_was_ws = True
# WS tokens are never passed to the parser
continue
if token.type == "NEWLINE":
depth = 0
if prev_was_ws or token.at_line_start:
# ignore blank lines
continue
# pass the other cases on through
yield token
continue
# then it must be a real token (not WS, not NEWLINE)
# which can affect the indentation level
prev_was_ws = False
if token.must_indent:
# The current depth must be larger than the previous level
if not (depth > levels[-1]):
raise IndentationError("expected an indented block")
levels.append(depth)
yield INDENT(token.lineno)
elif token.at_line_start:
# Must be on the same level or one of the previous levels
if depth == levels[-1]:
# At the same level
pass
elif depth > levels[-1]:
raise IndentationError("indentation increase but not in new block")
else:
# Back up; but only if it matches a previous level
try:
i = levels.index(depth)
except ValueError:
raise IndentationError("inconsistent indentation")
for _ in range(i+1, len(levels)):
yield DEDENT(token.lineno)
levels.pop()
yield token
### Finished processing ###
# Must dedent any remaining levels
if len(levels) > 1:
assert token is not None
for _ in range(1, len(levels)):
yield DEDENT(token.lineno)
# The top-level filter adds an ENDMARKER, if requested.
# Python's grammar uses it.
def filter(lexer, add_endmarker = True):
token = None
tokens = iter(lexer.token, None)
tokens = track_tokens_filter(lexer, tokens)
for token in indentation_filter(tokens):
yield token
if add_endmarker:
lineno = 1
if token is not None:
lineno = token.lineno
yield _new_token("ENDMARKER", lineno)
# Combine Ply and my filters into a new lexer
class IndentLexer(object):
def __init__(self, debug=0, optimize=0, lextab='lextab', reflags=0):
self.lexer = lex.lex(debug=debug, optimize=optimize, lextab=lextab, reflags=reflags)
self.token_stream = None
def input(self, s, add_endmarker=True):
self.lexer.paren_count = 0
self.lexer.input(s)
self.token_stream = filter(self.lexer, add_endmarker)
def token(self):
try:
return self.token_stream.next()
except StopIteration:
return None
########## Parser (tokens -> AST) ######
# also part of Ply
#import yacc
# I use the Python AST
from compiler import ast
# Helper function
def Assign(left, right):
names = []
if isinstance(left, ast.Name):
# Single assignment on left
return ast.Assign([ast.AssName(left.name, 'OP_ASSIGN')], right)
elif isinstance(left, ast.Tuple):
# List of things - make sure they are Name nodes
names = []
for child in left.getChildren():
if not isinstance(child, ast.Name):
raise SyntaxError("that assignment not supported")
names.append(child.name)
ass_list = [ast.AssName(name, 'OP_ASSIGN') for name in names]
return ast.Assign([ast.AssTuple(ass_list)], right)
else:
raise SyntaxError("Can't do that yet")
# The grammar comments come from Python's Grammar/Grammar file
## NB: compound_stmt in single_input is followed by extra NEWLINE!
# file_input: (NEWLINE | stmt)* ENDMARKER
def p_file_input_end(p):
"""file_input_end : file_input ENDMARKER"""
p[0] = ast.Stmt(p[1])
def p_file_input(p):
"""file_input : file_input NEWLINE
| file_input stmt
| NEWLINE
| stmt"""
if isinstance(p[len(p)-1], basestring):
if len(p) == 3:
p[0] = p[1]
else:
p[0] = [] # p == 2 --> only a blank line
else:
if len(p) == 3:
p[0] = p[1] + p[2]
else:
p[0] = p[1]
# funcdef: [decorators] 'def' NAME parameters ':' suite
# ignoring decorators
def p_funcdef(p):
"funcdef : DEF NAME parameters COLON suite"
p[0] = ast.Function(None, p[2], tuple(p[3]), (), 0, None, p[5])
# parameters: '(' [varargslist] ')'
def p_parameters(p):
"""parameters : LPAR RPAR
| LPAR varargslist RPAR"""
if len(p) == 3:
p[0] = []
else:
p[0] = p[2]
# varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' '**' NAME] | '**' NAME) |
# highly simplified
def p_varargslist(p):
"""varargslist : varargslist COMMA NAME
| NAME"""
    if len(p) == 4:
        p[0] = p[1] + [p[3]]
else:
p[0] = [p[1]]
# stmt: simple_stmt | compound_stmt
def p_stmt_simple(p):
"""stmt : simple_stmt"""
# simple_stmt is a list
p[0] = p[1]
def p_stmt_compound(p):
"""stmt : compound_stmt"""
p[0] = [p[1]]
# simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
def p_simple_stmt(p):
"""simple_stmt : small_stmts NEWLINE
| small_stmts SEMICOLON NEWLINE"""
p[0] = p[1]
def p_small_stmts(p):
"""small_stmts : small_stmts SEMICOLON small_stmt
| small_stmt"""
if len(p) == 4:
p[0] = p[1] + [p[3]]
else:
p[0] = [p[1]]
# small_stmt: expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt |
# import_stmt | global_stmt | exec_stmt | assert_stmt
def p_small_stmt(p):
"""small_stmt : flow_stmt
| expr_stmt"""
p[0] = p[1]
# expr_stmt: testlist (augassign (yield_expr|testlist) |
# ('=' (yield_expr|testlist))*)
# augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' |
# '<<=' | '>>=' | '**=' | '//=')
def p_expr_stmt(p):
"""expr_stmt : testlist ASSIGN testlist
| testlist """
if len(p) == 2:
# a list of expressions
p[0] = ast.Discard(p[1])
else:
p[0] = Assign(p[1], p[3])
def p_flow_stmt(p):
"flow_stmt : return_stmt"
p[0] = p[1]
# return_stmt: 'return' [testlist]
def p_return_stmt(p):
"return_stmt : RETURN testlist"
p[0] = ast.Return(p[2])
def p_compound_stmt(p):
"""compound_stmt : if_stmt
| funcdef"""
p[0] = p[1]
def p_if_stmt(p):
'if_stmt : IF test COLON suite'
p[0] = ast.If([(p[2], p[4])], None)
def p_suite(p):
"""suite : simple_stmt
| NEWLINE INDENT stmts DEDENT"""
if len(p) == 2:
p[0] = ast.Stmt(p[1])
else:
p[0] = ast.Stmt(p[3])
def p_stmts(p):
"""stmts : stmts stmt
| stmt"""
if len(p) == 3:
p[0] = p[1] + p[2]
else:
p[0] = p[1]
## Not using Python's approach because Ply supports precedence
# comparison: expr (comp_op expr)*
# arith_expr: term (('+'|'-') term)*
# term: factor (('*'|'/'|'%'|'//') factor)*
# factor: ('+'|'-'|'~') factor | power
# comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
def make_lt_compare((left, right)):
return ast.Compare(left, [('<', right),])
def make_gt_compare((left, right)):
return ast.Compare(left, [('>', right),])
def make_eq_compare((left, right)):
return ast.Compare(left, [('==', right),])
binary_ops = {
"+": ast.Add,
"-": ast.Sub,
"*": ast.Mul,
"/": ast.Div,
"<": make_lt_compare,
">": make_gt_compare,
"==": make_eq_compare,
}
unary_ops = {
"+": ast.UnaryAdd,
"-": ast.UnarySub,
}
precedence = (
("left", "EQ", "GT", "LT"),
("left", "PLUS", "MINUS"),
("left", "MULT", "DIV"),
)
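# With this table, e.g. "1 + 2 * 3" reduces MULT before PLUS, so the single
# flat 'comparison' rule below still yields
#   Add((Const(1), Mul((Const(2), Const(3)))))
# without the term/factor layering sketched above.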
def p_comparison(p):
"""comparison : comparison PLUS comparison
| comparison MINUS comparison
| comparison MULT comparison
| comparison DIV comparison
| comparison LT comparison
| comparison EQ comparison
| comparison GT comparison
| PLUS comparison
| MINUS comparison
| power"""
if len(p) == 4:
p[0] = binary_ops[p[2]]((p[1], p[3]))
elif len(p) == 3:
p[0] = unary_ops[p[1]](p[2])
else:
p[0] = p[1]
# power: atom trailer* ['**' factor]
# trailers enables function calls. I only allow one level of calls
# so this is 'trailer'
def p_power(p):
"""power : atom
| atom trailer"""
if len(p) == 2:
p[0] = p[1]
else:
if p[2][0] == "CALL":
p[0] = ast.CallFunc(p[1], p[2][1], None, None)
else:
raise AssertionError("not implemented")
def p_atom_name(p):
"""atom : NAME"""
p[0] = ast.Name(p[1])
def p_atom_number(p):
"""atom : NUMBER
| STRING"""
p[0] = ast.Const(p[1])
def p_atom_tuple(p):
"""atom : LPAR testlist RPAR"""
p[0] = p[2]
# trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
def p_trailer(p):
"trailer : LPAR arglist RPAR"
p[0] = ("CALL", p[2])
# testlist: test (',' test)* [',']
# Contains shift/reduce error
def p_testlist(p):
"""testlist : testlist_multi COMMA
| testlist_multi """
if len(p) == 2:
p[0] = p[1]
else:
# May need to promote singleton to tuple
if isinstance(p[1], list):
p[0] = p[1]
else:
p[0] = [p[1]]
# Convert into a tuple?
if isinstance(p[0], list):
p[0] = ast.Tuple(p[0])
def p_testlist_multi(p):
"""testlist_multi : testlist_multi COMMA test
| test"""
if len(p) == 2:
# singleton
p[0] = p[1]
else:
if isinstance(p[1], list):
p[0] = p[1] + [p[3]]
else:
# singleton -> tuple
p[0] = [p[1], p[3]]
# test: or_test ['if' or_test 'else' test] | lambdef
# as I don't support 'and', 'or', and 'not' this works down to 'comparison'
def p_test(p):
"test : comparison"
p[0] = p[1]
# arglist: (argument ',')* (argument [',']| '*' test [',' '**' test] | '**' test)
# XXX INCOMPLETE: this doesn't allow the trailing comma
def p_arglist(p):
"""arglist : arglist COMMA argument
| argument"""
if len(p) == 4:
p[0] = p[1] + [p[3]]
else:
p[0] = [p[1]]
# argument: test [gen_for] | test '=' test # Really [keyword '='] test
def p_argument(p):
"argument : test"
p[0] = p[1]
def p_error(p):
#print "Error!", repr(p)
raise SyntaxError(p)
class GardenSnakeParser(object):
def __init__(self, lexer = None):
if lexer is None:
lexer = IndentLexer()
self.lexer = lexer
self.parser = yacc.yacc(start="file_input_end")
def parse(self, code):
self.lexer.input(code)
result = self.parser.parse(lexer = self.lexer)
return ast.Module(None, result)
###### Code generation ######
from compiler import misc, syntax, pycodegen
class GardenSnakeCompiler(object):
def __init__(self):
self.parser = GardenSnakeParser()
def compile(self, code, filename="<string>"):
tree = self.parser.parse(code)
#print tree
misc.set_filename(filename, tree)
syntax.check(tree)
gen = pycodegen.ModuleCodeGenerator(tree)
code = gen.getCode()
return code
####### Test code #######
compile = GardenSnakeCompiler().compile
code = r"""
print('LET\'S TRY THIS \\OUT')
#Comment here
def x(a):
print('called with',a)
if a == 1:
return 2
if a*2 > 10: return 999 / 4
# Another comment here
return a+2*3
ints = (1, 2,
3, 4,
5)
print('multiline-expression', ints)
t = 4+1/3*2+6*(9-5+1)
print('precedence test; should be 34+2/3:', t, t==(34+2/3))
print('numbers', 1,2,3,4,5)
if 1:
8
a=9
print(x(a))
print(x(1))
print(x(2))
print(x(8),'3')
print('this is decimal', 1/5)
print('BIG DECIMAL', 1.234567891234567e12345)
"""
# Set up the GardenSnake run-time environment
def print_(*args):
print "-->", " ".join(map(str,args))
globals()["print"] = print_
compiled_code = compile(code)
exec compiled_code in globals()
print "Done"
|
rmanzano-sps/is210-week-03-synthesizing
|
refs/heads/master
|
task_03.py
|
4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provides Napoleon's great statement."""
NAPOLEON = "Able was I, ere I saw Elba."
REVERSED = NAPOLEON[::-1]
REVERSED = REVERSED.lower()
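# For reference, REVERSED now holds the lowercased reversal:
# ".able was i ere ,i saw elba"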
|
40223112/2015cd_midterm
|
refs/heads/master
|
static/Brython3.1.0-20150301-090019/Lib/_weakrefset.py
|
766
|
# Access WeakSet through the weakref module.
# This code is separated-out because it is needed
# by abc.py to load everything else at startup.
from _weakref import ref
__all__ = ['WeakSet']
class _IterationGuard:
# This context manager registers itself in the current iterators of the
# weak container, such as to delay all removals until the context manager
# exits.
# This technique should be relatively thread-safe (since sets are).
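    # Illustrative hazard (hypothetical): without this guard, dropping the
    # last strong reference to an element while a WeakSet is being iterated
    # would mutate the underlying set mid-iteration; with it, the removal is
    # queued in _pending_removals and only applied on __exit__.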
def __init__(self, weakcontainer):
# Don't create cycles
self.weakcontainer = ref(weakcontainer)
def __enter__(self):
w = self.weakcontainer()
if w is not None:
w._iterating.add(self)
return self
def __exit__(self, e, t, b):
w = self.weakcontainer()
if w is not None:
s = w._iterating
s.remove(self)
if not s:
w._commit_removals()
class WeakSet:
def __init__(self, data=None):
self.data = set()
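        # _remove is installed as the weakref callback for every stored
        # element; binding a weak reference to self as a default argument
        # avoids a reference cycle between the set and its own callback.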
def _remove(item, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(item)
else:
self.data.discard(item)
self._remove = _remove
# A list of keys to be removed
self._pending_removals = []
self._iterating = set()
if data is not None:
self.update(data)
def _commit_removals(self):
l = self._pending_removals
discard = self.data.discard
while l:
discard(l.pop())
def __iter__(self):
with _IterationGuard(self):
for itemref in self.data:
item = itemref()
if item is not None:
yield item
def __len__(self):
return len(self.data) - len(self._pending_removals)
def __contains__(self, item):
try:
wr = ref(item)
except TypeError:
return False
return wr in self.data
def __reduce__(self):
return (self.__class__, (list(self),),
getattr(self, '__dict__', None))
def add(self, item):
if self._pending_removals:
self._commit_removals()
self.data.add(ref(item, self._remove))
def clear(self):
if self._pending_removals:
self._commit_removals()
self.data.clear()
def copy(self):
return self.__class__(self)
def pop(self):
if self._pending_removals:
self._commit_removals()
while True:
try:
itemref = self.data.pop()
except KeyError:
raise KeyError('pop from empty WeakSet')
item = itemref()
if item is not None:
return item
def remove(self, item):
if self._pending_removals:
self._commit_removals()
self.data.remove(ref(item))
def discard(self, item):
if self._pending_removals:
self._commit_removals()
self.data.discard(ref(item))
def update(self, other):
if self._pending_removals:
self._commit_removals()
for element in other:
self.add(element)
def __ior__(self, other):
self.update(other)
return self
def difference(self, other):
newset = self.copy()
newset.difference_update(other)
return newset
__sub__ = difference
def difference_update(self, other):
self.__isub__(other)
def __isub__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.difference_update(ref(item) for item in other)
return self
def intersection(self, other):
return self.__class__(item for item in other if item in self)
__and__ = intersection
def intersection_update(self, other):
self.__iand__(other)
def __iand__(self, other):
if self._pending_removals:
self._commit_removals()
self.data.intersection_update(ref(item) for item in other)
return self
def issubset(self, other):
return self.data.issubset(ref(item) for item in other)
__le__ = issubset
def __lt__(self, other):
return self.data < set(ref(item) for item in other)
def issuperset(self, other):
return self.data.issuperset(ref(item) for item in other)
__ge__ = issuperset
def __gt__(self, other):
return self.data > set(ref(item) for item in other)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.data == set(ref(item) for item in other)
def symmetric_difference(self, other):
newset = self.copy()
newset.symmetric_difference_update(other)
return newset
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
self.__ixor__(other)
def __ixor__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
return self
def union(self, other):
return self.__class__(e for s in (self, other) for e in s)
__or__ = union
def isdisjoint(self, other):
return len(self.intersection(other)) == 0
|
freeman-lab/dask
|
refs/heads/master
|
dask/distributed/__init__.py
|
13
|
from .worker import Worker
from .scheduler import Scheduler
from .client import Client
from .ipython_utils import dask_client_from_ipclient
|
GunoH/intellij-community
|
refs/heads/master
|
python/testData/codeInsight/mlcompletion/prev2calls/sysStdinReadlinesIter.py
|
10
|
import sys
for line in sys.stdin.<caret>:
print(line)
|
chokribr/invenio
|
refs/heads/master
|
invenio/modules/communities/models.py
|
6
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Community models.
A layer around Invenio's collections and portalboxes that makes it easier
for end-users to create their own collections.
from invenio.modules.communities.models import Community
from invenio.ext.sqlalchemy import db
u = Community.query.get('test5')
u = Community(
id='test5',
id_user=176,
title='European Middleware Initiative',
description='Bla bla',
curation_policy='Bla'
)
db.session.add(u)
db.session.commit()
u.save_collection(provisional=False)
After a call to save_collection() you must do the following:
- Clear redis cache key for collection
- Run webcoll immediately for only this collection
"""
from datetime import datetime
from flask import url_for
from invenio.base.globals import cfg
from invenio.config import CFG_SITE_LANG
from invenio.ext.sqlalchemy import db
from invenio.ext.template import render_template_to_string
from invenio.legacy.bibrecord import record_add_field
from invenio.modules.access.models import \
AccACTION, AccARGUMENT, \
AccAuthorization, AccROLE, UserAccROLE
from invenio.modules.accounts.models import User
from invenio.modules.communities.signals import before_save_collection, \
after_save_collection, before_save_collections, after_save_collections, \
before_delete_collection, after_delete_collection, \
before_delete_collections, after_delete_collections, \
pre_curation, post_curation
from invenio.modules.collections.models import \
Collection, \
CollectionCollection, \
CollectionFormat, \
CollectionPortalbox, \
Collectiondetailedrecordpagetabs, \
Collectionname, \
Portalbox
from invenio.modules.records.api import get_record
from invenio.modules.oaiharvester.models import OaiREPOSITORY
class Community(db.Model):
"""Represent a Community.
A layer around Invenio's collections and portalboxes.
"""
__tablename__ = 'community'
#
# Fields
#
id = db.Column(db.String(100), primary_key=True)
"""
Community identifier used to generate the real collection_identifier
"""
id_user = db.Column(
db.Integer(15, unsigned=True), db.ForeignKey(User.id),
nullable=False
)
""" Owner of the community. """
id_collection = db.Column(
db.Integer(15, unsigned=True), db.ForeignKey(Collection.id),
nullable=True, default=None
)
""" Invenio collection generated from this community"""
id_collection_provisional = db.Column(
db.Integer(15, unsigned=True), db.ForeignKey(Collection.id),
nullable=True, default=None
)
""" Invenio provisional collection generated from this community"""
id_oairepository = db.Column(
db.MediumInteger(9, unsigned=True), db.ForeignKey(OaiREPOSITORY.id),
nullable=True, default=None
)
""" OAI Repository set specification """
title = db.Column(db.String(length=255), nullable=False, default='')
""" Title of community."""
description = db.Column(db.Text(), nullable=False, default='')
""" Short description of community, displayed in portal boxes. """
page = db.Column(db.Text(), nullable=False, default='')
""" Long description of community, displayed on an individual page. """
curation_policy = db.Column(db.Text(), nullable=False, default='')
""" """
has_logo = db.Column(db.Boolean(), nullable=False, default=False)
""" """
created = db.Column(db.DateTime(), nullable=False, default=datetime.now)
""" Creation datetime """
last_modified = db.Column(db.DateTime(), nullable=False,
default=datetime.now, onupdate=datetime.now)
""" Last modification datetime """
last_record_accepted = db.Column(db.DateTime(), nullable=False,
default=datetime(2000, 1, 1, 0, 0, 0))
""" Last record acceptance datetime"""
ranking = db.Column(db.Integer(9), nullable=False, default=0)
""" Ranking of community. Updated by ranking deamon"""
fixed_points = db.Column(db.Integer(9), nullable=False, default=0)
""" Points which will be always added to overall score of community"""
#
    # Relationships
#
owner = db.relationship(User, backref='communities',
foreign_keys=[id_user])
""" Relation to the owner (User) of the community"""
collection = db.relationship(
Collection, uselist=False, backref='community',
foreign_keys=[id_collection]
)
""" Relationship to collection. """
collection_provisional = db.relationship(
Collection, uselist=False, backref='community_provisional',
foreign_keys=[id_collection_provisional]
)
"""Relationship to restricted collection containing uncurated records."""
oai_set = db.relationship(
OaiREPOSITORY, uselist=False, backref='community',
foreign_keys=[id_oairepository]
)
"""Relation to the owner (User) of the community."""
#
# Properties
#
@property
def logo_url(self):
"""Get URL to collection logo."""
# FIXME
if self.has_logo:
raise NotImplementedError
else:
return None
@property
def oai_url(self):
"""Get link to OAI-PMH API for this community collection."""
return "/oai2d?verb=ListRecords&metadataPrefix=oai_dc&set=%s" % (
self.get_collection_name(), )
@property
def community_url(self):
"""Get URL to this community collection."""
return "/collection/%s" % self.get_collection_name()
@property
def community_provisional_url(self):
"""Get URL to this provisional community collection."""
return "/search?cc=%s" % self.get_collection_name(provisional=True)
@property
def upload_url(self):
"""Get direct upload URL."""
return url_for('webdeposit.index', c=self.id)
@classmethod
def from_recid(cls, recid, provisional=False):
"""Get user communities specified in recid."""
rec = get_record(recid).legacy_create_recstruct()
prefix = "%s-" % (
cfg['COMMUNITIES_ID_PREFIX_PROVISIONAL']
if provisional else cfg['COMMUNITIES_ID_PREFIX'])
colls = rec.get('980', [])
usercomm = []
for c in colls:
try:
# We are only interested in subfield 'a'
code, val = c[0][0]
if code == 'a' and val.startswith(prefix):
val = val[len(prefix):]
u = cls.query.filter_by(id=val).first()
if u:
usercomm.append(u)
except IndexError:
pass
return usercomm
@classmethod
def filter_communities(cls, p, so):
"""Search for communities.
        Helper function which fetches from the database only those communities
        that match the search pattern 'p', and uses parameter 'so' to return
        them in the requested order.
"""
query = cls.query
if p:
query = query.filter(db.or_(
cls.id.like("%" + p + "%"),
cls.title.like("%" + p + "%"),
cls.description.like("%" + p + "%"),
))
if so in cfg['COMMUNITIES_SORTING_OPTIONS']:
            order = db.asc if so == 'title' else db.desc
query = query.order_by(order(getattr(cls, so)))
else:
query = query.order_by(db.desc(cls.ranking))
return query
#
# Utility methods
#
def get_collection_name(self, provisional=False):
"""Get a unique collection name identifier."""
if provisional:
return "%s-%s" % (
cfg['COMMUNITIES_ID_PREFIX_PROVISIONAL'],
self.id)
else:
return "%s-%s" % (
cfg['COMMUNITIES_ID_PREFIX'], self.id)
def get_title(self, provisional=False):
"""Get collection title."""
if provisional:
return "Provisional: %s" % self.title
else:
return self.title
def get_collection_dbquery(self, provisional=False):
"""Get collection query."""
return "%s:%s" % self.get_query(provisional=provisional)
def get_query(self, provisional=False):
"""Get tuple (field,value) for search engine query."""
return ("980__a", self.get_collection_name(provisional=provisional))
def render_portalbox_bodies(self, templates):
"""Get a list of rendered portal boxes for this user collection."""
ctx = {
'community': self,
}
return map(
lambda t: render_template_to_string(t, **ctx),
templates
)
#
# Curation methods
#
def _modify_record(self, recid, test_func, replace_func, include_func,
append_colls=[], replace_colls=[]):
"""Generate record a MARCXML file.
@param test_func: Function to test if a collection id should be changed
@param replace_func: Function to replace the collection id.
@param include_func: Function to test if collection should be included
"""
rec = get_record(recid).legacy_create_recstruct()
newcolls = []
dirty = False
try:
colls = rec['980']
if replace_colls:
for c in replace_colls:
newcolls.append([('a', c)])
dirty = True
else:
for c in colls:
try:
# We are only interested in subfield 'a'
code, val = c[0][0]
if test_func(code, val):
c[0][0] = replace_func(code, val)
dirty = True
if include_func(code, val):
newcolls.append(c[0])
else:
dirty = True
except IndexError:
pass
for c in append_colls:
newcolls.append([('a', c)])
dirty = True
except KeyError:
return False
if not dirty:
return False
rec = {}
record_add_field(rec, '001', controlfield_value=str(recid))
for subfields in newcolls:
record_add_field(rec, '980', subfields=subfields)
return rec
def _upload_record(self, rec, pretend=False):
"""Bibupload one record."""
from invenio.legacy.bibupload.utils import bibupload_record
if rec is False:
return None
if not pretend:
bibupload_record(
record=rec, file_prefix='community', mode='-c',
opts=[], alias="community",
)
return rec
def _upload_collection(self, coll):
"""Bibupload many records."""
from invenio.legacy.bibupload.utils import bibupload_record
bibupload_record(
collection=coll, file_prefix='community', mode='-c',
opts=[], alias="community",
)
return True
def accept_record(self, recid, pretend=False):
"""Accept a record for inclusion in a community.
@param recid: Record ID
"""
expected_id = self.get_collection_name(provisional=True)
new_id = self.get_collection_name(provisional=False)
append_colls, replace_colls = signalresult2list(pre_curation.send(
self, action='accept', recid=recid, pretend=pretend))
def test_func(code, val):
return code == 'a' and val == expected_id
def replace_func(code, val):
return (code, new_id)
def include_func(code, val):
return True
rec = self._upload_record(
self._modify_record(
recid, test_func, replace_func, include_func,
append_colls=append_colls, replace_colls=replace_colls
),
pretend=pretend
)
self.last_record_accepted = datetime.now()
db.session.commit()
post_curation.send(self, action='accept', recid=recid, record=rec,
pretend=pretend)
return rec
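    # Sketch of the accept flow in MARC terms (using the hypothetical
    # prefixes from the example above): for recid 42 in community 'astro'
    # the 980__a value 'provisional-user-astro' is rewritten to
    # 'user-astro', and only the 001/980 fields are re-uploaded in
    # correct ('-c') mode.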
def reject_record(self, recid, pretend=False):
"""Reject a record for inclusion in a community.
@param recid: Record ID
"""
expected_id = self.get_collection_name(provisional=True)
new_id = self.get_collection_name(provisional=False)
append_colls, replace_colls = signalresult2list(pre_curation.send(
self, action='reject', recid=recid, pretend=pretend))
def test_func(code, val):
return False
def replace_func(code, val):
return (code, val)
def include_func(code, val):
return not (code == 'a' and (val == expected_id or val == new_id))
rec = self._upload_record(
self._modify_record(
recid, test_func, replace_func, include_func,
append_colls=append_colls, replace_colls=replace_colls
),
pretend=pretend
)
post_curation.send(self, action='reject', recid=recid, record=rec,
pretend=pretend)
return rec
#
# Data persistence methods
#
def save_collectionname(self, collection, title):
"""Create or update Collectionname object."""
if collection.id:
c_name = Collectionname.query.filter_by(
id_collection=collection.id, ln=CFG_SITE_LANG, type='ln'
).first()
if c_name:
update_changed_fields(c_name, dict(value=title))
return c_name
c_name = Collectionname(
collection=collection,
ln=CFG_SITE_LANG,
type='ln',
value=title,
)
db.session.add(c_name)
return c_name
def save_collectiondetailedrecordpagetabs(self, collection):
"""Create or update Collectiondetailedrecordpagetabs object."""
if collection.id:
c_tabs = Collectiondetailedrecordpagetabs.query.filter_by(
id_collection=collection.id
).first()
if c_tabs:
update_changed_fields(c_tabs, dict(
tabs=cfg['COMMUNITIES_TABS']))
return c_tabs
c_tabs = Collectiondetailedrecordpagetabs(
collection=collection,
tabs=cfg['COMMUNITIES_TABS'],
)
db.session.add(c_tabs)
return c_tabs
def save_collectioncollection(self, collection, parent_name):
"""Create or update CollectionCollection object."""
dad = Collection.query.filter_by(name=parent_name).first()
if collection.id:
c_tree = CollectionCollection.query.filter_by(
id_dad=dad.id,
id_son=collection.id
).first()
if c_tree:
update_changed_fields(c_tree, dict(
type=cfg['COMMUNITIES_COLLECTION_TYPE'],
score=cfg['COMMUNITIES_COLLECTION_SCORE']))
return c_tree
c_tree = CollectionCollection(
dad=dad,
son=collection,
type=cfg['COMMUNITIES_COLLECTION_TYPE'],
score=cfg['COMMUNITIES_COLLECTION_SCORE'],
)
db.session.add(c_tree)
return c_tree
def save_collectionformat(self, collection, fmt_str):
"""Create or update CollectionFormat object."""
if collection.id:
c_fmt = CollectionFormat.query.filter_by(
id_collection=collection.id
).first()
if c_fmt:
update_changed_fields(c_fmt, dict(format=fmt_str, score=1))
return c_fmt
c_fmt = CollectionFormat(
collection=collection,
format_code=fmt_str,
)
db.session.add(c_fmt)
return c_fmt
def save_collectionportalboxes(self, collection, templates):
"""Create or update Portalbox and CollectionPortalbox objects."""
# Setup portal boxes
bodies = self.render_portalbox_bodies(templates)
bodies.reverse() # Highest score is on the top, so we reverse the list
objects = []
if collection.id:
c_pboxes = CollectionPortalbox.query.filter_by(
id_collection=collection.id,
ln=CFG_SITE_LANG,
).all()
if len(c_pboxes) == len(bodies):
for score, elem in enumerate(zip(c_pboxes, bodies)):
c_pbox, body = elem
pbox = c_pbox.portalbox
update_changed_fields(pbox, dict(body=body))
update_changed_fields(c_pbox, dict(
score=score,
position=cfg[
'COMMUNITIES_PORTALBOX_POSITION']))
objects.append(c_pbox)
return objects
else:
                # Either the templates were modified or the collection
                # portalboxes were modified outside of the UserCollection. In
                # either case, remove existing portalboxes and add new ones.
for c_pbox in c_pboxes:
db.session.delete(c_pbox.portalbox)
db.session.delete(c_pbox)
for score, body in enumerate(bodies):
p = Portalbox(title='', body=body)
c_pbox = CollectionPortalbox()
update_changed_fields(c_pbox, dict(
collection=collection,
portalbox=p,
ln=CFG_SITE_LANG,
position=cfg['COMMUNITIES_PORTALBOX_POSITION'],
score=score,
))
db.session.add_all([p, c_pbox])
objects.append(c_pbox)
return objects
def save_oairepository_set(self, provisional=False):
"""Create or update OAI Repository set."""
collection_name = self.get_collection_name(provisional=provisional)
(f1, p1) = self.get_query(provisional=provisional)
fields = dict(
setName='%s set' % collection_name,
setSpec=collection_name,
setDescription=self.description,
p1=p1, f1=f1, m1='e',
p2='', f2='', m2='',
p3='', f3='', m3='',
setDefinition=''
)
if self.oai_set:
update_changed_fields(self.oai_set, fields)
else:
self.oai_set = OaiREPOSITORY(**fields)
db.session.add(self.oai_set)
def save_acl(self, collection_id, collection_name):
"""Create or update authorization.
Needed for user to view provisional collection.
"""
# Role - use Community id, because role name is limited to 32 chars.
role_name = 'coll_%s' % collection_id
role = AccROLE.query.filter_by(name=role_name).first()
if not role:
role = AccROLE(
name=role_name,
description='Curators of Community {collection}'.format(
collection=collection_name))
db.session.add(role)
# Argument
fields = dict(keyword='collection', value=collection_name)
arg = AccARGUMENT.query.filter_by(**fields).first()
if not arg:
arg = AccARGUMENT(**fields)
db.session.add(arg)
# Action
action = AccACTION.query.filter_by(name='viewrestrcoll').first()
# User role
alluserroles = UserAccROLE.query.filter_by(role=role).all()
userrole = None
if alluserroles:
# Remove any user which is not the owner
for ur in alluserroles:
if ur.id_user == self.id_user:
db.session.delete(ur)
else:
userrole = ur
if not userrole:
userrole = UserAccROLE(id_user=self.id_user, role=role)
db.session.add(userrole)
# Authorization
auth = AccAuthorization.query.filter_by(role=role, action=action,
argument=arg).first()
if not auth:
auth = AccAuthorization(role=role, action=action, argument=arg,
argumentlistid=1)
def save_collection(self, provisional=False):
"""Create or update a new collection.
Including name, tabs, collection tree, collection output formats,
portalboxes and OAI repository set.
"""
# Setup collection
collection_name = self.get_collection_name(provisional=provisional)
c = Collection.query.filter_by(name=collection_name).first()
fields = dict(
name=collection_name,
dbquery=self.get_collection_dbquery(provisional=provisional)
)
        if c:
            before_save_collection.send(self, is_new=False,
                                        provisional=provisional)
            update_changed_fields(c, fields)
        else:
            before_save_collection.send(self, is_new=True,
                                        provisional=provisional)
            c = Collection(**fields)
            db.session.add(c)
db.session.commit()
setattr(self,
'collection_provisional' if provisional else 'collection',
c)
# Setup OAI Repository
if provisional:
self.save_acl(c.id, collection_name)
else:
self.save_oairepository_set(provisional=provisional)
# Setup title, tabs and collection tree
self.save_collectionname(c, self.get_title(provisional=provisional))
self.save_collectiondetailedrecordpagetabs(c)
self.save_collectioncollection(
c,
cfg['COMMUNITIES_PARENT_NAME_PROVISIONAL']
if provisional else cfg['COMMUNITIES_PARENT_NAME']
)
        # Setup collection format if needed
if not provisional and cfg['COMMUNITIES_OUTPUTFORMAT']:
self.save_collectionformat(
c, cfg['COMMUNITIES_OUTPUTFORMAT'])
elif provisional and cfg['COMMUNITIES_OUTPUTFORMAT_PROVISIONAL']:
self.save_collectionformat(
c, cfg['COMMUNITIES_OUTPUTFORMAT_PROVISIONAL'])
# Setup portal boxes
self.save_collectionportalboxes(
c,
cfg['COMMUNITIES_PORTALBOXES_PROVISIONAL']
if provisional else cfg['COMMUNITIES_PORTALBOXES']
)
db.session.commit()
after_save_collection.send(self, collection=c, provisional=provisional)
def save_collections(self):
"""Create restricted and unrestricted collections."""
before_save_collections.send(self)
self.save_collection(provisional=False)
self.save_collection(provisional=True)
after_save_collections.send(self)
def delete_record_collection_identifiers(self):
"""Remove collection identifiers from all records."""
from invenio.legacy.search_engine import search_pattern
provisional_id = self.get_collection_name(provisional=True)
normal_id = self.get_collection_name(provisional=False)
def test_func(code, val):
return False
def replace_func(code, val):
return (code, val)
def include_func(code, val):
return not (code == 'a' and (
val == provisional_id or val == normal_id))
coll = []
for r in search_pattern(p="980__a:%s OR 980__a:%s" % (
normal_id, provisional_id)):
coll.append(
self._modify_record(r, test_func, replace_func, include_func)
)
self._upload_collection(coll)
def delete_collection(self, provisional=False):
"""Delete all objects related to a single collection."""
# Most of the logic in this method ought to be moved to a
# Collection.delete() method.
c = getattr(self, "collection_provisional"
if provisional else "collection")
collection_name = self.get_collection_name(provisional=provisional)
before_delete_collection.send(self, collection=c,
provisional=provisional)
if c:
# Delete portal boxes
for c_pbox in c.portalboxes:
if c_pbox.portalbox:
db.session.delete(c_pbox.portalbox)
db.session.delete(c_pbox)
db.session.commit()
# Delete output formats:
CollectionFormat.query.filter_by(id_collection=c.id).delete()
# Delete title, tabs, collection tree
Collectionname.query.filter_by(id_collection=c.id).delete()
CollectionCollection.query.filter_by(id_son=c.id).delete()
Collectiondetailedrecordpagetabs.query.filter_by(
id_collection=c.id).delete()
if provisional:
# Delete ACLs
AccARGUMENT.query.filter_by(keyword='collection',
value=collection_name).delete()
role = AccROLE.query.filter_by(name='coll_%s' % c.id).first()
if role:
UserAccROLE.query.filter_by(role=role).delete()
AccAuthorization.query.filter_by(role=role).delete()
db.session.delete(role)
else:
# Delete OAI repository
if self.oai_set:
db.session.delete(self.oai_set)
# Delete collection
if c:
db.session.delete(c)
db.session.commit()
after_delete_collection.send(self, provisional=provisional)
def delete_collections(self):
"""Delete collection and all associated objects."""
before_delete_collections.send(self)
self.delete_record_collection_identifiers()
self.delete_collection(provisional=False)
self.delete_collection(provisional=True)
after_delete_collections.send(self)
def __str__(self):
"""Return a string representation of an object."""
return self.id
def update_changed_fields(obj, fields):
"""Utility method to update fields on an object if they have changed.
    Will also report back whether any changes were made.
"""
dirty = False
for attr, newval in fields.items():
val = getattr(obj, attr)
if val != newval:
setattr(obj, attr, newval)
dirty = True
return dirty
def signalresult2list(extra_colls):
    """Convert a signal's (receiver, result) pairs into (append, replace) lists."""
    # Flatten the per-receiver results, skipping receivers that returned None.
    extra_colls = extra_colls or [(None, None)]
    replace = list(set(
        c for _, res in extra_colls if res for c in res.get('replace', [])))
    append = list(set(
        c for _, res in extra_colls if res for c in res.get('append', [])))
    return (append, replace)
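# Example of the conversion (hypothetical receivers): pre_curation.send()
# returns (receiver, result) pairs where each result is None or a dict like
# {'append': [...], 'replace': [...]}. For
#   [(r1, {'append': ['user-a']}), (r2, None), (r3, {'replace': ['user-b']})]
# signalresult2list returns (['user-a'], ['user-b']); duplicates are removed
# and ordering is unspecified because of the intermediate set().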
class FeaturedCommunity(db.Model):
"""Featured community representation."""
__tablename__ = 'communityFEATURED'
id = db.Column(db.Integer(15, unsigned=True), primary_key=True,
autoincrement=True)
"""Featured community identifier."""
id_community = db.Column(
db.String(100), db.ForeignKey(Community.id),
nullable=False
)
"""Specific community."""
start_date = db.Column(
db.DateTime(), nullable=False, default=datetime.now
)
"""The date from which it should start to take effect."""
community = db.relationship(Community,
backref="featuredcommunity")
"""Relation to the community."""
@classmethod
def get_current(cls, start_date=None):
"""Get the latest featured community."""
start_date = start_date or datetime.now()
return cls.query.options(db.joinedload_all(
'community.collection')).filter(
cls.start_date <= start_date).order_by(
cls.start_date.desc()).first()
|
rackerlabs/instrumented-ceilometer
|
refs/heads/master
|
ceilometer/storage/impl_log.py
|
1
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Author: Doug Hellmann <doug.hellmann@dreamhost.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Simple logging storage backend.
"""
from ceilometer.openstack.common import log
from ceilometer.storage import base
LOG = log.getLogger(__name__)
class LogStorage(base.StorageEngine):
"""Log the data
"""
def get_connection(self, conf):
"""Return a Connection instance based on the configuration settings.
"""
return Connection(conf)
class Connection(base.Connection):
"""Base class for storage system connections.
"""
def __init__(self, conf):
pass
def upgrade(self):
pass
def clear(self):
pass
def record_metering_data(self, data):
"""Write the data to the backend storage system.
:param data: a dictionary such as returned by
ceilometer.meter.meter_message_from_counter
"""
LOG.info(_('metering data %(counter_name)s for %(resource_id)s: '
'%(counter_volume)s')
% ({'counter_name': data['counter_name'],
'resource_id': data['resource_id'],
'counter_volume': data['counter_volume']}))
def clear_expired_metering_data(self, ttl):
"""Clear expired data from the backend storage system according to the
time-to-live.
:param ttl: Number of seconds to keep records for.
"""
LOG.info(_("Dropping data with TTL %d"), ttl)
def get_users(self, source=None):
"""Return an iterable of user id strings.
:param source: Optional source filter.
"""
return []
def get_projects(self, source=None):
"""Return an iterable of project id strings.
:param source: Optional source filter.
"""
return []
def get_resources(self, user=None, project=None, source=None,
start_timestamp=None, start_timestamp_op=None,
end_timestamp=None, end_timestamp_op=None,
metaquery={}, resource=None, pagination=None):
"""Return an iterable of dictionaries containing resource information.
{ 'resource_id': UUID of the resource,
'project_id': UUID of project owning the resource,
'user_id': UUID of user owning the resource,
'timestamp': UTC datetime of last update to the resource,
'metadata': most current metadata for the resource,
'meter': list of the meters reporting data for the resource,
}
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param source: Optional source filter.
:param start_timestamp: Optional modified timestamp start range.
:param start_timestamp_op: Optional start time operator, like gt, ge.
:param end_timestamp: Optional modified timestamp end range.
:param end_timestamp_op: Optional end time operator, like lt, le.
:param metaquery: Optional dict with metadata to match on.
:param resource: Optional resource filter.
:param pagination: Optional pagination query.
"""
return []
def get_meters(self, user=None, project=None, resource=None, source=None,
limit=None, metaquery={}, pagination=None):
"""Return an iterable of dictionaries containing meter information.
{ 'name': name of the meter,
        'type': type of the meter (gauge, counter),
'resource_id': UUID of the resource,
'project_id': UUID of project owning the resource,
'user_id': UUID of user owning the resource,
}
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param resource: Optional resource filter.
:param source: Optional source filter.
:param limit: Maximum number of results to return.
:param metaquery: Optional dict with metadata to match on.
:param pagination: Optional pagination query.
"""
return []
def get_samples(self, sample_filter):
"""Return an iterable of samples as created by
:func:`ceilometer.meter.meter_message_from_counter`.
"""
return []
def get_meter_statistics(self, sample_filter, period=None, groupby=None):
"""Return a dictionary containing meter statistics.
described by the query parameters.
The filter must have a meter value set.
{ 'min':
'max':
'avg':
'sum':
'count':
'period':
'period_start':
'period_end':
'duration':
'duration_start':
'duration_end':
}
"""
return []
def get_alarms(self, name=None, user=None,
project=None, enabled=None, alarm_id=None, pagination=None):
"""Yields a lists of alarms that match filters
"""
return []
def create_alarm(self, alarm):
"""Create alarm.
"""
return alarm
def update_alarm(self, alarm):
"""update alarm
"""
return alarm
def delete_alarm(self, alarm_id):
"""Delete a alarm
"""
def get_alarm_changes(self, alarm_id, on_behalf_of,
user=None, project=None, type=None,
start_timestamp=None, start_timestamp_op=None,
end_timestamp=None, end_timestamp_op=None):
"""Yields list of AlarmChanges describing alarm history
Changes are always sorted in reverse order of occurence, given
the importance of currency.
Segregation for non-administrative users is done on the basis
of the on_behalf_of parameter. This allows such users to have
visibility on both the changes initiated by themselves directly
(generally creation, rule changes, or deletion) and also on those
changes initiated on their behalf by the alarming service (state
transitions after alarm thresholds are crossed).
:param alarm_id: ID of alarm to return changes for
:param on_behalf_of: ID of tenant to scope changes query (None for
administrative user, indicating all projects)
:param user: Optional ID of user to return changes for
:param project: Optional ID of project to return changes for
        :param type: Optional change type
:param start_timestamp: Optional modified timestamp start range
:param start_timestamp_op: Optional timestamp start range operation
:param end_timestamp: Optional modified timestamp end range
:param end_timestamp_op: Optional timestamp end range operation
"""
raise NotImplementedError('Alarm history not implemented')
def record_alarm_change(self, alarm_change):
"""Record alarm change event.
"""
raise NotImplementedError('Alarm history not implemented')
def record_events(self, events):
"""Write the events.
:param events: a list of model.Event objects.
"""
raise NotImplementedError('Events not implemented.')
def get_events(self, event_filter):
"""Return an iterable of model.Event objects.
:param event_filter: EventFilter instance
"""
raise NotImplementedError('Events not implemented.')
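# Minimal usage sketch (assumes an oslo config object 'conf' is at hand):
#
#   conn = LogStorage().get_connection(conf)
#   conn.record_metering_data({'counter_name': 'cpu', 'resource_id': 'r-1',
#                              'counter_volume': 42})
#
# Nothing is persisted; the sample is only written to the service log.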
|
kalahbrown/HueBigSQL
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/extras/csrf_migration_helper.py
|
59
|
#!/usr/bin/env python
# This script aims to help developers locate forms and view code that needs to
# use the new CSRF protection in Django 1.2. It tries to find all the code that
# may need the steps described in the CSRF documentation. It does not modify
# any code directly, it merely attempts to locate it. Developers should be
# aware of its limitations, described below.
#
# For each template that contains at least one POST form, the following info is printed:
#
# <Absolute path to template>
# AKA: <Aliases (relative to template directory/directories that contain it)>
# POST forms: <Number of POST forms>
# With token: <Number of POST forms with the CSRF token already added>
# Without token:
# <File name and line number of form without token>
#
# Searching for:
# <Template names that need to be searched for in view code
# (includes templates that 'include' current template)>
#
# Found:
# <File name and line number of any view code found>
#
# The format used allows this script to be used in Emacs grep mode:
# M-x grep
# Run grep (like this): /path/to/my/virtualenv/python /path/to/django/src/extras/csrf_migration_helper.py --settings=mysettings /path/to/my/srcs
# Limitations
# ===========
#
# - All templates must be stored on disk in '.html' or '.htm' files.
# (extensions configurable below)
#
# - All Python code must be stored on disk in '.py' files. (extensions
# configurable below)
#
# - All templates must be accessible from TEMPLATE_DIRS or from the 'templates/'
# directory in apps specified in INSTALLED_APPS. Non-file based template
# loaders are out of the picture, because there is no way to ask them to
# return all templates.
#
# - It's impossible to programmatically determine which forms should and should
# not have the token added. The developer must decide when to do this,
# ensuring that the token is only added to internally targeted forms.
#
# - It's impossible to programmatically work out when a template is used. The
# attempts to trace back to view functions are guesses, and could easily fail
# in the following ways:
#
# * If the 'include' template tag is used with a variable
# i.e. {% include tname %} where tname is a variable containing the actual
# template name, rather than {% include "my_template.html" %}.
#
# * If the template name has been built up by view code instead of as a simple
# string. For example, generic views and the admin both do this. (These
# apps are both contrib and both use RequestContext already, as it happens).
#
# * If the 'ssl' tag (or any template tag other than 'include') is used to
# include the template in another template.
#
# - All templates belonging to apps referenced in INSTALLED_APPS will be
# searched, which may include third party apps or Django contrib. In some
# cases, this will be a good thing, because even if the templates of these
# apps have been fixed by someone else, your own view code may reference the
# same template and may need to be updated.
#
# You may, however, wish to comment out some entries in INSTALLED_APPS or
# TEMPLATE_DIRS before running this script.
# Improvements to this script are welcome!
# Configuration
# =============
TEMPLATE_EXTENSIONS = [
".html",
".htm",
]
PYTHON_SOURCE_EXTENSIONS = [
".py",
]
TEMPLATE_ENCODING = "UTF-8"
PYTHON_ENCODING = "UTF-8"
# Method
# ======
# Find templates:
# - template dirs
# - installed apps
#
# Search for POST forms
# - Work out what the name of the template is, as it would appear in an
# 'include' or get_template() call. This can be done by comparing template
# filename to all template dirs. Some templates can have more than one
# 'name' e.g. if a directory and one of its child directories are both in
# TEMPLATE_DIRS. This is actually a common hack used for
# overriding-and-extending admin templates.
#
# For each POST form,
# - see if it already contains '{% csrf_token %}' immediately after <form>
# - work back to the view function(s):
# - First, see if the form is included in any other templates, then
# recursively compile a list of affected templates.
# - Find any code function that references that template. This is just a
# brute force text search that can easily return false positives
# and fail to find real instances.
import os
import sys
import re
from optparse import OptionParser
USAGE = """
This tool helps to locate forms that need CSRF tokens added and the
corresponding view code. This processing is NOT foolproof, and you should read
the help contained in the script itself. Also, this script may need configuring
(by editing the script) before use.
Usage:
python csrf_migration_helper.py [--settings=path.to.your.settings] /path/to/python/code [more paths...]
Paths can be specified as relative paths.
With no arguments, this help is printed.
"""
_POST_FORM_RE = \
re.compile(r'(<form\W[^>]*\bmethod\s*=\s*(\'|"|)POST(\'|"|)\b[^>]*>)', re.IGNORECASE)
_FORM_CLOSE_RE = re.compile(r'</form\s*>')
_TOKEN_RE = re.compile('\{% csrf_token')
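# For example, _POST_FORM_RE matches '<form action="/x" method="POST">'
# (case-insensitively, with or without quotes around POST), _TOKEN_RE matches
# the opening of the literal '{% csrf_token %}' tag, and _FORM_CLOSE_RE
# matches '</form>'.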
def get_template_dirs():
"""
Returns a set of all directories that contain project templates.
"""
from django.conf import settings
dirs = set()
if ('django.template.loaders.filesystem.load_template_source' in settings.TEMPLATE_LOADERS
or 'django.template.loaders.filesystem.Loader' in settings.TEMPLATE_LOADERS):
dirs.update(map(unicode, settings.TEMPLATE_DIRS))
if ('django.template.loaders.app_directories.load_template_source' in settings.TEMPLATE_LOADERS
or 'django.template.loaders.app_directories.Loader' in settings.TEMPLATE_LOADERS):
from django.template.loaders.app_directories import app_template_dirs
dirs.update(app_template_dirs)
return dirs
def make_template_info(filename, root_dirs):
"""
Creates a Template object for a filename, calculating the possible
relative_filenames from the supplied filename and root template directories
"""
return Template(filename,
[filename[len(d)+1:] for d in root_dirs if filename.startswith(d)])
class Template(object):
def __init__(self, absolute_filename, relative_filenames):
self.absolute_filename, self.relative_filenames = absolute_filename, relative_filenames
def content(self):
try:
return self._content
except AttributeError:
with open(self.absolute_filename) as fd:
try:
content = fd.read().decode(TEMPLATE_ENCODING)
except UnicodeDecodeError as e:
message = '%s in %s' % (
e[4], self.absolute_filename.encode('UTF-8', 'ignore'))
raise UnicodeDecodeError(*(e.args[:4] + (message,)))
self._content = content
return content
content = property(content)
def post_form_info(self):
"""
Get information about any POST forms in the template.
Returns [(linenumber, csrf_token added)]
"""
forms = {}
form_line = 0
for ln, line in enumerate(self.content.split("\n")):
if not form_line and _POST_FORM_RE.search(line):
# record the form with no CSRF token yet
form_line = ln + 1
forms[form_line] = False
if form_line and _TOKEN_RE.search(line):
# found the CSRF token
forms[form_line] = True
form_line = 0
if form_line and _FORM_CLOSE_RE.search(line):
# no token found by form closing tag
form_line = 0
return forms.items()
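    # For instance, a template containing
    #     <form method="POST">{% csrf_token %}...</form>   on line 3 and
    #     <form method="POST">...</form>                   on line 7
    # yields [(3, True), (7, False)] (as dict items, so order is not
    # guaranteed).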
def includes_template(self, t):
"""
Returns true if this template includes template 't' (via {% include %})
"""
for r in t.relative_filenames:
if re.search(r'\{%\s*include\s+(\'|")' + re.escape(r) + r'(\1)\s*%\}', self.content):
return True
return False
def related_templates(self):
"""
Returns all templates that include this one, recursively. (starting
with this one)
"""
try:
return self._related_templates
except AttributeError:
pass
retval = set([self])
for t in self.all_templates:
if t.includes_template(self):
# If two templates mutually include each other, directly or
# indirectly, we have a problem here...
retval = retval.union(t.related_templates())
self._related_templates = retval
return retval
def __repr__(self):
return repr(self.absolute_filename)
def __eq__(self, other):
return self.absolute_filename == other.absolute_filename
def __hash__(self):
return hash(self.absolute_filename)
def get_templates(dirs):
"""
Returns all files in dirs that have template extensions, as Template
objects.
"""
templates = set()
for root in dirs:
for (dirpath, dirnames, filenames) in os.walk(root):
for f in filenames:
if len([True for e in TEMPLATE_EXTENSIONS if f.endswith(e)]) > 0:
t = make_template_info(os.path.join(dirpath, f), dirs)
# templates need to be able to search others:
t.all_templates = templates
templates.add(t)
return templates
def get_python_code(paths):
"""
Returns all Python code, as a list of tuples, each one being:
(filename, list of lines)
"""
retval = []
for p in paths:
if not os.path.isdir(p):
raise Exception("'%s' is not a directory." % p)
for (dirpath, dirnames, filenames) in os.walk(p):
for f in filenames:
if len([True for e in PYTHON_SOURCE_EXTENSIONS if f.endswith(e)]) > 0:
fn = os.path.join(dirpath, f)
with open(fn) as fd:
content = [l.decode(PYTHON_ENCODING) for l in fd.readlines()]
retval.append((fn, content))
return retval
def search_python_list(python_code, template_names):
"""
Searches python code for a list of template names.
Returns a list of tuples, each one being:
(filename, line number)
"""
retval = set()
for tn in template_names:
retval.update(search_python(python_code, tn))
return sorted(retval)
def search_python(python_code, template_name):
"""
Searches Python code for a template name.
Returns a list of tuples, each one being:
(filename, line number)
"""
retval = []
for fn, content in python_code:
for ln, line in enumerate(content):
if ((u'"%s"' % template_name) in line) or \
((u"'%s'" % template_name) in line):
retval.append((fn, ln + 1))
return retval
def main(pythonpaths):
template_dirs = get_template_dirs()
templates = get_templates(template_dirs)
python_code = get_python_code(pythonpaths)
for t in templates:
# Logic
form_matches = t.post_form_info()
num_post_forms = len(form_matches)
form_lines_without_token = [ln for (ln, has_token) in form_matches if not has_token]
if num_post_forms == 0:
continue
to_search = [rf for rt in t.related_templates() for rf in rt.relative_filenames]
found = search_python_list(python_code, to_search)
# Display:
print(t.absolute_filename)
for r in t.relative_filenames:
print(" AKA %s" % r)
print(" POST forms: %s" % num_post_forms)
print(" With token: %s" % (num_post_forms - len(form_lines_without_token)))
if form_lines_without_token:
print(" Without token:")
for ln in form_lines_without_token:
print("%s:%d:" % (t.absolute_filename, ln))
print('')
print(" Searching for:")
for r in to_search:
print(" " + r)
print('')
print(" Found:")
if len(found) == 0:
print(" Nothing")
else:
for fn, ln in found:
print("%s:%d:" % (fn, ln))
print('')
print("----")
parser = OptionParser(usage=USAGE)
parser.add_option("", "--settings", action="store", dest="settings", help="Dotted path to settings file")
if __name__ == '__main__':
options, args = parser.parse_args()
if len(args) == 0:
parser.print_help()
sys.exit(1)
settings = getattr(options, 'settings', None)
if settings is None:
if os.environ.get("DJANGO_SETTINGS_MODULE", None) is None:
print("You need to set DJANGO_SETTINGS_MODULE or use the '--settings' parameter")
sys.exit(1)
else:
os.environ["DJANGO_SETTINGS_MODULE"] = settings
main(args)
|
cloudbase/nova-virtualbox
|
refs/heads/virtualbox_driver
|
nova/tests/unit/scheduler/filters/test_affinity_filters.py
|
4
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from nova.scheduler.filters import affinity_filter
from nova import test
from nova.tests.unit.scheduler import fakes
CONF = cfg.CONF
CONF.import_opt('my_ip', 'nova.netconf')
@mock.patch('nova.compute.api.API.get_all')
class TestDifferentHostFilter(test.NoDBTestCase):
def setUp(self):
super(TestDifferentHostFilter, self).setUp()
self.filt_cls = affinity_filter.DifferentHostFilter()
def test_affinity_different_filter_passes(self, get_all_mock):
host = fakes.FakeHostState('host1', 'node1', {})
get_all_mock.return_value = []
filter_properties = {'context': mock.sentinel.ctx,
'scheduler_hints': {
'different_host': ['fake'], }}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
get_all_mock.assert_called_once_with(mock.sentinel.ctx,
{'host': 'host1',
'uuid': ['fake'],
'deleted': False})
def test_affinity_different_filter_no_list_passes(self, get_all_mock):
host = fakes.FakeHostState('host1', 'node1', {})
get_all_mock.return_value = []
filter_properties = {'context': mock.sentinel.ctx,
'scheduler_hints': {
'different_host': 'fake'}}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
get_all_mock.assert_called_once_with(mock.sentinel.ctx,
{'host': 'host1',
'uuid': ['fake'],
'deleted': False})
def test_affinity_different_filter_fails(self, get_all_mock):
host = fakes.FakeHostState('host1', 'node1', {})
get_all_mock.return_value = [mock.sentinel.instances]
filter_properties = {'context': mock.sentinel.ctx,
'scheduler_hints': {
'different_host': ['fake'], }}
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
get_all_mock.assert_called_once_with(mock.sentinel.ctx,
{'host': 'host1',
'uuid': ['fake'],
'deleted': False})
def test_affinity_different_filter_handles_none(self, get_all_mock):
host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {'context': mock.sentinel.ctx,
'scheduler_hints': None}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
self.assertFalse(get_all_mock.called)
@mock.patch('nova.compute.api.API.get_all')
class TestSameHostFilter(test.NoDBTestCase):
def setUp(self):
super(TestSameHostFilter, self).setUp()
self.filt_cls = affinity_filter.SameHostFilter()
def test_affinity_same_filter_passes(self, get_all_mock):
host = fakes.FakeHostState('host1', 'node1', {})
get_all_mock.return_value = [mock.sentinel.images]
filter_properties = {'context': mock.sentinel.ctx,
'scheduler_hints': {
'same_host': ['fake'], }}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
get_all_mock.assert_called_once_with(mock.sentinel.ctx,
{'host': 'host1',
'uuid': ['fake'],
'deleted': False})
def test_affinity_same_filter_no_list_passes(self, get_all_mock):
host = fakes.FakeHostState('host1', 'node1', {})
get_all_mock.return_value = [mock.sentinel.images]
filter_properties = {'context': mock.sentinel.ctx,
'scheduler_hints': {
'same_host': 'fake'}}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
get_all_mock.assert_called_once_with(mock.sentinel.ctx,
{'host': 'host1',
'uuid': ['fake'],
'deleted': False})
def test_affinity_same_filter_fails(self, get_all_mock):
host = fakes.FakeHostState('host1', 'node1', {})
get_all_mock.return_value = []
filter_properties = {'context': mock.sentinel.ctx,
'scheduler_hints': {
'same_host': ['fake'], }}
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
get_all_mock.assert_called_once_with(mock.sentinel.ctx,
{'host': 'host1',
'uuid': ['fake'],
'deleted': False})
def test_affinity_same_filter_handles_none(self, get_all_mock):
host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {'context': mock.sentinel.ctx,
'scheduler_hints': None}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
self.assertFalse(get_all_mock.called)
class TestSimpleCIDRAffinityFilter(test.NoDBTestCase):
def setUp(self):
super(TestSimpleCIDRAffinityFilter, self).setUp()
self.filt_cls = affinity_filter.SimpleCIDRAffinityFilter()
def test_affinity_simple_cidr_filter_passes(self):
host = fakes.FakeHostState('host1', 'node1', {})
host.host_ip = '10.8.1.1'
affinity_ip = "10.8.1.100"
filter_properties = {'context': mock.sentinel.ctx,
'scheduler_hints': {
'cidr': '/24',
'build_near_host_ip': affinity_ip}}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_affinity_simple_cidr_filter_fails(self):
host = fakes.FakeHostState('host1', 'node1', {})
host.host_ip = '10.8.1.1'
affinity_ip = "10.8.1.100"
filter_properties = {'context': mock.sentinel.ctx,
'scheduler_hints': {
'cidr': '/32',
'build_near_host_ip': affinity_ip}}
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
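    # The two cases above differ only in the hint's prefix length: with
    # build_near_host_ip=10.8.1.100, a '/24' network (10.8.1.0/24) contains
    # the host's 10.8.1.1, while '/32' matches 10.8.1.100 alone.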
def test_affinity_simple_cidr_filter_handles_none(self):
host = fakes.FakeHostState('host1', 'node1', {})
affinity_ip = CONF.my_ip.split('.')[0:3]
affinity_ip.append('100')
affinity_ip = str.join('.', affinity_ip)
filter_properties = {'context': mock.sentinel.ctx,
'scheduler_hints': None}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
class TestGroupAffinityFilter(test.NoDBTestCase):
def _test_group_anti_affinity_filter_passes(self, filt_cls, policy):
host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
filter_properties = {'group_policies': ['affinity']}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
filter_properties = {'group_policies': [policy]}
filter_properties['group_hosts'] = []
self.assertTrue(filt_cls.host_passes(host, filter_properties))
filter_properties['group_hosts'] = ['host2']
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_group_anti_affinity_filter_passes(self):
self._test_group_anti_affinity_filter_passes(
affinity_filter.ServerGroupAntiAffinityFilter(),
'anti-affinity')
def _test_group_anti_affinity_filter_fails(self, filt_cls, policy):
host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {'group_policies': [policy],
'group_hosts': ['host1']}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_group_anti_affinity_filter_fails(self):
self._test_group_anti_affinity_filter_fails(
affinity_filter.ServerGroupAntiAffinityFilter(),
'anti-affinity')
def _test_group_affinity_filter_passes(self, filt_cls, policy):
host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
filter_properties = {'group_policies': ['anti-affinity']}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
filter_properties = {'group_policies': ['affinity'],
'group_hosts': ['host1']}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_group_affinity_filter_passes(self):
self._test_group_affinity_filter_passes(
affinity_filter.ServerGroupAffinityFilter(), 'affinity')
def _test_group_affinity_filter_fails(self, filt_cls, policy):
host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {'group_policies': [policy],
'group_hosts': ['host2']}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_group_affinity_filter_fails(self):
self._test_group_affinity_filter_fails(
affinity_filter.ServerGroupAffinityFilter(), 'affinity')
|
ZazieTheBeast/oscar
|
refs/heads/master
|
src/oscar/apps/dashboard/pages/config.py
|
58
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class PagesDashboardConfig(AppConfig):
label = 'pages_dashboard'
name = 'oscar.apps.dashboard.pages'
verbose_name = _('Pages dashboard')
|
watsonyanghx/CS231n
|
refs/heads/master
|
assignment3/cs231n/im2col.py
|
53
|
import numpy as np
def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1):
# First figure out what the size of the output should be
N, C, H, W = x_shape
    assert (H + 2 * padding - field_height) % stride == 0
    assert (W + 2 * padding - field_width) % stride == 0
    out_height = (H + 2 * padding - field_height) // stride + 1
    out_width = (W + 2 * padding - field_width) // stride + 1
i0 = np.repeat(np.arange(field_height), field_width)
i0 = np.tile(i0, C)
i1 = stride * np.repeat(np.arange(out_height), out_width)
j0 = np.tile(np.arange(field_width), field_height * C)
j1 = stride * np.tile(np.arange(out_width), out_height)
i = i0.reshape(-1, 1) + i1.reshape(1, -1)
j = j0.reshape(-1, 1) + j1.reshape(1, -1)
k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)
return (k, i, j)
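# Shape sketch: for x_shape = (N, C, H, W) = (2, 3, 4, 4) with a 3x3 field,
# padding=1 and stride=1, out_height = out_width = 4, k has shape (27, 1)
# and i, j have shape (27, 16); broadcasting them together indexes every
# 3x3 receptive field of the padded input.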
def im2col_indices(x, field_height, field_width, padding=1, stride=1):
""" An implementation of im2col based on some fancy indexing """
# Zero-pad the input
p = padding
x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
k, i, j = get_im2col_indices(x.shape, field_height, field_width, padding,
stride)
cols = x_padded[:, k, i, j]
C = x.shape[1]
cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)
return cols
def col2im_indices(cols, x_shape, field_height=3, field_width=3, padding=1,
stride=1):
""" An implementation of col2im based on fancy indexing and np.add.at """
N, C, H, W = x_shape
H_padded, W_padded = H + 2 * padding, W + 2 * padding
x_padded = np.zeros((N, C, H_padded, W_padded), dtype=cols.dtype)
k, i, j = get_im2col_indices(x_shape, field_height, field_width, padding,
stride)
cols_reshaped = cols.reshape(C * field_height * field_width, -1, N)
cols_reshaped = cols_reshaped.transpose(2, 0, 1)
np.add.at(x_padded, (slice(None), k, i, j), cols_reshaped)
if padding == 0:
return x_padded
return x_padded[:, :, padding:-padding, padding:-padding]
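# Note: col2im_indices is the adjoint of im2col_indices rather than an exact
# inverse -- np.add.at sums the contributions of overlapping receptive
# fields, which is exactly what the backward pass of a convolution needs.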
|
tijko/Project-Euler
|
refs/heads/master
|
py_solutions_61-70/Euler_69.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Euler's Totient Function
from __future__ import print_function, division
import math
import timeit
try:
range = xrange
except NameError:
pass
def is_prime(x):
if x == 2:
return True
if x % 2 == 0 or x == 1:
return False
for i in range(3, int(math.sqrt(x)) + 1, 2):
if x % i == 0:
return False
return True
def build_primes_list(limit):
return list(filter(is_prime, range(2, limit)))
def calculate_phi(phi, primes):
base = phi
for v in primes:
if base % v == 0:
phi *= (1 - (1 / v))
return phi
def euler_69():
high, high_phi = 0, 0
limit = 1000001
primes_list = build_primes_list(int(math.sqrt(limit)) + 1)
for i in range(2, limit):
prime_factor = int(math.sqrt(i))
phi = calculate_phi(i, primes_list[:prime_factor])
current = i / phi
if current > high_phi:
high_phi, high = (current, i)
return high
if __name__ == '__main__':
start = timeit.default_timer()
print('Answer: {}'.format(euler_69()))
stop = timeit.default_timer()
print('Time: {0:9.5f}'.format(stop - start))
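# Sanity check: n/phi(n) is maximised by the product of the smallest primes,
# since each extra distinct prime p multiplies the ratio by p/(p-1); for the
# 10**6 limit that product is 2*3*5*7*11*13*17 = 510510.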
|
nan86150/ImageFusion
|
refs/heads/master
|
lib/python2.7/site-packages/pip/utils/__init__.py
|
186
|
from __future__ import absolute_import
import contextlib
import errno
import locale
import logging
import re
import os
import posixpath
import shutil
import stat
import subprocess
import sys
import tarfile
import zipfile
from pip.exceptions import InstallationError
from pip.compat import console_to_str, stdlib_pkgs
from pip.locations import (
site_packages, user_site, running_under_virtualenv, virtualenv_no_global,
write_delete_marker_file,
)
from pip._vendor import pkg_resources
from pip._vendor.six.moves import input
from pip._vendor.six import PY2
from pip._vendor.retrying import retry
if PY2:
from io import BytesIO as StringIO
else:
from io import StringIO
__all__ = ['rmtree', 'display_path', 'backup_dir',
'ask', 'Inf', 'normalize_name', 'splitext',
'format_size', 'is_installable_dir',
'is_svn_page', 'file_contents',
'split_leading_dir', 'has_leading_dir',
'make_path_relative', 'normalize_path',
'renames', 'get_terminal_size', 'get_prog',
'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess',
'captured_stdout', 'remove_tracebacks', 'ensure_dir',
'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS',
'get_installed_version']
logger = logging.getLogger(__name__)
BZ2_EXTENSIONS = ('.tar.bz2', '.tbz')
ZIP_EXTENSIONS = ('.zip', '.whl')
TAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar')
ARCHIVE_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS
try:
import bz2 # noqa
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS
except ImportError:
logger.debug('bz2 module is not available')
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
def import_or_raise(pkg_or_module_string, ExceptionType, *args, **kwargs):
try:
return __import__(pkg_or_module_string)
except ImportError:
raise ExceptionType(*args, **kwargs)
def ensure_dir(path):
"""os.path.makedirs without EEXIST."""
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def get_prog():
try:
if os.path.basename(sys.argv[0]) in ('__main__.py', '-c'):
return "%s -m pip" % sys.executable
except (AttributeError, TypeError, IndexError):
pass
return 'pip'
# Retry every half second for up to 3 seconds
@retry(stop_max_delay=3000, wait_fixed=500)
def rmtree(dir, ignore_errors=False):
shutil.rmtree(dir, ignore_errors=ignore_errors,
onerror=rmtree_errorhandler)
def rmtree_errorhandler(func, path, exc_info):
"""On Windows, the files in .svn are read-only, so when rmtree() tries to
remove them, an exception is thrown. We catch that here, remove the
read-only attribute, and hopefully continue without problems."""
# if file type currently read only
if os.stat(path).st_mode & stat.S_IREAD:
# convert to read/write
os.chmod(path, stat.S_IWRITE)
# use the original function to repeat the operation
func(path)
return
else:
raise
def display_path(path):
"""Gives the display value for a given path, making it relative to cwd
if possible."""
path = os.path.normcase(os.path.abspath(path))
if sys.version_info[0] == 2:
path = path.decode(sys.getfilesystemencoding(), 'replace')
path = path.encode(sys.getdefaultencoding(), 'replace')
if path.startswith(os.getcwd() + os.path.sep):
path = '.' + path[len(os.getcwd()):]
return path
def backup_dir(dir, ext='.bak'):
"""Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc)"""
n = 1
extension = ext
while os.path.exists(dir + extension):
n += 1
extension = ext + str(n)
return dir + extension
def ask_path_exists(message, options):
for action in os.environ.get('PIP_EXISTS_ACTION', '').split():
if action in options:
return action
return ask(message, options)
def ask(message, options):
"""Ask the message interactively, with the given possible responses"""
while 1:
if os.environ.get('PIP_NO_INPUT'):
raise Exception(
'No input was expected ($PIP_NO_INPUT set); question: %s' %
message
)
response = input(message)
response = response.strip().lower()
if response not in options:
print(
'Your response (%r) was not one of the expected responses: '
'%s' % (response, ', '.join(options))
)
else:
return response
class _Inf(object):
"""I am bigger than everything!"""
def __eq__(self, other):
if self is other:
return True
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __repr__(self):
return 'Inf'
Inf = _Inf() # this object is not currently used as a sortable in our code
del _Inf
_normalize_re = re.compile(r'[^a-z]', re.I)
def normalize_name(name):
return _normalize_re.sub('-', name.lower())
def format_size(bytes):
if bytes > 1000 * 1000:
return '%.1fMB' % (bytes / 1000.0 / 1000)
elif bytes > 10 * 1000:
return '%ikB' % (bytes / 1000)
elif bytes > 1000:
return '%.1fkB' % (bytes / 1000.0)
else:
return '%ibytes' % bytes
def is_installable_dir(path):
"""Return True if `path` is a directory containing a setup.py file."""
if not os.path.isdir(path):
return False
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
return True
return False
def is_svn_page(html):
"""
Returns true if the page appears to be the index page of an svn repository
"""
return (re.search(r'<title>[^<]*Revision \d+:', html) and
re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I))
def file_contents(filename):
with open(filename, 'rb') as fp:
return fp.read().decode('utf-8')
def split_leading_dir(path):
path = str(path)
path = path.lstrip('/').lstrip('\\')
if '/' in path and (('\\' in path and path.find('/') < path.find('\\')) or
'\\' not in path):
return path.split('/', 1)
elif '\\' in path:
return path.split('\\', 1)
else:
return path, ''
def has_leading_dir(paths):
"""Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)"""
common_prefix = None
for path in paths:
prefix, rest = split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
def make_path_relative(path, rel_to):
"""
    Make a path, including its trailing filename, relative to the
    directory rel_to
>>> make_path_relative('/usr/share/something/a-file.pth',
... '/usr/share/another-place/src/Directory')
'../../../something/a-file.pth'
>>> make_path_relative('/usr/share/something/a-file.pth',
... '/home/user/src/Directory')
'../../../usr/share/something/a-file.pth'
>>> make_path_relative('/usr/share/a-file.pth', '/usr/share/')
'a-file.pth'
"""
path_filename = os.path.basename(path)
path = os.path.dirname(path)
path = os.path.normpath(os.path.abspath(path))
rel_to = os.path.normpath(os.path.abspath(rel_to))
path_parts = path.strip(os.path.sep).split(os.path.sep)
rel_to_parts = rel_to.strip(os.path.sep).split(os.path.sep)
while path_parts and rel_to_parts and path_parts[0] == rel_to_parts[0]:
path_parts.pop(0)
rel_to_parts.pop(0)
full_parts = ['..'] * len(rel_to_parts) + path_parts + [path_filename]
if full_parts == ['']:
return '.' + os.path.sep
return os.path.sep.join(full_parts)
def normalize_path(path, resolve_symlinks=True):
"""
Convert a path to its canonical, case-normalized, absolute version.
"""
path = os.path.expanduser(path)
if resolve_symlinks:
path = os.path.realpath(path)
else:
path = os.path.abspath(path)
return os.path.normcase(path)
def splitext(path):
"""Like os.path.splitext, but take off .tar too"""
base, ext = posixpath.splitext(path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
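# e.g. splitext('pkg-1.0.tar.gz') returns ('pkg-1.0', '.tar.gz'), whereas
# posixpath.splitext alone would give ('pkg-1.0.tar', '.gz').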
def renames(old, new):
"""Like os.renames(), but handles renaming across devices."""
# Implementation borrowed from os.renames().
head, tail = os.path.split(new)
if head and tail and not os.path.exists(head):
os.makedirs(head)
shutil.move(old, new)
head, tail = os.path.split(old)
if head and tail:
try:
os.removedirs(head)
except OSError:
pass
def is_local(path):
"""
Return True if path is within sys.prefix, if we're running in a virtualenv.
If we're not in a virtualenv, all paths are considered "local."
"""
if not running_under_virtualenv():
return True
return normalize_path(path).startswith(normalize_path(sys.prefix))
def dist_is_local(dist):
"""
Return True if given Distribution object is installed locally
(i.e. within current virtualenv).
Always True if we're not in a virtualenv.
"""
return is_local(dist_location(dist))
def dist_in_usersite(dist):
"""
Return True if given Distribution is installed in user site.
"""
norm_path = normalize_path(dist_location(dist))
return norm_path.startswith(normalize_path(user_site))
def dist_in_site_packages(dist):
"""
Return True if given Distribution is installed in
distutils.sysconfig.get_python_lib().
"""
return normalize_path(
dist_location(dist)
).startswith(normalize_path(site_packages))
def dist_is_editable(dist):
"""Is distribution an editable install?"""
# TODO: factor out determining editableness out of FrozenRequirement
from pip import FrozenRequirement
req = FrozenRequirement.from_dist(dist, [])
return req.editable
def get_installed_distributions(local_only=True,
skip=stdlib_pkgs,
include_editables=True,
editables_only=False,
user_only=False):
"""
Return a list of installed Distribution objects.
If ``local_only`` is True (default), only return installations
local to the current virtualenv, if in a virtualenv.
``skip`` argument is an iterable of lower-case project names to
ignore; defaults to stdlib_pkgs
If ``editables`` is False, don't report editables.
If ``editables_only`` is True , only report editables.
If ``user_only`` is True , only report installations in the user
site directory.
"""
if local_only:
local_test = dist_is_local
else:
def local_test(d):
return True
if include_editables:
def editable_test(d):
return True
else:
def editable_test(d):
return not dist_is_editable(d)
if editables_only:
def editables_only_test(d):
return dist_is_editable(d)
else:
def editables_only_test(d):
return True
if user_only:
user_test = dist_in_usersite
else:
def user_test(d):
return True
return [d for d in pkg_resources.working_set
if local_test(d) and
d.key not in skip and
editable_test(d) and
editables_only_test(d) and
user_test(d)
]
def egg_link_path(dist):
"""
Return the path for the .egg-link file if it exists, otherwise, None.
    There are 3 scenarios:
1) not in a virtualenv
try to find in site.USER_SITE, then site_packages
2) in a no-global virtualenv
try to find in site_packages
3) in a yes-global virtualenv
try to find in site_packages, then site.USER_SITE
(don't look in global location)
For #1 and #3, there could be odd cases, where there's an egg-link in 2
locations.
This method will just return the first one found.
"""
sites = []
if running_under_virtualenv():
if virtualenv_no_global():
sites.append(site_packages)
else:
sites.append(site_packages)
if user_site:
sites.append(user_site)
else:
if user_site:
sites.append(user_site)
sites.append(site_packages)
for site in sites:
egglink = os.path.join(site, dist.project_name) + '.egg-link'
if os.path.isfile(egglink):
return egglink
def dist_location(dist):
"""
Get the site-packages location of this distribution. Generally
this is dist.location, except in the case of develop-installed
packages, where dist.location is the source code location, and we
want to know where the egg-link file is.
"""
egg_link = egg_link_path(dist)
if egg_link:
return egg_link
return dist.location
def get_terminal_size():
"""Returns a tuple (x, y) representing the width(x) and the height(x)
in characters of the terminal window."""
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
import struct
cr = struct.unpack(
'hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')
)
except:
return None
if cr == (0, 0):
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
return int(cr[1]), int(cr[0])
def current_umask():
"""Get the current umask which involves having to set it temporarily."""
mask = os.umask(0)
os.umask(mask)
return mask
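# With a typical umask of 0o022, the archive extractors below mark executable
# members with os.chmod(fn, (0o777 - 0o022) | 0o111), i.e. mode 0o755.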
def unzip_file(filename, location, flatten=True):
"""
Unzip the file (with path `filename`) to the destination `location`. All
files are written based on system defaults and umask (i.e. permissions are
not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
zipfp = open(filename, 'rb')
try:
zip = zipfile.ZipFile(zipfp, allowZip64=True)
leading = has_leading_dir(zip.namelist()) and flatten
for info in zip.infolist():
name = info.filename
data = zip.read(name)
fn = name
if leading:
fn = split_leading_dir(name)[1]
fn = os.path.join(location, fn)
dir = os.path.dirname(fn)
if fn.endswith('/') or fn.endswith('\\'):
# A directory
ensure_dir(fn)
else:
ensure_dir(dir)
fp = open(fn, 'wb')
try:
fp.write(data)
finally:
fp.close()
mode = info.external_attr >> 16
# if mode and regular file and any execute permissions for
# user/group/world?
if mode and stat.S_ISREG(mode) and mode & 0o111:
# make dest file have execute for user/group/world
# (chmod +x) no-op on windows per python docs
os.chmod(fn, (0o777 - current_umask() | 0o111))
finally:
zipfp.close()
def untar_file(filename, location):
"""
Untar the file (with path `filename`) to the destination `location`.
All files are written based on system defaults and umask (i.e. permissions
are not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
    written. Note that for Windows, any execute changes using os.chmod are
    no-ops per the Python docs.
"""
ensure_dir(location)
if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
mode = 'r:gz'
elif filename.lower().endswith(BZ2_EXTENSIONS):
mode = 'r:bz2'
elif filename.lower().endswith('.tar'):
mode = 'r'
else:
logger.warning(
'Cannot determine compression type for file %s', filename,
)
mode = 'r:*'
tar = tarfile.open(filename, mode)
try:
# note: python<=2.5 doesn't seem to know about pax headers, filter them
leading = has_leading_dir([
member.name for member in tar.getmembers()
if member.name != 'pax_global_header'
])
for member in tar.getmembers():
fn = member.name
if fn == 'pax_global_header':
continue
if leading:
fn = split_leading_dir(fn)[1]
path = os.path.join(location, fn)
if member.isdir():
ensure_dir(path)
elif member.issym():
try:
tar._extract_member(member, path)
except Exception as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
else:
try:
fp = tar.extractfile(member)
except (KeyError, AttributeError) as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
ensure_dir(os.path.dirname(path))
destfp = open(path, 'wb')
try:
shutil.copyfileobj(fp, destfp)
finally:
destfp.close()
fp.close()
                # does the member have any execute permissions for
                # user/group/world?
if member.mode & 0o111:
# make dest file have execute for user/group/world
# no-op on windows per python docs
os.chmod(path, (0o777 - current_umask() | 0o111))
finally:
tar.close()
def unpack_file(filename, location, content_type, link):
filename = os.path.realpath(filename)
if (content_type == 'application/zip' or
filename.lower().endswith(ZIP_EXTENSIONS) or
zipfile.is_zipfile(filename)):
unzip_file(
filename,
location,
flatten=not filename.endswith('.whl')
)
elif (content_type == 'application/x-gzip' or
tarfile.is_tarfile(filename) or
filename.lower().endswith(TAR_EXTENSIONS + BZ2_EXTENSIONS)):
untar_file(filename, location)
elif (content_type and content_type.startswith('text/html') and
is_svn_page(file_contents(filename))):
# We don't really care about this
from pip.vcs.subversion import Subversion
Subversion('svn+' + link.url).unpack(location)
else:
# FIXME: handle?
# FIXME: magic signatures?
logger.critical(
'Cannot unpack file %s (downloaded from %s, content-type: %s); '
'cannot detect archive format',
filename, location, content_type,
)
raise InstallationError(
'Cannot determine archive format of %s' % location
)
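# Hedged usage sketch: unpack a downloaded tarball into a build
# directory. The ``link`` argument is only consulted by the svn branch,
# so None is fine for plain archives; the paths are illustrative.
def _demo_unpack(archive_path='/tmp/pkg.tar.gz', dest_dir='/tmp/build'):
    unpack_file(archive_path, dest_dir,
                content_type='application/x-gzip', link=None)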
def remove_tracebacks(output):
pattern = (r'(?:\W+File "(?:.*)", line (?:.*)\W+(?:.*)\W+\^\W+)?'
r'Syntax(?:Error|Warning): (?:.*)')
output = re.sub(pattern, '', output)
if PY2:
return output
# compileall.compile_dir() prints different messages to stdout
# in Python 3
return re.sub(r"\*\*\* Error compiling (?:.*)", '', output)
def call_subprocess(cmd, show_stdout=True, cwd=None,
raise_on_returncode=True,
command_level=logging.DEBUG, command_desc=None,
extra_environ=None):
if command_desc is None:
cmd_parts = []
for part in cmd:
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
cmd_parts.append(part)
command_desc = ' '.join(cmd_parts)
if show_stdout:
stdout = None
else:
stdout = subprocess.PIPE
logger.log(command_level, "Running command %s", command_desc)
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
cwd=cwd, env=env)
except Exception as exc:
logger.critical(
"Error %s while executing command %s", exc, command_desc,
)
raise
all_output = []
if stdout is not None:
while True:
line = console_to_str(proc.stdout.readline())
if not line:
break
line = line.rstrip()
all_output.append(line + '\n')
logger.debug(line)
if not all_output:
returned_stdout, returned_stderr = proc.communicate()
all_output = [returned_stdout or '']
proc.wait()
if proc.returncode:
if raise_on_returncode:
if all_output:
logger.info(
'Complete output from command %s:', command_desc,
)
logger.info(
''.join(all_output) +
'\n----------------------------------------'
)
raise InstallationError(
'Command "%s" failed with error code %s in %s'
% (command_desc, proc.returncode, cwd))
else:
logger.warning(
'Command "%s" had error code %s in %s',
command_desc, proc.returncode, cwd,
)
if stdout is not None:
return remove_tracebacks(''.join(all_output))
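# Usage sketch: run a command quietly and capture its cleaned output.
# With show_stdout=False the function returns stdout (with compile
# tracebacks stripped) and raises InstallationError on a non-zero exit.
def _demo_call_subprocess():
    output = call_subprocess([sys.executable, '-c', 'print("ok")'],
                             show_stdout=False)
    print(output)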
def read_text_file(filename):
"""Return the contents of *filename*.
Try to decode the file contents with utf-8, the preferred system encoding
(e.g., cp1252 on some Windows machines), and latin1, in that order.
Decoding a byte string with latin1 will never raise an error. In the worst
case, the returned string will contain some garbage characters.
"""
with open(filename, 'rb') as fp:
data = fp.read()
encodings = ['utf-8', locale.getpreferredencoding(False), 'latin1']
for enc in encodings:
try:
data = data.decode(enc)
except UnicodeDecodeError:
continue
break
assert type(data) != bytes # Latin1 should have worked.
return data
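# Decoding-order sketch: latin1 maps every byte to a code point, so the
# loop above always terminates with text (possibly with mojibake), and
# the assertion can never fire.
def _demo_read_text_file():
    raw = b'caf\xe9'             # not valid UTF-8
    text = raw.decode('latin1')  # the latin1 fallback never raises
    print(len(text))             # 4 characters, last one mojibake-prone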
def _make_build_dir(build_dir):
os.makedirs(build_dir)
write_delete_marker_file(build_dir)
class FakeFile(object):
"""Wrap a list of lines in an object with readline() to make
ConfigParser happy."""
def __init__(self, lines):
self._gen = (l for l in lines)
def readline(self):
try:
try:
return next(self._gen)
except NameError:
return self._gen.next()
except StopIteration:
return ''
def __iter__(self):
return self._gen
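# Usage sketch: FakeFile exposes just enough of the file protocol
# (readline/iteration) to feed an in-memory list of lines to consumers
# such as ConfigParser.readfp().
def _demo_fakefile():
    fake = FakeFile(['[section]\n', 'key = value\n'])
    print(fake.readline())  # '[section]\n'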
class StreamWrapper(StringIO):
@classmethod
def from_stream(cls, orig_stream):
cls.orig_stream = orig_stream
return cls()
# compileall.compile_dir() needs stdout.encoding to print to stdout
@property
def encoding(self):
return self.orig_stream.encoding
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Taken from Lib/support/__init__.py in the CPython repo.
"""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print('hello')
self.assertEqual(stdout.getvalue(), 'hello\n')
Taken from Lib/support/__init__.py in the CPython repo.
"""
return captured_output('stdout')
class cached_property(object):
"""A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Source: https://github.com/bottlepy/bottle/blob/0.11.5/bottle.py#L175
"""
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
# We're being accessed from the class itself, not from an object
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
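# Usage sketch: the first access computes the value and stores it in the
# instance __dict__, shadowing the non-data descriptor; ``del obj.answer``
# resets it.
class _DemoCached(object):
    @cached_property
    def answer(self):
        print('computing...')
        return 42
# _DemoCached().answer prints "computing..." once per instance, then 42
# comes straight from the instance dict.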
def get_installed_version(dist_name):
"""Get the installed version of dist_name avoiding pkg_resources cache"""
# Create a requirement that we'll look for inside of setuptools.
req = pkg_resources.Requirement.parse(dist_name)
# We want to avoid having this cached, so we need to construct a new
# working set each time.
working_set = pkg_resources.WorkingSet()
# Get the installed distribution from our working set
dist = working_set.find(req)
    # Check to see if we got an installed distribution or not; if we did,
    # we want to return its version.
return dist.version if dist else None
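# Usage sketch: query an installed distribution's version while avoiding
# the global pkg_resources cache; None means it is not installed.
def _demo_installed_version():
    print(get_installed_version('pip'))           # e.g. '6.0.8'
    print(get_installed_version('no-such-dist'))  # None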
|
ecobost/pipeline
|
refs/heads/master
|
python/pipeline/legacy/aod_monet.py
|
6
|
import datajoint as dj
schema = dj.schema('pipeline_aod_monet', locals())
|
EUDAT-B2SHARE/invenio-old
|
refs/heads/next
|
modules/bibexport/lib/bibexport_method_sitemap.py
|
5
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibExport plugin implementing 'sitemap' exporting method.
The main function is run_export_method(jobname) defined at the end.
This is what BibExport daemon calls for all the export jobs that use
this exporting method.
"""
from datetime import datetime
from urllib import quote
from ConfigParser import ConfigParser
import os
import gzip
import time
from invenio.search_engine import get_collection_reclist
from invenio.dbquery import run_sql
from invenio.config import CFG_SITE_URL, CFG_WEBDIR, CFG_ETCDIR, \
CFG_SITE_RECORD, CFG_SITE_LANGS
from invenio.intbitset import intbitset
from invenio.websearch_webcoll import Collection
from invenio.bibtask import write_message, task_update_progress, task_sleep_now_if_required
from invenio.textutils import encode_for_xml
from invenio.urlutils import get_canonical_and_alternates_urls
DEFAULT_TIMEZONE = '+01:00'
DEFAULT_PRIORITY_HOME = 1
DEFAULT_CHANGEFREQ_HOME = 'hourly'
DEFAULT_PRIORITY_RECORDS = 0.8
DEFAULT_CHANGEFREQ_RECORDS = 'weekly'
DEFAULT_PRIORITY_COMMENTS = 0.4
DEFAULT_CHANGEFREQ_COMMENTS = 'weekly'
DEFAULT_PRIORITY_REVIEWS = 0.6
DEFAULT_CHANGEFREQ_REVIEWS = 'weekly'
DEFAULT_PRIORITY_FULLTEXTS = 0.9
DEFAULT_CHANGEFREQ_FULLTEXTS = 'weekly'
DEFAULT_PRIORITY_COLLECTIONS = 0.3
DEFAULT_CHANGEFREQ_COLLECTIONS = 'hourly'
MAX_RECORDS = 50000
MAX_SIZE = 10000000
def get_all_public_records(collections):
""" Get all records which exist (i.e. not suppressed ones) and are in
accessible collection.
returns list of (recid, last_modification) tuples
"""
recids = intbitset()
for collection in collections:
recids += get_collection_reclist(collection)
query = 'SELECT id, modification_date FROM bibrec'
res = run_sql(query)
return [(recid, lastmod) for (recid, lastmod) in res if recid in recids]
def get_all_public_collections(base_collections):
""" Return a list of (collection.name, last_modification) tuples for all
collections and subcollections of base_collections
"""
def get_collection_last_modification(collection):
""" last modification = modification date fo latest added record """
last_mod = None
query_last_mod = "SELECT modification_date FROM bibrec WHERE id=%s"
try:
latest_recid = collection.reclist.tolist()[-1]
except IndexError:
# this collection is empty
return last_mod
res = run_sql(query_last_mod, (latest_recid,))
if res and res[0][0]:
last_mod = res[0][0]
return last_mod
output = []
for coll_name in base_collections:
mother_collection = Collection(coll_name)
if not mother_collection.restricted_p():
last_mod = get_collection_last_modification(mother_collection)
output.append((coll_name, last_mod))
for descendant in mother_collection.get_descendants(type='r'):
if not descendant.restricted_p():
last_mod = get_collection_last_modification(descendant)
output.append((descendant.name, last_mod))
for descendant in mother_collection.get_descendants(type='v'):
if not descendant.restricted_p():
last_mod = get_collection_last_modification(descendant)
output.append((descendant.name, last_mod))
return output
def filter_fulltexts(recids, fulltext_type=None):
""" returns list of records having a fulltext of type fulltext_type.
If fulltext_type is empty, return all records having a fulltext"""
recids = dict(recids)
if fulltext_type:
query = """SELECT id_bibrec, max(modification_date)
FROM bibrec_bibdoc
LEFT JOIN bibdoc ON bibrec_bibdoc.id_bibdoc=bibdoc.id
WHERE type=%s
GROUP BY id_bibrec"""
res = run_sql(query, (fulltext_type,))
else:
query = """SELECT id_bibrec, max(modification_date)
FROM bibrec_bibdoc
LEFT JOIN bibdoc ON bibrec_bibdoc.id_bibdoc=bibdoc.id
GROUP BY id_bibrec"""
res = run_sql(query)
return [(recid, lastmod) for (recid, lastmod) in res if recid in recids]
def filter_comments(recids):
""" Retrieve recids having a comment. return (recid, last_review_date)"""
recids = dict(recids)
query = """SELECT id_bibrec, max(date_creation)
FROM cmtRECORDCOMMENT
WHERE star_score=0
GROUP BY id_bibrec"""
res = run_sql(query)
return [(recid, lastmod) for (recid, lastmod) in res if recid in recids]
def filter_reviews(recids):
""" Retrieve recids having a review. return (recid, last_review_date)"""
recids = dict(recids)
query = """SELECT id_bibrec, max(date_creation)
FROM cmtRECORDCOMMENT
WHERE star_score>0
GROUP BY id_bibrec"""
res = run_sql(query)
return [(recid, lastmod) for (recid, lastmod) in res if recid in recids]
SITEMAP_HEADER = """\
<?xml version="1.0" encoding="UTF-8"?>
<urlset
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xhtml="http://www.w3.org/1999/xhtml"
xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9
http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd"
xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">"""
SITEMAP_FOOTER = '\n</urlset>\n'
class SitemapWriter(object):
""" Writer for sitemaps"""
def __init__(self, sitemap_id):
""" Constructor.
        sitemap_id: number of the sitemap file to be created
"""
self.header = SITEMAP_HEADER
self.footer = SITEMAP_FOOTER
self.sitemap_id = sitemap_id
self.name = os.path.join(CFG_WEBDIR, 'sitemap-%02d.xml.gz' % sitemap_id)
self.filedescriptor = gzip.open(self.name + '.part', 'w')
self.num_urls = 0
self.file_size = 0
self.filedescriptor.write(self.header)
self.file_size += len(self.footer)
def add_url(self, url, lastmod=datetime(1900, 1, 1), changefreq="", priority="", alternate=False):
""" create a new url node. Returns the number of url nodes in sitemap"""
self.num_urls += 1
canonical_url, alternate_urls = get_canonical_and_alternates_urls(url, drop_ln=not alternate)
url_node = u"""
<url>
<loc>%s</loc>%s
</url>"""
optional = ''
if lastmod:
optional += u"""
<lastmod>%s</lastmod>""" % lastmod.strftime('%Y-%m-%dT%H:%M:%S' + \
DEFAULT_TIMEZONE)
if changefreq:
optional += u"""
<changefreq>%s</changefreq>""" % changefreq
if priority:
optional += u"""
<priority>%s</priority>""" % priority
if alternate:
for ln, alternate_url in alternate_urls.iteritems():
ln = ln.replace('_', '-') ## zh_CN -> zh-CN
optional += u"""
<xhtml:link rel="alternate" hreflang="%s" href="%s" />""" % (ln, encode_for_xml(alternate_url, quote=True))
url_node %= (encode_for_xml(canonical_url), optional)
self.file_size += len(url_node)
self.filedescriptor.write(url_node)
return self.num_urls
def get_size(self):
""" File size. Should not be > 10MB """
return self.file_size + len(self.footer)
def get_number_of_urls(self):
""" Number of urls in the sitemap. Should not be > 50'000"""
return self.num_urls
def get_name(self):
""" Returns the filename """
return self.name
def get_sitemap_url(self):
""" Returns the sitemap URL"""
return CFG_SITE_URL + '/' + os.path.basename(self.name)
def __del__(self):
""" Writes the whole sitemap """
self.filedescriptor.write(self.footer)
self.filedescriptor.close()
os.rename(self.name + '.part', self.name)
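# Hedged usage sketch (assumes a configured Invenio instance, since the
# output path is derived from CFG_WEBDIR): write a single record URL into
# sitemap number 1. The file is finalized and renamed when the writer is
# garbage-collected via __del__ above.
def _demo_sitemap_writer():
    writer = SitemapWriter(1)
    writer.add_url(CFG_SITE_URL + '/%s/1' % CFG_SITE_RECORD,
                   lastmod=datetime.today(),
                   changefreq=DEFAULT_CHANGEFREQ_RECORDS,
                   priority=DEFAULT_PRIORITY_RECORDS)
    print(writer.get_sitemap_url())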
SITEMAP_INDEX_HEADER = \
'<?xml version="1.0" encoding="UTF-8"?>\n' \
'<sitemapindex\n' \
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
' xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9\n' \
' http://www.sitemaps.org/schemas/sitemap/0.9/siteindex.xsd"\n' \
' xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">'
SITEMAP_INDEX_FOOTER = '\n</sitemapindex>\n'
class SitemapIndexWriter(object):
"""class for writing Sitemap Index files."""
def __init__(self, name):
""" Constructor.
name: path to the sitemap index file to be created
"""
self.header = SITEMAP_INDEX_HEADER
self.footer = SITEMAP_INDEX_FOOTER
self.name = name
self.filedescriptor = gzip.open(self.name + '.part', 'w')
self.num_urls = 0
self.file_size = 0
self.filedescriptor.write(self.header)
self.file_size += len(self.footer)
def add_url(self, url):
""" create a new url node. Returns the number of url nodes in sitemap"""
self.num_urls += 1
url_node = u"""
<sitemap>
<loc>%s</loc>%s
</sitemap>"""
optional = u"""
<lastmod>%s</lastmod>""" % time.strftime('%Y-%m-%dT%H:%M:%S' +\
DEFAULT_TIMEZONE)
url_node %= (url, optional)
self.file_size += len(url_node)
self.filedescriptor.write(url_node)
return self.num_urls
def __del__(self):
""" Writes the whole sitemap """
self.filedescriptor.write(self.footer)
self.filedescriptor.close()
os.rename(self.name + '.part', self.name)
def generate_sitemaps(sitemap_index_writer, collection_names, fulltext_filter=''):
"""
Generate sitemaps themselves. Return list of generated sitemaps files
"""
sitemap_id = 1
writer = SitemapWriter(sitemap_id)
sitemap_index_writer.add_url(writer.get_sitemap_url())
nb_urls = 0
for lang in CFG_SITE_LANGS:
writer.add_url(CFG_SITE_URL + '/?ln=%s' % lang,
lastmod=datetime.today(),
changefreq=DEFAULT_CHANGEFREQ_HOME,
priority=DEFAULT_PRIORITY_HOME)
nb_urls += 1
write_message("... Getting all public records...")
recids = get_all_public_records(collection_names)
write_message("... Generating urls for %s records..." % len(recids))
task_sleep_now_if_required(can_stop_too=True)
for i, (recid, lastmod) in enumerate(recids):
if nb_urls % 100 == 0 and (writer.get_size() >= MAX_SIZE or nb_urls >= MAX_RECORDS):
sitemap_id += 1
writer = SitemapWriter(sitemap_id)
sitemap_index_writer.add_url(writer.get_sitemap_url())
nb_urls = writer.add_url(CFG_SITE_URL + '/%s/%s' % (CFG_SITE_RECORD, recid),
lastmod = lastmod,
changefreq = DEFAULT_CHANGEFREQ_RECORDS,
priority = DEFAULT_PRIORITY_RECORDS)
if i % 100 == 0:
task_update_progress("Sitemap for recid %s/%s" % (i + 1, len(recids)))
task_sleep_now_if_required(can_stop_too=True)
write_message("... Generating urls for collections...")
collections = get_all_public_collections(collection_names)
for i, (collection, lastmod) in enumerate(collections):
for lang in CFG_SITE_LANGS:
if nb_urls % 100 == 0 and (writer.get_size() >= MAX_SIZE or nb_urls >= MAX_RECORDS):
sitemap_id += 1
writer = SitemapWriter(sitemap_id)
sitemap_index_writer.add_url(writer.get_sitemap_url())
nb_urls = writer.add_url('%s/collection/%s?ln=%s' % (CFG_SITE_URL, quote(collection), lang),
lastmod = lastmod,
changefreq = DEFAULT_CHANGEFREQ_COLLECTIONS,
priority = DEFAULT_PRIORITY_COLLECTIONS,
alternate=True)
if i % 100 == 0:
task_update_progress("Sitemap for collection %s/%s" % (i + 1, len(collections)))
task_sleep_now_if_required(can_stop_too=True)
write_message("... Generating urls for fulltexts...")
recids = filter_fulltexts(recids, fulltext_filter)
for i, (recid, lastmod) in enumerate(recids):
if nb_urls % 100 == 0 and (writer.get_size() >= MAX_SIZE or nb_urls >= MAX_RECORDS):
sitemap_id += 1
writer = SitemapWriter(sitemap_id)
sitemap_index_writer.add_url(writer.get_sitemap_url())
nb_urls = writer.add_url(CFG_SITE_URL + '/%s/%s/files' % (CFG_SITE_RECORD, recid),
lastmod = lastmod,
changefreq = DEFAULT_CHANGEFREQ_FULLTEXTS,
priority = DEFAULT_PRIORITY_FULLTEXTS)
if i % 100 == 0:
task_update_progress("Sitemap for files page %s/%s" % (i, len(recids)))
task_sleep_now_if_required(can_stop_too=True)
write_message("... Generating urls for comments...")
recids = filter_comments(recids)
for i, (recid, lastmod) in enumerate(recids):
if nb_urls % 100 == 0 and (writer.get_size() >= MAX_SIZE or nb_urls >= MAX_RECORDS):
sitemap_id += 1
writer = SitemapWriter(sitemap_id)
sitemap_index_writer.add_url(writer.get_sitemap_url())
nb_urls = writer.add_url(CFG_SITE_URL + '/%s/%s/comments' % (CFG_SITE_RECORD, recid),
lastmod = lastmod,
changefreq = DEFAULT_CHANGEFREQ_COMMENTS,
priority = DEFAULT_PRIORITY_COMMENTS)
if i % 100 == 0:
task_update_progress("Sitemap for comments page %s/%s" % (i, len(recids)))
task_sleep_now_if_required(can_stop_too=True)
write_message("... Generating urls for reviews")
recids = filter_reviews(recids)
for i, (recid, lastmod) in enumerate(recids):
if nb_urls % 100 == 0 and (writer.get_size() >= MAX_SIZE or nb_urls >= MAX_RECORDS):
sitemap_id += 1
write_message("")
writer = SitemapWriter(sitemap_id)
sitemap_index_writer.add_url(writer.get_sitemap_url())
nb_urls = writer.add_url(CFG_SITE_URL + '/%s/%s/reviews' % (CFG_SITE_RECORD, recid),
lastmod = lastmod,
changefreq = DEFAULT_CHANGEFREQ_REVIEWS,
priority = DEFAULT_PRIORITY_REVIEWS)
if i % 100 == 0:
task_update_progress("Sitemap for reviews page %s/%s" % (i, len(recids)))
task_sleep_now_if_required(can_stop_too=True)
def generate_sitemaps_index(collection_list, fulltext_filter=None):
"""main function. Generates the sitemap index and the sitemaps
collection_list: list of collection names to add in sitemap
fulltext_filter: if provided the parser will intergrate only give fulltext
types
"""
write_message("Generating all sitemaps...")
sitemap_index_writer = SitemapIndexWriter(CFG_WEBDIR + '/sitemap-index.xml.gz')
generate_sitemaps(sitemap_index_writer, collection_list, fulltext_filter)
def run_export_method(jobname):
"""Main function, reading params and running the task."""
write_message("bibexport_sitemap: job %s started." % jobname)
collections = get_config_parameter(jobname=jobname, parameter_name="collection", is_parameter_collection = True)
fulltext_type = get_config_parameter(jobname=jobname, parameter_name="fulltext_status")
generate_sitemaps_index(collections, fulltext_type)
write_message("bibexport_sitemap: job %s finished." % jobname)
def get_config_parameter(jobname, parameter_name, is_parameter_collection = False):
"""Detect export method of JOBNAME. Basically, parse JOBNAME.cfg
and return export_method. Return None if problem found."""
jobconfig = ConfigParser()
jobconffile = CFG_ETCDIR + os.sep + 'bibexport' + os.sep + jobname + '.cfg'
if not os.path.exists(jobconffile):
write_message("ERROR: cannot find config file %s." % jobconffile)
return None
jobconfig.read(jobconffile)
if is_parameter_collection:
all_items = jobconfig.items(section='export_job')
parameters = []
for item_name, item_value in all_items:
if item_name.startswith(parameter_name):
parameters.append(item_value)
return parameters
else:
parameter = jobconfig.get('export_job', parameter_name)
return parameter
|
ake-koomsin/mapnik_nvpr
|
refs/heads/master
|
scons/scons-local-2.2.0/SCons/Tool/JavaCommon.py
|
14
|
"""SCons.Tool.JavaCommon
Stuff for processing Java.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/JavaCommon.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import os
import os.path
import re
java_parsing = 1
default_java_version = '1.4'
if java_parsing:
# Parse Java files for class names.
#
# This is a really cool parser from Charles Crain
# that finds appropriate class names in Java source.
# A regular expression that will find, in a java file:
# newlines;
# double-backslashes;
# a single-line comment "//";
    # single or double quotes preceded by a backslash;
# single quotes, double quotes, open or close braces, semi-colons,
# periods, open or close parentheses;
# floating-point numbers;
# any alphanumeric token (keyword, class name, specifier);
# any alphanumeric token surrounded by angle brackets (generics);
# the multi-line comment begin and end tokens /* and */;
# array declarations "[]".
_reToken = re.compile(r'(\n|\\\\|//|\\[\'"]|[\'"\{\}\;\.\(\)]|' +
r'\d*\.\d*|[A-Za-z_][\w\$\.]*|<[A-Za-z_]\w+>|' +
r'/\*|\*/|\[\])')
class OuterState(object):
"""The initial state for parsing a Java file for classes,
interfaces, and anonymous inner classes."""
def __init__(self, version=default_java_version):
if not version in ('1.1', '1.2', '1.3','1.4', '1.5', '1.6',
'5', '6'):
msg = "Java version %s not supported" % version
raise NotImplementedError(msg)
self.version = version
self.listClasses = []
self.listOutputs = []
self.stackBrackets = []
self.brackets = 0
self.nextAnon = 1
self.localClasses = []
self.stackAnonClassBrackets = []
self.anonStacksStack = [[0]]
self.package = None
def trace(self):
pass
def __getClassState(self):
try:
return self.classState
except AttributeError:
ret = ClassState(self)
self.classState = ret
return ret
def __getPackageState(self):
try:
return self.packageState
except AttributeError:
ret = PackageState(self)
self.packageState = ret
return ret
def __getAnonClassState(self):
try:
return self.anonState
except AttributeError:
self.outer_state = self
ret = SkipState(1, AnonClassState(self))
self.anonState = ret
return ret
def __getSkipState(self):
try:
return self.skipState
except AttributeError:
ret = SkipState(1, self)
self.skipState = ret
return ret
def __getAnonStack(self):
return self.anonStacksStack[-1]
def openBracket(self):
self.brackets = self.brackets + 1
def closeBracket(self):
self.brackets = self.brackets - 1
if len(self.stackBrackets) and \
self.brackets == self.stackBrackets[-1]:
self.listOutputs.append('$'.join(self.listClasses))
self.localClasses.pop()
self.listClasses.pop()
self.anonStacksStack.pop()
self.stackBrackets.pop()
if len(self.stackAnonClassBrackets) and \
self.brackets == self.stackAnonClassBrackets[-1]:
self.__getAnonStack().pop()
self.stackAnonClassBrackets.pop()
def parseToken(self, token):
if token[:2] == '//':
return IgnoreState('\n', self)
elif token == '/*':
return IgnoreState('*/', self)
elif token == '{':
self.openBracket()
elif token == '}':
self.closeBracket()
elif token in [ '"', "'" ]:
return IgnoreState(token, self)
elif token == "new":
# anonymous inner class
if len(self.listClasses) > 0:
return self.__getAnonClassState()
return self.__getSkipState() # Skip the class name
elif token in ['class', 'interface', 'enum']:
if len(self.listClasses) == 0:
self.nextAnon = 1
self.stackBrackets.append(self.brackets)
return self.__getClassState()
elif token == 'package':
return self.__getPackageState()
elif token == '.':
# Skip the attribute, it might be named "class", in which
# case we don't want to treat the following token as
# an inner class name...
return self.__getSkipState()
return self
def addAnonClass(self):
"""Add an anonymous inner class"""
if self.version in ('1.1', '1.2', '1.3', '1.4'):
clazz = self.listClasses[0]
self.listOutputs.append('%s$%d' % (clazz, self.nextAnon))
elif self.version in ('1.5', '1.6', '5', '6'):
self.stackAnonClassBrackets.append(self.brackets)
className = []
className.extend(self.listClasses)
self.__getAnonStack()[-1] = self.__getAnonStack()[-1] + 1
for anon in self.__getAnonStack():
className.append(str(anon))
self.listOutputs.append('$'.join(className))
self.nextAnon = self.nextAnon + 1
self.__getAnonStack().append(0)
def setPackage(self, package):
self.package = package
class AnonClassState(object):
"""A state that looks for anonymous inner classes."""
def __init__(self, old_state):
# outer_state is always an instance of OuterState
self.outer_state = old_state.outer_state
self.old_state = old_state
self.brace_level = 0
def parseToken(self, token):
# This is an anonymous class if and only if the next
# non-whitespace token is a bracket. Everything between
# braces should be parsed as normal java code.
if token[:2] == '//':
return IgnoreState('\n', self)
elif token == '/*':
return IgnoreState('*/', self)
elif token == '\n':
return self
elif token[0] == '<' and token[-1] == '>':
return self
elif token == '(':
self.brace_level = self.brace_level + 1
return self
if self.brace_level > 0:
if token == 'new':
# look further for anonymous inner class
return SkipState(1, AnonClassState(self))
elif token in [ '"', "'" ]:
return IgnoreState(token, self)
elif token == ')':
self.brace_level = self.brace_level - 1
return self
if token == '{':
self.outer_state.addAnonClass()
return self.old_state.parseToken(token)
class SkipState(object):
"""A state that will skip a specified number of tokens before
reverting to the previous state."""
def __init__(self, tokens_to_skip, old_state):
self.tokens_to_skip = tokens_to_skip
self.old_state = old_state
def parseToken(self, token):
self.tokens_to_skip = self.tokens_to_skip - 1
if self.tokens_to_skip < 1:
return self.old_state
return self
class ClassState(object):
"""A state we go into when we hit a class or interface keyword."""
def __init__(self, outer_state):
# outer_state is always an instance of OuterState
self.outer_state = outer_state
def parseToken(self, token):
# the next non-whitespace token should be the name of the class
if token == '\n':
return self
# If that's an inner class which is declared in a method, it
# requires an index prepended to the class-name, e.g.
# 'Foo$1Inner' (Tigris Issue 2087)
if self.outer_state.localClasses and \
self.outer_state.stackBrackets[-1] > \
self.outer_state.stackBrackets[-2]+1:
locals = self.outer_state.localClasses[-1]
try:
idx = locals[token]
locals[token] = locals[token]+1
except KeyError:
locals[token] = 1
token = str(locals[token]) + token
self.outer_state.localClasses.append({})
self.outer_state.listClasses.append(token)
self.outer_state.anonStacksStack.append([0])
return self.outer_state
class IgnoreState(object):
"""A state that will ignore all tokens until it gets to a
specified token."""
def __init__(self, ignore_until, old_state):
self.ignore_until = ignore_until
self.old_state = old_state
def parseToken(self, token):
if self.ignore_until == token:
return self.old_state
return self
class PackageState(object):
"""The state we enter when we encounter the package keyword.
We assume the next token will be the package name."""
def __init__(self, outer_state):
# outer_state is always an instance of OuterState
self.outer_state = outer_state
def parseToken(self, token):
self.outer_state.setPackage(token)
return self.outer_state
    def parse_java_file(fn, version=default_java_version):
        with open(fn, 'r') as f:
            return parse_java(f.read(), version)
def parse_java(contents, version=default_java_version, trace=None):
"""Parse a .java file and return a double of package directory,
plus a list of .class files that compiling that .java file will
produce"""
package = None
initial = OuterState(version)
currstate = initial
for token in _reToken.findall(contents):
# The regex produces a bunch of groups, but only one will
# have anything in it.
currstate = currstate.parseToken(token)
if trace: trace(token, currstate)
if initial.package:
package = initial.package.replace('.', os.sep)
return (package, initial.listOutputs)
else:
# Don't actually parse Java files for class names.
#
# We might make this a configurable option in the future if
# Java-file parsing takes too long (although it shouldn't relative
# to how long the Java compiler itself seems to take...).
def parse_java_file(fn):
""" "Parse" a .java file.
This actually just splits the file name, so the assumption here
is that the file name matches the public class name, and that
the path to the file is the same as the package name.
"""
        return os.path.split(fn)
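# Hedged usage sketch (runs under the java_parsing=1 default above):
# parse a small Java snippet and report the package path plus the .class
# files its compilation would produce.
def _demo_parse_java():
    source = '''
    package com.example;
    public class Foo {
        class Inner {}
    }
    '''
    pkg, classes = parse_java(source, version='1.6')
    print(pkg)      # 'com' + os.sep + 'example'
    print(classes)  # ['Foo$Inner', 'Foo']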
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
pombredanne/invenio-old
|
refs/heads/master
|
modules/websession/lib/session.py
|
3
|
# -*- coding: utf-8 -*-
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Session management adapted from mod_python Session class.
Just use L{get_session} to obtain a session object (with a dictionary
interface, which will let you store permanent information).
"""
from invenio.webinterface_handler_wsgi_utils import add_cookie, Cookie, get_cookie
import cPickle
import time
import random
import re
import sys
import os
from thread import get_ident
if sys.hexversion < 0x2060000:
from md5 import md5
else:
from hashlib import md5
from invenio.dbquery import run_sql, blob_to_string
from invenio.config import CFG_WEBSESSION_EXPIRY_LIMIT_REMEMBER, \
CFG_WEBSESSION_EXPIRY_LIMIT_DEFAULT
from invenio.websession_config import CFG_WEBSESSION_COOKIE_NAME, \
CFG_WEBSESSION_ONE_DAY, CFG_WEBSESSION_CLEANUP_CHANCE, \
CFG_WEBSESSION_ENABLE_LOCKING
def get_session(req, sid=None):
"""
Obtain a session.
If the session has already been created for the current request,
returns the already existing session.
@param req: the mod_python request object.
@type req: mod_python request object
@param sid: the session identifier of an already existing session.
@type sid: 32 hexadecimal string
@return: the session.
@rtype: InvenioSession
@raise ValueError: if C{sid} is provided and it doesn't correspond to a
valid session.
@note: if the session has already been retrieved once within the current
request handling, the same session object will be returned and, if
provided, the C{sid} parameter will be ignored.
"""
if not hasattr(req, '_session'):
req._session = InvenioSession(req, sid)
return req._session
class InvenioSession(dict):
"""
This class implements a Session handling based on MySQL.
@param req: the mod_python request object.
@type req: mod_python request object
@param sid: the session identifier if already known
@type sid: 32 hexadecimal string
@ivar _remember_me: if the session cookie should last one day or until
the browser is closed.
@type _remember_me: bool
@note: The code is heavily based on ModPython 3.3.1 DBMSession
implementation.
@note: This class implements IP verification to prevent basic cookie
stealing.
@raise ValueError: if C{sid} is provided and correspond to a broken
session.
"""
def __init__(self, req, sid=None):
self._remember_me = False
self._req, self._sid, self._secret = req, sid, None
self._lock = CFG_WEBSESSION_ENABLE_LOCKING
self._new = 1
self._created = 0
self._accessed = 0
self._timeout = 0
self._locked = 0
self._invalid = 0
self._http_ip = None
self._https_ip = None
dict.__init__(self)
if not self._sid:
# check to see if cookie exists
cookie = get_cookie(req, CFG_WEBSESSION_COOKIE_NAME)
if cookie:
self._sid = cookie.value
if self._sid:
if not _check_sid(self._sid):
if sid:
# Supplied explicitly by user of the class,
# raise an exception and make the user code
# deal with it.
raise ValueError("Invalid Session ID: sid=%s" % sid)
else:
# Derived from the cookie sent by browser,
# wipe it out so it gets replaced with a
# correct value.
self._sid = None
if self._sid:
# attempt to load ourselves
self.lock()
if self.load():
self._new = 0
if self._new:
# make a new session
if self._sid:
self.unlock() # unlock old sid
self._sid = _new_sid(self._req)
self.lock() # lock new sid
remote_ip = self._req.remote_ip
if self._req.is_https():
self._https_ip = remote_ip
else:
self._http_ip = remote_ip
add_cookie(self._req, self.make_cookie())
self._created = time.time()
self._timeout = CFG_WEBSESSION_EXPIRY_LIMIT_DEFAULT * \
CFG_WEBSESSION_ONE_DAY
self._accessed = time.time()
# need cleanup?
if random.randint(1, CFG_WEBSESSION_CLEANUP_CHANCE) == 1:
self.cleanup()
def set_remember_me(self, remember_me=True):
"""
Set/Unset the L{_remember_me} flag.
@param remember_me: True if the session cookie should last one day or
until the browser is closed.
@type remember_me: bool
"""
self._remember_me = remember_me
if remember_me:
self.set_timeout(CFG_WEBSESSION_EXPIRY_LIMIT_REMEMBER *
CFG_WEBSESSION_ONE_DAY)
else:
self.set_timeout(CFG_WEBSESSION_EXPIRY_LIMIT_DEFAULT *
CFG_WEBSESSION_ONE_DAY)
add_cookie(self._req, self.make_cookie())
def load(self):
"""
Load the session from the database.
@return: 1 in case of success, 0 otherwise.
@rtype: integer
"""
session_dict = None
invalid = False
res = run_sql("SELECT session_object FROM session "
"WHERE session_key=%s", (self._sid, ))
if res:
session_dict = cPickle.loads(blob_to_string(res[0][0]))
remote_ip = self._req.remote_ip
if self._req.is_https():
if session_dict['_https_ip'] is not None and \
session_dict['_https_ip'] != remote_ip:
invalid = True
else:
session_dict['_https_ip'] = remote_ip
else:
if session_dict['_http_ip'] is not None and \
session_dict['_http_ip'] != remote_ip:
invalid = True
else:
session_dict['_http_ip'] = remote_ip
if session_dict is None:
return 0
if invalid:
return 0
if (time.time() - session_dict["_accessed"]) > \
session_dict["_timeout"]:
return 0
self._created = session_dict["_created"]
self._accessed = session_dict["_accessed"]
self._timeout = session_dict["_timeout"]
self._remember_me = session_dict["_remember_me"]
self.update(session_dict["_data"])
return 1
def save(self):
"""
Save the session to the database.
"""
if not self._invalid:
session_dict = {"_data" : self.copy(),
"_created" : self._created,
"_accessed": self._accessed,
"_timeout" : self._timeout,
"_http_ip" : self._http_ip,
"_https_ip" : self._https_ip,
"_remember_me" : self._remember_me
}
session_key = self._sid
session_object = cPickle.dumps(session_dict, -1)
session_expiry = time.time() + self._timeout + \
CFG_WEBSESSION_ONE_DAY
uid = self.get('uid', -1)
run_sql("""
INSERT session(
session_key,
session_expiry,
session_object,
uid
) VALUE(%s,
%s,
%s,
%s
) ON DUPLICATE KEY UPDATE
session_expiry=%s,
session_object=%s,
uid=%s
""", (session_key, session_expiry, session_object, uid,
session_expiry, session_object, uid))
def delete(self):
"""
Delete the session.
"""
run_sql("DELETE FROM session WHERE session_key=%s", (self._sid, ))
self.clear()
def invalidate(self):
"""
Declare the session as invalid.
"""
cookie = self.make_cookie()
cookie.expires = 0
add_cookie(self._req, cookie)
self.delete()
self._invalid = 1
if hasattr(self._req, '_session'):
delattr(self._req, '_session')
def make_cookie(self):
"""
        Reimplementation of the L{BaseSession.make_cookie} method that also
        considers the L{_remember_me} flag.
        @return: a session cookie.
        @rtype: {mod_python.Cookie.Cookie}
"""
cookie = Cookie(CFG_WEBSESSION_COOKIE_NAME, self._sid)
cookie.path = '/'
if self._remember_me:
cookie.expires = time.time() + self._timeout
return cookie
def initial_http_ip(self):
"""
@return: the initial ip addressed for the HTTP protocol for which this
session was issued.
@rtype: string
@note: it returns None if this session has always been used through
HTTPS requests.
"""
return self._http_ip
def initial_https_ip(self):
"""
@return: the initial ip addressed for the HTTPS protocol for which this
session was issued.
@rtype: string
@note: it returns None if this session has always been used through
HTTP requests.
"""
return self._https_ip
def lock(self):
"""
Lock the session.
"""
if self._lock:
self._locked = 1
def unlock(self):
"""
Unlock the session.
"""
if self._lock and self._locked:
self._locked = 0
def is_new(self):
"""
@return: True if the session has just been created.
@rtype: bool
"""
return not not self._new
def sid(self):
"""
@return: the session identifier.
@rtype: 32 hexadecimal string
"""
return self._sid
def created(self):
"""
@return: the UNIX timestamp for when the session has been created.
@rtype: double
"""
return self._created
def last_accessed(self):
"""
@return: the UNIX timestamp for when the session has been last
accessed.
@rtype: double
"""
return self._accessed
def timeout(self):
"""
@return: the number of seconds from the last accessed timestamp,
after which the session is invalid.
@rtype: double
"""
return self._timeout
def set_timeout(self, secs):
"""
Set the number of seconds from the last accessed timestamp,
after which the session is invalid.
@param secs: the number of seconds.
@type secs: double
"""
self._timeout = secs
def cleanup(self):
"""
Perform the database session cleanup.
"""
def session_cleanup():
"""
            Session cleanup procedure which is to be executed at the end
            of the request handling.
"""
run_sql("""
DELETE FROM session
WHERE session_expiry<=UNIX_TIMESTAMP()
""")
self._req.register_cleanup(session_cleanup)
self._req.log_error("InvenioSession: registered database cleanup.")
def __del__(self):
self.unlock()
def _unlock_session_cleanup(session):
"""
    Auxiliary function to unlock a session.
"""
session.unlock()
def _init_rnd():
"""
Initialize random number generators.
    This is key in a multithreaded environment; see the Python docs for random.
@return: the generators.
@rtype: list of generators
"""
# query max number of threads
gennum = 10
# make generators
# this bit is from Python lib reference
random_generator = random.Random(time.time())
result = [random_generator]
for dummy in range(gennum - 1):
laststate = random_generator.getstate()
random_generator = random.Random()
random_generator.setstate(laststate)
random_generator.jumpahead(1000000)
result.append(random_generator)
return result
_RANDOM_GENERATORS = _init_rnd()
_RANDOM_ITERATOR = iter(_RANDOM_GENERATORS)
def _get_generator():
"""
    Get the next generator via _RANDOM_ITERATOR.next(), or start over
    if we reached the end of it.
    @return: the next random generator.
    @rtype: random.Random
"""
global _RANDOM_ITERATOR
try:
return _RANDOM_ITERATOR.next()
except StopIteration:
# the small potential for two threads doing this
        # does not seem to warrant use of a lock
_RANDOM_ITERATOR = iter(_RANDOM_GENERATORS)
return _RANDOM_ITERATOR.next()
_RE_VALIDATE_SID = re.compile('[0-9a-f]{32}$')
def _check_sid(sid):
"""
Check the validity of the session identifier.
The sid must be 32 characters long, and consisting of the characters
0-9 and a-f.
The sid may be passed in a cookie from the client and as such
should not be trusted. This is particularly important in
FileSession, where the session filename is derived from the sid.
A sid containing '/' or '.' characters could result in a directory
    traversal attack.
@param sid: the session identifier.
@type sid: string
@return: True if the session identifier is valid.
@rtype: bool
"""
return not not _RE_VALIDATE_SID.match(sid)
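# Usage sketch: sids are exactly 32 lower-case hex characters, so values
# usable in a directory traversal are rejected outright.
def _demo_check_sid():
    print(_check_sid('0123456789abcdef0123456789abcdef'))  # True
    print(_check_sid('../../etc/passwd'))                  # False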
def _new_sid(req):
"""
Make a number based on current time, pid, remote ip
and two random ints, then hash with md5. This should
be fairly unique and very difficult to guess.
@param req: the mod_python request object.
@type req: mod_python request object.
@return: the session identifier.
@rtype: 32 hexadecimal string
@warning: The current implementation of _new_sid returns an
md5 hexdigest string. To avoid a possible directory traversal
attack in FileSession the sid is validated using
the _check_sid() method and the compiled regex
validate_sid_re. The sid will be accepted only if len(sid) == 32
and it only contains the characters 0-9 and a-f.
If you change this implementation of _new_sid, make sure to also
change the validation scheme, as well as the test_Session_illegal_sid()
unit test in test/test.py.
"""
the_time = long(time.time()*10000)
pid = os.getpid()
random_generator = _get_generator()
rnd1 = random_generator.randint(0, 999999999)
rnd2 = random_generator.randint(0, 999999999)
remote_ip = req.remote_ip
return md5("%d%d%d%d%s" % (
the_time,
pid,
rnd1,
rnd2,
remote_ip)
).hexdigest()
|
asedunov/intellij-community
|
refs/heads/master
|
python/testData/intentions/numpyDocStub_after.py
|
53
|
def f(x, y):
"""
Parameters
----------
x
y
Returns
-------
"""
return 42
|
RPi-Distro/python-gpiozero
|
refs/heads/master
|
docs/examples/multi_room_motion_alert.py
|
2
|
from gpiozero import LEDBoard, MotionSensor
from gpiozero.pins.pigpio import PiGPIOFactory
from gpiozero.tools import zip_values
from signal import pause
ips = ['192.168.1.3', '192.168.1.4', '192.168.1.5', '192.168.1.6']
remotes = [PiGPIOFactory(host=ip) for ip in ips]
leds = LEDBoard(2, 3, 4, 5) # leds on this pi
sensors = [MotionSensor(17, pin_factory=r) for r in remotes] # remote sensors
leds.source = zip_values(*sensors)
pause()
|