repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable)
|---|---|---|---|---|
VillageAlliance/django-cms | refs/heads/develop | tests/project/pluginapp/__init__.py | 12133432 | |
Ensembles/ert | refs/heads/master | python/tests/ert/analysis/__init__.py | 12133432 | |
philipbl/home-assistant | refs/heads/dev | homeassistant/components/binary_sensor/bloomsky.py | 18 |
"""
Support the binary sensors of a BloomSky weather station.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.bloomsky/
"""
import logging
import voluptuous as vol
from homeassistant.components.binary_sensor import (
BinarySensorDevice, PLATFORM_SCHEMA)
from homeassistant.const import CONF_MONITORED_CONDITIONS
from homeassistant.loader import get_component
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['bloomsky']
# These are the available sensors mapped to binary_sensor class
SENSOR_TYPES = {
'Rain': 'moisture',
'Night': None,
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_MONITORED_CONDITIONS, default=SENSOR_TYPES):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the available BloomSky weather binary sensors."""
bloomsky = get_component('bloomsky')
# Default needed in case of discovery
sensors = config.get(CONF_MONITORED_CONDITIONS, SENSOR_TYPES)
for device in bloomsky.BLOOMSKY.devices.values():
for variable in sensors:
add_devices([BloomSkySensor(bloomsky.BLOOMSKY, device, variable)])
class BloomSkySensor(BinarySensorDevice):
"""Represent a single binary sensor in a BloomSky device."""
def __init__(self, bs, device, sensor_name):
"""Initialize a BloomSky binary sensor."""
self._bloomsky = bs
self._device_id = device['DeviceID']
self._sensor_name = sensor_name
self._name = '{} {}'.format(device['DeviceName'], sensor_name)
self._unique_id = 'bloomsky_binary_sensor {}'.format(self._name)
self.update()
@property
def name(self):
"""The name of the BloomSky device and this sensor."""
return self._name
@property
def unique_id(self):
"""Return the unique ID for this sensor."""
return self._unique_id
@property
def sensor_class(self):
"""Return the class of this sensor, from SENSOR_CLASSES."""
return SENSOR_TYPES.get(self._sensor_name)
@property
def is_on(self):
"""Return true if binary sensor is on."""
return self._state
def update(self):
"""Request an update from the BloomSky API."""
self._bloomsky.refresh_devices()
self._state = \
self._bloomsky.devices[self._device_id]['Data'][self._sensor_name]
|
subutai/htmresearch | refs/heads/master | projects/sequence_prediction/discrete_sequences/plotReberGrammar.py | 12 |
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Plot temporal noise experiment result
"""
import os
from matplotlib import pyplot
import matplotlib as mpl
import numpy
from plot import plotAccuracy
from plot import readExperiment
mpl.rcParams['pdf.fonttype'] = 42
pyplot.ion()
pyplot.close('all')
def computeAccuracy(predictions, truths, sequenceCounter):
accuracy = []
x = []
for i in xrange(len(predictions) - 1):
if truths[i] is None:
continue
correct = predictions[i][0] in truths[i]
accuracy.append(correct)
x.append(sequenceCounter[i])
return (accuracy, x)
if __name__ == '__main__':
experiments = [os.path.join("tm/results", "reber", "0.log"),
os.path.join("lstm/results", "reber-distributed", "0.log"),
os.path.join("elm/results", "reber-basic", "0.log")]
for experiment in experiments:
data = readExperiment(experiment)
(accuracy, x) = computeAccuracy(data['predictions'],
data['truths'],
data['sequenceCounter'])
plotAccuracy((accuracy, x),
data['trains'],
window=100,
type=type,
label='NoiseExperiment',
hideTraining=True,
lineSize=1.0)
pyplot.xlabel('# of sequences seen')
pyplot.ylabel('Prediction accuracy')
pyplot.xlim([0, 250])
pyplot.ylim([0, 1.05])
pyplot.legend(['HTM', 'LSTM', 'ELM'], loc=4)
pyplot.savefig('./result/reber_grammar_performance.pdf')
|
julianprabhakar/eden_car | refs/heads/master | modules/webkit_url2png.py | 53 |
#!/usr/bin/env python
import sys
import signal
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import QWebPage
def save_webpage_screenshot(url, width, height, file_name = None):
"""Saves a screenshot of the webpage given in url into filename+".png"
width and height, if given, are in pixels
if not given, the browser's default dimensions will be used.
Needs a call to window.print() from within the webpage.
Example:
save_webpage_screenshot(
"http://www.example.com",
"example",
width=1024,
height=768
)
"""
app = QApplication(sys.argv)
signal.signal(signal.SIGINT, signal.SIG_DFL)
class MyQWebPage(QWebPage):
@pyqtSlot()
def shouldInterruptJavaScript(qwebpage):
print "not interrupting"
return False
webpage = MyQWebPage()
# set page dimensions
webpage.setViewportSize(QSize(int(width), int(height)))
# display errors otherwise debugging is very difficult
def print_error(
message,
lineNumber,
sourceID
):
print "\n%(sourceID)s line %(lineNumber)i: \n %(message)s" % locals()
webpage.javaScriptConsoleMessage = print_error
if file_name is None:
result = []
# register print request handler
def onPrintRequested(virtual_browser_window):
#print "onPrintRequested"
# Paint this frame into an image
image = QImage(
webpage.viewportSize(),
QImage.Format_ARGB32
)
painter = QPainter(image)
virtual_browser_window.render(painter)
painter.end()
if file_name is not None:
image.save(file_name)
else:
byte_array = QByteArray()
buffer = QBuffer(byte_array)
buffer.open(QIODevice.WriteOnly)
image.save(buffer, "PNG")
result.append(str(byte_array))
if __name__ == "__main__":
if file_name is None:
sys.stdout.write(result[0])
sys.exit(0)
else:
app.quit()
webpage.printRequested.connect(onPrintRequested)
# load the page and wait for a print request
webpage.mainFrame().load(QUrl(url))
app.exec_()
if file_name is None:
return result[0]
if __name__ == "__main__":
sys.exit(
save_webpage_screenshot(
*sys.argv[1:]
)
)
|
pattisdr/osf.io | refs/heads/develop | osf/models/notifications.py | 11 |
from django.contrib.postgres.fields import ArrayField
from django.db import models
from osf.models import Node
from osf.models import OSFUser
from osf.models.base import BaseModel, ObjectIDMixin
from osf.models.validators import validate_subscription_type
from osf.utils.fields import NonNaiveDateTimeField
from website.notifications.constants import NOTIFICATION_TYPES
from website.util import api_v2_url
class NotificationSubscription(BaseModel):
primary_identifier_name = '_id'
_id = models.CharField(max_length=50, db_index=True, unique=True) # pxyz_wiki_updated, uabc_comment_replies
event_name = models.CharField(max_length=50) # wiki_updated, comment_replies
user = models.ForeignKey('OSFUser', related_name='notification_subscriptions',
null=True, blank=True, on_delete=models.CASCADE)
node = models.ForeignKey('Node', related_name='notification_subscriptions',
null=True, blank=True, on_delete=models.CASCADE)
provider = models.ForeignKey('AbstractProvider', related_name='notification_subscriptions',
null=True, blank=True, on_delete=models.CASCADE)
# Notification types
none = models.ManyToManyField('OSFUser', related_name='+') # reverse relationships
email_digest = models.ManyToManyField('OSFUser', related_name='+') # for these
email_transactional = models.ManyToManyField('OSFUser', related_name='+') # are pointless
@classmethod
def load(cls, q):
# modm doesn't throw exceptions when loading things that don't exist
try:
return cls.objects.get(_id=q)
except cls.DoesNotExist:
return None
@property
def owner(self):
# ~100k have owner==user
if self.user is not None:
return self.user
# ~8k have owner=Node
elif self.node is not None:
return self.node
@owner.setter
def owner(self, value):
if isinstance(value, OSFUser):
self.user = value
elif isinstance(value, Node):
self.node = value
@property
def absolute_api_v2_url(self):
path = '/subscriptions/{}/'.format(self._id)
return api_v2_url(path)
def add_user_to_subscription(self, user, notification_type, save=True):
for nt in NOTIFICATION_TYPES:
if getattr(self, nt).filter(id=user.id).exists():
if nt != notification_type:
getattr(self, nt).remove(user)
else:
if nt == notification_type:
getattr(self, nt).add(user)
if notification_type != 'none' and isinstance(self.owner, Node) and self.owner.parent_node:
user_subs = self.owner.parent_node.child_node_subscriptions
if self.owner._id not in user_subs.setdefault(user._id, []):
user_subs[user._id].append(self.owner._id)
self.owner.parent_node.save()
if save:
self.save()
def remove_user_from_subscription(self, user, save=True):
for notification_type in NOTIFICATION_TYPES:
try:
getattr(self, notification_type, []).remove(user)
except ValueError:
pass
if isinstance(self.owner, Node) and self.owner.parent_node:
try:
self.owner.parent_node.child_node_subscriptions.get(user._id, []).remove(self.owner._id)
self.owner.parent_node.save()
except ValueError:
pass
if save:
self.save()
class NotificationDigest(ObjectIDMixin, BaseModel):
user = models.ForeignKey('OSFUser', null=True, blank=True, on_delete=models.CASCADE)
provider = models.ForeignKey('AbstractProvider', null=True, blank=True, on_delete=models.CASCADE)
timestamp = NonNaiveDateTimeField()
send_type = models.CharField(max_length=50, db_index=True, validators=[validate_subscription_type, ])
event = models.CharField(max_length=50)
message = models.TextField()
# TODO: Could this be a m2m with or without an order field?
node_lineage = ArrayField(models.CharField(max_length=5))
|
sidmitra/django_nonrel_testapp | refs/heads/master | django/contrib/auth/backends.py | 230 |
from django.db import connection
from django.contrib.auth.models import User, Permission
class ModelBackend(object):
"""
Authenticates against django.contrib.auth.models.User.
"""
supports_object_permissions = False
supports_anonymous_user = True
supports_inactive_user = True
# TODO: Model, login attribute name and password attribute name should be
# configurable.
def authenticate(self, username=None, password=None):
try:
user = User.objects.get(username=username)
if user.check_password(password):
return user
except User.DoesNotExist:
return None
def get_group_permissions(self, user_obj):
"""
Returns a set of permission strings that this user has through his/her
groups.
"""
if not hasattr(user_obj, '_group_perm_cache'):
if user_obj.is_superuser:
perms = Permission.objects.all()
else:
perms = Permission.objects.filter(group__user=user_obj)
perms = perms.values_list('content_type__app_label', 'codename').order_by()
user_obj._group_perm_cache = set(["%s.%s" % (ct, name) for ct, name in perms])
return user_obj._group_perm_cache
def get_all_permissions(self, user_obj):
if user_obj.is_anonymous():
return set()
if not hasattr(user_obj, '_perm_cache'):
user_obj._perm_cache = set([u"%s.%s" % (p.content_type.app_label, p.codename) for p in user_obj.user_permissions.select_related()])
user_obj._perm_cache.update(self.get_group_permissions(user_obj))
return user_obj._perm_cache
def has_perm(self, user_obj, perm):
if not user_obj.is_active:
return False
return perm in self.get_all_permissions(user_obj)
def has_module_perms(self, user_obj, app_label):
"""
Returns True if user_obj has any permissions in the given app_label.
"""
if not user_obj.is_active:
return False
for perm in self.get_all_permissions(user_obj):
if perm[:perm.index('.')] == app_label:
return True
return False
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
class RemoteUserBackend(ModelBackend):
"""
This backend is to be used in conjunction with the ``RemoteUserMiddleware``
found in the middleware module of this package, and is used when the server
is handling authentication outside of Django.
By default, the ``authenticate`` method creates ``User`` objects for
usernames that don't already exist in the database. Subclasses can disable
this behavior by setting the ``create_unknown_user`` attribute to
``False``.
"""
# Create a User object if not already in the database?
create_unknown_user = True
def authenticate(self, remote_user):
"""
The username passed as ``remote_user`` is considered trusted. This
method simply returns the ``User`` object with the given username,
creating a new ``User`` object if ``create_unknown_user`` is ``True``.
Returns None if ``create_unknown_user`` is ``False`` and a ``User``
object with the given username is not found in the database.
"""
if not remote_user:
return
user = None
username = self.clean_username(remote_user)
# Note that this could be accomplished in one try-except clause, but
# instead we use get_or_create when creating unknown users since it has
# built-in safeguards for multiple threads.
if self.create_unknown_user:
user, created = User.objects.get_or_create(username=username)
if created:
user = self.configure_user(user)
else:
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
pass
return user
def clean_username(self, username):
"""
Performs any cleaning on the "username" prior to using it to get or
create the user object. Returns the cleaned username.
By default, returns the username unchanged.
"""
return username
def configure_user(self, user):
"""
Configures a user after creation and returns the updated user.
By default, returns the user unmodified.
"""
return user
|
OTWillems/GEO1005 | refs/heads/master | TwisterSolutions/test/test_init.py | 121 |
# coding=utf-8
"""Tests QGIS plugin init."""
__author__ = 'Tim Sutton <tim@linfiniti.com>'
__revision__ = '$Format:%H$'
__date__ = '17/10/2010'
__license__ = "GPL"
__copyright__ = 'Copyright 2012, Australia Indonesia Facility for '
__copyright__ += 'Disaster Reduction'
import os
import unittest
import logging
import ConfigParser
LOGGER = logging.getLogger('QGIS')
class TestInit(unittest.TestCase):
"""Test that the plugin init is usable for QGIS.
Based heavily on the validator class by Alessandro
Passoti available here:
http://github.com/qgis/qgis-django/blob/master/qgis-app/
plugins/validator.py
"""
def test_read_init(self):
"""Test that the plugin __init__ will validate on plugins.qgis.org."""
# You should update this list according to the latest in
# https://github.com/qgis/qgis-django/blob/master/qgis-app/
# plugins/validator.py
required_metadata = [
'name',
'description',
'version',
'qgisMinimumVersion',
'email',
'author']
file_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), os.pardir,
'metadata.txt'))
LOGGER.info(file_path)
metadata = []
parser = ConfigParser.ConfigParser()
parser.optionxform = str
parser.read(file_path)
message = 'Cannot find a section named "general" in %s' % file_path
assert parser.has_section('general'), message
metadata.extend(parser.items('general'))
for expectation in required_metadata:
message = ('Cannot find metadata "%s" in metadata source (%s).' % (
expectation, file_path))
self.assertIn(expectation, dict(metadata), message)
if __name__ == '__main__':
unittest.main()
|
isrohutamahopetechnik/MissionPlanner | refs/heads/master | Lib/site-packages/numpy/polynomial/setup.py | 95 |
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('polynomial',parent_package,top_path)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
|
longman694/youtube-dl | refs/heads/mod | youtube_dl/extractor/hbo.py | 40 |
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
xpath_text,
xpath_element,
int_or_none,
parse_duration,
)
class HBOBaseIE(InfoExtractor):
_FORMATS_INFO = {
'pro7': {
'width': 1280,
'height': 720,
},
'1920': {
'width': 1280,
'height': 720,
},
'pro6': {
'width': 768,
'height': 432,
},
'640': {
'width': 768,
'height': 432,
},
'pro5': {
'width': 640,
'height': 360,
},
'highwifi': {
'width': 640,
'height': 360,
},
'high3g': {
'width': 640,
'height': 360,
},
'medwifi': {
'width': 400,
'height': 224,
},
'med3g': {
'width': 400,
'height': 224,
},
}
def _extract_from_id(self, video_id):
video_data = self._download_xml(
'http://render.lv3.hbo.com/data/content/global/videos/data/%s.xml' % video_id, video_id)
title = xpath_text(video_data, 'title', 'title', True)
formats = []
for source in xpath_element(video_data, 'videos', 'sources', True):
if source.tag == 'size':
path = xpath_text(source, './/path')
if not path:
continue
width = source.attrib.get('width')
format_info = self._FORMATS_INFO.get(width, {})
height = format_info.get('height')
fmt = {
'url': path,
'format_id': 'http%s' % ('-%dp' % height if height else ''),
'width': format_info.get('width'),
'height': height,
}
rtmp = re.search(r'^(?P<url>rtmpe?://[^/]+/(?P<app>.+))/(?P<playpath>mp4:.+)$', path)
if rtmp:
fmt.update({
'url': rtmp.group('url'),
'play_path': rtmp.group('playpath'),
'app': rtmp.group('app'),
'ext': 'flv',
'format_id': fmt['format_id'].replace('http', 'rtmp'),
})
formats.append(fmt)
else:
video_url = source.text
if not video_url:
continue
if source.tag == 'tarball':
formats.extend(self._extract_m3u8_formats(
video_url.replace('.tar', '/base_index_w8.m3u8'),
video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
elif source.tag == 'hls':
m3u8_formats = self._extract_m3u8_formats(
video_url.replace('.tar', '/base_index.m3u8'),
video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)
for f in m3u8_formats:
if f.get('vcodec') == 'none' and not f.get('tbr'):
f['tbr'] = int_or_none(self._search_regex(
r'-(\d+)k/', f['url'], 'tbr', default=None))
formats.extend(m3u8_formats)
elif source.tag == 'dash':
formats.extend(self._extract_mpd_formats(
video_url.replace('.tar', '/manifest.mpd'),
video_id, mpd_id='dash', fatal=False))
else:
format_info = self._FORMATS_INFO.get(source.tag, {})
formats.append({
'format_id': 'http-%s' % source.tag,
'url': video_url,
'width': format_info.get('width'),
'height': format_info.get('height'),
})
self._sort_formats(formats)
thumbnails = []
card_sizes = xpath_element(video_data, 'titleCardSizes')
if card_sizes is not None:
for size in card_sizes:
path = xpath_text(size, 'path')
if not path:
continue
width = int_or_none(size.get('width'))
thumbnails.append({
'id': width,
'url': path,
'width': width,
})
return {
'id': video_id,
'title': title,
'duration': parse_duration(xpath_text(video_data, 'duration/tv14')),
'formats': formats,
'thumbnails': thumbnails,
}
class HBOIE(HBOBaseIE):
IE_NAME = 'hbo'
_VALID_URL = r'https?://(?:www\.)?hbo\.com/video/video\.html\?.*vid=(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.hbo.com/video/video.html?autoplay=true&g=u&vid=1437839',
'md5': '2c6a6bc1222c7e91cb3334dad1746e5a',
'info_dict': {
'id': '1437839',
'ext': 'mp4',
'title': 'Ep. 64 Clip: Encryption',
'thumbnail': r're:https?://.*\.jpg$',
'duration': 1072,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
return self._extract_from_id(video_id)
class HBOEpisodeIE(HBOBaseIE):
IE_NAME = 'hbo:episode'
_VALID_URL = r'https?://(?:www\.)?hbo\.com/(?P<path>(?!video)(?:(?:[^/]+/)+video|watch-free-episodes)/(?P<id>[0-9a-z-]+))(?:\.html)?'
_TESTS = [{
'url': 'http://www.hbo.com/girls/episodes/5/52-i-love-you-baby/video/ep-52-inside-the-episode.html?autoplay=true',
'md5': '61ead79b9c0dfa8d3d4b07ef4ac556fb',
'info_dict': {
'id': '1439518',
'display_id': 'ep-52-inside-the-episode',
'ext': 'mp4',
'title': 'Ep. 52: Inside the Episode',
'thumbnail': r're:https?://.*\.jpg$',
'duration': 240,
},
}, {
'url': 'http://www.hbo.com/game-of-thrones/about/video/season-5-invitation-to-the-set.html?autoplay=true',
'only_matching': True,
}, {
'url': 'http://www.hbo.com/watch-free-episodes/last-week-tonight-with-john-oliver',
'only_matching': True,
}]
def _real_extract(self, url):
path, display_id = re.match(self._VALID_URL, url).groups()
content = self._download_json(
'http://www.hbo.com/api/content/' + path, display_id)['content']
video_id = compat_str((content.get('parsed', {}).get(
'common:FullBleedVideo', {}) or content['selectedEpisode'])['videoId'])
info_dict = self._extract_from_id(video_id)
info_dict['display_id'] = display_id
return info_dict
|
itghisi/simplewidgets | refs/heads/master | simplewidgets/PyQt/QtGui.py | 1 |
try:
from PySide.QtGui import *
except ImportError:
try:
from PyQt5.QtGui import *
except ImportError:
raise RuntimeError("No Python-Qt bindings found")
|
kingsamchen/Eureka | refs/heads/master | crack-data-structures-and-algorithms/leetcode/python-impl/roman_to_integer_q13.py | 1 |
# Core idea:
# Because of the spelling rules of Roman numerals, if value(s[i]) < value(s[i+1]),
# then the total must include -value(s[i]) + value(s[i+1]).
# This property lets us compute the result in a single pass.
class Solution(object):
def romanToInt(self, s):
"""
:type s: str
:rtype: int
"""
alphabet = {
'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000
}
value = 0
for i in range(len(s)):
if i < len(s) - 1 and alphabet[s[i]] < alphabet[s[i+1]]:
value -= alphabet[s[i]]
else:
value += alphabet[s[i]]
return value
|
eduNEXT/edunext-ecommerce | refs/heads/master | ecommerce/extensions/voucher/migrations/0003_orderlinevouchers.py | 1 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('order', '0009_auto_20150709_1205'),
('voucher', '0002_couponvouchers'),
]
operations = [
migrations.CreateModel(
name='OrderLineVouchers',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('line', models.ForeignKey(related_name='order_line_vouchers', to='order.Line', on_delete=models.CASCADE)),
('vouchers', models.ManyToManyField(related_name='order_line_vouchers', to='voucher.Voucher', blank=True)),
],
),
]
|
eclipse-ease-addons/engines | refs/heads/master | jython/org.jython/Lib/tabnanny.py | 394 |
#! /usr/bin/env python
"""The Tab Nanny despises ambiguous indentation. She knows no mercy.
tabnanny -- Detection of ambiguous indentation
For the time being this module is intended to be called as a script.
However it is possible to import it into an IDE and use the function
check() described below.
Warning: The API provided by this module is likely to change in future
releases; such changes may not be backward compatible.
"""
# Released to the public domain, by Tim Peters, 15 April 1998.
# XXX Note: this is now a standard library module.
# XXX The API needs to undergo changes however; the current code is too
# XXX script-like. This will be addressed later.
__version__ = "6"
import os
import sys
import getopt
import tokenize
if not hasattr(tokenize, 'NL'):
raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
__all__ = ["check", "NannyNag", "process_tokens"]
verbose = 0
filename_only = 0
def errprint(*args):
sep = ""
for arg in args:
sys.stderr.write(sep + str(arg))
sep = " "
sys.stderr.write("\n")
def main():
global verbose, filename_only
try:
opts, args = getopt.getopt(sys.argv[1:], "qv")
except getopt.error, msg:
errprint(msg)
return
for o, a in opts:
if o == '-q':
filename_only = filename_only + 1
if o == '-v':
verbose = verbose + 1
if not args:
errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...")
return
for arg in args:
check(arg)
class NannyNag(Exception):
"""
Raised by tokeneater() if detecting an ambiguous indent.
Captured and handled in check().
"""
def __init__(self, lineno, msg, line):
self.lineno, self.msg, self.line = lineno, msg, line
def get_lineno(self):
return self.lineno
def get_msg(self):
return self.msg
def get_line(self):
return self.line
def check(file):
"""check(file_or_dir)
If file_or_dir is a directory and not a symbolic link, then recursively
descend the directory tree named by file_or_dir, checking all .py files
along the way. If file_or_dir is an ordinary Python source file, it is
checked for whitespace related problems. The diagnostic messages are
written to standard output using the print statement.
"""
if os.path.isdir(file) and not os.path.islink(file):
if verbose:
print "%r: listing directory" % (file,)
names = os.listdir(file)
for name in names:
fullname = os.path.join(file, name)
if (os.path.isdir(fullname) and
not os.path.islink(fullname) or
os.path.normcase(name[-3:]) == ".py"):
check(fullname)
return
try:
f = open(file)
except IOError, msg:
errprint("%r: I/O Error: %s" % (file, msg))
return
if verbose > 1:
print "checking %r ..." % file
try:
process_tokens(tokenize.generate_tokens(f.readline))
except tokenize.TokenError, msg:
errprint("%r: Token Error: %s" % (file, msg))
return
except IndentationError, msg:
errprint("%r: Indentation Error: %s" % (file, msg))
return
except NannyNag, nag:
badline = nag.get_lineno()
line = nag.get_line()
if verbose:
print "%r: *** Line %d: trouble in tab city! ***" % (file, badline)
print "offending line: %r" % (line,)
print nag.get_msg()
else:
if ' ' in file: file = '"' + file + '"'
if filename_only: print file
else: print file, badline, repr(line)
return
if verbose:
print "%r: Clean bill of health." % (file,)
class Whitespace:
# the characters used for space and tab
S, T = ' \t'
# members:
# raw
# the original string
# n
# the number of leading whitespace characters in raw
# nt
# the number of tabs in raw[:n]
# norm
# the normal form as a pair (count, trailing), where:
# count
# a tuple such that raw[:n] contains count[i]
# instances of S * i + T
# trailing
# the number of trailing spaces in raw[:n]
# It's A Theorem that m.indent_level(t) ==
# n.indent_level(t) for all t >= 1 iff m.norm == n.norm.
# is_simple
# true iff raw[:n] is of the form (T*)(S*)
def __init__(self, ws):
self.raw = ws
S, T = Whitespace.S, Whitespace.T
count = []
b = n = nt = 0
for ch in self.raw:
if ch == S:
n = n + 1
b = b + 1
elif ch == T:
n = n + 1
nt = nt + 1
if b >= len(count):
count = count + [0] * (b - len(count) + 1)
count[b] = count[b] + 1
b = 0
else:
break
self.n = n
self.nt = nt
self.norm = tuple(count), b
self.is_simple = len(count) <= 1
# return length of longest contiguous run of spaces (whether or not
# preceding a tab)
def longest_run_of_spaces(self):
count, trailing = self.norm
return max(len(count)-1, trailing)
def indent_level(self, tabsize):
# count, il = self.norm
# for i in range(len(count)):
# if count[i]:
# il = il + (i/tabsize + 1)*tabsize * count[i]
# return il
# quicker:
# il = trailing + sum (i/ts + 1)*ts*count[i] =
# trailing + ts * sum (i/ts + 1)*count[i] =
# trailing + ts * sum i/ts*count[i] + count[i] =
# trailing + ts * [(sum i/ts*count[i]) + (sum count[i])] =
# trailing + ts * [(sum i/ts*count[i]) + num_tabs]
# and note that i/ts*count[i] is 0 when i < ts
count, trailing = self.norm
il = 0
for i in range(tabsize, len(count)):
il = il + i/tabsize * count[i]
return trailing + tabsize * (il + self.nt)
# return true iff self.indent_level(t) == other.indent_level(t)
# for all t >= 1
def equal(self, other):
return self.norm == other.norm
# return a list of tuples (ts, i1, i2) such that
# i1 == self.indent_level(ts) != other.indent_level(ts) == i2.
# Intended to be used after not self.equal(other) is known, in which
# case it will return at least one witnessing tab size.
def not_equal_witness(self, other):
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
a = []
for ts in range(1, n+1):
if self.indent_level(ts) != other.indent_level(ts):
a.append( (ts,
self.indent_level(ts),
other.indent_level(ts)) )
return a
# Return True iff self.indent_level(t) < other.indent_level(t)
# for all t >= 1.
# The algorithm is due to Vincent Broman.
# Easy to prove it's correct.
# XXXpost that.
# Trivial to prove n is sharp (consider T vs ST).
# Unknown whether there's a faster general way. I suspected so at
# first, but no longer.
# For the special (but common!) case where M and N are both of the
# form (T*)(S*), M.less(N) iff M.len() < N.len() and
# M.num_tabs() <= N.num_tabs(). Proof is easy but kinda long-winded.
# XXXwrite that up.
# Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1.
def less(self, other):
if self.n >= other.n:
return False
if self.is_simple and other.is_simple:
return self.nt <= other.nt
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
# the self.n >= other.n test already did it for ts=1
for ts in range(2, n+1):
if self.indent_level(ts) >= other.indent_level(ts):
return False
return True
# return a list of tuples (ts, i1, i2) such that
# i1 == self.indent_level(ts) >= other.indent_level(ts) == i2.
# Intended to be used after not self.less(other) is known, in which
# case it will return at least one witnessing tab size.
def not_less_witness(self, other):
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
a = []
for ts in range(1, n+1):
if self.indent_level(ts) >= other.indent_level(ts):
a.append( (ts,
self.indent_level(ts),
other.indent_level(ts)) )
return a
def format_witnesses(w):
firsts = map(lambda tup: str(tup[0]), w)
prefix = "at tab size"
if len(w) > 1:
prefix = prefix + "s"
return prefix + " " + ', '.join(firsts)
def process_tokens(tokens):
INDENT = tokenize.INDENT
DEDENT = tokenize.DEDENT
NEWLINE = tokenize.NEWLINE
JUNK = tokenize.COMMENT, tokenize.NL
indents = [Whitespace("")]
check_equal = 0
for (type, token, start, end, line) in tokens:
if type == NEWLINE:
# a program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
# If an INDENT appears, setting check_equal is wrong, and will
# be undone when we see the INDENT.
check_equal = 1
elif type == INDENT:
check_equal = 0
thisguy = Whitespace(token)
if not indents[-1].less(thisguy):
witness = indents[-1].not_less_witness(thisguy)
msg = "indent not greater e.g. " + format_witnesses(witness)
raise NannyNag(start[0], msg, line)
indents.append(thisguy)
elif type == DEDENT:
# there's nothing we need to check here! what's important is
# that when the run of DEDENTs ends, the indentation of the
# program statement (or ENDMARKER) that triggered the run is
# equal to what's left at the top of the indents stack
# Ouch! This assert triggers if the last line of the source
# is indented *and* lacks a newline -- then DEDENTs pop out
# of thin air.
# assert check_equal # else no earlier NEWLINE, or an earlier INDENT
check_equal = 1
del indents[-1]
elif check_equal and type not in JUNK:
# this is the first "real token" following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER; the "line" argument exposes the leading whitespace
# for this statement; in the case of ENDMARKER, line is an empty
# string, so will properly match the empty string with which the
# "indents" stack was seeded
check_equal = 0
thisguy = Whitespace(line)
if not indents[-1].equal(thisguy):
witness = indents[-1].not_equal_witness(thisguy)
msg = "indent not equal e.g. " + format_witnesses(witness)
raise NannyNag(start[0], msg, line)
if __name__ == '__main__':
main()
|
phpython/phpython | refs/heads/master | demo/phpython/exec/script1.py | 2 |
print "module1 body"
|
Eficent/odoomrp-wip | refs/heads/8.0 | procurement_sale_forecast/wizard/__init__.py | 24 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from . import sale_forecast_load
from . import make_procurement
|
django-nonrel/django | refs/heads/nonrel-1.6 | django/contrib/admin/views/main.py | 47 |
import sys
import warnings
from django.core.exceptions import SuspiciousOperation, ImproperlyConfigured
from django.core.paginator import InvalidPage
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.utils import six
from django.utils.datastructures import SortedDict
from django.utils.deprecation import RenameMethodsBase
from django.utils.encoding import force_str, force_text
from django.utils.translation import ugettext, ugettext_lazy
from django.utils.http import urlencode
from django.contrib.admin import FieldListFilter
from django.contrib.admin.exceptions import DisallowedModelAdminLookup, DisallowedModelAdminToField
from django.contrib.admin.options import IncorrectLookupParameters, IS_POPUP_VAR
from django.contrib.admin.util import (quote, get_fields_from_path,
lookup_needs_distinct, prepare_lookup_value)
# Changelist settings
ALL_VAR = 'all'
ORDER_VAR = 'o'
ORDER_TYPE_VAR = 'ot'
PAGE_VAR = 'p'
SEARCH_VAR = 'q'
TO_FIELD_VAR = 't'
ERROR_FLAG = 'e'
IGNORED_PARAMS = (
ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR, TO_FIELD_VAR)
# Text to display within change-list table cells if the value is blank.
EMPTY_CHANGELIST_VALUE = ugettext_lazy('(None)')
def _is_changelist_popup(request):
"""
Returns True if the popup GET parameter is set.
This function is introduced to facilitate deprecating the legacy
value for IS_POPUP_VAR and should be removed at the end of the
deprecation cycle.
"""
if IS_POPUP_VAR in request.GET:
return True
IS_LEGACY_POPUP_VAR = 'pop'
if IS_LEGACY_POPUP_VAR in request.GET:
warnings.warn(
"The `%s` GET parameter has been renamed to `%s`." %
(IS_LEGACY_POPUP_VAR, IS_POPUP_VAR),
PendingDeprecationWarning, 2)
return True
return False
class RenameChangeListMethods(RenameMethodsBase):
renamed_methods = (
('get_query_set', 'get_queryset', PendingDeprecationWarning),
)
class ChangeList(six.with_metaclass(RenameChangeListMethods)):
def __init__(self, request, model, list_display, list_display_links,
list_filter, date_hierarchy, search_fields, list_select_related,
list_per_page, list_max_show_all, list_editable, model_admin):
self.model = model
self.opts = model._meta
self.lookup_opts = self.opts
self.root_queryset = model_admin.get_queryset(request)
self.list_display = list_display
self.list_display_links = list_display_links
self.list_filter = list_filter
self.date_hierarchy = date_hierarchy
self.search_fields = search_fields
self.list_select_related = list_select_related
self.list_per_page = list_per_page
self.list_max_show_all = list_max_show_all
self.model_admin = model_admin
self.preserved_filters = model_admin.get_preserved_filters(request)
# Get search parameters from the query string.
try:
self.page_num = int(request.GET.get(PAGE_VAR, 0))
except ValueError:
self.page_num = 0
self.show_all = ALL_VAR in request.GET
self.is_popup = _is_changelist_popup(request)
to_field = request.GET.get(TO_FIELD_VAR)
if to_field and not model_admin.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
self.to_field = to_field
self.params = dict(request.GET.items())
if PAGE_VAR in self.params:
del self.params[PAGE_VAR]
if ERROR_FLAG in self.params:
del self.params[ERROR_FLAG]
if self.is_popup:
self.list_editable = ()
else:
self.list_editable = list_editable
self.query = request.GET.get(SEARCH_VAR, '')
self.queryset = self.get_queryset(request)
self.get_results(request)
if self.is_popup:
title = ugettext('Select %s')
else:
title = ugettext('Select %s to change')
self.title = title % force_text(self.opts.verbose_name)
self.pk_attname = self.lookup_opts.pk.attname
@property
def root_query_set(self):
warnings.warn("`ChangeList.root_query_set` is deprecated, "
"use `root_queryset` instead.",
PendingDeprecationWarning, 2)
return self.root_queryset
@property
def query_set(self):
warnings.warn("`ChangeList.query_set` is deprecated, "
"use `queryset` instead.",
PendingDeprecationWarning, 2)
return self.queryset
def get_filters_params(self, params=None):
"""
Returns all params except IGNORED_PARAMS
"""
if not params:
params = self.params
lookup_params = params.copy() # a dictionary of the query string
# Remove all the parameters that are globally and systematically
# ignored.
for ignored in IGNORED_PARAMS:
if ignored in lookup_params:
del lookup_params[ignored]
return lookup_params
def get_filters(self, request):
lookup_params = self.get_filters_params()
use_distinct = False
# Normalize the types of keys
for key, value in lookup_params.items():
if not isinstance(key, str):
# 'key' will be used as a keyword argument later, so Python
# requires it to be a string.
del lookup_params[key]
lookup_params[force_str(key)] = value
if not self.model_admin.lookup_allowed(key, value):
raise DisallowedModelAdminLookup("Filtering by %s not allowed" % key)
filter_specs = []
if self.list_filter:
for list_filter in self.list_filter:
if callable(list_filter):
# This is simply a custom list filter class.
spec = list_filter(request, lookup_params,
self.model, self.model_admin)
else:
field_path = None
if isinstance(list_filter, (tuple, list)):
# This is a custom FieldListFilter class for a given field.
field, field_list_filter_class = list_filter
else:
# This is simply a field name, so use the default
# FieldListFilter class that has been registered for
# the type of the given field.
field, field_list_filter_class = list_filter, FieldListFilter.create
if not isinstance(field, models.Field):
field_path = field
field = get_fields_from_path(self.model, field_path)[-1]
spec = field_list_filter_class(field, request, lookup_params,
self.model, self.model_admin, field_path=field_path)
# Check if we need to use distinct()
use_distinct = (use_distinct or
lookup_needs_distinct(self.lookup_opts,
field_path))
if spec and spec.has_output():
filter_specs.append(spec)
# At this point, all the parameters used by the various ListFilters
# have been removed from lookup_params, which now only contains other
# parameters passed via the query string. We now loop through the
# remaining parameters both to ensure that all the parameters are valid
# fields and to determine if at least one of them needs distinct(). If
# the lookup parameters aren't real fields, then bail out.
try:
for key, value in lookup_params.items():
lookup_params[key] = prepare_lookup_value(key, value)
use_distinct = (use_distinct or
lookup_needs_distinct(self.lookup_opts, key))
return filter_specs, bool(filter_specs), lookup_params, use_distinct
except FieldDoesNotExist as e:
six.reraise(IncorrectLookupParameters, IncorrectLookupParameters(e), sys.exc_info()[2])
def get_query_string(self, new_params=None, remove=None):
if new_params is None: new_params = {}
if remove is None: remove = []
p = self.params.copy()
for r in remove:
for k in list(p):
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if v is None:
if k in p:
del p[k]
else:
p[k] = v
return '?%s' % urlencode(sorted(p.items()))
def get_results(self, request):
paginator = self.model_admin.get_paginator(request, self.queryset, self.list_per_page)
# Get the number of objects, with admin filters applied.
result_count = paginator.count
# Get the total number of objects, with no admin filters applied.
# Perform a slight optimization:
# full_result_count is equal to paginator.count if no filters
# were applied
if self.get_filters_params() or self.params.get(SEARCH_VAR):
full_result_count = self.root_queryset.count()
else:
full_result_count = result_count
can_show_all = result_count <= self.list_max_show_all
multi_page = result_count > self.list_per_page
# Get the list of objects to display on this page.
if (self.show_all and can_show_all) or not multi_page:
result_list = self.queryset._clone()
else:
try:
result_list = paginator.page(self.page_num+1).object_list
except InvalidPage:
raise IncorrectLookupParameters
self.result_count = result_count
self.full_result_count = full_result_count
self.result_list = result_list
self.can_show_all = can_show_all
self.multi_page = multi_page
self.paginator = paginator
def _get_default_ordering(self):
ordering = []
if self.model_admin.ordering:
ordering = self.model_admin.ordering
elif self.lookup_opts.ordering:
ordering = self.lookup_opts.ordering
return ordering
def get_ordering_field(self, field_name):
"""
Returns the proper model field name corresponding to the given
field_name to use for ordering. field_name may either be the name of a
proper model field or the name of a method (on the admin or model) or a
callable with the 'admin_order_field' attribute. Returns None if no
proper model field name can be matched.
"""
try:
field = self.lookup_opts.get_field(field_name)
return field.name
except models.FieldDoesNotExist:
# See whether field_name is a name of a non-field
# that allows sorting.
if callable(field_name):
attr = field_name
elif hasattr(self.model_admin, field_name):
attr = getattr(self.model_admin, field_name)
else:
attr = getattr(self.model, field_name)
return getattr(attr, 'admin_order_field', None)
def get_ordering(self, request, queryset):
"""
Returns the list of ordering fields for the change list.
First we check the get_ordering() method in model admin, then we check
the object's default ordering. Then, any manually-specified ordering
from the query string overrides anything. Finally, a deterministic
order is guaranteed by ensuring the primary key is used as the last
ordering field.
"""
params = self.params
ordering = list(self.model_admin.get_ordering(request)
or self._get_default_ordering())
if ORDER_VAR in params:
# Clear ordering and used params
ordering = []
order_params = params[ORDER_VAR].split('.')
for p in order_params:
try:
none, pfx, idx = p.rpartition('-')
field_name = self.list_display[int(idx)]
order_field = self.get_ordering_field(field_name)
if not order_field:
continue # No 'admin_order_field', skip it
ordering.append(pfx + order_field)
except (IndexError, ValueError):
continue # Invalid ordering specified, skip it.
# Add the given query's ordering fields, if any.
ordering.extend(queryset.query.order_by)
# Ensure that the primary key is systematically present in the list of
# ordering fields so we can guarantee a deterministic order across all
# database backends.
pk_name = self.lookup_opts.pk.name
if not (set(ordering) & set(['pk', '-pk', pk_name, '-' + pk_name])):
# The two sets do not intersect, meaning the pk isn't present. So
# we add it.
ordering.append('-pk')
return ordering
def get_ordering_field_columns(self):
"""
Returns a SortedDict of ordering field column numbers and asc/desc
"""
# We must cope with more than one column having the same underlying sort
# field, so we base things on column numbers.
ordering = self._get_default_ordering()
ordering_fields = SortedDict()
if ORDER_VAR not in self.params:
# for ordering specified on ModelAdmin or model Meta, we don't know
# the right column numbers absolutely, because there might be more
# than one column associated with that ordering, so we guess.
for field in ordering:
if field.startswith('-'):
field = field[1:]
order_type = 'desc'
else:
order_type = 'asc'
for index, attr in enumerate(self.list_display):
if self.get_ordering_field(attr) == field:
ordering_fields[index] = order_type
break
else:
for p in self.params[ORDER_VAR].split('.'):
none, pfx, idx = p.rpartition('-')
try:
idx = int(idx)
except ValueError:
continue # skip it
ordering_fields[idx] = 'desc' if pfx == '-' else 'asc'
return ordering_fields
def get_queryset(self, request):
# First, we collect all the declared list filters.
(self.filter_specs, self.has_filters, remaining_lookup_params,
filters_use_distinct) = self.get_filters(request)
# Then, we let every list filter modify the queryset to its liking.
qs = self.root_queryset
for filter_spec in self.filter_specs:
new_qs = filter_spec.queryset(request, qs)
if new_qs is not None:
qs = new_qs
try:
# Finally, we apply the remaining lookup parameters from the query
# string (i.e. those that haven't already been processed by the
# filters).
qs = qs.filter(**remaining_lookup_params)
except (SuspiciousOperation, ImproperlyConfigured):
# Allow certain types of errors to be re-raised as-is so that the
# caller can treat them in a special way.
raise
except Exception as e:
# Every other error is caught with a naked except, because we don't
# have any other way of validating lookup parameters. They might be
# invalid if the keyword arguments are incorrect, or if the values
# are not in the correct type, so we might get FieldError,
# ValueError, ValidationError, or ?.
raise IncorrectLookupParameters(e)
if not qs.query.select_related:
qs = self.apply_select_related(qs)
# Set ordering.
ordering = self.get_ordering(request, qs)
qs = qs.order_by(*ordering)
# Apply search results
qs, search_use_distinct = self.model_admin.get_search_results(
request, qs, self.query)
# Remove duplicates from results, if necessary
if filters_use_distinct | search_use_distinct:
return qs.distinct()
else:
return qs
def apply_select_related(self, qs):
if self.list_select_related is True:
return qs.select_related()
if self.list_select_related is False:
if self.has_related_field_in_list_display():
return qs.select_related()
if self.list_select_related:
return qs.select_related(*self.list_select_related)
return qs
def has_related_field_in_list_display(self):
for field_name in self.list_display:
try:
field = self.lookup_opts.get_field(field_name)
except models.FieldDoesNotExist:
pass
else:
if isinstance(field.rel, models.ManyToOneRel):
return True
return False
def url_for_result(self, result):
pk = getattr(result, self.pk_attname)
return reverse('admin:%s_%s_change' % (self.opts.app_label,
self.opts.model_name),
args=(quote(pk),),
current_app=self.model_admin.admin_site.name)
|
Gregory-Howard/spaCy | refs/heads/master | spacy/tests/serialize/__init__.py | 12133432 | |
dagurval/bitcoinxt | refs/heads/master | qa/rpc-tests/test_framework/__init__.py | 12133432 | |
bufferapp/buffer-django-nonrel | refs/heads/master | tests/modeltests/user_commands/management/commands/__init__.py | 12133432 | |
michel-slm/obnam | refs/heads/fix-py26-compat | obnamlib/__init__.py | 1 |
# Copyright (C) 2009-2014 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cliapp
__version__ = '1.8'
# Import _obnam if it is there. We need to be able to do things without
# it, especially at build time, while we're generating manual pages.
# If _obnam is not there, substitute a dummy that throws an exception
# if used.
class DummyExtension(object):
def __getattr__(self, name):
raise Exception('Trying to use _obnam, but that was not found.')
try:
import _obnam
except ImportError:
_obnam = DummyExtension()
# Exceptions defined by Obnam itself. They should all be a subclass
# of obnamlib.ObnamError.
from structurederror import StructuredError
class ObnamError(StructuredError):
pass
DEFAULT_NODE_SIZE = 256 * 1024 # benchmarked on 2011-09-01
DEFAULT_CHUNK_SIZE = 1024 * 1024 # benchmarked on 2011-09-01
DEFAULT_UPLOAD_QUEUE_SIZE = 128
DEFAULT_LRU_SIZE = 256
DEFAULT_CHUNKIDS_PER_GROUP = 1024
DEFAULT_NAGIOS_WARN_AGE = '27h'
DEFAULT_NAGIOS_CRIT_AGE = '8d'
# The following values have been determined empirically on a laptop
# with an encrypted ext4 filesystem. Other values might be better for
# other situations.
IDPATH_DEPTH = 3
IDPATH_BITS = 12
IDPATH_SKIP = 13
# Maximum identifier for clients, chunks, files, etc. This is the largest
# unsigned 64-bit value. In various places we assume 64-bit field sizes
# for on-disk data structures.
MAX_ID = 2**64 - 1
option_group = {
'perf': 'Performance tweaking',
'devel': 'Development of Obnam itself',
}
from sizeparse import SizeSyntaxError, UnitNameError, ByteSizeParser
from encryption import (generate_symmetric_key,
encrypt_symmetric,
decrypt_symmetric,
get_public_key,
get_public_key_user_ids,
Keyring,
SecretKeyring,
encrypt_with_keyring,
decrypt_with_secret_keys,
SymmetricKeyCache,
EncryptionError)
from hooks import (
Hook, MissingFilterError, NoFilterTagError, FilterHook, HookManager)
from pluginbase import ObnamPlugin
from vfs import (
VirtualFileSystem,
VfsFactory,
VfsTests,
LockFail,
NEW_DIR_MODE,
NEW_FILE_MODE)
from vfs_local import LocalFS
from fsck_work_item import WorkItem
from lockmgr import LockManager
from forget_policy import ForgetPolicy
from app import App, ObnamIOError, ObnamSystemError
from humanise import humanise_duration, humanise_size, humanise_speed
from repo_factory import (
RepositoryFactory,
UnknownRepositoryFormat,
UnknownRepositoryFormatWanted)
from repo_interface import (
RepositoryInterface,
RepositoryInterfaceTests,
RepositoryClientAlreadyExists,
RepositoryClientDoesNotExist,
RepositoryClientListNotLocked,
RepositoryClientListLockingFailed,
RepositoryClientLockingFailed,
RepositoryClientNotLocked,
RepositoryClientKeyNotAllowed,
RepositoryClientGenerationUnfinished,
RepositoryGenerationKeyNotAllowed,
RepositoryGenerationDoesNotExist,
RepositoryClientHasNoGenerations,
RepositoryFileDoesNotExistInGeneration,
RepositoryFileKeyNotAllowed,
RepositoryChunkDoesNotExist,
RepositoryChunkContentNotInIndexes,
RepositoryChunkIndexesNotLocked,
RepositoryChunkIndexesLockingFailed,
repo_key_name,
REPO_CLIENT_TEST_KEY,
REPO_GENERATION_TEST_KEY,
REPO_GENERATION_STARTED,
REPO_GENERATION_ENDED,
REPO_GENERATION_IS_CHECKPOINT,
REPO_GENERATION_FILE_COUNT,
REPO_GENERATION_TOTAL_DATA,
REPO_FILE_TEST_KEY,
REPO_FILE_MODE,
REPO_FILE_MTIME_SEC,
REPO_FILE_MTIME_NSEC,
REPO_FILE_ATIME_SEC,
REPO_FILE_ATIME_NSEC,
REPO_FILE_NLINK,
REPO_FILE_SIZE,
REPO_FILE_UID,
REPO_FILE_USERNAME,
REPO_FILE_GID,
REPO_FILE_GROUPNAME,
REPO_FILE_SYMLINK_TARGET,
REPO_FILE_XATTR_BLOB,
REPO_FILE_BLOCKS,
REPO_FILE_DEV,
REPO_FILE_INO,
REPO_FILE_MD5,
REPO_FILE_INTEGER_KEYS)
#
# Repository format dummy specific modules.
#
from repo_dummy import RepositoryFormatDummy
#
# Repository format 6 specific modules.
#
from metadata import (
Metadata,
read_metadata,
set_metadata,
SetMetadataError,
metadata_fields)
from fmt_6.repo_fmt_6 import RepositoryFormat6
from fmt_6.repo_tree import RepositoryTree
from fmt_6.chunklist import ChunkList
from fmt_6.clientlist import ClientList
from fmt_6.checksumtree import ChecksumTree
from fmt_6.clientmetadatatree import ClientMetadataTree
__all__ = locals()
|
brandonlogan/octavia | refs/heads/master | octavia/common/utils.py | 1 |
# Copyright 2011, VMware, Inc., 2014 A10 Networks
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Borrowed from nova code base, more utilities will be added/borrowed as and
# when needed.
"""Utilities and helper functions."""
import datetime
import hashlib
import random
import socket
# from eventlet.green import subprocess
# from oslo.config import cfg
from octavia.openstack.common import excutils
from octavia.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def get_hostname():
return socket.gethostname()
def get_random_string(length):
"""Get a random hex string of the specified length.
based on Cinder library
cinder/transfer/api.py
"""
rndstr = ""
random.seed(datetime.datetime.now().microsecond)
while len(rndstr) < length:
rndstr += hashlib.sha224(str(random.random())).hexdigest()
return rndstr[0:length]
class exception_logger(object):
"""Wrap a function and log raised exception
:param logger: the logger to log the exception default is LOG.exception
:returns: origin value if no exception raised; re-raise the exception if
any occurred
"""
def __init__(self, logger=None):
self.logger = logger
def __call__(self, func):
if self.logger is None:
LOG = logging.getLogger(func.__module__)
self.logger = LOG.exception
def call(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
with excutils.save_and_reraise_exception():
self.logger(e)
return call
|
nikitasingh981/scikit-learn | refs/heads/master | sklearn/neighbors/approximate.py | 14 |
"""Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Joel Nothman <joel.nothman@gmail.com>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
to vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
radius : float, optional (default = 1.0)
Radius from the data point to its neighbors. This is the parameter
space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranging from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest(random_state=42)
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=42)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# If there are not enough candidates, they are filled in uniformly
# from the unselected indices.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of the input data points by taking the
dot product of the input points and the hash functions, then
transforming the projection into a binary array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# `n_components` = hash size and `n_features` = n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[[i]], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
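
def _example_lshforest_incremental_usage():
    """Illustrative sketch only (not part of scikit-learn): indexing data,
    extending the index with partial_fit and running a radius query. The
    array shapes and the radius value are arbitrary assumptions."""
    rng = np.random.RandomState(0)
    X_index = rng.rand(20, 8)
    X_extra = rng.rand(5, 8)
    X_query = rng.rand(2, 8)
    lshf = LSHForest(n_estimators=10, random_state=42).fit(X_index)
    lshf.partial_fit(X_extra)  # the index now covers 25 samples
    # Each row of the results is an array of neighbors within cosine
    # distance 0.4 of the corresponding query point.
    distances, indices = lshf.radius_neighbors(X_query, radius=0.4)
    return distances, indices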
|
octopus-platform/joern
|
refs/heads/dev
|
python/joern-tools/joern/shelltool/PlotConfiguration.py
|
1
|
#!/usr/bin/env python3
import re
class PlotConfiguration:
def __init__(self):
self.config = []
def _matchRulePattern(self,pattern_type,pattern_rule,element_properties):
if pattern_rule == '*': return True
k,v = pattern_rule.split('.',maxsplit=1)
prop = element_properties.get(k)
if not prop: return False
if v == '*': return True
if v == prop: return True
return False
def getLayout(self,element_type,element_properties):
layout_params = {}
for pt_elt,pt_type,pt_rule,pt_val in self.config:
if pt_elt == element_type and pt_type == 'layout':
if self._matchRulePattern(pt_type,pt_rule,element_properties):
layout_params = self.createLayoutParams(layout_params,pt_val)
return layout_params
def getElementLayout(self,graph_element):
return self.getLayout(graph_element.getElementType(),graph_element.getProperties())
def getDisplayItems(self,element_type,element_properties):
display_items = []
for pt_elt,pt_type,pt_rule,pt_val in self.config:
if pt_elt == element_type and pt_type == 'display':
if self._matchRulePattern(pt_type,pt_rule,element_properties):
display_items = self.createDisplayItems(display_items,element_properties,pt_val)
return [ x[1] for x in display_items ]
def getElementDisplayItems(self,graph_element):
"""return a list of items to display that can be formatted. It is a
list of lists containing [key,value] or [value]."""
return self.getDisplayItems(graph_element.getElementType(),graph_element.getProperties())
def _getItemsFromSpec(self,spec,data,withkey=False):
if spec == "*":
if withkey:
return [(k,[k,v]) for k,v in data.items()]
else:
return [(k,[v]) for k,v in data.items()]
try:
if withkey:
return [ (spec,[ spec, data[spec] ]) ]
return [ (spec,[ data[spec] ]) ]
except KeyError:
return []
def createDisplayItems(self,current_items,element_properties,values):
if len(values)>0 and values[0] == "+":
items = current_items
key_specs = values[1:].split(",")
else:
items = []
key_specs = values.split(",")
for ks in key_specs:
if ks == '': continue
if ks[0] == '&':
items += self._getItemsFromSpec(ks[1:],element_properties,withkey=True)
elif ks[0] == '-':
# remove spec from items
items = [ x for x in items if x[0] != ks[1:] ]
pass
else:
items += self._getItemsFromSpec(ks,element_properties,withkey=False)
return items
def createLayoutParams(self,current_items,values):
if len(values)>0 and values[0]=="+":
layout_items = current_items
vs = values[1:].split(",")
else:
layout_items = {}
vs = values.split(",")
l = [ tuple(v.split("=",maxsplit=1)) for v in vs if v != '']
layout_items.update( dict([ v for v in l if len(v)==2 ]) )
return layout_items
def _parseConfigLine(self,line):
try:
pattern_selector,pattern = line.split('=',maxsplit=1)
pattern_element,pattern_type = pattern_selector.split('.',maxsplit=1)
pattern_rule,pattern_value = pattern.split(':',maxsplit=1)
return [ pattern_element, pattern_type, pattern_rule, pattern_value ]
except ValueError:
return None
def parse(self,configfile):
for l in configfile.readlines():
if re.match(r'^\s*#',l): continue
rule = self._parseConfigLine(l.strip())
if rule:
self.config.append(rule)
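
def _example_plot_configuration_usage():
    """Illustrative sketch only (not part of the original module): config lines
    appear to follow the form '<element>.<type>=<rule>:<value>'. The rules and
    property names below are hypothetical."""
    import io
    config_text = "\n".join([
        "# layout every node as a red box",
        "node.layout=*:color=red,shape=box",
        "# display the 'code' property (with its key) for nodes of type File",
        "node.display=type.File:&code",
    ])
    pc = PlotConfiguration()
    pc.parse(io.StringIO(config_text))
    layout = pc.getLayout('node', {'type': 'File'})
    # layout == {'color': 'red', 'shape': 'box'}
    items = pc.getDisplayItems('node', {'type': 'File', 'code': 'x = 1'})
    # items == [['code', 'x = 1']]
    return layout, items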
|
ZhangXinNan/tensorflow
|
refs/heads/master
|
tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py
|
18
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fractional average pool operation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class FractionalAvgTest(test.TestCase):
# Random number generator with seed.
_PRNG = np.random.RandomState(341261000)
_SEED = 341261001
_SEED2 = 341261002
def _AvgPoolAlongRows(self, input_matrix, row_seq, overlapping):
"""Perform average pool along row of a 2-D matrix based on row_seq.
Args:
input_matrix: A 2-D matrix.
row_seq: Cumulative pooling sequence along row.
overlapping: Whether or not use overlapping when pooling.
Returns:
A 2-D matrix, with
* num_rows = len(row_seq)-1
* num_cols = input_matrix.num_cols.
"""
output_image = np.zeros(input_matrix.shape[1])
row_max = row_seq[-1]
for i in range(row_seq.shape[0] - 1):
row_start = row_seq[i]
row_end = row_seq[i + 1] + 1 if overlapping else row_seq[i + 1]
row_end = min(row_end, row_max)
output_image = np.vstack((output_image, np.mean(
input_matrix[row_start:row_end, :], axis=0))) # axis 0 is along row
# remove the sentinel row
return output_image[1:, :]
def _AvgPoolAlongCols(self, input_matrix, col_seq, overlapping):
"""Perform average pool along column of a 2-D matrix based on col_seq.
Args:
input_matrix: A 2-D matrix.
col_seq: Cumulative pooling sequence along column.
overlapping: Whether or not use overlapping when pooling.
Returns:
A 2-D matrix, with
* num_rows = input_matrix.num_rows
* num_cols = len(col_seq)-1.
"""
input_matrix = input_matrix.transpose()
output_matrix = self._AvgPoolAlongRows(input_matrix, col_seq, overlapping)
return output_matrix.transpose()
def _GetExpectedFractionalAvgPoolResult(self, input_tensor, row_seq, col_seq,
overlapping):
"""Get expected fractional average pooling result.
row_seq and col_seq together defines the fractional pooling region.
Args:
input_tensor: Original input tensor, assuming it is a 4-D tensor, with
dimension as [batch, height/row, width/column, channels/depth].
row_seq: Cumulative pooling sequence along row.
col_seq: Cumulative pooling sequence along column.
overlapping: Use overlapping when doing pooling.
Returns:
A 4-D tensor that is the result of average pooling on input_tensor based
on pooling region defined by row_seq and col_seq, conditioned on whether
or not overlapping is used.
"""
input_shape = input_tensor.shape
output_shape = (input_shape[0], len(row_seq) - 1, len(col_seq) - 1,
input_shape[3])
output_tensor = np.zeros(shape=output_shape, dtype=input_tensor.dtype)
for batch in range(input_shape[0]):
for channel in range(input_shape[3]):
two_dim_slice = input_tensor[batch, :, :, channel]
tmp = self._AvgPoolAlongRows(two_dim_slice, row_seq, overlapping)
output_tensor[batch, :, :, channel] = self._AvgPoolAlongCols(
tmp, col_seq, overlapping)
return output_tensor
def _ValidateFractionalAvgPoolResult(self, input_tensor, pooling_ratio,
pseudo_random, overlapping):
"""Validate FractionalAvgPool's result against expected.
Expected result is computed given input_tensor, and pooling region defined
by row_seq and col_seq.
Args:
input_tensor: A tensor or numpy ndarray.
pooling_ratio: A list or tuple of length 4, first and last element be 1.
pseudo_random: Use pseudo random method to generate pooling sequence.
overlapping: Use overlapping when pooling.
Returns:
None
"""
with self.test_session() as sess:
p, r, c = nn_ops.fractional_avg_pool(
input_tensor,
pooling_ratio,
pseudo_random,
overlapping,
deterministic=True,
seed=self._SEED,
seed2=self._SEED2)
actual, row_seq, col_seq = sess.run([p, r, c])
expected = self._GetExpectedFractionalAvgPoolResult(input_tensor, row_seq,
col_seq, overlapping)
self.assertShapeEqual(expected, p)
self.assertAllClose(expected, actual)
def _testVisually(self):
"""Manual test by printing out intermediate result of a small random tensor.
Since _GetExpectedFractionalAvgPoolResult is 'automated', it feels safer to
have a test case that you can see what's happening.
This test will generate a small, random, int 2D matrix, and feed it to
FractionalAvgPool and _GetExpectedFractionalAvgPoolResult.
"""
num_rows = 6
num_cols = 6
tensor_shape = (1, num_rows, num_cols, 1)
pseudo_random = False
for overlapping in True, False:
print("-" * 70)
print("Testing FractionalAvgPool with overlapping = {}".format(
overlapping))
rand_mat = self._PRNG.randint(10, size=tensor_shape)
pooling_ratio = [1, math.sqrt(2), math.sqrt(2), 1]
with self.test_session() as sess:
p, r, c = nn_ops.fractional_avg_pool(
rand_mat.astype(np.float32),
pooling_ratio,
pseudo_random,
overlapping,
deterministic=True,
seed=self._SEED,
seed2=self._SEED2)
tensor_output, row_seq, col_seq = sess.run([p, r, c])
expected_result = self._GetExpectedFractionalAvgPoolResult(
rand_mat.astype(np.float32), row_seq, col_seq, overlapping)
print("row sequence:")
print(row_seq)
print("column sequence:")
print(col_seq)
print("Input:")
# Print input with pooling region marked.
for i in range(num_rows):
row_to_print = []
for j in range(num_cols):
if j in col_seq:
row_to_print.append("|")
row_to_print.append(str(rand_mat[0, i, j, 0]))
row_to_print.append("|")
if i in row_seq:
print("-" * 2 * len(row_to_print))
print(" ".join(row_to_print))
print("-" * 2 * len(row_to_print))
print("Output from FractionalAvgPool:")
print(tensor_output[0, :, :, 0])
print("Expected result:")
print(expected_result[0, :, :, 0])
def testAllInputOptions(self):
"""Try all possible input options for fractional_avg_pool.
"""
num_batches = 5
num_channels = 3
num_rows = 20
num_cols = 30
for pseudo_random in True, False:
for overlapping in True, False:
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalAvgPoolResult(
rand_mat, [1, math.sqrt(3), math.sqrt(2), 1], pseudo_random,
overlapping)
def testIntegerTensorInput(self):
"""Test FractionalAvgPool works fine when input tensor is integer type.
I would have used the _ValidateFractionalAvgPoolResult function to automate
this process; however, there's a rounding issue. It is caused by numpy.mean
casting integer input to numpy.float64 for intermediate use, while for
fractional_avg_pool the mean operation is integer division (truncated). So,
for this test case, I hard code a simple matrix.
"""
pseudo_random = True
overlapping = True
tensor_shape = (1, 6, 6, 1)
# pyformat: disable
mat = np.array([
[2, 6, 4, 1, 3, 6],
[8, 9, 1, 6, 6, 8],
[3, 9, 8, 2, 5, 6],
[2, 7, 9, 5, 4, 5],
[8, 5, 0, 5, 7, 4],
[4, 4, 5, 9, 7, 2]
])
# pyformat: enable
with self.test_session() as sess:
# Since deterministic = True, seed and seed2 are fixed. Therefore r, and c
# are the same each time. We can have an expected result precomputed.
# r = [0, 2, 4, 6]
# c = [0, 1, 3, 4, 6]
# pyformat: disable
expected = np.array([
[6, 5, 3, 5],
[5, 5, 4, 5],
[5, 4, 7, 5]
]).reshape((1, 3, 4, 1))
# pyformat: enable
p, unused_r, unused_c = nn_ops.fractional_avg_pool(
mat.reshape(tensor_shape), [1, math.sqrt(3), math.sqrt(2), 1],
pseudo_random,
overlapping,
deterministic=True,
seed=self._SEED,
seed2=self._SEED2)
actual = sess.run(p)
self.assertShapeEqual(expected, p)
self.assertAllClose(expected, actual)
def testDifferentTensorShapes(self):
"""Test different shapes of input tensor.
Mainly test different combinations of num_rows and num_cols.
"""
pseudo_random = True
overlapping = True
for num_batches in [1, 3]:
for num_channels in [1, 3]:
for num_rows in [10, 20, 50]:
for num_cols in [10, 20, 50]:
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalAvgPoolResult(
rand_mat, [1, math.sqrt(3), math.sqrt(2), 1], pseudo_random,
overlapping)
def testLargePoolingRatio(self):
"""Test when pooling ratio is not within [1, 2).
"""
pseudo_random = True
overlapping = True
num_batches = 3
num_channels = 3
num_rows = 30
num_cols = 50
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
for row_ratio in [math.sqrt(11), math.sqrt(37)]:
for col_ratio in [math.sqrt(11), math.sqrt(27)]:
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalAvgPoolResult(rand_mat,
[1, row_ratio, col_ratio, 1],
pseudo_random, overlapping)
def testDivisiblePoolingRatio(self):
"""Test when num of rows/cols can evenly divide pooling ratio.
This is a case regular average pooling can handle. Should be handled by
fractional pooling as well.
"""
pseudo_random = True
overlapping = True
num_batches = 3
num_channels = 3
num_rows = 30
num_cols = 50
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalAvgPoolResult(rand_mat, [1, 2, 2, 1], pseudo_random,
overlapping)
def testDifferentInputTensorShape(self):
"""Runs the operation in one session with different input tensor shapes."""
with self.test_session() as sess:
input_holder = array_ops.placeholder(dtypes.float32,
[None, None, None, 3])
pooling_ratio = [1, 1.5, 1.5, 1]
pseudo_random = False
overlapping = False
p, r, c = nn_ops.fractional_avg_pool(
input_holder,
pooling_ratio,
pseudo_random,
overlapping,
deterministic=True,
seed=self._SEED,
seed2=self._SEED2)
# First run.
input_a = np.zeros([3, 32, 32, 3])
actual, row_seq, col_seq = sess.run([p, r, c], {input_holder: input_a})
expected = self._GetExpectedFractionalAvgPoolResult(
input_a, row_seq, col_seq, overlapping)
self.assertSequenceEqual(expected.shape, actual.shape)
# Second run.
input_b = np.zeros([4, 60, 60, 3])
actual, row_seq, col_seq = sess.run([p, r, c], {input_holder: input_b})
expected = self._GetExpectedFractionalAvgPoolResult(
input_b, row_seq, col_seq, overlapping)
self.assertSequenceEqual(expected.shape, actual.shape)
class FractionalAvgPoolGradTest(test.TestCase):
"""Tests for FractionalAvgPoolGrad.
Two types of tests for FractionalAvgPoolGrad.
1) Test fractional_avg_pool_grad() directly.
This type of test relies on gen_nn_ops.avg_pool_grad() returning the
correct result. For example:
* input_tensor_shape = (1, 10, 10, 1)
* window_size = (1, 2, 2, 1)
* stride_size = (1, 2, 2, 1)
* padding: not really important, since 10/2 is divisible
avg pooling should generate the same result as fractional avg pooling with:
* row_sequence = [0, 2, 4, 6, 8, 10]
* col_sequence = [0, 2, 4, 6, 8, 10]
* overlapping = False
This also means their gradients in such case will be the same.
Similarly, when
* input_tensor_shape = (1, 7, 7, 1)
* window_size = (1, 3, 3, 1)
* stride_size = (1, 2, 2, 1)
* padding: not important
avg pooling should generate the same result as fractional avg pooling with:
* row_sequence = [0, 2, 4, 7]
* col_sequence = [0, 2, 4, 7]
* overlapping = True
2) Test through compute_gradient_error()
"""
_PRNG = np.random.RandomState(341261004)
_SEED = 341261005
_SEED2 = 341261006
def _GenerateRandomInputTensor(self, shape):
num_elements = 1
for dim_size in shape:
num_elements *= dim_size
x = self._PRNG.rand(num_elements) * 1000
return x.reshape(shape)
def testDirectNotUseOverlapping(self):
for num_batches in [1, 3]:
for row_window_size in [2, 5]:
for col_window_size in [2, 4]:
num_rows = row_window_size * 5
num_cols = col_window_size * 7
for num_channels in [1, 2]:
input_shape = (num_batches, num_rows, num_cols, num_channels)
with self.test_session() as _:
input_tensor = constant_op.constant(
self._GenerateRandomInputTensor(input_shape).astype(
np.float32))
window_size = [1, row_window_size, col_window_size, 1]
stride_size = [1, row_window_size, col_window_size, 1]
padding = "VALID"
output_tensor = nn_ops.avg_pool(input_tensor, window_size,
stride_size, padding)
output_data = output_tensor.eval()
num_elements = 1
for dim_size in output_data.shape:
num_elements *= dim_size
output_backprop = (self._PRNG.rand(num_elements) *
1000).reshape(output_data.shape)
input_backprop_tensor = gen_nn_ops.avg_pool_grad(
input_tensor.get_shape(), output_backprop, window_size,
stride_size, padding)
input_backprop = input_backprop_tensor.eval()
row_seq = list(range(0, num_rows + 1, row_window_size))
col_seq = list(range(0, num_cols + 1, col_window_size))
fap_input_backprop_tensor = gen_nn_ops.fractional_avg_pool_grad(
input_tensor.get_shape(),
output_backprop,
row_seq,
col_seq,
overlapping=False)
fap_input_backprop = fap_input_backprop_tensor.eval()
self.assertShapeEqual(input_backprop, fap_input_backprop_tensor)
self.assertAllClose(input_backprop, fap_input_backprop)
def testDirectUseOverlapping(self):
for num_batches in [1, 3]:
for row_window_size in [2, 5]:
for col_window_size in [2, 4]:
num_rows = (row_window_size - 1) * 5 + 1
num_cols = (col_window_size - 1) * 7 + 1
for num_channels in [1, 2]:
input_shape = (num_batches, num_rows, num_cols, num_channels)
with self.test_session() as _:
input_tensor = constant_op.constant(
self._GenerateRandomInputTensor(input_shape).astype(
np.float32))
window_size = [1, row_window_size, col_window_size, 1]
stride_size = [1, row_window_size - 1, col_window_size - 1, 1]
padding = "VALID"
output_tensor = nn_ops.avg_pool(input_tensor, window_size,
stride_size, padding)
output_data = output_tensor.eval()
num_elements = 1
for dim_size in output_data.shape:
num_elements *= dim_size
output_backprop = (self._PRNG.rand(num_elements) *
1000).reshape(output_data.shape)
input_backprop_tensor = gen_nn_ops.avg_pool_grad(
input_tensor.get_shape(), output_backprop, window_size,
stride_size, padding)
input_backprop = input_backprop_tensor.eval()
row_seq = list(range(0, num_rows, row_window_size - 1))
col_seq = list(range(0, num_cols, col_window_size - 1))
row_seq[-1] += 1
col_seq[-1] += 1
fap_input_backprop_tensor = gen_nn_ops.fractional_avg_pool_grad(
input_tensor.get_shape(),
output_backprop,
row_seq,
col_seq,
overlapping=True)
fap_input_backprop = fap_input_backprop_tensor.eval()
self.assertShapeEqual(input_backprop, fap_input_backprop_tensor)
self.assertAllClose(input_backprop, fap_input_backprop)
def testAllInputOptionsThroughGradientError(self):
input_shape = (1, 7, 13, 1)
input_data = self._GenerateRandomInputTensor(input_shape)
pooling_ratio = [1, math.sqrt(2), math.sqrt(3), 1]
for pseudo_random in True, False:
for overlapping in True, False:
with self.test_session() as _:
input_tensor = constant_op.constant(input_data, shape=input_shape)
output_tensor, unused_a, unused_b = nn_ops.fractional_avg_pool(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
overlapping=overlapping,
deterministic=True,
seed=self._SEED,
seed2=self._SEED2)
output_data = output_tensor.eval()
output_shape = output_data.shape
# error_margin and delta setting is similar to avg_pool_grad.
error_margin = 1e-4
gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
output_shape,
x_init_value=input_data.reshape(input_shape),
delta=1e-2)
self.assertLess(gradient_error, error_margin)
def testDifferentTensorShapesThroughGradientError(self):
pseudo_random = True
overlapping = True
pooling_ratio = [1, math.sqrt(3), math.sqrt(2), 1]
for num_batches in [1, 2]:
for num_rows in [5, 13]:
for num_cols in [5, 11]:
for num_channels in [1, 3]:
input_shape = (num_batches, num_rows, num_cols, num_channels)
input_data = self._GenerateRandomInputTensor(input_shape)
with self.test_session() as _:
input_tensor = constant_op.constant(input_data, shape=input_shape)
output_tensor, unused_a, unused_b = nn_ops.fractional_avg_pool(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
overlapping=overlapping,
deterministic=True,
seed=self._SEED,
seed2=self._SEED2)
output_data = output_tensor.eval()
output_shape = output_data.shape
# error_margin and delta setting is similar to avg_pool_grad.
error_margin = 1e-4
gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
output_shape,
x_init_value=input_data.reshape(input_shape),
delta=1e-2)
self.assertLess(gradient_error, error_margin)
def testLargePoolingRatioThroughGradientError(self):
input_shape = (1, 17, 23, 1)
input_data = self._GenerateRandomInputTensor(input_shape)
pooling_ratio = (1, math.sqrt(13), math.sqrt(7), 1)
output_shape = [int(a / b) for a, b in zip(input_shape, pooling_ratio)]
overlapping = True
pseudo_random = False
with self.test_session() as _:
input_tensor = constant_op.constant(input_data, shape=input_shape)
output_tensor, unused_a, unused_b = nn_ops.fractional_avg_pool(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
overlapping=overlapping,
deterministic=True,
seed=self._SEED,
seed2=self._SEED2)
# error_margin and delta setting is similar to avg_pool_grad.
error_margin = 1e-4
gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
output_shape,
x_init_value=input_data.reshape(input_shape),
delta=1e-2)
self.assertLess(gradient_error, error_margin)
if __name__ == "__main__":
test.main()
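
# Illustrative sketch only (not part of the original test): a NumPy-only
# walk-through of how a cumulative pooling sequence defines the regions that
# _AvgPoolAlongRows averages over. The matrix and sequence are arbitrary
# assumptions; the function is never called by the test runner.
def _example_pooling_along_rows():
  matrix = np.arange(24, dtype=np.float64).reshape(6, 4)
  row_seq = np.array([0, 2, 4, 6])  # three regions: rows 0-1, 2-3 and 4-5
  pooled = np.vstack([
      np.mean(matrix[row_seq[i]:row_seq[i + 1], :], axis=0)
      for i in range(len(row_seq) - 1)
  ])
  return pooled  # shape (3, 4): one output row per pooling region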
|
mollstam/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/requests-2.7.0/requests/packages/chardet/euctwprober.py
|
2993
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCTWDistributionAnalysis
from .mbcssm import EUCTWSMModel
class EUCTWProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCTWSMModel)
self._mDistributionAnalyzer = EUCTWDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-TW"
|
wiltonlazary/arangodb
|
refs/heads/devel
|
3rdParty/boost/1.61.0/tools/build/src/tools/types/__init__.py
|
61
|
__all__ = [
'asm',
'cpp',
'exe',
'html',
'lib',
'obj',
'preprocessed',
'rsp',
]
def register_all ():
for i in __all__:
m = __import__ (__name__ + '.' + i)
reg = i + '.register ()'
#exec (reg)
# TODO: (PF) I thought these would be imported automatically. Does anyone know why they aren't?
register_all ()
|
titansgroup/python-phonenumbers
|
refs/heads/dev
|
python/phonenumbers/shortdata/region_SL.py
|
11
|
"""Auto-generated file, do not edit by hand. SL metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_SL = PhoneMetadata(id='SL', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[069]\\d{2,4}', possible_number_pattern='\\d{3,5}'),
toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='(?:01|99)9', possible_number_pattern='\\d{3}', example_number='999'),
short_code=PhoneNumberDesc(national_number_pattern='(?:01|99)9|60400', possible_number_pattern='\\d{3,5}', example_number='999'),
standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
carrier_specific=PhoneNumberDesc(national_number_pattern='60400', possible_number_pattern='\\d{5}', example_number='60400'),
short_data=True)
|
Qirky/PyKinectTk
|
refs/heads/master
|
PyKinectTk/utils/Env.py
|
1
|
"""
Env.py
This module sets up the initial work environment and database
"""
from SQL import *
import Skeleton
from os.path import realpath, abspath, join
from os.path import isdir, isfile, dirname
def local(filename):
""" Returns the realpath for a file in THIS directory """
return abspath(join(dirname(__file__), filename))
def getpath(filename):
""" File should contain just one line; the root directory """
try:
with open(filename) as f:
path = realpath(f.read().strip())
if not isdir(path):
raise
except:
path = "."
return path
class Root:
def __init__(self, path):
self.path = path
def __str__(self):
return self.path
def __add__(self, s):
return join(self.path, s)
def add(self, s):
return Root(self + s)
# Location of config file
config = local("Settings/config")
#: DIR Reads the known location of the work folder from the hidden file, 'config'
DIR = Root( getpath( config ) )
#: Work Directory Constants
DATABASE = DIR + 'Recordings.db'
AUDIO_DIR = DIR.add( "AUDIO" )
VIDEO_DIR = DIR.add( "VIDEO" )
XEF_DIR = DIR.add( "XEF" )
IMAGE_DIR = DIR.add( "IMAGE" )
SUBDIRECTORIES = [
AUDIO_DIR,
VIDEO_DIR,
XEF_DIR,
IMAGE_DIR
]
# Error messages to display
PYGAME_ERROR = "ImportError: Kinect data playback requires PyGame v1.9"
# This value is used to divide raw Kinect timestamp values (presumably 100 ns ticks) into seconds
TIME_DIV = 10000000.0
def CreateEnvironment():
""" Checks if the necessary folders exist and creates them if not """
from os import mkdir
directories = SUBDIRECTORIES
for d in directories:
path = str(d)
if not isdir(path):
mkdir(path)
# Check if our database is there, if not then create environment
if not isfile(DATABASE):
CreateDatabase(DATABASE)
return True
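
def _example_root_usage():
    """Illustrative sketch only (not part of the original module): how Root
    builds paths with '+' (plain string) and add() (a new Root). The base
    folder below is hypothetical."""
    root = Root("/tmp/PyKinectTk")
    db_path = root + "Recordings.db"   # a plain string path
    audio_root = root.add("AUDIO")     # a Root wrapping the joined path
    return db_path, str(audio_root)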
|
10clouds/edx-platform
|
refs/heads/dev
|
common/lib/xmodule/xmodule/tests/test_fields.py
|
78
|
"""Tests for classes defined in fields.py."""
import datetime
import unittest
from django.utils.timezone import UTC
from xmodule.fields import Date, Timedelta, RelativeTime
from xmodule.timeinfo import TimeInfo
class DateTest(unittest.TestCase):
date = Date()
def compare_dates(self, dt1, dt2, expected_delta):
self.assertEqual(
dt1 - dt2,
expected_delta,
str(dt1) + "-" + str(dt2) + "!=" + str(expected_delta)
)
def test_from_json(self):
"""Test conversion from iso compatible date strings to struct_time"""
self.compare_dates(
DateTest.date.from_json("2013-01-01"),
DateTest.date.from_json("2012-12-31"),
datetime.timedelta(days=1)
)
self.compare_dates(
DateTest.date.from_json("2013-01-01T00"),
DateTest.date.from_json("2012-12-31T23"),
datetime.timedelta(hours=1)
)
self.compare_dates(
DateTest.date.from_json("2013-01-01T00:00"),
DateTest.date.from_json("2012-12-31T23:59"),
datetime.timedelta(minutes=1)
)
self.compare_dates(
DateTest.date.from_json("2013-01-01T00:00:00"),
DateTest.date.from_json("2012-12-31T23:59:59"),
datetime.timedelta(seconds=1)
)
self.compare_dates(
DateTest.date.from_json("2013-01-01T00:00:00Z"),
DateTest.date.from_json("2012-12-31T23:59:59Z"),
datetime.timedelta(seconds=1)
)
self.compare_dates(
DateTest.date.from_json("2012-12-31T23:00:01-01:00"),
DateTest.date.from_json("2013-01-01T00:00:00+01:00"),
datetime.timedelta(hours=1, seconds=1)
)
def test_enforce_type(self):
self.assertEqual(DateTest.date.enforce_type(None), None)
self.assertEqual(DateTest.date.enforce_type(""), None)
self.assertEqual(
DateTest.date.enforce_type("2012-12-31T23:00:01"),
datetime.datetime(2012, 12, 31, 23, 0, 1, tzinfo=UTC())
)
self.assertEqual(
DateTest.date.enforce_type(1234567890000),
datetime.datetime(2009, 2, 13, 23, 31, 30, tzinfo=UTC())
)
self.assertEqual(
DateTest.date.enforce_type(datetime.datetime(2014, 5, 9, 21, 1, 27, tzinfo=UTC())),
datetime.datetime(2014, 5, 9, 21, 1, 27, tzinfo=UTC())
)
with self.assertRaises(TypeError):
DateTest.date.enforce_type([1])
def test_return_None(self):
self.assertIsNone(DateTest.date.from_json(""))
self.assertIsNone(DateTest.date.from_json(None))
with self.assertRaises(TypeError):
DateTest.date.from_json(['unknown value'])
def test_old_due_date_format(self):
current = datetime.datetime.today()
self.assertEqual(
datetime.datetime(current.year, 3, 12, 12, tzinfo=UTC()),
DateTest.date.from_json("March 12 12:00")
)
self.assertEqual(
datetime.datetime(current.year, 12, 4, 16, 30, tzinfo=UTC()),
DateTest.date.from_json("December 4 16:30")
)
self.assertIsNone(DateTest.date.from_json("12 12:00"))
def test_non_std_from_json(self):
"""
Test the non-standard args being passed to from_json
"""
now = datetime.datetime.now(UTC())
delta = now - datetime.datetime.fromtimestamp(0, UTC())
self.assertEqual(
DateTest.date.from_json(delta.total_seconds() * 1000),
now
)
yesterday = datetime.datetime.now(UTC()) - datetime.timedelta(days=-1)
self.assertEqual(DateTest.date.from_json(yesterday), yesterday)
def test_to_json(self):
"""
Test converting time reprs to iso dates
"""
self.assertEqual(
DateTest.date.to_json(datetime.datetime.strptime("2012-12-31T23:59:59Z", "%Y-%m-%dT%H:%M:%SZ")),
"2012-12-31T23:59:59Z"
)
self.assertEqual(
DateTest.date.to_json(DateTest.date.from_json("2012-12-31T23:59:59Z")),
"2012-12-31T23:59:59Z"
)
self.assertEqual(
DateTest.date.to_json(DateTest.date.from_json("2012-12-31T23:00:01-01:00")),
"2012-12-31T23:00:01-01:00"
)
with self.assertRaises(TypeError):
DateTest.date.to_json('2012-12-31T23:00:01-01:00')
class TimedeltaTest(unittest.TestCase):
delta = Timedelta()
def test_from_json(self):
self.assertEqual(
TimedeltaTest.delta.from_json('1 day 12 hours 59 minutes 59 seconds'),
datetime.timedelta(days=1, hours=12, minutes=59, seconds=59)
)
self.assertEqual(
TimedeltaTest.delta.from_json('1 day 46799 seconds'),
datetime.timedelta(days=1, seconds=46799)
)
def test_enforce_type(self):
self.assertEqual(TimedeltaTest.delta.enforce_type(None), None)
self.assertEqual(
TimedeltaTest.delta.enforce_type(datetime.timedelta(days=1, seconds=46799)),
datetime.timedelta(days=1, seconds=46799)
)
self.assertEqual(
TimedeltaTest.delta.enforce_type('1 day 46799 seconds'),
datetime.timedelta(days=1, seconds=46799)
)
with self.assertRaises(TypeError):
TimedeltaTest.delta.enforce_type([1])
def test_to_json(self):
self.assertEqual(
'1 days 46799 seconds',
TimedeltaTest.delta.to_json(datetime.timedelta(days=1, hours=12, minutes=59, seconds=59))
)
class TimeInfoTest(unittest.TestCase):
def test_time_info(self):
due_date = datetime.datetime(2000, 4, 14, 10, tzinfo=UTC())
grace_pd_string = '1 day 12 hours 59 minutes 59 seconds'
timeinfo = TimeInfo(due_date, grace_pd_string)
self.assertEqual(
timeinfo.close_date,
due_date + Timedelta().from_json(grace_pd_string)
)
class RelativeTimeTest(unittest.TestCase):
delta = RelativeTime()
def test_from_json(self):
self.assertEqual(
RelativeTimeTest.delta.from_json('0:05:07'),
datetime.timedelta(seconds=307)
)
self.assertEqual(
RelativeTimeTest.delta.from_json(100.0),
datetime.timedelta(seconds=100)
)
self.assertEqual(
RelativeTimeTest.delta.from_json(None),
datetime.timedelta(seconds=0)
)
with self.assertRaises(TypeError):
RelativeTimeTest.delta.from_json(1234) # int
with self.assertRaises(ValueError):
RelativeTimeTest.delta.from_json("77:77:77")
def test_enforce_type(self):
self.assertEqual(RelativeTimeTest.delta.enforce_type(None), None)
self.assertEqual(
RelativeTimeTest.delta.enforce_type(datetime.timedelta(days=1, seconds=46799)),
datetime.timedelta(days=1, seconds=46799)
)
self.assertEqual(
RelativeTimeTest.delta.enforce_type('0:05:07'),
datetime.timedelta(seconds=307)
)
with self.assertRaises(TypeError):
RelativeTimeTest.delta.enforce_type([1])
def test_to_json(self):
self.assertEqual(
"01:02:03",
RelativeTimeTest.delta.to_json(datetime.timedelta(seconds=3723))
)
self.assertEqual(
"00:00:00",
RelativeTimeTest.delta.to_json(None)
)
self.assertEqual(
"00:01:40",
RelativeTimeTest.delta.to_json(100.0)
)
error_msg = "RelativeTime max value is 23:59:59=86400.0 seconds, but 90000.0 seconds is passed"
with self.assertRaisesRegexp(ValueError, error_msg):
RelativeTimeTest.delta.to_json(datetime.timedelta(seconds=90000))
with self.assertRaises(TypeError):
RelativeTimeTest.delta.to_json("123")
def test_str(self):
self.assertEqual(
"01:02:03",
RelativeTimeTest.delta.to_json(datetime.timedelta(seconds=3723))
)
self.assertEqual(
"11:02:03",
RelativeTimeTest.delta.to_json(datetime.timedelta(seconds=39723))
)
|
allotria/intellij-community
|
refs/heads/master
|
python/testData/refactoring/inlineFunction/removeTypingOverrides/main.py
|
12
|
from typing import overload
class MyClass:
def __init__(self, my_val):
self.my_val = my_val
@overload
def method(self, x: int) -> int:
pass
@overload
def method(self, x: str) -> str:
pass
def method(self, x):
print(self.my_val)
print(x)
return x
my_class = MyClass(1)
res = my_class.met<caret>hod(2)
|
kant/inasafe
|
refs/heads/develop
|
safe/impact_functions/generic/classified_raster_building/impact_function.py
|
2
|
# coding=utf-8
"""InaSAFE Disaster risk tool by Australian Aid - Generic Impact function on
Building for Classified Hazard.
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'lucernae'
__date__ = '23/03/15'
import logging
from collections import OrderedDict
from numpy import round as numpy_round
from safe.impact_functions.bases.classified_rh_classified_ve import \
ClassifiedRHClassifiedVE
from safe.storage.vector import Vector
from safe.engine.interpolation import assign_hazard_values_to_exposure_data
from safe.utilities.i18n import tr
from safe.common.utilities import get_osm_building_usage
from safe.impact_functions.generic.classified_raster_building\
.metadata_definitions import ClassifiedRasterHazardBuildingMetadata
from safe.impact_reports.building_exposure_report_mixin import (
BuildingExposureReportMixin)
from safe.common.exceptions import KeywordNotFoundError
LOGGER = logging.getLogger('InaSAFE')
class ClassifiedRasterHazardBuildingFunction(
ClassifiedRHClassifiedVE,
BuildingExposureReportMixin):
"""Impact plugin for classified hazard impact on building data"""
_metadata = ClassifiedRasterHazardBuildingMetadata()
# Function documentation
def __init__(self):
super(ClassifiedRasterHazardBuildingFunction, self).__init__()
self.affected_field = 'affected'
def notes(self):
"""Return the notes section of the report.
:return: The notes that should be attached to this impact report.
:rtype: list
"""
return [
{
'content': tr('Notes'),
'header': True
},
{
'content': tr(
'Map shows buildings affected in low, medium and '
'high hazard class areas.')
}]
def run(self):
"""Classified hazard impact to buildings (e.g. from Open Street Map).
"""
self.validate()
self.prepare()
# Value from layer's keywords
# Try to get the value from the keyword; if it does not exist, do not
# fail but fall back to the old get_osm_building_usage
try:
structure_class_field = self.exposure.keyword(
'structure_class_field')
except KeywordNotFoundError:
structure_class_field = None
# The 3 classes
categorical_hazards = self.parameters['Categorical hazards'].value
low_t = categorical_hazards[0].value
medium_t = categorical_hazards[1].value
high_t = categorical_hazards[2].value
# Determine attribute name for hazard levels
if self.hazard.layer.is_raster:
hazard_attribute = 'level'
else:
hazard_attribute = None
interpolated_result = assign_hazard_values_to_exposure_data(
self.hazard.layer,
self.exposure.layer,
attribute_name=hazard_attribute,
mode='constant')
# Extract relevant exposure data
attribute_names = interpolated_result.get_attribute_names()
attributes = interpolated_result.get_data()
buildings_total = len(interpolated_result)
# Calculate building impact
self.buildings = {}
self.affected_buildings = OrderedDict([
(tr('High Hazard Class'), {}),
(tr('Medium Hazard Class'), {}),
(tr('Low Hazard Class'), {})
])
for i in range(buildings_total):
if (structure_class_field and
structure_class_field in attribute_names):
usage = attributes[i][structure_class_field]
else:
usage = get_osm_building_usage(attribute_names, attributes[i])
if usage is None or usage == 0:
usage = 'unknown'
if usage not in self.buildings:
self.buildings[usage] = 0
for category in self.affected_buildings.keys():
self.affected_buildings[category][usage] = OrderedDict([
(tr('Buildings Affected'), 0)])
# Count all buildings by type
self.buildings[usage] += 1
attributes[i][self.target_field] = 0
attributes[i][self.affected_field] = 0
level = float(attributes[i]['level'])
level = float(numpy_round(level))
if level == high_t:
impact_level = tr('High Hazard Class')
elif level == medium_t:
impact_level = tr('Medium Hazard Class')
elif level == low_t:
impact_level = tr('Low Hazard Class')
else:
continue
# Add calculated impact to existing attributes
attributes[i][self.target_field] = {
tr('High Hazard Class'): 3,
tr('Medium Hazard Class'): 2,
tr('Low Hazard Class'): 1
}[impact_level]
attributes[i][self.affected_field] = 1
# Count affected buildings by type
self.affected_buildings[impact_level][usage][
tr('Buildings Affected')] += 1
# Consolidate the small building usage groups < 25 to other
self._consolidate_to_other()
# Create style
style_classes = [dict(label=tr('High'),
value=3,
colour='#F31A1C',
transparency=0,
size=2,
border_color='#969696',
border_width=0.2),
dict(label=tr('Medium'),
value=2,
colour='#F4A442',
transparency=0,
size=2,
border_color='#969696',
border_width=0.2),
dict(label=tr('Low'),
value=1,
colour='#EBF442',
transparency=0,
size=2,
border_color='#969696',
border_width=0.2),
dict(label=tr('Not Affected'),
value=None,
colour='#1EFC7C',
transparency=0,
size=2,
border_color='#969696',
border_width=0.2)]
style_info = dict(target_field=self.target_field,
style_classes=style_classes,
style_type='categorizedSymbol')
impact_table = impact_summary = self.generate_html_report()
# For printing map purpose
map_title = tr('Buildings affected')
legend_units = tr('(Low, Medium, High)')
legend_title = tr('Structure inundated status')
# Create vector layer and return
vector_layer = Vector(
data=attributes,
projection=self.exposure.layer.get_projection(),
geometry=self.exposure.layer.get_geometry(),
name=tr('Estimated buildings affected'),
keywords={
'impact_summary': impact_summary,
'impact_table': impact_table,
'target_field': self.affected_field,
'map_title': map_title,
'legend_units': legend_units,
'legend_title': legend_title,
'buildings_total': buildings_total,
'buildings_affected': self.total_affected_buildings},
style_info=style_info)
self._impact = vector_layer
return vector_layer
|
geometalab/Vector-Tiles-Reader-QGIS-Plugin
|
refs/heads/dev-qgis3
|
ext-libs/shapely/geometry/multilinestring.py
|
16
|
"""Collections of linestrings and related utilities
"""
import sys
if sys.version_info[0] < 3:
range = xrange
from ctypes import c_double, c_void_p, cast, POINTER
from shapely.geos import lgeos
from shapely.geometry.base import BaseMultipartGeometry, geos_geom_from_py
from shapely.geometry import linestring
from shapely.geometry.proxy import CachingGeometryProxy
__all__ = ['MultiLineString', 'asMultiLineString']
class MultiLineString(BaseMultipartGeometry):
"""
A collection of one or more line strings
A MultiLineString has non-zero length and zero area.
Attributes
----------
geoms : sequence
A sequence of LineStrings
"""
def __init__(self, lines=None):
"""
Parameters
----------
lines : sequence
A sequence of line-like coordinate sequences or objects that
provide the numpy array interface, including instances of
LineString.
Example
-------
Construct a collection containing one line string.
>>> lines = MultiLineString( [[[0.0, 0.0], [1.0, 2.0]]] )
"""
super(MultiLineString, self).__init__()
if not lines:
# allow creation of empty multilinestrings, to support unpickling
pass
else:
self._geom, self._ndim = geos_multilinestring_from_py(lines)
def shape_factory(self, *args):
return linestring.LineString(*args)
@property
def __geo_interface__(self):
return {
'type': 'MultiLineString',
'coordinates': tuple(tuple(c for c in g.coords) for g in self.geoms)
}
def svg(self, scale_factor=1., stroke_color=None):
"""Returns a group of SVG polyline elements for the LineString geometry.
Parameters
==========
scale_factor : float
Multiplication factor for the SVG stroke-width. Default is 1.
stroke_color : str, optional
Hex string for stroke color. Default is to use "#66cc99" if
geometry is valid, and "#ff3333" if invalid.
"""
if self.is_empty:
return '<g />'
if stroke_color is None:
stroke_color = "#66cc99" if self.is_valid else "#ff3333"
return '<g>' + \
''.join(p.svg(scale_factor, stroke_color) for p in self) + \
'</g>'
class MultiLineStringAdapter(CachingGeometryProxy, MultiLineString):
context = None
_other_owned = False
def __init__(self, context):
self.context = context
self.factory = geos_multilinestring_from_py
@property
def _ndim(self):
try:
# From array protocol
array = self.context[0].__array_interface__
n = array['shape'][1]
assert n == 2 or n == 3
return n
except AttributeError:
# Fall back on list
return len(self.context[0][0])
def asMultiLineString(context):
"""Adapts a sequence of objects to the MultiLineString interface"""
return MultiLineStringAdapter(context)
def geos_multilinestring_from_py(ob):
# ob must be either a MultiLineString, a sequence, or
# array of sequences or arrays
if isinstance(ob, MultiLineString):
return geos_geom_from_py(ob)
obs = getattr(ob, 'geoms', ob)
L = len(obs)
assert L >= 1
exemplar = obs[0]
try:
N = len(exemplar[0])
except TypeError:
N = exemplar._ndim
if N not in (2, 3):
raise ValueError("Invalid coordinate dimensionality")
# Array of pointers to point geometries
subs = (c_void_p * L)()
# add to coordinate sequence
for l in range(L):
geom, ndims = linestring.geos_linestring_from_py(obs[l])
subs[l] = cast(geom, c_void_p)
return (lgeos.GEOSGeom_createCollection(5, subs, L), N)
# Test runner
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
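# A minimal usage sketch of the constructor documented above; it runs only when
# the module is executed directly with a "demo" argument and GEOS is available.
if __name__ == "__main__" and "demo" in sys.argv:
    mls = MultiLineString([[(0.0, 0.0), (1.0, 2.0)], [(4.0, 4.0), (5.0, 6.0)]])
    print(mls.geom_type)                  # 'MultiLineString'
    print(len(mls.geoms))                 # 2
    print(mls.__geo_interface__['type'])  # 'MultiLineString'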
|
dkubiak789/OpenUpgrade
|
refs/heads/8.0
|
addons/google_drive/__init__.py
|
437
|
import google_drive
|
TRox1972/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/fusion.py
|
39
|
from __future__ import unicode_literals
from .common import InfoExtractor
from .ooyala import OoyalaIE
class FusionIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?fusion\.net/video/(?P<id>\d+)'
_TESTS = [{
'url': 'http://fusion.net/video/201781/u-s-and-panamanian-forces-work-together-to-stop-a-vessel-smuggling-drugs/',
'info_dict': {
'id': 'ZpcWNoMTE6x6uVIIWYpHh0qQDjxBuq5P',
'ext': 'mp4',
'title': 'U.S. and Panamanian forces work together to stop a vessel smuggling drugs',
'description': 'md5:0cc84a9943c064c0f46b128b41b1b0d7',
'duration': 140.0,
},
'params': {
'skip_download': True,
},
'add_ie': ['Ooyala'],
}, {
'url': 'http://fusion.net/video/201781',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
ooyala_code = self._search_regex(
r'data-ooyala-id=(["\'])(?P<code>(?:(?!\1).)+)\1',
webpage, 'ooyala code', group='code')
return OoyalaIE._build_url_result(ooyala_code)
|
meabsence/python-for-android
|
refs/heads/master
|
python3-alpha/extra_modules/gdata/tlslite/BaseDB.py
|
46
|
"""Base class for SharedKeyDB and VerifierDB."""
import dbm
import _thread
class BaseDB:
def __init__(self, filename, type):
self.type = type
self.filename = filename
if self.filename:
self.db = None
else:
self.db = {}
self.lock = _thread.allocate_lock()
def create(self):
"""Create a new on-disk database.
@raise anydbm.error: If there's a problem creating the database.
"""
if self.filename:
self.db = dbm.open(self.filename, "n") #raises anydbm.error
self.db["--Reserved--type"] = self.type
self.db.sync()
else:
self.db = {}
def open(self):
"""Open a pre-existing on-disk database.
@raise anydbm.error: If there's a problem opening the database.
@raise ValueError: If the database is not of the right type.
"""
if not self.filename:
raise ValueError("Can only open on-disk databases")
self.db = dbm.open(self.filename, "w") #raises anydbm.error
try:
if self.db["--Reserved--type"] != self.type:
raise ValueError("Not a %s database" % self.type)
except KeyError:
raise ValueError("Not a recognized database")
def __getitem__(self, username):
        if self.db is None:
raise AssertionError("DB not open")
self.lock.acquire()
try:
valueStr = self.db[username]
finally:
self.lock.release()
return self._getItem(username, valueStr)
def __setitem__(self, username, value):
        if self.db is None:
raise AssertionError("DB not open")
valueStr = self._setItem(username, value)
self.lock.acquire()
try:
self.db[username] = valueStr
if self.filename:
self.db.sync()
finally:
self.lock.release()
def __delitem__(self, username):
        if self.db is None:
raise AssertionError("DB not open")
self.lock.acquire()
try:
            del self.db[username]
if self.filename:
self.db.sync()
finally:
self.lock.release()
def __contains__(self, username):
"""Check if the database contains the specified username.
@type username: str
@param username: The username to check for.
@rtype: bool
@return: True if the database contains the username, False
otherwise.
"""
        if self.db is None:
raise AssertionError("DB not open")
self.lock.acquire()
try:
return username in self.db
finally:
self.lock.release()
def check(self, username, param):
value = self.__getitem__(username)
return self._checkItem(value, username, param)
def keys(self):
"""Return a list of usernames in the database.
@rtype: list
@return: The usernames in the database.
"""
        if self.db is None:
raise AssertionError("DB not open")
self.lock.acquire()
try:
usernames = list(self.db.keys())
finally:
self.lock.release()
usernames = [u for u in usernames if not u.startswith("--Reserved--")]
return usernames
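# A minimal usage sketch: BaseDB is abstract, so the tiny in-memory subclass
# below (purely illustrative) supplies the _getItem/_setItem/_checkItem hooks
# that concrete databases such as VerifierDB normally provide.
if __name__ == "__main__":
    class _DemoDB(BaseDB):
        def __init__(self):
            BaseDB.__init__(self, None, "demo")  # filename=None -> dict backend
        def _getItem(self, username, valueStr):
            return valueStr
        def _setItem(self, username, value):
            return value
        def _checkItem(self, value, username, param):
            return value == param
    db = _DemoDB()
    db.create()
    db["alice"] = "secret"
    print("alice" in db)                 # True
    print(db.check("alice", "secret"))   # True
    print(db.keys())                     # ['alice']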
|
wdv4758h/ZipPy
|
refs/heads/master
|
edu.uci.python.benchmark/src/benchmarks/spectralnorm2t.py
|
1
|
# The Computer Language Benchmarks Game
# http://shootout.alioth.debian.org/
#
# Contributed by Sebastien Loisel
# Fixed by Isaac Gouy
# Sped up by Josh Goldfoot
# Dirtily sped up by Simon Descarpentries
# Sped up by Joseph LaFata
from array import array
from math import sqrt
from sys import argv
import sys, time
if sys.version_info < (3, 0):
from itertools import izip as zip
else:
xrange = range
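# eval_A below is the benchmark's infinite test matrix written in closed form:
#     A(i, j) = 1 / ((i + j) * (i + j + 1) / 2 + i + 1)
# the ">> 1" is the integer halving of the triangular-number term.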
def eval_A (i, j):
return 1.0 / (((i + j) * (i + j + 1) >> 1) + i + 1)
def eval_A_times_u (u, resulted_list):
u_len = len (u)
local_eval_A = eval_A
for i in xrange (u_len):
partial_sum = 0
j = 0
while j < u_len:
partial_sum += local_eval_A (i, j) * u[j]
j += 1
resulted_list[i] = partial_sum
def eval_At_times_u (u, resulted_list):
u_len = len (u)
local_eval_A = eval_A
for i in xrange (u_len):
partial_sum = 0
j = 0
while j < u_len:
partial_sum += local_eval_A (j, i) * u[j]
j += 1
resulted_list[i] = partial_sum
def eval_AtA_times_u (u, out, tmp):
eval_A_times_u (u, tmp)
eval_At_times_u (tmp, out)
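# main() approximates the largest eigenvalue of B = A^T * A by repeated power
# iteration; the spectral norm of A is then sqrt((u . B u) / (u . u)), which
# is the value printed at the end.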
def main(num):
n = num
u = array("d", [1]) * n
v = array("d", [1]) * n
tmp = array("d", [1]) * n
local_eval_AtA_times_u = eval_AtA_times_u
for dummy in xrange (10):
local_eval_AtA_times_u (u, v, tmp)
local_eval_AtA_times_u (v, u, tmp)
vBv = vv = 0
for ue, ve in zip (u, v):
vBv += ue * ve
vv += ve * ve
print("%0.9f" % (sqrt(vBv/vv)))
def measure():
print("Start timing...")
start = time.time()
num = int(sys.argv[1])
main(num)
duration = "%.3f\n" % (time.time() - start)
print("spectralnorm: " + duration)
measure()
|
synasius/django
|
refs/heads/master
|
tests/view_tests/tests/test_static.py
|
337
|
from __future__ import unicode_literals
import mimetypes
import unittest
from os import path
from django.conf.urls.static import static
from django.http import FileResponse, HttpResponseNotModified
from django.test import SimpleTestCase, override_settings
from django.utils.http import http_date
from django.views.static import was_modified_since
from .. import urls
from ..urls import media_dir
@override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
class StaticTests(SimpleTestCase):
"""Tests django views in django/views/static.py"""
prefix = 'site_media'
def test_serve(self):
"The static view can serve static media"
media_files = ['file.txt', 'file.txt.gz']
for filename in media_files:
response = self.client.get('/%s/%s' % (self.prefix, filename))
response_content = b''.join(response)
file_path = path.join(media_dir, filename)
with open(file_path, 'rb') as fp:
self.assertEqual(fp.read(), response_content)
self.assertEqual(len(response_content), int(response['Content-Length']))
self.assertEqual(mimetypes.guess_type(file_path)[1], response.get('Content-Encoding', None))
def test_chunked(self):
"The static view should stream files in chunks to avoid large memory usage"
response = self.client.get('/%s/%s' % (self.prefix, 'long-line.txt'))
first_chunk = next(response.streaming_content)
self.assertEqual(len(first_chunk), FileResponse.block_size)
second_chunk = next(response.streaming_content)
response.close()
# strip() to prevent OS line endings from causing differences
self.assertEqual(len(second_chunk.strip()), 1449)
def test_unknown_mime_type(self):
response = self.client.get('/%s/file.unknown' % self.prefix)
self.assertEqual('application/octet-stream', response['Content-Type'])
response.close()
def test_copes_with_empty_path_component(self):
file_name = 'file.txt'
response = self.client.get('/%s//%s' % (self.prefix, file_name))
response_content = b''.join(response)
with open(path.join(media_dir, file_name), 'rb') as fp:
self.assertEqual(fp.read(), response_content)
def test_is_modified_since(self):
file_name = 'file.txt'
response = self.client.get('/%s/%s' % (self.prefix, file_name),
HTTP_IF_MODIFIED_SINCE='Thu, 1 Jan 1970 00:00:00 GMT')
response_content = b''.join(response)
with open(path.join(media_dir, file_name), 'rb') as fp:
self.assertEqual(fp.read(), response_content)
def test_not_modified_since(self):
file_name = 'file.txt'
response = self.client.get(
'/%s/%s' % (self.prefix, file_name),
HTTP_IF_MODIFIED_SINCE='Mon, 18 Jan 2038 05:14:07 GMT'
# This is 24h before max Unix time. Remember to fix Django and
# update this test well before 2038 :)
)
self.assertIsInstance(response, HttpResponseNotModified)
def test_invalid_if_modified_since(self):
"""Handle bogus If-Modified-Since values gracefully
Assume that a file is modified since an invalid timestamp as per RFC
2616, section 14.25.
"""
file_name = 'file.txt'
invalid_date = 'Mon, 28 May 999999999999 28:25:26 GMT'
response = self.client.get('/%s/%s' % (self.prefix, file_name),
HTTP_IF_MODIFIED_SINCE=invalid_date)
response_content = b''.join(response)
with open(path.join(media_dir, file_name), 'rb') as fp:
self.assertEqual(fp.read(), response_content)
self.assertEqual(len(response_content), int(response['Content-Length']))
def test_invalid_if_modified_since2(self):
"""Handle even more bogus If-Modified-Since values gracefully
Assume that a file is modified since an invalid timestamp as per RFC
2616, section 14.25.
"""
file_name = 'file.txt'
invalid_date = ': 1291108438, Wed, 20 Oct 2010 14:05:00 GMT'
response = self.client.get('/%s/%s' % (self.prefix, file_name),
HTTP_IF_MODIFIED_SINCE=invalid_date)
response_content = b''.join(response)
with open(path.join(media_dir, file_name), 'rb') as fp:
self.assertEqual(fp.read(), response_content)
self.assertEqual(len(response_content), int(response['Content-Length']))
def test_404(self):
response = self.client.get('/%s/non_existing_resource' % self.prefix)
self.assertEqual(404, response.status_code)
class StaticHelperTest(StaticTests):
"""
Test case to make sure the static URL pattern helper works as expected
"""
def setUp(self):
super(StaticHelperTest, self).setUp()
self._old_views_urlpatterns = urls.urlpatterns[:]
urls.urlpatterns += static('/media/', document_root=media_dir)
def tearDown(self):
super(StaticHelperTest, self).tearDown()
urls.urlpatterns = self._old_views_urlpatterns
class StaticUtilsTests(unittest.TestCase):
def test_was_modified_since_fp(self):
"""
Test that a floating point mtime does not disturb was_modified_since.
(#18675)
"""
mtime = 1343416141.107817
header = http_date(mtime)
self.assertFalse(was_modified_since(header, mtime))
|
sarvex/django
|
refs/heads/master
|
tests/gis_tests/geos_tests/__init__.py
|
12133432
| |
paurosello/frappe
|
refs/heads/develop
|
frappe/patches/v6_15/__init__.py
|
12133432
| |
helldorado/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/remote_management/lxca/__init__.py
|
12133432
| |
Chilledheart/chromium
|
refs/heads/master
|
tools/telemetry/third_party/gsutilz/third_party/boto/tests/unit/dynamodb/__init__.py
|
12133432
| |
Don42/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/radiojavan.py
|
124
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
unified_strdate,
str_to_int,
)
class RadioJavanIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?radiojavan\.com/videos/video/(?P<id>[^/]+)/?'
_TEST = {
'url': 'http://www.radiojavan.com/videos/video/chaartaar-ashoobam',
'md5': 'e85208ffa3ca8b83534fca9fe19af95b',
'info_dict': {
'id': 'chaartaar-ashoobam',
'ext': 'mp4',
'title': 'Chaartaar - Ashoobam',
'thumbnail': 're:^https?://.*\.jpe?g$',
'upload_date': '20150215',
'view_count': int,
'like_count': int,
'dislike_count': int,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
formats = [{
'url': 'https://media.rdjavan.com/media/music_video/%s' % video_path,
'format_id': '%sp' % height,
'height': int(height),
} for height, video_path in re.findall(r"RJ\.video(\d+)p\s*=\s*'/?([^']+)'", webpage)]
self._sort_formats(formats)
title = self._og_search_title(webpage)
thumbnail = self._og_search_thumbnail(webpage)
upload_date = unified_strdate(self._search_regex(
r'class="date_added">Date added: ([^<]+)<',
webpage, 'upload date', fatal=False))
view_count = str_to_int(self._search_regex(
r'class="views">Plays: ([\d,]+)',
webpage, 'view count', fatal=False))
like_count = str_to_int(self._search_regex(
r'class="rating">([\d,]+) likes',
webpage, 'like count', fatal=False))
dislike_count = str_to_int(self._search_regex(
r'class="rating">([\d,]+) dislikes',
webpage, 'dislike count', fatal=False))
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'upload_date': upload_date,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'formats': formats,
}
|
addition-it-solutions/project-all
|
refs/heads/master
|
addons/auth_signup/controllers/__init__.py
|
7372
|
import main
|
Elettronik/SickRage
|
refs/heads/master
|
lib/github/Stargazer.py
|
25
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Christopher Gilbert <christopher.john.gilbert@gmail.com> #
# Copyright 2012 Steve English <steve.english@navetas.com> #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Adrian Petrescu <adrian.petrescu@maluuba.com> #
# Copyright 2013 Mark Roddy <markroddy@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 martinqt <m.ki2@laposte.net> #
# Copyright 2015 Dan Vanderkam <danvdk@gmail.com> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github
class Stargazer(github.GithubObject.NonCompletableGithubObject):
"""
This class represents Stargazers with the date of starring as returned by
https://developer.github.com/v3/activity/starring/#alternative-response-with-star-creation-timestamps
"""
@property
def starred_at(self):
"""
:type: datetime.datetime
"""
return self._starred_at.value
@property
def user(self):
"""
:type: :class:`github.NamedUser`
"""
return self._user.value
def _initAttributes(self):
self._starred_at = github.GithubObject.NotSet
self._user = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if 'starred_at' in attributes:
self._starred_at = self._makeDatetimeAttribute(attributes['starred_at'])
if 'user' in attributes:
self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes['user'])
|
Antiun/server-tools
|
refs/heads/8.0
|
base_suspend_security/models/ir_model_access.py
|
26
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This module copyright (C) 2015 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, tools
from ..base_suspend_security import BaseSuspendSecurityUid
class IrModelAccess(models.Model):
_inherit = 'ir.model.access'
    @tools.ormcache_context(accepted_keys=('lang',))
def check(self, cr, uid, model, mode='read', raise_exception=True,
context=None):
if isinstance(uid, BaseSuspendSecurityUid):
return True
return super(IrModelAccess, self).check(
cr, uid, model, mode=mode, raise_exception=raise_exception,
context=context)
|
tcatut/plugin.video.youtube
|
refs/heads/master
|
resources/lib/youtube/helper/tv.py
|
1
|
__author__ = 'bromix'
from resources.lib import kodion
from resources.lib.youtube.helper import utils
from resources.lib.kodion.items.video_item import VideoItem
def my_subscriptions_to_items(provider, context, json_data):
result = []
video_id_dict = {}
items = json_data.get('items', [])
for item in items:
video_id = item['id']
video_item = VideoItem(item['title'],
uri=context.create_uri(['play'], {'video_id': video_id}))
result.append(video_item)
video_id_dict[video_id] = video_item
pass
channel_item_dict = {}
utils.update_video_infos(provider, context, video_id_dict, channel_items_dict=channel_item_dict)
utils.update_fanarts(provider, context, channel_item_dict)
# next page
continuations = json_data.get('continuations', '')
if continuations:
new_params = {}
new_params.update(context.get_params())
new_params['continuations'] = continuations
new_context = context.clone(new_params=new_params)
current_page = int(new_context.get_param('page', 1))
next_page_item = kodion.items.NextPageItem(new_context, current_page, fanart=provider.get_fanart(new_context))
result.append(next_page_item)
pass
return result
|
javalovelinux/SparkGroovyScript
|
refs/heads/master
|
dist/examples/src/main/python/mllib/multi_class_metrics_example.py
|
98
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $example on$
from pyspark.mllib.classification import LogisticRegressionWithLBFGS
from pyspark.mllib.util import MLUtils
from pyspark.mllib.evaluation import MulticlassMetrics
# $example off$
from pyspark import SparkContext
if __name__ == "__main__":
sc = SparkContext(appName="MultiClassMetricsExample")
    # Several of the methods available in Scala are currently missing from PySpark
# $example on$
# Load training data in LIBSVM format
data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_multiclass_classification_data.txt")
# Split data into training (60%) and test (40%)
training, test = data.randomSplit([0.6, 0.4], seed=11)
training.cache()
# Run training algorithm to build the model
model = LogisticRegressionWithLBFGS.train(training, numClasses=3)
# Compute raw scores on the test set
predictionAndLabels = test.map(lambda lp: (float(model.predict(lp.features)), lp.label))
# Instantiate metrics object
metrics = MulticlassMetrics(predictionAndLabels)
# Overall statistics
precision = metrics.precision()
recall = metrics.recall()
f1Score = metrics.fMeasure()
print("Summary Stats")
print("Precision = %s" % precision)
print("Recall = %s" % recall)
print("F1 Score = %s" % f1Score)
# Statistics by class
labels = data.map(lambda lp: lp.label).distinct().collect()
for label in sorted(labels):
print("Class %s precision = %s" % (label, metrics.precision(label)))
print("Class %s recall = %s" % (label, metrics.recall(label)))
print("Class %s F1 Measure = %s" % (label, metrics.fMeasure(label, beta=1.0)))
# Weighted stats
print("Weighted recall = %s" % metrics.weightedRecall)
print("Weighted precision = %s" % metrics.weightedPrecision)
print("Weighted F(1) Score = %s" % metrics.weightedFMeasure())
print("Weighted F(0.5) Score = %s" % metrics.weightedFMeasure(beta=0.5))
print("Weighted false positive rate = %s" % metrics.weightedFalsePositiveRate)
# $example off$
|
derDavidT/sympy
|
refs/heads/master
|
sympy/assumptions/tests/test_sathandlers.py
|
50
|
from sympy import Mul, Basic, Q, Expr, And, symbols, Equivalent, Implies, Or
from sympy.assumptions.sathandlers import (ClassFactRegistry, AllArgs,
UnevaluatedOnFree, AnyArgs, CheckOldAssump, ExactlyOneArg)
from sympy.utilities.pytest import raises
x, y, z = symbols('x y z')
def test_class_handler_registry():
my_handler_registry = ClassFactRegistry()
# The predicate doesn't matter here, so just use is_true
fact1 = Equivalent(Q.is_true, AllArgs(Q.is_true))
fact2 = Equivalent(Q.is_true, AnyArgs(Q.is_true))
my_handler_registry[Mul] = set([fact1])
my_handler_registry[Expr] = set([fact2])
assert my_handler_registry[Basic] == set()
assert my_handler_registry[Expr] == set([fact2])
assert my_handler_registry[Mul] == set([fact1, fact2])
def test_UnevaluatedOnFree():
a = UnevaluatedOnFree(Q.positive)
b = UnevaluatedOnFree(Q.positive | Q.negative)
c = UnevaluatedOnFree(Q.positive & ~Q.positive) # It shouldn't do any deduction
assert a.rcall(x) == UnevaluatedOnFree(Q.positive(x))
assert b.rcall(x) == UnevaluatedOnFree(Q.positive(x) | Q.negative(x))
assert c.rcall(x) == UnevaluatedOnFree(Q.positive(x) & ~Q.positive(x))
assert a.rcall(x).expr == x
assert a.rcall(x).pred == Q.positive
assert b.rcall(x).pred == Q.positive | Q.negative
raises(ValueError, lambda: UnevaluatedOnFree(Q.positive(x) | Q.negative))
raises(ValueError, lambda: UnevaluatedOnFree(Q.positive(x) |
Q.negative(y)))
class MyUnevaluatedOnFree(UnevaluatedOnFree):
def apply(self):
return self.args[0]
a = MyUnevaluatedOnFree(Q.positive)
b = MyUnevaluatedOnFree(Q.positive | Q.negative)
c = MyUnevaluatedOnFree(Q.positive(x))
d = MyUnevaluatedOnFree(Q.positive(x) | Q.negative(x))
assert a.rcall(x) == c == Q.positive(x)
assert b.rcall(x) == d == Q.positive(x) | Q.negative(x)
raises(ValueError, lambda: MyUnevaluatedOnFree(Q.positive(x) | Q.negative(y)))
def test_AllArgs():
a = AllArgs(Q.zero)
b = AllArgs(Q.positive | Q.negative)
assert a.rcall(x*y) == And(Q.zero(x), Q.zero(y))
assert b.rcall(x*y) == And(Q.positive(x) | Q.negative(x), Q.positive(y) | Q.negative(y))
def test_AnyArgs():
a = AnyArgs(Q.zero)
b = AnyArgs(Q.positive & Q.negative)
assert a.rcall(x*y) == Or(Q.zero(x), Q.zero(y))
assert b.rcall(x*y) == Or(Q.positive(x) & Q.negative(x), Q.positive(y) & Q.negative(y))
def test_CheckOldAssump():
# TODO: Make these tests more complete
class Test1(Expr):
def _eval_is_positive(self):
return True
def _eval_is_negative(self):
return False
class Test2(Expr):
def _eval_is_finite(self):
return True
def _eval_is_positive(self):
return True
def _eval_is_negative(self):
return False
t1 = Test1()
t2 = Test2()
    # We can't say whether it's positive or negative in the old assumptions
    # without knowing it is bounded. Remember, True means "no new knowledge",
    # and Q.positive(t2) means "t2 is positive."
assert CheckOldAssump(Q.positive(t1)) == True
assert CheckOldAssump(Q.negative(t1)) == ~Q.negative(t1)
assert CheckOldAssump(Q.positive(t2)) == Q.positive(t2)
assert CheckOldAssump(Q.negative(t2)) == ~Q.negative(t2)
def test_ExactlyOneArg():
a = ExactlyOneArg(Q.zero)
b = ExactlyOneArg(Q.positive | Q.negative)
assert a.rcall(x*y) == Or(Q.zero(x) & ~Q.zero(y), Q.zero(y) & ~Q.zero(x))
assert a.rcall(x*y*z) == Or(Q.zero(x) & ~Q.zero(y) & ~Q.zero(z), Q.zero(y)
& ~Q.zero(x) & ~Q.zero(z), Q.zero(z) & ~Q.zero(x) & ~Q.zero(y))
assert b.rcall(x*y) == Or((Q.positive(x) | Q.negative(x)) &
~(Q.positive(y) | Q.negative(y)), (Q.positive(y) | Q.negative(y)) &
~(Q.positive(x) | Q.negative(x)))
|
pprett/scikit-learn
|
refs/heads/master
|
examples/neighbors/plot_kde_1d.py
|
60
|
"""
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
|
marcore/edx-platform
|
refs/heads/master
|
common/djangoapps/config_models/decorators.py
|
179
|
"""Decorators for model-based configuration. """
from functools import wraps
from django.http import HttpResponseNotFound
def require_config(config_model):
"""View decorator that enables/disables a view based on configuration.
Arguments:
config_model (ConfigurationModel subclass): The class of the configuration
model to check.
Returns:
HttpResponse: 404 if the configuration model is disabled,
otherwise returns the response from the decorated view.
"""
def _decorator(func):
@wraps(func)
def _inner(*args, **kwargs):
if not config_model.current().enabled:
return HttpResponseNotFound()
else:
return func(*args, **kwargs)
return _inner
return _decorator
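# A minimal usage sketch; the view and the config model named here are
# hypothetical, standing in for any ConfigurationModel subclass:
#
#     from .models import MyFeatureConfig
#
#     @require_config(MyFeatureConfig)
#     def my_view(request):
#         ...
#
# When MyFeatureConfig.current().enabled is False, requests to my_view receive
# a 404; otherwise the decorated view runs normally.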
|
mahak/cinder
|
refs/heads/master
|
cinder/volume/drivers/huawei/huawei_utils.py
|
2
|
# Copyright (c) 2016 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import math
from oslo_log import log as logging
from oslo_utils.secretutils import md5
from oslo_utils import strutils
import six
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder import utils
from cinder.volume.drivers.huawei import constants
from cinder.volume import qos_specs
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
def encode_name(name):
encoded_name = md5(name.encode('utf-8'),
usedforsecurity=False).hexdigest()
prefix = name.split('-')[0] + '-'
postfix = encoded_name[:constants.MAX_NAME_LENGTH - len(prefix)]
return prefix + postfix
def old_encode_name(name):
pre_name = name.split("-")[0]
vol_encoded = six.text_type(hash(name))
if vol_encoded.startswith('-'):
newuuid = pre_name + vol_encoded
else:
newuuid = pre_name + '-' + vol_encoded
return newuuid
def encode_host_name(name):
if name and len(name) > constants.MAX_NAME_LENGTH:
encoded_name = md5(name.encode('utf-8'),
usedforsecurity=False).hexdigest()
return encoded_name[:constants.MAX_NAME_LENGTH]
return name
def old_encode_host_name(name):
if name and len(name) > constants.MAX_NAME_LENGTH:
name = six.text_type(hash(name))
return name
def wait_for_condition(func, interval, timeout):
"""Wait for ``func`` to return True.
This retries running func until it either returns True or raises an
exception.
:param func: The function to call.
:param interval: The interval to wait in seconds between calls.
:param timeout: The maximum time in seconds to wait.
"""
if interval == 0:
interval = 1
if timeout == 0:
timeout = 1
@utils.retry(exception.VolumeDriverException,
interval=interval,
backoff_rate=1,
retries=(math.ceil(timeout / interval)))
def _retry_call():
result = func()
if not result:
raise exception.VolumeDriverException(
_('Timed out waiting for condition.'))
_retry_call()
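# A minimal usage sketch (the client call is hypothetical): poll every
# 2 seconds, for at most 60 seconds, until a task reports completion;
# VolumeDriverException is raised if the timeout is reached.
#
#     wait_for_condition(lambda: client.get_task_status(task_id) == 'done',
#                        interval=2, timeout=60)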
def _get_volume_type(volume):
if volume.volume_type:
return volume.volume_type
if volume.volume_type_id:
return volume_types.get_volume_type(None, volume.volume_type_id)
def get_volume_params(volume):
volume_type = _get_volume_type(volume)
return get_volume_type_params(volume_type)
def get_volume_type_params(volume_type):
specs = {}
if isinstance(volume_type, dict) and volume_type.get('extra_specs'):
specs = volume_type['extra_specs']
elif isinstance(volume_type, objects.VolumeType
) and volume_type.extra_specs:
specs = volume_type.extra_specs
vol_params = get_volume_params_from_specs(specs)
vol_params['qos'] = None
if isinstance(volume_type, dict) and volume_type.get('qos_specs_id'):
vol_params['qos'] = _get_qos_specs(volume_type['qos_specs_id'])
elif isinstance(volume_type, objects.VolumeType
) and volume_type.qos_specs_id:
vol_params['qos'] = _get_qos_specs(volume_type.qos_specs_id)
LOG.info('volume opts %s.', vol_params)
return vol_params
def get_volume_params_from_specs(specs):
opts = _get_opts_from_specs(specs)
_verify_smartcache_opts(opts)
_verify_smartpartition_opts(opts)
_verify_smartthin_opts(opts)
return opts
def _get_opts_from_specs(specs):
"""Get the well defined extra specs."""
opts = {}
def _get_bool_param(k, v):
words = v.split()
if len(words) == 2 and words[0] == '<is>':
return strutils.bool_from_string(words[1], strict=True)
msg = _("%(k)s spec must be specified as %(k)s='<is> True' "
"or '<is> False'.") % {'k': k}
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
def _get_replication_type_param(k, v):
words = v.split()
if len(words) == 2 and words[0] == '<in>':
REPLICA_SYNC_TYPES = {'sync': constants.REPLICA_SYNC_MODEL,
'async': constants.REPLICA_ASYNC_MODEL}
sync_type = words[1].lower()
if sync_type in REPLICA_SYNC_TYPES:
return REPLICA_SYNC_TYPES[sync_type]
msg = _("replication_type spec must be specified as "
"replication_type='<in> sync' or '<in> async'.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
def _get_string_param(k, v):
if not v:
msg = _("%s spec must be specified as a string.") % k
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
return v
opts_capability = {
'capabilities:smarttier': (_get_bool_param, False),
'capabilities:smartcache': (_get_bool_param, False),
'capabilities:smartpartition': (_get_bool_param, False),
'capabilities:thin_provisioning_support': (_get_bool_param, False),
'capabilities:thick_provisioning_support': (_get_bool_param, False),
'capabilities:hypermetro': (_get_bool_param, False),
'capabilities:replication_enabled': (_get_bool_param, False),
'replication_type': (_get_replication_type_param,
constants.REPLICA_ASYNC_MODEL),
'smarttier:policy': (_get_string_param, None),
'smartcache:cachename': (_get_string_param, None),
'smartpartition:partitionname': (_get_string_param, None),
'huawei_controller:controllername': (_get_string_param, None),
'capabilities:dedup': (_get_bool_param, None),
'capabilities:compression': (_get_bool_param, None),
}
def _get_opt_key(spec_key):
key_split = spec_key.split(':')
if len(key_split) == 1:
return key_split[0]
else:
return key_split[1]
for spec_key in opts_capability:
opt_key = _get_opt_key(spec_key)
opts[opt_key] = opts_capability[spec_key][1]
for key, value in six.iteritems(specs):
if key not in opts_capability:
continue
func = opts_capability[key][0]
opt_key = _get_opt_key(key)
opts[opt_key] = func(key, value)
return opts
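# An illustrative mapping (values assumed): a volume type whose extra specs are
#     {'capabilities:smartcache': '<is> True', 'smartcache:cachename': 'c01'}
# comes back from _get_opts_from_specs() as opts with smartcache=True and
# cachename='c01', while every other key listed in opts_capability keeps its
# declared default.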
def _get_qos_specs(qos_specs_id):
ctxt = context.get_admin_context()
specs = qos_specs.get_qos_specs(ctxt, qos_specs_id)
if specs is None:
return {}
if specs.get('consumer') == 'front-end':
return {}
kvs = specs.get('specs', {})
LOG.info('The QoS specs is: %s.', kvs)
qos = {'IOTYPE': kvs.pop('IOType', None)}
if qos['IOTYPE'] not in constants.QOS_IOTYPES:
msg = _('IOType must be in %(types)s.'
) % {'types': constants.QOS_IOTYPES}
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
for k, v in kvs.items():
if k not in constants.QOS_SPEC_KEYS:
msg = _('QoS key %s is not valid.') % k
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
if int(v) <= 0:
            msg = _('QoS value for %s must be > 0.') % k
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
qos[k.upper()] = v
if len(qos) < 2:
        msg = _('QoS policy must specify IOType and at least one other '
                'QoS spec, got policy: %s.') % qos
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
qos_keys = set(qos.keys())
if (qos_keys & set(constants.UPPER_LIMIT_KEYS) and
qos_keys & set(constants.LOWER_LIMIT_KEYS)):
msg = _('QoS policy upper limit and lower limit '
'conflict, QoS policy: %s.') % qos
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
return qos
def _verify_smartthin_opts(opts):
if (opts['thin_provisioning_support'] and
opts['thick_provisioning_support']):
msg = _('Cannot set thin and thick at the same time.')
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
elif opts['thin_provisioning_support']:
opts['LUNType'] = constants.THIN_LUNTYPE
elif opts['thick_provisioning_support']:
opts['LUNType'] = constants.THICK_LUNTYPE
def _verify_smartcache_opts(opts):
if opts['smartcache'] and not opts['cachename']:
msg = _('Cache name is not specified, please set '
'smartcache:cachename in extra specs.')
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
def _verify_smartpartition_opts(opts):
if opts['smartpartition'] and not opts['partitionname']:
msg = _('Partition name is not specified, please set '
'smartpartition:partitionname in extra specs.')
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
def wait_lun_online(client, lun_id, wait_interval=None, wait_timeout=None):
def _lun_online():
result = client.get_lun_info_by_id(lun_id)
if result['HEALTHSTATUS'] != constants.STATUS_HEALTH:
err_msg = _('LUN %s is abnormal.') % lun_id
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
if result['RUNNINGSTATUS'] == constants.LUN_INITIALIZING:
return False
return True
if not wait_interval:
wait_interval = constants.DEFAULT_WAIT_INTERVAL
if not wait_timeout:
wait_timeout = wait_interval * 10
wait_for_condition(_lun_online, wait_interval, wait_timeout)
def is_not_exist_exc(exc):
msg = getattr(exc, 'msg', '')
return 'not exist' in msg
def to_string(**kwargs):
return json.dumps(kwargs) if kwargs else ''
def to_dict(text):
return json.loads(text) if text else {}
def get_volume_private_data(volume):
if not volume.provider_location:
return {}
try:
info = json.loads(volume.provider_location)
except Exception:
LOG.exception("Decode volume provider_location error")
return {}
if isinstance(info, dict):
return info
    # To remain compatible with old driver versions
return {'huawei_lun_id': six.text_type(info),
'huawei_lun_wwn': volume.admin_metadata.get('huawei_lun_wwn'),
'huawei_sn': volume.metadata.get('huawei_sn'),
'hypermetro_id': volume.metadata.get('hypermetro_id'),
'remote_lun_id': volume.metadata.get('remote_lun_id')
}
def get_volume_metadata(volume):
if isinstance(volume, objects.Volume):
return volume.metadata
if volume.get('volume_metadata'):
return {item['key']: item['value'] for item in
volume['volume_metadata']}
return {}
def get_replication_data(volume):
if not volume.replication_driver_data:
return {}
return json.loads(volume.replication_driver_data)
def get_snapshot_private_data(snapshot):
if not snapshot.provider_location:
return {}
info = json.loads(snapshot.provider_location)
if isinstance(info, dict):
return info
    # To remain compatible with old driver versions
return {'huawei_snapshot_id': six.text_type(info),
'huawei_snapshot_wwn': snapshot.metadata.get(
'huawei_snapshot_wwn'),
}
def get_external_lun_info(client, external_ref):
lun_info = None
if 'source-id' in external_ref:
lun = client.get_lun_info_by_id(external_ref['source-id'])
lun_info = client.get_lun_info_by_name(lun['NAME'])
elif 'source-name' in external_ref:
lun_info = client.get_lun_info_by_name(external_ref['source-name'])
return lun_info
def get_external_snapshot_info(client, external_ref):
snapshot_info = None
if 'source-id' in external_ref:
snapshot_info = client.get_snapshot_info_by_id(
external_ref['source-id'])
elif 'source-name' in external_ref:
snapshot_info = client.get_snapshot_info_by_name(
external_ref['source-name'])
return snapshot_info
def get_lun_info(client, volume):
metadata = get_volume_private_data(volume)
volume_name = encode_name(volume.id)
lun_info = client.get_lun_info_by_name(volume_name)
    # If the name in the new encoding is not found, try the old encoding.
if not lun_info:
volume_name = old_encode_name(volume.id)
lun_info = client.get_lun_info_by_name(volume_name)
if not lun_info and metadata.get('huawei_lun_id'):
lun_info = client.get_lun_info_by_id(metadata['huawei_lun_id'])
if lun_info and ('huawei_lun_wwn' in metadata and
lun_info.get('WWN') != metadata['huawei_lun_wwn']):
return None
return lun_info
def get_snapshot_info(client, snapshot):
name = encode_name(snapshot.id)
snapshot_info = client.get_snapshot_info_by_name(name)
    # If the name in the new encoding is not found, try the old encoding.
if not snapshot_info:
name = old_encode_name(snapshot.id)
snapshot_info = client.get_snapshot_info_by_name(name)
return snapshot_info
def get_host_id(client, host_name):
encoded_name = encode_host_name(host_name)
host_id = client.get_host_id_by_name(encoded_name)
if encoded_name == host_name:
return host_id
if not host_id:
encoded_name = old_encode_host_name(host_name)
host_id = client.get_host_id_by_name(encoded_name)
return host_id
def get_hypermetro_group(client, group_id):
encoded_name = encode_name(group_id)
group = client.get_metrogroup_by_name(encoded_name)
if not group:
encoded_name = old_encode_name(group_id)
group = client.get_metrogroup_by_name(encoded_name)
return group
def get_replication_group(client, group_id):
encoded_name = encode_name(group_id)
group = client.get_replication_group_by_name(encoded_name)
if not group:
encoded_name = old_encode_name(group_id)
group = client.get_replication_group_by_name(encoded_name)
return group
def get_volume_model_update(volume, **kwargs):
private_data = get_volume_private_data(volume)
if kwargs.get('hypermetro_id'):
private_data['hypermetro_id'] = kwargs.get('hypermetro_id')
elif 'hypermetro_id' in private_data:
private_data.pop('hypermetro_id')
if 'huawei_lun_id' in kwargs:
private_data['huawei_lun_id'] = kwargs['huawei_lun_id']
if 'huawei_lun_wwn' in kwargs:
private_data['huawei_lun_wwn'] = kwargs['huawei_lun_wwn']
if 'huawei_sn' in kwargs:
private_data['huawei_sn'] = kwargs['huawei_sn']
model_update = {'provider_location': to_string(**private_data)}
if kwargs.get('replication_id'):
model_update['replication_driver_data'] = to_string(
pair_id=kwargs.get('replication_id'))
model_update['replication_status'] = fields.ReplicationStatus.ENABLED
else:
model_update['replication_driver_data'] = None
model_update['replication_status'] = fields.ReplicationStatus.DISABLED
return model_update
def get_group_type_params(group):
opts = []
for volume_type in group.volume_types:
opt = get_volume_type_params(volume_type)
opts.append(opt)
return opts
|
ramitalat/odoo
|
refs/heads/8.0
|
openerp-wsgi.py
|
363
|
# WSGI Handler sample configuration file.
#
# Change the appropriate settings below, in order to provide the parameters
# that would normally be passed in the command-line.
# (at least conf['addons_path'])
#
# For generic wsgi handlers a global application is defined.
# For uwsgi this should work:
# $ uwsgi_python --http :9090 --pythonpath . --wsgi-file openerp-wsgi.py
#
# For gunicorn additional globals need to be defined in the Gunicorn section.
# Then the following command should run:
# $ gunicorn openerp:service.wsgi_server.application -c openerp-wsgi.py
import openerp
#----------------------------------------------------------
# Common
#----------------------------------------------------------
openerp.multi_process = True # Nah!
# Equivalent of --load command-line option
openerp.conf.server_wide_modules = ['web']
conf = openerp.tools.config
# Path to the OpenERP Addons repository (comma-separated for
# multiple locations)
conf['addons_path'] = '../../addons/trunk,../../web/trunk/addons'
# Optional database config if not using local socket
#conf['db_name'] = 'mycompany'
#conf['db_host'] = 'localhost'
#conf['db_user'] = 'foo'
#conf['db_port'] = 5432
#conf['db_password'] = 'secret'
#----------------------------------------------------------
# Generic WSGI handlers application
#----------------------------------------------------------
application = openerp.service.wsgi_server.application
openerp.service.server.load_server_wide_modules()
#----------------------------------------------------------
# Gunicorn
#----------------------------------------------------------
# Standard OpenERP XML-RPC port is 8069
bind = '127.0.0.1:8069'
pidfile = '.gunicorn.pid'
workers = 4
timeout = 240
max_requests = 2000
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Maspear/odoo
|
refs/heads/8.0
|
addons/account/test/test_parent_structure.py
|
432
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# TODO: move this into a YAML test with !python tag
#
import xmlrpclib
DB = 'training3'
USERID = 1
USERPASS = 'admin'
sock = xmlrpclib.ServerProxy('http://%s:%s/xmlrpc/object' % ('localhost',8069))
ids = sock.execute(DB, USERID, USERPASS, 'account.account', 'search', [], {})
account_lists = sock.execute(DB, USERID, USERPASS, 'account.account', 'read', ids, ['parent_id','parent_left','parent_right'])
accounts = dict(map(lambda x: (x['id'],x), account_lists))
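# The asserts below check the "nested set" invariants OpenERP maintains for
# account.account: each child's [parent_left, parent_right] interval lies
# strictly inside its parent's interval, and the intervals of two distinct
# accounts never partially overlap.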
for a in account_lists:
if a['parent_id']:
assert a['parent_left'] > accounts[a['parent_id'][0]]['parent_left']
assert a['parent_right'] < accounts[a['parent_id'][0]]['parent_right']
assert a['parent_left'] < a['parent_right']
for a2 in account_lists:
assert not ((a2['parent_right']>a['parent_left']) and
(a2['parent_left']<a['parent_left']) and
(a2['parent_right']<a['parent_right']))
if a2['parent_id']==a['id']:
assert (a2['parent_left']>a['parent_left']) and (a2['parent_right']<a['parent_right'])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
dkubiak789/odoo
|
refs/heads/8.0
|
addons/sale_mrp/sale_mrp.py
|
225
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class mrp_production(osv.osv):
_inherit = 'mrp.production'
def _ref_calc(self, cr, uid, ids, field_names=None, arg=False, context=None):
""" Finds reference of sales order for production order.
@param field_names: Names of fields.
@param arg: User defined arguments
@return: Dictionary of values.
"""
res = {}
if not field_names:
field_names = []
for id in ids:
res[id] = {}.fromkeys(field_names, False)
for f in field_names:
field_name = False
if f == 'sale_name':
field_name = 'name'
if f == 'sale_ref':
field_name = 'client_order_ref'
for key, value in self._get_sale_ref(cr, uid, ids, field_name).items():
res[key][f] = value
return res
def _get_sale_ref(self, cr, uid, ids, field_name=False):
move_obj = self.pool.get('stock.move')
def get_parent_move(move_id):
move = move_obj.browse(cr, uid, move_id)
if move.move_dest_id:
return get_parent_move(move.move_dest_id.id)
return move_id
res = {}
productions = self.browse(cr, uid, ids)
for production in productions:
res[production.id] = False
if production.move_prod_id:
parent_move_line = get_parent_move(production.move_prod_id.id)
if parent_move_line:
move = move_obj.browse(cr, uid, parent_move_line)
if field_name == 'name':
res[production.id] = move.procurement_id and move.procurement_id.sale_line_id and move.procurement_id.sale_line_id.order_id.name or False
if field_name == 'client_order_ref':
res[production.id] = move.procurement_id and move.procurement_id.sale_line_id and move.procurement_id.sale_line_id.order_id.client_order_ref or False
return res
_columns = {
        'sale_name': fields.function(_ref_calc, multi='sale_name', type='char', string='Sale Name', help='Indicates the name of the sales order.'),
        'sale_ref': fields.function(_ref_calc, multi='sale_name', type='char', string='Sale Reference', help='Indicates the customer reference from the sales order.'),
}
class sale_order(osv.Model):
_inherit = 'sale.order'
def _prepare_order_line_procurement(self, cr, uid, order, line, group_id=False, context=None):
result = super(sale_order, self)._prepare_order_line_procurement(cr, uid, order, line, group_id=group_id, context=context)
result['property_ids'] = [(6, 0, [x.id for x in line.property_ids])]
return result
class sale_order_line(osv.osv):
_inherit = 'sale.order.line'
_columns = {
'property_ids': fields.many2many('mrp.property', 'sale_order_line_property_rel', 'order_id', 'property_id', 'Properties', readonly=True, states={'draft': [('readonly', False)]}),
}
class stock_move(osv.osv):
_inherit = 'stock.move'
def _prepare_procurement_from_move(self, cr, uid, move, context=None):
res = super(stock_move, self)._prepare_procurement_from_move(cr, uid, move, context=context)
if res and move.procurement_id and move.procurement_id.property_ids:
res['property_ids'] = [(6, 0, [x.id for x in move.procurement_id.property_ids])]
return res
def _action_explode(self, cr, uid, move, context=None):
""" Explodes pickings.
@param move: Stock moves
@return: True
"""
if context is None:
context = {}
property_ids = map(int, move.procurement_id.sale_line_id.property_ids or [])
return super(stock_move, self)._action_explode(cr, uid, move, context=dict(context, property_ids=property_ids))
|
rafaelfccg/taia_final_project
|
refs/heads/master
|
KH.py
|
1
|
import random
import math
import benchmarkFunctions
import copy
NUM_DIMENSIONS = 20
NUM_ITERATIONS = 1000
POPULATION_SIZE = 50
random_range_value = 1
INERTIA_NEIGHBORS = 0.9
INERTIA_FOOD = 0.9
CT = 0.5
N_MAX = 0.02
FORAGING_SPEED = 0.02
DIFUSION_SPEED = 0.005
EPSILON = 10**-5
CONVERGENCE_PRECISION = 10**-3
X_MAX = 32
X_MIN = -32
Y_MAX = 32
Y_MIN = -32
fitness = benchmarkFunctions.ackley
kbest = 10**9
kworst = 0
SOLUTION_FOUND_ITERATIONS = list()
CONVERGENT_EXECS = 0
CONVERGENT_INDIVIDUALS = list()
ALL_SOLVED_ITERATIONS = list()
INDIVIDUALS_FITNESS = list()
KBEST_FITNESS = list()
# individual representation, (self, self_historical_best, old_N, old_F)
def generate_population():
population = list()
for i in range(POPULATION_SIZE):
genome = list()
for s in range(NUM_DIMENSIONS):
individual = random.uniform(X_MIN, X_MAX);
genome.append(individual)
population.append((genome, genome, zero_vector(NUM_DIMENSIONS), zero_vector(NUM_DIMENSIONS)))
return population
def generate_population_branin():
population = list()
for i in range(POPULATION_SIZE):
genome = list()
individual1 = random.uniform(X_MIN, X_MAX);
individual2 = random.uniform(Y_MIN, Y_MAX);
genome.extend([individual1, individual2])
population.append((genome, genome, zero_vector(NUM_DIMENSIONS), zero_vector(NUM_DIMENSIONS)))
return population
def make_rand_vector(dims):
vec = [random.uniform(-random_range_value, random_range_value) for i in range(dims)]
#mag = sum(x**2 for x in vec) ** .5
return [x for x in vec]
def zero_vector(dims):
return [0 for i in range(dims)]
def norm(vector):
return math.sqrt(sum(map(lambda x : x**2, vector)))
def vector_diff(vector1, vector2):
return [x_i - x_j for x_i, x_j in zip(vector1, vector2)]
def vector_sum(vector1, vector2):
return [x_i + x_j for x_i, x_j in zip(vector1, vector2)]
def vector_constant_product(vector1, constant):
return [x_i * constant for x_i in vector1]
def random_difusion(iteration):
return vector_constant_product(make_rand_vector(NUM_DIMENSIONS), DIFUSION_SPEED * (1 - iteration/float(NUM_ITERATIONS)))
def distance(v1, v2):
return norm(vector_diff(v1,v2))
def k_hat(ki, kj):
return (ki - kj) / (kworst - kbest)
def x_hat(xi, xj):
diff = vector_diff(xj,xi)
norm_diff = norm(diff)
return [x/(norm_diff + EPSILON) for x in diff]
def alfa_local(krill, krill_fit, population, population_fitness):
(neighbors, neighbors_fit) = find_neighbors(krill, population, population_fitness)
# print "num neighbors:" +str(len(neighbors))
# print "neighbors:" +str(neighbors)
sum_vec = zero_vector(NUM_DIMENSIONS)
for idx, value in enumerate(neighbors):
sum_vec = vector_sum(sum_vec, k_x_hat_product(krill, value, krill_fit, neighbors_fit[idx]))
return sum_vec
def find_neighbors(krill, population, population_fitness):
ds = sensing_distance(krill,population)
# print "sensing_distance: " + str(ds)
neighbors = list()
neighbors_fit = list()
for idx, x in enumerate(population):
individual_i = x[0]
distance_i = distance(krill,individual_i)
# print distance_i
if(individual_i != krill and distance_i <= ds):
neighbors.append(x[0])
neighbors_fit.append(population_fitness[idx])
return (neighbors, neighbors_fit)
def sensing_distance(krill, population):
val1 = sum(map(lambda x : distance(x[0], krill), population))
# print val1
return val1/(POPULATION_SIZE*5)
def alfa_target(krill, krill_fit, best, best_fit, iteration):
cbest = C_best(iteration)
return vector_constant_product(k_x_hat_product(krill, best, krill_fit, best_fit), cbest)
def k_x_hat_product(krill_i,krill_j,fitness_i, fitness_j):
return vector_constant_product(x_hat(krill_i, krill_j), k_hat(fitness_i, fitness_j))
def alfa(krill, krill_fit, best, population, population_fitness, iteration):
best_fit = fitness(best)
local = alfa_local(krill, krill_fit, population, population_fitness)
target = alfa_target(krill, krill_fit, best, best_fit, iteration)
# print "local: "+ str(local)
# print "target: "+ str(target)
return vector_sum(local,target)
def C_best(iteration):
return 2 * (random.uniform(0,1) + iteration/float(NUM_ITERATIONS))
def food_position(population, population_fitness):
sum_denominator = 0
sum_numerator = zero_vector(len(population[0][0]))
for idx, krill in enumerate(population):
fit_weight = 1/population_fitness[idx]
sum_numerator = vector_sum(sum_numerator, vector_constant_product(krill[0],fit_weight))
sum_denominator += fit_weight
return vector_constant_product(sum_numerator, 1/sum_denominator)
def beta_food(krill, krill_fit, food_pos, iteration):
# print (food_pos)
food_fit = fitness(food_pos)
return vector_constant_product(k_x_hat_product(krill, food_pos, krill_fit, food_fit), C_food(iteration))
def C_food(iteration):
return 2*(1 - iteration/float(NUM_ITERATIONS))
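# The two induced-motion terms below follow the standard krill herd update
# (Gandomi & Alavi, 2012):
#     N_i(t+1) = N_MAX * alpha_i + INERTIA_NEIGHBORS * N_i(t)
#     F_i(t+1) = FORAGING_SPEED * beta_i + INERTIA_FOOD * F_i(t)
# where alpha_i combines the local-neighbour and best-krill effects and beta_i
# combines the food attraction with the krill's own best position.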
def neighbors_induced_mov(krill, krill_fit, best, population, population_fitness, old_N, iteration):
return vector_sum(vector_constant_product(alfa(krill, krill_fit, best, population, population_fitness, iteration), N_MAX), vector_constant_product(old_N, INERTIA_NEIGHBORS))
def beta(krill, krill_fit, krill_best, x_food, population, population_fitness, iteration):
return vector_sum( beta_food(krill, krill_fit, x_food, iteration), k_x_hat_product(krill, krill_best, krill_fit, fitness(krill_best)))
def food_induced_mov(krill, krill_fit, krill_best, x_food, population, population_fitness, old_F, iteration):
return vector_sum(vector_constant_product(beta(krill, krill_fit, krill_best, x_food, population, population_fitness, iteration), FORAGING_SPEED), vector_constant_product(old_F, INERTIA_FOOD))
def dX_dt(krill, krill_fit, krill_best, best, x_food, population, population_fitness, old_N, old_F, iteration):
    # Total motion of one krill: neighbour-induced movement (N), foraging
    # movement towards the food centre and the personal best (F), and random
    # physical diffusion (D).
    Ni = neighbors_induced_mov(krill, krill_fit, best, population, population_fitness, old_N, iteration)
# print Ni
Fi = food_induced_mov(krill, krill_fit, krill_best, x_food, population, population_fitness, old_F, iteration)
Di = random_difusion(iteration)
return (vector_sum(vector_sum(Ni,Fi),Di), Ni, Fi)
def move(krill, delta_t, delta_move):
    return vector_sum(krill, vector_constant_product(delta_move, delta_t))
def select_best_krill(population):
min_krill = population[0]
min_fitness = 10**9
population_fitness = list()
for x in population:
curr_fit = fitness(x[0])
population_fitness.append(curr_fit)
if min_fitness > curr_fit:
min_krill = x
min_fitness = curr_fit
return (min_krill,population_fitness)
def delta_t(population):
# sumi = 0
# lower_bound = copy.copy(population[0][0])
# upper_bound = copy.copy(population[0][0])
# for x in population:
# for xi in range(NUM_DIMENSIONS):
# if lower_bound[xi] > x[0][xi]:
# lower_bound[xi] = x[0][xi]
# if upper_bound[xi] < x[0][xi]:
# upper_bound[xi] = x[0][xi]
    # Time step: delta_t = CT * sum over all dimensions of the search range (X_MAX - X_MIN).
    meanU = list()
for x in range(NUM_DIMENSIONS):
meanU.append(X_MAX-X_MIN)
# list.sort(meanU)
# print(meanU)
return CT * sum(meanU)
def delta_t_branin(population):
meanU = list()
meanU.append(X_MAX-X_MIN)
meanU.append(Y_MAX-Y_MIN)
return CT * sum(meanU)
def check_for_solution(population):
solutions = 0
for x in population:
if abs(fitness(x[1])) < CONVERGENCE_PRECISION :
solutions += 1
return solutions
def evolve():
global CONVERGENT_EXECS
global kworst
global kbest
    global INERTIA_NEIGHBORS
    global INERTIA_FOOD  # adapted each iteration and read by food_induced_mov()
    global FORAGING_SPEED
movement_vector = list()
population = generate_population()
krill = population[0]
solved = False
i = 0
best_change_iterations = 0
INERTIA_NEIGHBORS = 0.9
INERTIA_FOOD = 0.9
kworst = 0
kbest = 10**9
benchmarkFunctions.FUNCTION_EVALUATION = 0
while i < NUM_ITERATIONS:
i += 1
(best_krill, population_fitness) = select_best_krill(population)
x_food = food_position(population, population_fitness)
new_population = list()
iteration_min_fit = min(population_fitness)
iteration_max_fit = max(population_fitness)
if kworst < iteration_max_fit:
kworst = iteration_max_fit
if kbest > iteration_min_fit:
kbest = iteration_min_fit
best_change_iterations = 0
else:
best_change_iterations += 1
INERTIA_NEIGHBORS = 0.1 + 0.8 * (1 - i/float(NUM_ITERATIONS))
INERTIA_FOOD = 0.1 + 0.8 * (1 - i/float(NUM_ITERATIONS))
print "iteration "+ str(i)+ ": kworst = "+ str(kworst)+ " | kbest = "+ str(kbest)
dt = delta_t(population)
#print dt
# print population
for idx, krill in enumerate(population):
krill_best = krill[1]
(movement_vector, new_N, new_F) = dX_dt(krill[0], population_fitness[idx], krill_best, best_krill[0], x_food ,population, population_fitness, krill[2], krill[3],i)
new_krill_position = vector_sum(krill[0] ,vector_constant_product(movement_vector, dt))
if fitness(new_krill_position) < fitness(krill_best):
krill_best = new_krill_position
            new_population.append((new_krill_position, krill_best, new_N, new_F))
# if USE_RECOMBINATION:
# offspring = generate_offspring(population)
population = new_population
solutions = check_for_solution(new_population)
CONVERGENT_INDIVIDUALS.append(solutions)
SOLUTION_FOUND_ITERATIONS.append(i)
print SOLUTION_FOUND_ITERATIONS
kbest_fit = map(lambda x: fitness(x[1]), population)
mean_pop_fitness = mean(kbest_fit)
KBEST_FITNESS.append(min(kbest_fit))
INDIVIDUALS_FITNESS.append(mean_pop_fitness)
print "best "+ str(population[kbest_fit.index(min(kbest_fit))][1])
print "Population fitness: " + str(mean_pop_fitness)
print "Convergent individuals: " + str(solutions)
if solutions > 0:
solved = True
CONVERGENT_EXECS+=1
print "Solution found after " + str(i) + " iterations"
else:
print "No solution found!"
def mean(list_items):
return sum(list_items)/float(len(list_items))
def std_dev(list_items, mean_items):
variance_list = map(lambda x : pow(x-mean_items, 2), list_items)
return math.sqrt(sum(variance_list)/float(len(list_items)))
def initialize_function(benchmark_params, dims):
global fitness
global X_MIN
global X_MAX
global CONVERGENCE_PRECISION
global NUM_DIMENSIONS
fitness = benchmark_params[0]
if dims==None:
NUM_DIMENSIONS = benchmark_params[1]
else:
NUM_DIMENSIONS = dims
CONVERGENCE_PRECISION = benchmark_params[2]
X_MIN = benchmark_params[3]
X_MAX = benchmark_params[4]
if fitness == benchmarkFunctions.branin:
global Y_MIN
global Y_MAX
global generate_population
global delta_t
Y_MIN = benchmark_params[5]
Y_MAX = benchmark_params[6]
generate_population = generate_population_branin
delta_t = delta_t_branin
def main(num_of_trials, function_params, dims=None):
initialize_function(function_params, dims)
print CONVERGENCE_PRECISION
print NUM_DIMENSIONS
print X_MAX
print X_MIN
print Y_MAX
print Y_MIN
for i in range(num_of_trials):
print "Execution " + str(i+1)
evolve()
print ""
mean_iterations = mean(SOLUTION_FOUND_ITERATIONS)
mean_fitness = mean(INDIVIDUALS_FITNESS)
mean_individuals = mean(CONVERGENT_INDIVIDUALS)
print "Convergent executions: " + str(CONVERGENT_EXECS)
print "Mean of iterations: " + str(mean_iterations)
# print "Std of iterations: " + str(std_dev(SOLUTION_FOUND_ITERATIONS, mean_iterations))
print "Mean of fitness: " + str(mean_fitness)
print "Std of fitness: " + str(std_dev(INDIVIDUALS_FITNESS, mean_fitness))
print "Mean of convergent indivs: " + str(mean_individuals)
print "Std of convergent indivs: " + str(std_dev(CONVERGENT_INDIVIDUALS, mean_individuals))
print "Best solution found " + str(min(KBEST_FITNESS))
print "Mean solution found " + str(mean(KBEST_FITNESS))
# print "Mean of total convergence iterations: " + str(mean_iter_total)
def test_case_2(benchmark_params):
dimensions = [2,4,6,8]
for dim in dimensions:
print 'DIMENSIONS: ' + str(dim)
main(25, benchmark_params, dim)
print ""
test_case_2(benchmarkFunctions.ACKLEY())
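# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original script): a minimal sketch of a
# single position update for one krill, assuming the module-level constants
# (NUM_DIMENSIONS, CT, N_MAX, FORAGING_SPEED, ...) have been set up by
# initialize_function()/main() and that kworst/kbest have been initialised as
# in evolve(). The helper is never called; it only documents the data flow
# select_best_krill() -> food_position() -> dX_dt() -> move() used in evolve().
def _example_single_krill_step(population, iteration):
    (best_krill, population_fitness) = select_best_krill(population)
    x_food = food_position(population, population_fitness)
    dt = delta_t(population)
    krill = population[0]  # each entry is (position, personal_best, old_N, old_F)
    (delta_move, new_N, new_F) = dX_dt(
        krill[0], population_fitness[0], krill[1], best_krill[0], x_food,
        population, population_fitness, krill[2], krill[3], iteration)
    return move(krill[0], dt, delta_move), new_N, new_F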
|
Open-Transport/synthese
|
refs/heads/master
|
server/utils/udf_proxy/dummy_synthese_server.py
|
1
|
#!/usr/bin/env python
#
# Simulates a Synthese server that just prints requests made to it.
# @file dummy_synthese_server.py
# @author Sylvain Pasche
#
# This file belongs to the SYNTHESE project (public transportation specialized software)
# Copyright (C) 2002 Hugues Romain - RCSmobility <contact@rcsmobility.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import BaseHTTPServer
import SimpleHTTPServer
class HandlerWithPost(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
print 'Got GET %r' % (self.path,)
self._dummy_response()
def do_POST(self):
data = self.rfile.read(int(self.headers['content-length']))
print 'Got POST %r %r' % (self.path, data)
self._dummy_response()
def _dummy_response(self):
self.send_response(200)
self.end_headers()
self.wfile.write('Dummy response\n')
def test(HandlerClass = HandlerWithPost,
ServerClass = BaseHTTPServer.HTTPServer):
BaseHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
test()
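# Hedged usage note (illustration, not part of the original file): with the
# server started via BaseHTTPServer.test() (port 8000 unless another port is
# given on the command line), a request such as
#   curl -d 'a=1' http://localhost:8000/some/path
# makes the process print "Got POST '/some/path' 'a=1'" and reply with the
# body "Dummy response". The path and payload here are arbitrary examples.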
|
Bryan792/dotfiles
|
refs/heads/master
|
vim/vim.symlink/eclim/autoload/eclim/python/rope/refactor/importutils/importinfo.py
|
58
|
class ImportStatement(object):
"""Represent an import in a module
`readonly` attribute controls whether this import can be changed
by import actions or not.
"""
def __init__(self, import_info, start_line, end_line,
main_statement=None, blank_lines=0):
self.start_line = start_line
self.end_line = end_line
self.readonly = False
self.main_statement = main_statement
self._import_info = None
self.import_info = import_info
self._is_changed = False
self.new_start = None
self.blank_lines = blank_lines
def _get_import_info(self):
return self._import_info
def _set_import_info(self, new_import):
if not self.readonly and \
new_import is not None and not new_import == self._import_info:
self._is_changed = True
self._import_info = new_import
import_info = property(_get_import_info, _set_import_info)
def get_import_statement(self):
if self._is_changed or self.main_statement is None:
return self.import_info.get_import_statement()
else:
return self.main_statement
def empty_import(self):
self.import_info = ImportInfo.get_empty_import()
def move(self, lineno, blank_lines=0):
self.new_start = lineno
self.blank_lines = blank_lines
def get_old_location(self):
return self.start_line, self.end_line
def get_new_start(self):
return self.new_start
def is_changed(self):
return self._is_changed or (self.new_start is not None or
self.new_start != self.start_line)
def accept(self, visitor):
return visitor.dispatch(self)
class ImportInfo(object):
def get_imported_primaries(self, context):
pass
def get_imported_names(self, context):
return [primary.split('.')[0]
for primary in self.get_imported_primaries(context)]
def get_import_statement(self):
pass
def is_empty(self):
pass
def __hash__(self):
return hash(self.get_import_statement())
def _are_name_and_alias_lists_equal(self, list1, list2):
if len(list1) != len(list2):
return False
for pair1, pair2 in zip(list1, list2):
if pair1 != pair2:
return False
return True
def __eq__(self, obj):
return isinstance(obj, self.__class__) and \
self.get_import_statement() == obj.get_import_statement()
def __ne__(self, obj):
return not self.__eq__(obj)
@staticmethod
def get_empty_import():
return EmptyImport()
class NormalImport(ImportInfo):
def __init__(self, names_and_aliases):
self.names_and_aliases = names_and_aliases
def get_imported_primaries(self, context):
result = []
for name, alias in self.names_and_aliases:
if alias:
result.append(alias)
else:
result.append(name)
return result
def get_import_statement(self):
result = 'import '
for name, alias in self.names_and_aliases:
result += name
if alias:
result += ' as ' + alias
result += ', '
return result[:-2]
def is_empty(self):
return len(self.names_and_aliases) == 0
class FromImport(ImportInfo):
def __init__(self, module_name, level, names_and_aliases):
self.module_name = module_name
self.level = level
self.names_and_aliases = names_and_aliases
def get_imported_primaries(self, context):
if self.names_and_aliases[0][0] == '*':
module = self.get_imported_module(context)
return [name for name in module
if not name.startswith('_')]
result = []
for name, alias in self.names_and_aliases:
if alias:
result.append(alias)
else:
result.append(name)
return result
def get_imported_resource(self, context):
"""Get the imported resource
Returns `None` if module was not found.
"""
if self.level == 0:
return context.pycore.find_module(
self.module_name, folder=context.folder)
else:
return context.pycore.find_relative_module(
self.module_name, context.folder, self.level)
def get_imported_module(self, context):
"""Get the imported `PyModule`
Raises `rope.base.exceptions.ModuleNotFoundError` if module
could not be found.
"""
if self.level == 0:
return context.pycore.get_module(
self.module_name, context.folder)
else:
return context.pycore.get_relative_module(
self.module_name, context.folder, self.level)
def get_import_statement(self):
result = 'from ' + '.' * self.level + self.module_name + ' import '
for name, alias in self.names_and_aliases:
result += name
if alias:
result += ' as ' + alias
result += ', '
return result[:-2]
def is_empty(self):
return len(self.names_and_aliases) == 0
def is_star_import(self):
return len(self.names_and_aliases) > 0 and \
self.names_and_aliases[0][0] == '*'
class EmptyImport(ImportInfo):
names_and_aliases = []
def is_empty(self):
return True
def get_imported_primaries(self, context):
return []
class ImportContext(object):
def __init__(self, pycore, folder):
self.pycore = pycore
self.folder = folder
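# Hedged illustration (not part of rope): how the classes above render import
# statements; a minimal sketch using nothing beyond this module.
#
#   NormalImport([('os', None), ('sys', 'system')]).get_import_statement()
#       -> 'import os, sys as system'
#   FromImport('pkg.mod', 1, [('name', None)]).get_import_statement()
#       -> 'from .pkg.mod import name'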
|
houzhenggang/hiwifi-openwrt-HC5661-HC5761
|
refs/heads/master
|
staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/dumbdbm.py
|
251
|
"""A dumb and slow but simple dbm clone.
For database spam, spam.dir contains the index (a text file),
spam.bak *may* contain a backup of the index (also a text file),
while spam.dat contains the data (a binary file).
XXX TO DO:
- seems to contain a bug when updating...
- reclaim free space (currently, space once occupied by deleted or expanded
items is never reused)
- support concurrent access (currently, if two processes take turns making
updates, they can mess up the index)
- support efficient access to large databases (currently, the whole index
is read when the database is opened, and some updates rewrite the whole index)
- support opening for read-only (flag = 'm')
"""
import os as _os
import __builtin__
import UserDict
_open = __builtin__.open
_BLOCKSIZE = 512
error = IOError # For anydbm
class _Database(UserDict.DictMixin):
# The on-disk directory and data files can remain in mutually
# inconsistent states for an arbitrarily long time (see comments
# at the end of __setitem__). This is only repaired when _commit()
# gets called. One place _commit() gets called is from __del__(),
# and if that occurs at program shutdown time, module globals may
# already have gotten rebound to None. Since it's crucial that
# _commit() finish successfully, we can't ignore shutdown races
# here, and _commit() must not reference any globals.
_os = _os # for _commit()
_open = _open # for _commit()
def __init__(self, filebasename, mode):
self._mode = mode
# The directory file is a text file. Each line looks like
# "%r, (%d, %d)\n" % (key, pos, siz)
# where key is the string key, pos is the offset into the dat
# file of the associated value's first byte, and siz is the number
# of bytes in the associated value.
self._dirfile = filebasename + _os.extsep + 'dir'
# The data file is a binary file pointed into by the directory
# file, and holds the values associated with keys. Each value
# begins at a _BLOCKSIZE-aligned byte offset, and is a raw
# binary 8-bit string value.
self._datfile = filebasename + _os.extsep + 'dat'
self._bakfile = filebasename + _os.extsep + 'bak'
# The index is an in-memory dict, mirroring the directory file.
self._index = None # maps keys to (pos, siz) pairs
# Mod by Jack: create data file if needed
try:
f = _open(self._datfile, 'r')
except IOError:
f = _open(self._datfile, 'w')
self._chmod(self._datfile)
f.close()
self._update()
# Read directory file into the in-memory index dict.
def _update(self):
self._index = {}
try:
f = _open(self._dirfile)
except IOError:
pass
else:
for line in f:
line = line.rstrip()
key, pos_and_siz_pair = eval(line)
self._index[key] = pos_and_siz_pair
f.close()
# Write the index dict to the directory file. The original directory
# file (if any) is renamed with a .bak extension first. If a .bak
# file currently exists, it's deleted.
def _commit(self):
# CAUTION: It's vital that _commit() succeed, and _commit() can
# be called from __del__(). Therefore we must never reference a
# global in this routine.
if self._index is None:
return # nothing to do
try:
self._os.unlink(self._bakfile)
except self._os.error:
pass
try:
self._os.rename(self._dirfile, self._bakfile)
except self._os.error:
pass
f = self._open(self._dirfile, 'w')
self._chmod(self._dirfile)
for key, pos_and_siz_pair in self._index.iteritems():
f.write("%r, %r\n" % (key, pos_and_siz_pair))
f.close()
sync = _commit
def __getitem__(self, key):
pos, siz = self._index[key] # may raise KeyError
f = _open(self._datfile, 'rb')
f.seek(pos)
dat = f.read(siz)
f.close()
return dat
# Append val to the data file, starting at a _BLOCKSIZE-aligned
# offset. The data file is first padded with NUL bytes (if needed)
# to get to an aligned offset. Return pair
# (starting offset of val, len(val))
def _addval(self, val):
f = _open(self._datfile, 'rb+')
f.seek(0, 2)
pos = int(f.tell())
npos = ((pos + _BLOCKSIZE - 1) // _BLOCKSIZE) * _BLOCKSIZE
f.write('\0'*(npos-pos))
pos = npos
f.write(val)
f.close()
return (pos, len(val))
# Write val to the data file, starting at offset pos. The caller
# is responsible for ensuring that there's enough room starting at
# pos to hold val, without overwriting some other value. Return
# pair (pos, len(val)).
def _setval(self, pos, val):
f = _open(self._datfile, 'rb+')
f.seek(pos)
f.write(val)
f.close()
return (pos, len(val))
# key is a new key whose associated value starts in the data file
# at offset pos and with length siz. Add an index record to
# the in-memory index dict, and append one to the directory file.
def _addkey(self, key, pos_and_siz_pair):
self._index[key] = pos_and_siz_pair
f = _open(self._dirfile, 'a')
self._chmod(self._dirfile)
f.write("%r, %r\n" % (key, pos_and_siz_pair))
f.close()
def __setitem__(self, key, val):
if not type(key) == type('') == type(val):
raise TypeError, "keys and values must be strings"
if key not in self._index:
self._addkey(key, self._addval(val))
else:
# See whether the new value is small enough to fit in the
# (padded) space currently occupied by the old value.
pos, siz = self._index[key]
oldblocks = (siz + _BLOCKSIZE - 1) // _BLOCKSIZE
newblocks = (len(val) + _BLOCKSIZE - 1) // _BLOCKSIZE
if newblocks <= oldblocks:
self._index[key] = self._setval(pos, val)
else:
# The new value doesn't fit in the (padded) space used
# by the old value. The blocks used by the old value are
# forever lost.
self._index[key] = self._addval(val)
# Note that _index may be out of synch with the directory
# file now: _setval() and _addval() don't update the directory
# file. This also means that the on-disk directory and data
# files are in a mutually inconsistent state, and they'll
# remain that way until _commit() is called. Note that this
# is a disaster (for the database) if the program crashes
# (so that _commit() never gets called).
def __delitem__(self, key):
# The blocks used by the associated value are lost.
del self._index[key]
# XXX It's unclear why we do a _commit() here (the code always
# XXX has, so I'm not changing it). _setitem__ doesn't try to
# XXX keep the directory file in synch. Why should we? Or
# XXX why shouldn't __setitem__?
self._commit()
def keys(self):
return self._index.keys()
def has_key(self, key):
return key in self._index
def __contains__(self, key):
return key in self._index
def iterkeys(self):
return self._index.iterkeys()
__iter__ = iterkeys
def __len__(self):
return len(self._index)
def close(self):
self._commit()
self._index = self._datfile = self._dirfile = self._bakfile = None
__del__ = close
def _chmod (self, file):
if hasattr(self._os, 'chmod'):
self._os.chmod(file, self._mode)
def open(file, flag=None, mode=0666):
"""Open the database file, filename, and return corresponding object.
The flag argument, used to control how the database is opened in the
other DBM implementations, is ignored in the dumbdbm module; the
database is always opened for update, and will be created if it does
not exist.
The optional mode argument is the UNIX mode of the file, used only when
the database has to be created. It defaults to octal code 0666 (and
will be modified by the prevailing umask).
"""
# flag argument is currently ignored
# Modify mode depending on the umask
try:
um = _os.umask(0)
_os.umask(um)
except AttributeError:
pass
else:
# Turn off any bits that are set in the umask
mode = mode & (~um)
return _Database(file, mode)
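# Hedged usage sketch (illustration only, assuming a writable working
# directory): the module behaves like a minimal dict-backed dbm.
#
#   import dumbdbm
#   db = dumbdbm.open('spam')   # creates spam.dat; spam.dir/spam.bak follow
#   db['key'] = 'value'         # keys and values must be str
#   print db['key']             # prints: value
#   db.close()                  # rewrites the index via _commit()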
|
jsirois/pants
|
refs/heads/master
|
src/python/pants/util/dirutil.py
|
4
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import atexit
import errno
import os
import shutil
import stat
import tempfile
import threading
import uuid
from collections import defaultdict
from contextlib import contextmanager
from typing import Any, Callable, DefaultDict, Iterator, Sequence, Set, overload
from typing_extensions import Literal
from pants.util.strutil import ensure_text
def longest_dir_prefix(path: str, prefixes: Sequence[str]) -> str | None:
"""Given a list of prefixes, return the one that is the longest prefix to the given path.
Returns None if there are no matches.
"""
longest_match, longest_prefix = 0, None
for prefix in prefixes:
if fast_relpath_optional(path, prefix) is not None and len(prefix) > longest_match:
longest_match, longest_prefix = len(prefix), prefix
return longest_prefix
def fast_relpath(path: str, start: str) -> str:
"""A prefix-based relpath, with no normalization or support for returning `..`."""
relpath = fast_relpath_optional(path, start)
if relpath is None:
raise ValueError(f"{start} is not a directory containing {path}")
return relpath
def fast_relpath_optional(path: str, start: str) -> str | None:
"""A prefix-based relpath, with no normalization or support for returning `..`.
Returns None if `start` is not a directory-aware prefix of `path`.
"""
if len(start) == 0:
# Empty prefix.
return path
# Determine where the matchable prefix ends.
pref_end = len(start) - 1 if start[-1] == "/" else len(start)
if pref_end > len(path):
# The prefix is too long to match.
return None
elif path[:pref_end] == start[:pref_end] and (len(path) == pref_end or path[pref_end] == "/"):
# The prefix matches, and the entries are either identical, or the suffix indicates that
# the prefix is a directory.
return path[pref_end + 1 :]
return None
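# Hedged illustration (not part of the original module) of the directory-aware
# prefix matching above:
#   fast_relpath_optional("a/b/c", "a/b")   -> "c"
#   fast_relpath_optional("a/b/c", "a/b/")  -> "c"
#   fast_relpath_optional("a/bc", "a/b")    -> None  (prefix must end at a "/")
#   fast_relpath_optional("a/b", "")        -> "a/b"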
def safe_mkdir(directory: str, clean: bool = False) -> None:
"""Ensure a directory is present.
If it's not there, create it. If it is, no-op. If clean is True, ensure the dir is empty.
:API: public
"""
if clean:
safe_rmtree(directory)
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def safe_mkdir_for(path: str, clean: bool = False) -> None:
"""Ensure that the parent directory for a file is present.
If it's not there, create it. If it is, no-op.
"""
safe_mkdir(os.path.dirname(path), clean=clean)
def safe_file_dump(
filename: str, payload: bytes | str = "", mode: str = "w", makedirs: bool = False
) -> None:
"""Write a string to a file.
This method is "safe" to the extent that `safe_open` is "safe". See the explanation on the method
doc there.
When `payload` is an empty string (the default), this method can be used as a concise way to
create an empty file along with its containing directory (or truncate it if it already exists).
:param filename: The filename of the file to write to.
:param payload: The string to write to the file.
:param mode: A mode argument for the python `open` builtin which should be a write mode variant.
Defaults to 'w'.
:param makedirs: Whether to make all parent directories of this file before making it.
"""
if makedirs:
os.makedirs(os.path.dirname(filename), exist_ok=True)
with safe_open(filename, mode=mode) as f:
f.write(payload)
@overload
def maybe_read_file(filename: str) -> str | None:
...
@overload
def maybe_read_file(filename: str, binary_mode: Literal[False]) -> str | None:
...
@overload
def maybe_read_file(filename: str, binary_mode: Literal[True]) -> bytes | None:
...
@overload
def maybe_read_file(filename: str, binary_mode: bool) -> bytes | str | None:
...
def maybe_read_file(filename: str, binary_mode: bool = False) -> bytes | str | None:
"""Read and return the contents of a file in a single file.read().
:param filename: The filename of the file to read.
:param binary_mode: Read from file as bytes or unicode.
:returns: The contents of the file, or None if opening the file fails for any reason
"""
try:
return read_file(filename, binary_mode=binary_mode)
except IOError:
return None
@overload
def read_file(filename: str) -> str:
...
@overload
def read_file(filename: str, binary_mode: Literal[False]) -> str:
...
@overload
def read_file(filename: str, binary_mode: Literal[True]) -> bytes:
...
@overload
def read_file(filename: str, binary_mode: bool) -> bytes | str:
...
def read_file(filename: str, binary_mode: bool = False) -> bytes | str:
"""Read and return the contents of a file in a single file.read().
:param filename: The filename of the file to read.
:param binary_mode: Read from file as bytes or unicode.
:returns: The contents of the file.
"""
mode = "rb" if binary_mode else "r"
with open(filename, mode) as f:
content: bytes | str = f.read()
return content
def safe_walk(path: bytes | str, **kwargs: Any) -> Iterator[tuple[str, list[str], list[str]]]:
"""Just like os.walk, but ensures that the returned values are unicode objects.
This isn't strictly safe, in that it is possible that some paths
will not be decodeable, but that case is rare, and the only
alternative is to somehow avoid all interaction between paths and
unicode objects, which seems especially tough in the presence of
unicode_literals. See e.g.
https://mail.python.org/pipermail/python-dev/2008-December/083856.html
:API: public
"""
# If os.walk is given a text argument, it yields text values; if it
# is given a binary argument, it yields binary values.
return os.walk(ensure_text(path), **kwargs)
_MkdtempCleanerType = Callable[[], None]
_MKDTEMP_CLEANER: _MkdtempCleanerType | None = None
_MKDTEMP_DIRS: DefaultDict[int, Set[str]] = defaultdict(set)
_MKDTEMP_LOCK = threading.RLock()
def _mkdtemp_atexit_cleaner() -> None:
for td in _MKDTEMP_DIRS.pop(os.getpid(), []):
safe_rmtree(td)
def _mkdtemp_unregister_cleaner() -> None:
global _MKDTEMP_CLEANER
_MKDTEMP_CLEANER = None
def _mkdtemp_register_cleaner(cleaner: _MkdtempCleanerType) -> None:
global _MKDTEMP_CLEANER
assert callable(cleaner)
if _MKDTEMP_CLEANER is None:
atexit.register(cleaner)
_MKDTEMP_CLEANER = cleaner
def safe_mkdtemp(cleaner: _MkdtempCleanerType = _mkdtemp_atexit_cleaner, **kw: Any) -> str:
"""Create a temporary directory that is cleaned up on process exit.
Arguments are as to tempfile.mkdtemp.
:API: public
"""
# Proper lock sanitation on fork [issue 6721] would be desirable here.
with _MKDTEMP_LOCK:
return register_rmtree(tempfile.mkdtemp(**kw), cleaner=cleaner)
def register_rmtree(directory: str, cleaner: _MkdtempCleanerType = _mkdtemp_atexit_cleaner) -> str:
"""Register an existing directory to be cleaned up at process exit."""
with _MKDTEMP_LOCK:
_mkdtemp_register_cleaner(cleaner)
_MKDTEMP_DIRS[os.getpid()].add(directory)
return directory
def safe_rmtree(directory: str) -> None:
"""Delete a directory if it's present. If it's not present, no-op.
Note that if the directory argument is a symlink, only the symlink will
be deleted.
:API: public
"""
if os.path.islink(directory):
safe_delete(directory)
else:
shutil.rmtree(directory, ignore_errors=True)
def safe_open(filename, *args, **kwargs):
"""Open a file safely, ensuring that its directory exists.
:API: public
"""
safe_mkdir_for(filename)
return open(filename, *args, **kwargs)
def safe_delete(filename: str) -> None:
"""Delete a file safely.
If it's not present, no-op.
"""
try:
os.unlink(filename)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def safe_concurrent_rename(src: str, dst: str) -> None:
"""Rename src to dst, ignoring errors due to dst already existing.
Useful when concurrent processes may attempt to create dst, and it doesn't matter who wins.
"""
# Delete dst, in case it existed (with old content) even before any concurrent processes
# attempted this write. This ensures that at least one process writes the new content.
if os.path.isdir(src): # Note that dst may not exist, so we test for the type of src.
safe_rmtree(dst)
else:
safe_delete(dst)
try:
shutil.move(src, dst)
except IOError as e:
if e.errno != errno.EEXIST:
raise
@contextmanager
def safe_concurrent_creation(target_path: str) -> Iterator[str]:
"""A contextmanager that yields a temporary path and renames it to a final target path when the
contextmanager exits.
Useful when concurrent processes may attempt to create a file, and it doesn't matter who wins.
:param target_path: The final target path to rename the temporary path to.
:yields: A temporary path containing the original path with a unique (uuid4) suffix.
"""
safe_mkdir_for(target_path)
tmp_path = f"{target_path}.tmp.{uuid.uuid4().hex}"
try:
yield tmp_path
except Exception:
rm_rf(tmp_path)
raise
else:
if os.path.exists(tmp_path):
safe_concurrent_rename(tmp_path, target_path)
def chmod_plus_x(path: str) -> None:
"""Equivalent of unix `chmod a+x path`"""
path_mode = os.stat(path).st_mode
path_mode &= int("777", 8)
if path_mode & stat.S_IRUSR:
path_mode |= stat.S_IXUSR
if path_mode & stat.S_IRGRP:
path_mode |= stat.S_IXGRP
if path_mode & stat.S_IROTH:
path_mode |= stat.S_IXOTH
os.chmod(path, path_mode)
def absolute_symlink(source_path: str, target_path: str) -> None:
"""Create a symlink at target pointing to source using the absolute path.
:param source_path: Absolute path to source file
:param target_path: Absolute path to intended symlink
:raises ValueError if source_path or link_path are not unique, absolute paths
:raises OSError on failure UNLESS file already exists or no such file/directory
"""
if not os.path.isabs(source_path):
raise ValueError(f"Path for source : {source_path} must be absolute")
if not os.path.isabs(target_path):
raise ValueError(f"Path for link : {target_path} must be absolute")
if source_path == target_path:
raise ValueError(f"Path for link is identical to source : {source_path}")
try:
if os.path.lexists(target_path):
if os.path.islink(target_path) or os.path.isfile(target_path):
os.unlink(target_path)
else:
shutil.rmtree(target_path)
safe_mkdir_for(target_path)
os.symlink(source_path, target_path)
except OSError as e:
# Another run may beat us to deletion or creation.
if not (e.errno == errno.EEXIST or e.errno == errno.ENOENT):
raise
def relative_symlink(source_path: str, link_path: str) -> None:
"""Create a symlink at link_path pointing to relative source.
:param source_path: Absolute path to source file
:param link_path: Absolute path to intended symlink
:raises ValueError if source_path or link_path are not unique, absolute paths
:raises OSError on failure UNLESS file already exists or no such file/directory
"""
if not os.path.isabs(source_path):
raise ValueError(f"Path for source:{source_path} must be absolute")
if not os.path.isabs(link_path):
raise ValueError(f"Path for link:{link_path} must be absolute")
if source_path == link_path:
raise ValueError(f"Path for link is identical to source:{source_path}")
# The failure state below had a long life as an uncaught error. No behavior was changed here, it just adds a catch.
# Raising an exception does differ from absolute_symlink, which takes the liberty of deleting existing directories.
if os.path.isdir(link_path) and not os.path.islink(link_path):
raise ValueError(f"Path for link would overwrite an existing directory: {link_path}")
try:
if os.path.lexists(link_path):
os.unlink(link_path)
rel_path = os.path.relpath(source_path, os.path.dirname(link_path))
safe_mkdir_for(link_path)
os.symlink(rel_path, link_path)
except OSError as e:
# Another run may beat us to deletion or creation.
if not (e.errno == errno.EEXIST or e.errno == errno.ENOENT):
raise
def touch(path: str, times: int | tuple[int, int] | None = None):
"""Equivalent of unix `touch path`.
:API: public
:path: The file to touch.
:times Either a tuple of (atime, mtime) or else a single time to use for both. If not
specified both atime and mtime are updated to the current time.
"""
if isinstance(times, tuple) and len(times) > 2:
raise ValueError(
"`times` must either be a tuple of (atime, mtime) or else a single time to use for both."
)
if isinstance(times, int):
times = (times, times)
with safe_open(path, "a"):
os.utime(path, times)
def recursive_dirname(f: str) -> Iterator[str]:
"""Given a relative path like 'a/b/c/d', yield all ascending path components like:
'a/b/c/d'
'a/b/c'
'a/b'
'a'
''
"""
prev = None
while f != prev:
yield f
prev = f
f = os.path.dirname(f)
yield ""
def rm_rf(name: str) -> None:
"""Remove a file or a directory similarly to running `rm -rf <name>` in a UNIX shell.
:param name: the name of the file or directory to remove.
:raises: OSError on error.
"""
if not os.path.exists(name):
return
try:
# Avoid using safe_rmtree so we can detect failures.
shutil.rmtree(name)
except OSError as e:
if e.errno == errno.ENOTDIR:
# 'Not a directory', but a file. Attempt to os.unlink the file, raising OSError on failure.
safe_delete(name)
elif e.errno != errno.ENOENT:
# Pass on 'No such file or directory', otherwise re-raise OSError to surface perm issues etc.
raise
|
cledio66/pyglet
|
refs/heads/master
|
tests/image/PYPNG_LA_SAVE.py
|
29
|
#!/usr/bin/env python
'''Test LA save using PyPNG. You should see la.png reference image
on the left, and saved (and reloaded) image on the right. The saved image
may have larger dimensions due to texture size restrictions.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import unittest
import base_save
from pyglet.image.codecs.png import PNGImageEncoder
class TEST_PNG_LA_SAVE(base_save.TestSave):
texture_file = 'la.png'
encoder = PNGImageEncoder()
if __name__ == '__main__':
unittest.main()
|
emirot/codefights
|
refs/heads/master
|
the_core/reverseOnDiagonals.py
|
1
|
def reverseOnDiagonals(matrix):
diag_left = []
diag_right = []
z = len(matrix)-1
for i in range(len(matrix)):
diag_left.append(matrix[i][i])
i = 0
while z >= 0:
diag_right.append(matrix[z][i])
z -=1
i +=1
diag_left = list(reversed(diag_left))
diag_right = list(reversed(diag_right))
z = len(matrix)-1
for i in range(len(matrix)):
matrix[i][i] = diag_left[i]
i = 0
while z >= 0:
matrix[z][i] = diag_right[i]
z -=1
i +=1
return matrix
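# Hedged illustration (not part of the original solution): both diagonals are
# reversed in place, everything else is untouched.
#
#   reverseOnDiagonals([[1, 2, 3],
#                       [4, 5, 6],
#                       [7, 8, 9]])
#   -> [[9, 2, 7],
#       [4, 5, 6],
#       [3, 8, 1]]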
|
shubhdev/edxOnBaadal
|
refs/heads/master
|
lms/djangoapps/courseware/testutils.py
|
10
|
"""
Common test utilities for courseware functionality
"""
from abc import ABCMeta, abstractmethod
from datetime import datetime
import ddt
from mock import patch
from lms.djangoapps.courseware.url_helpers import get_redirect_url
from student.tests.factories import AdminFactory, UserFactory, CourseEnrollmentFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls
@ddt.ddt
class RenderXBlockTestMixin(object):
"""
Mixin for testing the courseware.render_xblock function.
It can be used for testing any higher-level endpoint that calls this method.
"""
__metaclass__ = ABCMeta
# DOM elements that appear in the LMS Courseware,
# but are excluded from the xBlock-only rendering.
COURSEWARE_CHROME_HTML_ELEMENTS = [
'<header id="open_close_accordion"',
'<ol class="course-tabs"',
'<footer id="footer-openedx"',
'<div class="window-wrap"',
'<div class="preview-menu"',
'<div class="container"'
]
# DOM elements that appear in an xBlock,
# but are excluded from the xBlock-only rendering.
XBLOCK_REMOVED_HTML_ELEMENTS = [
'<div class="wrap-instructor-info"',
]
@abstractmethod
def get_response(self):
"""
Abstract method to get the response from the endpoint that is being tested.
"""
pass # pragma: no cover
def login(self):
"""
Logs in the test user.
"""
self.client.login(username=self.user.username, password='test')
def setup_course(self, default_store=None):
"""
Helper method to create the course.
"""
if not default_store:
default_store = self.store.default_modulestore.get_modulestore_type()
with self.store.default_store(default_store):
self.course = CourseFactory.create() # pylint: disable=attribute-defined-outside-init
chapter = ItemFactory.create(parent=self.course, category='chapter')
self.html_block = ItemFactory.create( # pylint: disable=attribute-defined-outside-init
parent=chapter,
category='html',
data="<p>Test HTML Content<p>"
)
def setup_user(self, admin=False, enroll=False, login=False):
"""
Helper method to create the user.
"""
self.user = AdminFactory() if admin else UserFactory() # pylint: disable=attribute-defined-outside-init
if enroll:
CourseEnrollmentFactory(user=self.user, course_id=self.course.id)
if login:
self.login()
def verify_response(self, expected_response_code=200):
"""
Helper method that calls the endpoint, verifies the expected response code, and returns the response.
"""
response = self.get_response()
if expected_response_code == 200:
self.assertContains(response, self.html_block.data, status_code=expected_response_code)
            for chrome_element in self.COURSEWARE_CHROME_HTML_ELEMENTS + self.XBLOCK_REMOVED_HTML_ELEMENTS:
self.assertNotContains(response, chrome_element)
else:
self.assertNotContains(response, self.html_block.data, status_code=expected_response_code)
return response
@ddt.data(
(ModuleStoreEnum.Type.mongo, 8),
(ModuleStoreEnum.Type.split, 5),
)
@ddt.unpack
def test_courseware_html(self, default_store, mongo_calls):
"""
To verify that the removal of courseware chrome elements is working,
we include this test here to make sure the chrome elements that should
be removed actually exist in the full courseware page.
If this test fails, it's probably because the HTML template for courseware
has changed and COURSEWARE_CHROME_HTML_ELEMENTS needs to be updated.
"""
with self.store.default_store(default_store):
self.setup_course(default_store)
self.setup_user(admin=True, enroll=True, login=True)
with check_mongo_calls(mongo_calls):
url = get_redirect_url(self.course.id, self.html_block.location)
response = self.client.get(url)
for chrome_element in self.COURSEWARE_CHROME_HTML_ELEMENTS:
self.assertContains(response, chrome_element)
@ddt.data(
(ModuleStoreEnum.Type.mongo, 5),
(ModuleStoreEnum.Type.split, 5),
)
@ddt.unpack
def test_success_enrolled_staff(self, default_store, mongo_calls):
with self.store.default_store(default_store):
self.setup_course(default_store)
self.setup_user(admin=True, enroll=True, login=True)
# The 5 mongoDB calls include calls for
# Old Mongo:
# (1) fill_in_run
# (2) get_course in get_course_with_access
# (3) get_item for HTML block in get_module_by_usage_id
# (4) get_parent when loading HTML block
# (5) edx_notes descriptor call to get_course
# Split:
# (1) course_index - bulk_operation call
# (2) structure - get_course_with_access
# (3) definition - get_course_with_access
# (4) definition - HTML block
# (5) definition - edx_notes decorator (original_get_html)
with check_mongo_calls(mongo_calls):
self.verify_response()
def test_success_unenrolled_staff(self):
self.setup_course()
self.setup_user(admin=True, enroll=False, login=True)
self.verify_response()
def test_success_enrolled_student(self):
self.setup_course()
self.setup_user(admin=False, enroll=True, login=True)
self.verify_response()
def test_unauthenticated(self):
self.setup_course()
self.setup_user(admin=False, enroll=True, login=False)
self.verify_response(expected_response_code=404)
def test_unenrolled_student(self):
self.setup_course()
self.setup_user(admin=False, enroll=False, login=True)
self.verify_response(expected_response_code=404)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_fail_block_unreleased(self):
self.setup_course()
self.setup_user(admin=False, enroll=True, login=True)
self.html_block.start = datetime.max
modulestore().update_item(self.html_block, self.user.id) # pylint: disable=no-member
self.verify_response(expected_response_code=404)
def test_fail_block_nonvisible(self):
self.setup_course()
self.setup_user(admin=False, enroll=True, login=True)
self.html_block.visible_to_staff_only = True
modulestore().update_item(self.html_block, self.user.id) # pylint: disable=no-member
self.verify_response(expected_response_code=404)
|
fduraffourg/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/websockets/handlers/echo-query_v13_wsh.py
|
266
|
#!/usr/bin/python
from mod_pywebsocket import msgutil, util
def web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
while True:
msgutil.send_message(request, request.unparsed_uri.split('?')[1] or '')
return
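# Hedged illustration (not part of the original handler): opening the socket
# with a query string, e.g. a resource URI ending in "?foo=bar", makes the
# handler send the single text message "foo=bar" back and then return from the
# transfer loop. Without a query string, split('?')[1] would raise IndexError.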
|
LLNL/spack
|
refs/heads/develop
|
var/spack/repos/builtin/packages/fca/package.py
|
2
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Fca(Package):
"""Legacy interface for Mellanox Fabric Collective Accelerator (FCA). FCA
    is an MPI-integrated software package that utilizes CORE-Direct technology
for implementing the MPI collective communications."""
homepage = 'https://www.mellanox.com/products/fca'
has_code = False
version('2.5.2431')
# FCA needs to be added as an external package to SPACK. For this, the
# config file packages.yaml needs to be adjusted:
#
# fca:
# version: [2.5.2431]
# paths:
# fca@2.5.2431: /opt/mellanox/fca (path to your FCA installation)
# buildable: False
def install(self, spec, prefix):
raise InstallError(
self.spec.format('{name} is not installable, you need to specify '
'it as an external package in packages.yaml'))
|
anatm/administrator
|
refs/heads/master
|
git-1.8.1/git_remote_helpers/git/repo.py
|
45
|
import os
import subprocess
from git_remote_helpers.util import check_call
def sanitize(rev, sep='\t'):
"""Converts a for-each-ref line to a name/value pair.
"""
splitrev = rev.split(sep)
branchval = splitrev[0]
branchname = splitrev[1].strip()
if branchname.startswith("refs/heads/"):
branchname = branchname[11:]
return branchname, branchval
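# Hedged illustration (not part of the original module):
#   sanitize("0123abcd\trefs/heads/master\n") -> ('master', '0123abcd')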
def is_remote(url):
"""Checks whether the specified value is a remote url.
"""
prefixes = ["http", "file", "git"]
for prefix in prefixes:
if url.startswith(prefix):
return True
return False
class GitRepo(object):
"""Repo object representing a repo.
"""
def __init__(self, path):
"""Initializes a new repo at the given path.
"""
self.path = path
self.head = None
self.revmap = {}
self.local = not is_remote(self.path)
if(self.path.endswith('.git')):
self.gitpath = self.path
else:
self.gitpath = os.path.join(self.path, '.git')
if self.local and not os.path.exists(self.gitpath):
os.makedirs(self.gitpath)
def get_revs(self):
"""Fetches all revs from the remote.
"""
args = ["git", "ls-remote", self.gitpath]
path = ".cached_revs"
ofile = open(path, "w")
check_call(args, stdout=ofile)
output = open(path).readlines()
self.revmap = dict(sanitize(i) for i in output)
if "HEAD" in self.revmap:
del self.revmap["HEAD"]
self.revs = self.revmap.keys()
ofile.close()
def get_head(self):
"""Determines the head of a local repo.
"""
if not self.local:
return
path = os.path.join(self.gitpath, "HEAD")
head = open(path).readline()
self.head, _ = sanitize(head, ' ')
|
nirmeshk/oh-mainline
|
refs/heads/master
|
vendor/packages/Django/django/core/context_processors.py
|
232
|
"""
A set of request processors that return dictionaries to be merged into a
template context. Each function takes the request object as its only parameter
and returns a dictionary to add to the context.
These are referenced from the setting TEMPLATE_CONTEXT_PROCESSORS and used by
RequestContext.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.middleware.csrf import get_token
from django.utils import six
from django.utils.encoding import smart_text
from django.utils.functional import lazy
def csrf(request):
"""
Context processor that provides a CSRF token, or the string 'NOTPROVIDED' if
it has not been provided by either a view decorator or the middleware
"""
def _get_val():
token = get_token(request)
if token is None:
# In order to be able to provide debugging info in the
# case of misconfiguration, we use a sentinel value
# instead of returning an empty dict.
return 'NOTPROVIDED'
else:
return smart_text(token)
_get_val = lazy(_get_val, six.text_type)
return {'csrf_token': _get_val() }
def debug(request):
"Returns context variables helpful for debugging."
context_extras = {}
if settings.DEBUG and request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS:
context_extras['debug'] = True
from django.db import connection
context_extras['sql_queries'] = connection.queries
return context_extras
def i18n(request):
from django.utils import translation
context_extras = {}
context_extras['LANGUAGES'] = settings.LANGUAGES
context_extras['LANGUAGE_CODE'] = translation.get_language()
context_extras['LANGUAGE_BIDI'] = translation.get_language_bidi()
return context_extras
def tz(request):
from django.utils import timezone
return {'TIME_ZONE': timezone.get_current_timezone_name()}
def static(request):
"""
Adds static-related context variables to the context.
"""
return {'STATIC_URL': settings.STATIC_URL}
def media(request):
"""
Adds media-related context variables to the context.
"""
return {'MEDIA_URL': settings.MEDIA_URL}
def request(request):
return {'request': request}
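# Hedged usage note (illustration only): these processors are wired in through
# the TEMPLATE_CONTEXT_PROCESSORS setting, e.g.
#
#   TEMPLATE_CONTEXT_PROCESSORS = (
#       'django.core.context_processors.debug',
#       'django.core.context_processors.i18n',
#       'django.core.context_processors.static',
#       'django.core.context_processors.request',
#   )
#
# and each returned dictionary is merged into every RequestContext.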
|
pystruct/pystruct
|
refs/heads/master
|
pystruct/tests/test_learners/test_subgradient_latent_svm.py
|
5
|
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_almost_equal)
from pystruct.models import LatentGridCRF, LatentDirectionalGridCRF, GridCRF
from pystruct.learners import SubgradientLatentSSVM, SubgradientSSVM
from pystruct.inference import get_installed
from pystruct.datasets import generate_blocks_multinomial, generate_easy
def test_with_crosses():
    # this test is just too unstable / not working
pass
# very simple dataset. k-means init is perfect
#for n_states_per_label in [2, [1, 2]]:
#for n_states_per_label in [2]:
# test with 2 states for both foreground and background,
# as well as with single background state
#for inference_method in ['ad3', 'qpbo', 'lp']:
#for inference_method in ['qpbo']:
#X, Y = generate_crosses(n_samples=20, noise=2, n_crosses=1,
#total_size=8, seed=0)
#n_labels = 2
#crf = LatentGridCRF(n_labels=n_labels,
#n_states_per_label=n_states_per_label,
#inference_method=inference_method)
#clf = SubgradientLatentSSVM(model=crf, max_iter=2250, C=1000.,
#verbose=20, learning_rate=1,
#show_loss_every=0, momentum=0.0,
#decay_exponent=1, decay_t0=10)
#clf.fit(X, Y)
#Y_pred = clf.predict(X)
#assert_array_equal(np.array(Y_pred), Y)
def test_objective():
# test that SubgradientLatentSSVM does the same as SubgradientSVM,
# in particular that it has the same loss, if there are no latent states.
X, Y = generate_blocks_multinomial(n_samples=10, noise=.3, seed=1)
inference_method = get_installed(["qpbo", "ad3", "lp"])[0]
n_labels = 3
crfl = LatentGridCRF(n_labels=n_labels, n_states_per_label=1,
inference_method=inference_method)
clfl = SubgradientLatentSSVM(model=crfl, max_iter=20, C=10.,
learning_rate=0.001, momentum=0.98)
crfl.initialize(X, Y)
clfl.w = np.zeros(crfl.size_joint_feature) # this disables random init
clfl.fit(X, Y)
crf = GridCRF(n_states=n_labels, inference_method=inference_method)
clf = SubgradientSSVM(model=crf, max_iter=20, C=10., learning_rate=0.001,
momentum=0.98)
clf.fit(X, Y)
assert_array_almost_equal(clf.w, clfl.w)
assert_almost_equal(clf.objective_curve_[-1], clfl.objective_curve_[-1])
assert_array_equal(clf.predict(X), clfl.predict(X))
assert_array_equal(clf.predict(X), Y)
#def test_with_crosses_bad_init():
# # use less perfect initialization
# X, Y = generate_crosses(n_samples=10, noise=5, n_crosses=1,
# total_size=8)
# n_labels = 2
# crf = LatentGridCRF(n_labels=n_labels, n_states_per_label=2,
# inference_method='lp')
# clf = SubgradientLatentSSVM(model=crf, max_iter=50, C=10. ** 3)
# H_init = crf.init_latent(X, Y)
# mask = np.random.uniform(size=H_init.shape) > .7
# H_init[mask] = 2 * (H_init[mask] / 2)
# clf.fit(X, Y, H_init=H_init)
# Y_pred = clf.predict(X)
# assert_array_equal(np.array(Y_pred), Y)
def test_directional_bars():
# this test is very fragile :-/
X, Y = generate_easy(n_samples=20, noise=2, box_size=2, total_size=6,
seed=2)
n_labels = 2
crf = LatentDirectionalGridCRF(n_labels=n_labels,
n_states_per_label=[1, 4])
clf = SubgradientLatentSSVM(model=crf, max_iter=75, C=10.,
learning_rate=1, momentum=0,
decay_exponent=0.5, decay_t0=10)
clf.fit(X, Y)
Y_pred = clf.predict(X)
assert_array_equal(np.array(Y_pred), Y)
|
freddiebarrsmith/phpmongobuildpackcf
|
refs/heads/master
|
tests/test_compile_helpers.py
|
9
|
import os
import os.path
import tempfile
import shutil
from nose.tools import eq_
from build_pack_utils import utils
from compile_helpers import setup_webdir_if_it_doesnt_exist
from compile_helpers import convert_php_extensions
from compile_helpers import is_web_app
from compile_helpers import find_stand_alone_app_to_run
from compile_helpers import load_manifest
from compile_helpers import find_all_php_versions
from compile_helpers import validate_php_version
from compile_helpers import setup_log_dir
class TestCompileHelpers(object):
def setUp(self):
self.build_dir = tempfile.mkdtemp(prefix='build-')
self.cache_dir = tempfile.mkdtemp(prefix='cache-')
os.rmdir(self.build_dir) # delete otherwise copytree complains
os.rmdir(self.cache_dir) # cache dir does not exist normally
def tearDown(self):
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
if os.path.exists(self.cache_dir):
shutil.rmtree(self.cache_dir)
for name in os.listdir(os.environ['TMPDIR']):
if name.startswith('httpd-') and name.endswith('.gz'):
os.remove(os.path.join(os.environ['TMPDIR'], name))
if name.startswith('php-') and name.endswith('.gz'):
os.remove(os.path.join(os.environ['TMPDIR'], name))
def assert_exists(self, *args):
eq_(True, os.path.exists(os.path.join(*args)),
"Does not exists: %s" % os.path.join(*args))
def test_setup_log_dir(self):
eq_(False, os.path.exists(os.path.join(self.build_dir, 'logs')))
setup_log_dir({
'BUILD_DIR': self.build_dir
})
self.assert_exists(self.build_dir, 'logs')
def test_setup_if_webdir_exists(self):
shutil.copytree('tests/data/app-1', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'htdocs',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'htdocs')
self.assert_exists(self.build_dir, 'htdocs', 'index.php')
self.assert_exists(self.build_dir, 'htdocs', 'info.php')
self.assert_exists(self.build_dir, 'htdocs',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
eq_(2, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'htdocs'))))
def test_setup_if_custom_webdir_exists(self):
shutil.copytree('tests/data/app-6', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'public',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'public')
self.assert_exists(self.build_dir, 'public', 'index.php')
self.assert_exists(self.build_dir, 'public', 'info.php')
self.assert_exists(self.build_dir, 'public',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
eq_(3, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'public'))))
def test_setup_if_htdocs_does_not_exist(self):
shutil.copytree('tests/data/app-2', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'htdocs',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'htdocs')
self.assert_exists(self.build_dir, 'htdocs', 'index.php')
self.assert_exists(self.build_dir, 'htdocs', 'info.php')
self.assert_exists(self.build_dir, 'htdocs',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
eq_(2, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'htdocs'))))
def test_setup_if_htdocs_does_not_exist_but_library_does(self):
shutil.copytree('tests/data/app-7', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'htdocs',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'htdocs')
self.assert_exists(self.build_dir, 'htdocs', 'index.php')
self.assert_exists(self.build_dir, 'htdocs', 'info.php')
self.assert_exists(self.build_dir, 'htdocs',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, 'htdocs', 'library')
self.assert_exists(self.build_dir, 'htdocs', 'library', 'junk.php')
self.assert_exists(self.build_dir, 'lib')
self.assert_exists(self.build_dir, 'lib', 'test.php')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
self.assert_exists(self.build_dir, 'manifest.yml')
eq_(4, len(os.listdir(self.build_dir)))
eq_(4, len(os.listdir(os.path.join(self.build_dir, 'htdocs'))))
def test_setup_if_custom_webdir_does_not_exist(self):
shutil.copytree('tests/data/app-2', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'public',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'public')
self.assert_exists(self.build_dir, 'public', 'index.php')
self.assert_exists(self.build_dir, 'public', 'info.php')
self.assert_exists(self.build_dir, 'public',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
eq_(2, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'public'))))
def test_setup_if_htdocs_does_not_exist_with_extensions(self):
shutil.copytree('tests/data/app-4', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'htdocs',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'htdocs')
self.assert_exists(self.build_dir, 'htdocs', 'index.php')
self.assert_exists(self.build_dir, 'htdocs', 'info.php')
self.assert_exists(self.build_dir, 'htdocs',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
self.assert_exists(self.build_dir, '.bp')
self.assert_exists(self.build_dir, '.bp', 'logs')
self.assert_exists(self.build_dir, '.bp', 'logs', 'some.log')
self.assert_exists(self.build_dir, '.extensions')
self.assert_exists(self.build_dir, '.extensions', 'some-ext')
self.assert_exists(self.build_dir, '.extensions', 'some-ext',
'extension.py')
eq_(4, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'htdocs'))))
def test_setup_if_custom_webdir_does_not_exist_with_extensions(self):
shutil.copytree('tests/data/app-4', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'public',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'public')
self.assert_exists(self.build_dir, 'public', 'index.php')
self.assert_exists(self.build_dir, 'public', 'info.php')
self.assert_exists(self.build_dir, 'public',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
self.assert_exists(self.build_dir, '.bp')
self.assert_exists(self.build_dir, '.bp', 'logs')
self.assert_exists(self.build_dir, '.bp', 'logs', 'some.log')
self.assert_exists(self.build_dir, '.extensions')
self.assert_exists(self.build_dir, '.extensions', 'some-ext')
self.assert_exists(self.build_dir, '.extensions', 'some-ext',
'extension.py')
eq_(4, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'public'))))
def test_setup_if_htdocs_with_stand_alone_app(self):
shutil.copytree('tests/data/app-5', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEB_SERVER': 'none'
}))
self.assert_exists(self.build_dir, 'app.php')
eq_(1, len(os.listdir(self.build_dir)))
def test_convert_php_extensions_55(self):
ctx = {
'PHP_VERSION': '5.5.x',
'PHP_EXTENSIONS': ['mod1', 'mod2', 'mod3'],
'ZEND_EXTENSIONS': ['zmod1', 'zmod2']
}
convert_php_extensions(ctx)
eq_('extension=mod1.so\nextension=mod2.so\nextension=mod3.so',
ctx['PHP_EXTENSIONS'])
eq_('zend_extension="zmod1.so"\nzend_extension="zmod2.so"',
ctx['ZEND_EXTENSIONS'])
def test_convert_php_extensions_55_none(self):
ctx = {
'PHP_VERSION': '5.5.x',
'PHP_EXTENSIONS': [],
'ZEND_EXTENSIONS': []
}
convert_php_extensions(ctx)
eq_('', ctx['PHP_EXTENSIONS'])
eq_('', ctx['ZEND_EXTENSIONS'])
def test_convert_php_extensions_55_one(self):
ctx = {
'PHP_VERSION': '5.5.x',
'PHP_EXTENSIONS': ['mod1'],
'ZEND_EXTENSIONS': ['zmod1']
}
convert_php_extensions(ctx)
eq_('zend_extension="zmod1.so"',
ctx['ZEND_EXTENSIONS'])
def test_is_web_app(self):
ctx = {}
eq_(True, is_web_app(ctx))
ctx['WEB_SERVER'] = 'nginx'
eq_(True, is_web_app(ctx))
ctx['WEB_SERVER'] = 'httpd'
eq_(True, is_web_app(ctx))
ctx['WEB_SERVER'] = 'none'
eq_(False, is_web_app(ctx))
def test_find_stand_alone_app_to_run_app_start_cmd(self):
ctx = {'APP_START_CMD': "echo 'Hello World!'"}
eq_("echo 'Hello World!'", find_stand_alone_app_to_run(ctx))
results = ('app.php', 'main.php', 'run.php', 'start.php', 'app.php')
for i, res in enumerate(results):
ctx = {'BUILD_DIR': 'tests/data/standalone/test%d' % (i + 1)}
eq_(res, find_stand_alone_app_to_run(ctx))
def test_load_manifest(self):
ctx = {'BP_DIR': '.'}
manifest = load_manifest(ctx)
assert manifest is not None
assert 'dependencies' in manifest.keys()
assert 'language' in manifest.keys()
assert 'url_to_dependency_map' in manifest.keys()
assert 'exclude_files' in manifest.keys()
def test_find_all_php_versions(self):
ctx = {'BP_DIR': '.'}
manifest = load_manifest(ctx)
dependencies = manifest['dependencies']
versions = find_all_php_versions(dependencies)
eq_(2, len([v for v in versions if v.startswith('5.5.')]))
eq_(2, len([v for v in versions if v.startswith('5.6.')]))
def test_validate_php_version(self):
ctx = {
'ALL_PHP_VERSIONS': ['5.5.31', '5.5.30'],
'PHP_55_LATEST': '5.5.31',
'PHP_VERSION': '5.5.30'
}
validate_php_version(ctx)
eq_('5.5.30', ctx['PHP_VERSION'])
ctx['PHP_VERSION'] = '5.5.29'
validate_php_version(ctx)
eq_('5.5.31', ctx['PHP_VERSION'])
ctx['PHP_VERSION'] = '5.5.30'
validate_php_version(ctx)
eq_('5.5.30', ctx['PHP_VERSION'])
|
Juniper/nova
|
refs/heads/master
|
nova/console/serial.py
|
10
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Serial consoles module."""
import socket
from oslo_log import log as logging
import six.moves
import nova.conf
from nova import exception
from nova.i18n import _LW
from nova import utils
LOG = logging.getLogger(__name__)
ALLOCATED_PORTS = set() # in-memory set of already allocated ports
SERIAL_LOCK = 'serial-lock'
CONF = nova.conf.CONF
# TODO(sahid): Add a method to initialize ALLOCATED_PORTS with the
# already binded TPC port(s). (cf from danpb: list all running guests and
# query the XML in libvirt driver to find out the TCP port(s) it uses).
@utils.synchronized(SERIAL_LOCK)
def acquire_port(host):
"""Returns a free TCP port on host.
Finds and returns a free TCP port on 'host' in the range
of 'CONF.serial_console.port_range'.
"""
start, stop = _get_port_range()
for port in six.moves.range(start, stop):
if (host, port) in ALLOCATED_PORTS:
continue
try:
_verify_port(host, port)
ALLOCATED_PORTS.add((host, port))
return port
except exception.SocketPortInUseException as e:
LOG.warning(e.format_message())
raise exception.SocketPortRangeExhaustedException(host=host)
@utils.synchronized(SERIAL_LOCK)
def release_port(host, port):
"""Release TCP port to be used next time."""
ALLOCATED_PORTS.discard((host, port))
def _get_port_range():
config_range = CONF.serial_console.port_range
start, stop = map(int, config_range.split(':'))
if start >= stop:
default_port_range = nova.conf.serial_console.DEFAULT_PORT_RANGE
LOG.warning(_LW("serial_console.port_range should be in the "
"format <start>:<stop> and start < stop, "
"Given value %(port_range)s is invalid. "
"Taking the default port range %(default)s."),
{'port_range': config_range,
'default': default_port_range})
start, stop = map(int, default_port_range.split(':'))
return start, stop
def _verify_port(host, port):
s = socket.socket()
try:
s.bind((host, port))
except socket.error as e:
raise exception.SocketPortInUseException(
host=host, port=port, error=e)
finally:
s.close()
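# --- Illustrative usage sketch (not part of the original module) ---
# The helper below and the host value are assumptions, added only to show how a
# console driver might pair acquire_port() with release_port().
def _example_acquire_and_release(host='127.0.0.1'):
    port = acquire_port(host)      # free TCP port inside serial_console.port_range
    try:
        LOG.debug("serial console bound to %(host)s:%(port)s",
                  {'host': host, 'port': port})
    finally:
        release_port(host, port)   # give the port back once the console is gone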
|
sstruct/flasky
|
refs/heads/master
|
migrations/versions/51f5ccfba190_comments.py
|
113
|
"""comments
Revision ID: 51f5ccfba190
Revises: 2356a38169ea
Create Date: 2014-01-01 12:08:43.287523
"""
# revision identifiers, used by Alembic.
revision = '51f5ccfba190'
down_revision = '2356a38169ea'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('comments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('body_html', sa.Text(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('disabled', sa.Boolean(), nullable=True),
sa.Column('author_id', sa.Integer(), nullable=True),
sa.Column('post_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
sa.ForeignKeyConstraint(['post_id'], ['posts.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('ix_comments_timestamp', 'comments', ['timestamp'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_comments_timestamp', 'comments')
op.drop_table('comments')
### end Alembic commands ###
|
hassoon3/odoo
|
refs/heads/8.0
|
addons/crm/wizard/crm_phonecall_to_meeting.py
|
381
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class crm_phonecall2meeting(osv.osv_memory):
""" Phonecall to Meeting """
_name = 'crm.phonecall2meeting'
_description = 'Phonecall To Meeting'
def action_cancel(self, cr, uid, ids, context=None):
"""
Closes Phonecall to Meeting form
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Phonecall to Meeting IDs
@param context: A standard dictionary for contextual values
"""
return {'type':'ir.actions.act_window_close'}
def action_make_meeting(self, cr, uid, ids, context=None):
""" This opens Meeting's calendar view to schedule meeting on current Phonecall
@return : Dictionary value for created Meeting view
"""
res = {}
phonecall_id = context and context.get('active_id', False) or False
if phonecall_id:
phonecall = self.pool.get('crm.phonecall').browse(cr, uid, phonecall_id, context)
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'calendar', 'action_calendar_event', context)
res['context'] = {
'default_phonecall_id': phonecall.id,
'default_partner_id': phonecall.partner_id and phonecall.partner_id.id or False,
'default_user_id': uid,
'default_email_from': phonecall.email_from,
'default_state': 'open',
'default_name': phonecall.name,
}
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
nicobustillos/odoo
|
refs/heads/8.0
|
addons/hr_contract/__openerp__.py
|
52
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Employee Contracts',
'version': '1.0',
'category': 'Human Resources',
'description': """
Add all information on the employee form to manage contracts.
=============================================================
* Contract
* Place of Birth
* Medical Examination Date
* Company Vehicle
You can assign several contracts per employee.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/employees',
'images': ['images/hr_contract.jpeg'],
'depends': ['base_action_rule', 'hr'],
'data': [
'security/ir.model.access.csv',
'hr_contract_view.xml',
'hr_contract_data.xml',
'base_action_rule_view.xml',
],
'demo': [],
'test': ['test/test_hr_contract.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
cjhak/b2share
|
refs/heads/master
|
invenio/modules/formatter/views.py
|
8
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Formater Blueprint"""
from flask import Blueprint
blueprint = Blueprint('formatter', __name__,
template_folder='templates', static_folder='static')
|
AlericInglewood/3p-colladadom
|
refs/heads/singularity
|
projects/vc8to9.py
|
2
|
import sys, os, xml, string, getopt, StringIO
from os import path
from os.path import join
from xml.dom import minidom, Node
def slnToVC9(vc8Sln, vc9Sln):
vc8File = open(vc8Sln, 'r')
vc9File = open(vc9Sln, 'w')
if not vc8File or not vc9File:
return False
for line in vc8File:
if line.find('Microsoft Visual Studio Solution File, Format Version 9.00') != -1:
vc9File.write('Microsoft Visual Studio Solution File, Format Version 10.00\n')
elif line.find('# Visual Studio 2005') != -1:
vc9File.write('# Visual Studio 2008\n')
else:
vc9File.write(line)
return True
def vcprojToVC9Recursive(node):
if node.nodeType == Node.ELEMENT_NODE and node.tagName == 'VisualStudioProject':
attr = node.getAttributeNode('Version')
if attr:
attr.nodeValue = '9.00'
else:
return False
node.setAttribute('TargetFrameworkVersion', '131072')
if node.nodeType == Node.ELEMENT_NODE and node.tagName == 'Tool':
if node.getAttribute('Name') == 'VCLinkerTool':
node.setAttribute('RandomizedBaseAddress', '1')
node.setAttribute('DataExecutionPrevention', '0')
if node.nodeType == Node.ELEMENT_NODE and node.tagName == 'Tool':
if node.getAttribute('Name') == 'VCWebDeploymentTool':
node.parentNode.removeChild(node)
if node.nodeType == Node.ELEMENT_NODE and node.tagName == 'Tool':
if node.getAttribute('Name') == 'VCCLCompilerTool':
if node.getAttributeNode('Detect64BitPortabilityProblems'):
node.setAttribute('Detect64BitPortabilityProblems', 'false')
map(vcprojToVC9Recursive, node.childNodes)
def vcprojToVC9(vc8Proj, vc9Proj):
root = minidom.parse(vc8Proj)
if not root:
return False
vcprojToVC9Recursive(root)
root.writexml(open(vc9Proj, 'w'))
return True
def toVC9(vc8Path, vc9Path):
if not path.exists(vc9Path):
os.mkdir(vc9Path)
for path_ in os.listdir(vc8Path):
if path.splitext(path_)[1] == '.sln':
if not slnToVC9(join(vc8Path, path_), join(vc9Path, path_)):
return False
elif path.splitext(path_)[1] == '.vcproj':
if not vcprojToVC9(join(vc8Path, path_), join(vc9Path, path_)):
return False
return True
def vcprojConvertVC8Tags(vc9Proj):
vc9File = open(vc9Proj, 'r')
vc9Out = StringIO.StringIO()
if not vc9File or not vc9Out:
return False
for line in vc9File:
line = line.replace('vc8', 'vc9')
line = line.replace('VC8', 'VC9')
vc9Out.write(line)
vc9File.close()
vc9File = open(vc9Proj, 'w')
vc9File.write(vc9Out.getvalue())
return True
def convertVC8Tags(dir):
if not path.exists(dir):
return False
for path_ in os.listdir(dir):
if path.splitext(path_)[1] == '.vcproj':
if not vcprojConvertVC8Tags(join(dir, path_)):
return False
return True
if len(sys.argv) < 2 or not path.exists(sys.argv[1]):
print 'failed'
vc8Path = sys.argv[1]
vc9Path = join(vc8Path, '..', 'vc9')
if len(sys.argv) > 2:
vc9Path = sys.argv[2]
if not toVC9(vc8Path, vc9Path):
print 'failed'
if len(sys.argv) > 3 and sys.argv[3] == 'convertVC8Tags':
if not convertVC8Tags(vc9Path):
print 'failed'
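# Usage sketch (inferred from the argument handling above; the paths are
# illustrative, not part of the original script):
#
#   python vc8to9.py <vc8-project-dir> [vc9-output-dir] [convertVC8Tags]
#
# e.g. "python vc8to9.py projects/vc8 projects/vc9 convertVC8Tags" converts the
# solutions/projects and then rewrites vc8/VC8 tags inside the generated files.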
|
fedspendingtransparency/data-act-broker-backend
|
refs/heads/development
|
dataactcore/migrations/versions/807a203713a4_add_dates_to_tas_index.py
|
1
|
"""Add dates to tas index
Revision ID: 807a203713a4
Revises: bb33cc8f0a3e
Create Date: 2016-11-09 19:47:52.671178
"""
# revision identifiers, used by Alembic.
revision = '807a203713a4'
down_revision = 'bb33cc8f0a3e'
branch_labels = None
depends_on = None
from alembic import op
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_tas', table_name='tas_lookup')
op.create_index('ix_tas', 'tas_lookup', ['allocation_transfer_agency', 'agency_identifier', 'beginning_period_of_availability', 'ending_period_of_availability', 'availability_type_code', 'main_account_code', 'sub_account_code', 'internal_start_date', 'internal_end_date'], unique=False)
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_tas', table_name='tas_lookup')
op.create_index('ix_tas', 'tas_lookup', ['allocation_transfer_agency', 'agency_identifier', 'beginning_period_of_availability', 'ending_period_of_availability', 'availability_type_code', 'main_account_code', 'sub_account_code'], unique=False)
### end Alembic commands ###
|
andrey-utkin/linux
|
refs/heads/master
|
scripts/gdb/linux/lists.py
|
509
|
#
# gdb helper commands and functions for Linux kernel debugging
#
# list tools
#
# Copyright (c) Thiebaud Weksteen, 2015
#
# Authors:
# Thiebaud Weksteen <thiebaud@weksteen.fr>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import utils
list_head = utils.CachedType("struct list_head")
def list_for_each(head):
if head.type == list_head.get_type().pointer():
head = head.dereference()
elif head.type != list_head.get_type():
raise gdb.GdbError("Must be struct list_head not {}"
.format(head.type))
node = head['next'].dereference()
while node.address != head.address:
yield node.address
node = node['next'].dereference()
def list_for_each_entry(head, gdbtype, member):
for node in list_for_each(head):
if node.type != list_head.get_type().pointer():
raise TypeError("Type {} found. Expected struct list_head *."
.format(node.type))
yield utils.container_of(node, gdbtype, member)
def list_check(head):
nb = 0
if (head.type == list_head.get_type().pointer()):
head = head.dereference()
elif (head.type != list_head.get_type()):
raise gdb.GdbError('argument must be of type (struct list_head [*])')
c = head
try:
gdb.write("Starting with: {}\n".format(c))
except gdb.MemoryError:
gdb.write('head is not accessible\n')
return
while True:
p = c['prev'].dereference()
n = c['next'].dereference()
try:
if p['next'] != c.address:
gdb.write('prev.next != current: '
'current@{current_addr}={current} '
'prev@{p_addr}={p}\n'.format(
current_addr=c.address,
current=c,
p_addr=p.address,
p=p,
))
return
except gdb.MemoryError:
gdb.write('prev is not accessible: '
'current@{current_addr}={current}\n'.format(
current_addr=c.address,
current=c
))
return
try:
if n['prev'] != c.address:
gdb.write('next.prev != current: '
'current@{current_addr}={current} '
'next@{n_addr}={n}\n'.format(
current_addr=c.address,
current=c,
n_addr=n.address,
n=n,
))
return
except gdb.MemoryError:
gdb.write('next is not accessible: '
'current@{current_addr}={current}\n'.format(
current_addr=c.address,
current=c
))
return
c = n
nb += 1
if c == head:
gdb.write("list is consistent: {} node(s)\n".format(nb))
return
class LxListChk(gdb.Command):
"""Verify a list consistency"""
def __init__(self):
super(LxListChk, self).__init__("lx-list-check", gdb.COMMAND_DATA,
gdb.COMPLETE_EXPRESSION)
def invoke(self, arg, from_tty):
argv = gdb.string_to_argv(arg)
if len(argv) != 1:
raise gdb.GdbError("lx-list-check takes one argument")
list_check(gdb.parse_and_eval(argv[0]))
LxListChk()
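# Illustrative gdb session sketch (an assumption, not part of the original helper):
# list_for_each_entry() can walk any kernel list once the element type and the
# embedded list_head member are known; the module list is used here as an example.
#
#   (gdb) python
#   >from linux import lists, utils
#   >modules = gdb.parse_and_eval("modules")
#   >module_ptr = utils.CachedType("struct module").get_type().pointer()
#   >for m in lists.list_for_each_entry(modules, module_ptr, "list"):
#   >    gdb.write(m["name"].string() + "\n")
#   >end
#
#   (gdb) lx-list-check modules    # consistency check provided by LxListChk above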
|
danmackinlay/pelican-plugins
|
refs/heads/master
|
liquid_tags/vimeo.py
|
25
|
"""
Vimeo Tag
---------
This implements a Liquid-style vimeo tag for Pelican,
based on the youtube tag which is in turn based on
the jekyll / octopress youtube tag [1]_
Syntax
------
{% vimeo id [width height] %}
Example
-------
{% vimeo 10739054 640 480 %}
Output
------
<span style="width:640px; height:480px;">
<iframe
src="//player.vimeo.com/video/10739054?title=0&byline=0&portrait=0"
width="640" height="480" frameborder="0"
webkitallowfullscreen mozallowfullscreen allowfullscreen>
</iframe>
</span>
[1] https://gist.github.com/jamieowen/2063748
"""
import re
from .mdx_liquid_tags import LiquidTags
SYNTAX = "{% vimeo id [width height] %}"
VIMEO = re.compile(r'(\S+)(\s+(\d+)\s(\d+))?')
@LiquidTags.register('vimeo')
def vimeo(preprocessor, tag, markup):
width = 640
height = 390
vimeo_id = None
match = VIMEO.search(markup)
if match:
groups = match.groups()
vimeo_id = groups[0]
width = groups[2] or width
height = groups[3] or height
if vimeo_id:
vimeo_out = """
<span class="videobox">
<iframe
src="//player.vimeo.com/video/{vimeo_id}?title=0&byline=0&portrait=0"
width="{width}" height="{height}" frameborder="0"
webkitAllowFullScreen mozallowfullscreen allowFullScreen>
</iframe>
</span>
""".format(width=width, height=height, vimeo_id=vimeo_id).strip()
else:
raise ValueError("Error processing input, "
"expected syntax: {0}".format(SYNTAX))
return vimeo_out
# ---------------------------------------------------
# This import allows vimeo tag to be a Pelican plugin
from liquid_tags import register # noqa
|
anhstudios/swganh
|
refs/heads/develop
|
data/scripts/templates/object/static/structure/general/shared_debris_deathstar_small_tube.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/structure/general/shared_debris_deathstar_small_tube.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
ader1990/cloudbase-init
|
refs/heads/master
|
cloudbaseinit/tests/plugins/common/userdataplugins/test_parthandler.py
|
7
|
# Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from cloudbaseinit.plugins.common.userdataplugins import parthandler
class PartHandlerPluginTests(unittest.TestCase):
def setUp(self):
self._parthandler = parthandler.PartHandlerPlugin()
@mock.patch('cloudbaseinit.utils.encoding.write_file')
@mock.patch('tempfile.gettempdir')
@mock.patch('cloudbaseinit.utils.classloader.ClassLoader.load_module')
def test_process(self, mock_load_module, mock_gettempdir,
mock_write_file):
mock_part = mock.MagicMock()
mock_part_handler = mock.MagicMock()
mock_part.get_filename.return_value = 'fake_name'
mock_gettempdir.return_value = 'fake_directory'
mock_load_module.return_value = mock_part_handler
mock_part_handler.list_types.return_value = ['fake part']
response = self._parthandler.process(mock_part)
mock_part.get_filename.assert_called_once_with()
part_handler_path = os.path.join(mock_gettempdir.return_value,
mock_part.get_filename.return_value)
mock_write_file.assert_called_once_with(
part_handler_path, mock_part.get_payload.return_value)
mock_load_module.assert_called_once_with(os.path.join(
'fake_directory', 'fake_name'))
mock_part_handler.list_types.assert_called_once_with()
self.assertEqual({'fake part': mock_part_handler.handle_part},
response)
|
Jgarcia-IAS/SITE
|
refs/heads/master
|
openerp/tools/mail.py
|
38
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2012-TODAY OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
import cgi
import logging
import lxml.html
import lxml.html.clean as clean
import random
import re
import socket
import threading
import time
from email.utils import getaddresses
import openerp
from openerp.loglevels import ustr
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# HTML Sanitizer
#----------------------------------------------------------
tags_to_kill = ["script", "head", "meta", "title", "link", "style", "frame", "iframe", "base", "object", "embed"]
tags_to_remove = ['html', 'body', 'font']
# allow new semantic HTML5 tags
allowed_tags = clean.defs.tags | frozenset('article section header footer hgroup nav aside figure main'.split() + [etree.Comment])
safe_attrs = clean.defs.safe_attrs | frozenset(
['style',
'data-oe-model', 'data-oe-id', 'data-oe-field', 'data-oe-type', 'data-oe-expression', 'data-oe-translate', 'data-oe-nodeid',
'data-snippet-id', 'data-publish', 'data-id', 'data-res_id', 'data-member_id', 'data-view-id'
])
def html_sanitize(src, silent=True, strict=False):
if not src:
return src
src = ustr(src, errors='replace')
logger = logging.getLogger(__name__ + '.html_sanitize')
# html encode email tags
part = re.compile(r"(<(([^a<>]|a[^<>\s])[^<>]*)@[^<>]+>)", re.IGNORECASE | re.DOTALL)
src = part.sub(lambda m: cgi.escape(m.group(1)), src)
# html encode mako tags <% ... %> to decode them later and keep them alive, otherwise they are stripped by the cleaner
src = src.replace('<%', cgi.escape('<%'))
src = src.replace('%>', cgi.escape('%>'))
kwargs = {
'page_structure': True,
'style': False, # do not remove style attributes
'forms': True, # remove form tags
'remove_unknown_tags': False,
'allow_tags': allowed_tags,
'comments': False,
'processing_instructions': False
}
if etree.LXML_VERSION >= (2, 3, 1):
# kill_tags attribute has been added in version 2.3.1
kwargs.update({
'kill_tags': tags_to_kill,
'remove_tags': tags_to_remove,
})
else:
kwargs['remove_tags'] = tags_to_kill + tags_to_remove
if strict:
if etree.LXML_VERSION >= (3, 1, 0):
# lxml < 3.1.0 does not allow specifying safe_attrs. We keep all attributes in order to keep "style"
kwargs.update({
'safe_attrs_only': True,
'safe_attrs': safe_attrs,
})
else:
kwargs['safe_attrs_only'] = False # keep oe-data attributes + style
kwargs['frames'] = False  # do not remove frames (embedded videos in CMS blogs)
try:
# some corner cases make the parser crash (such as <SCRIPT/XSS SRC=\"http://ha.ckers.org/xss.js\"></SCRIPT> in test_mail)
cleaner = clean.Cleaner(**kwargs)
cleaned = cleaner.clean_html(src)
# MAKO compatibility: $, { and } inside quotes are escaped, preventing correct mako execution
cleaned = cleaned.replace('%24', '$')
cleaned = cleaned.replace('%7B', '{')
cleaned = cleaned.replace('%7D', '}')
cleaned = cleaned.replace('%20', ' ')
cleaned = cleaned.replace('%5B', '[')
cleaned = cleaned.replace('%5D', ']')
cleaned = cleaned.replace('<%', '<%')
cleaned = cleaned.replace('%>', '%>')
except etree.ParserError, e:
if 'empty' in str(e):
return ""
if not silent:
raise
logger.warning('ParserError obtained when sanitizing %r', src, exc_info=True)
cleaned = '<p>ParserError when sanitizing</p>'
except Exception:
if not silent:
raise
logger.warning('unknown error obtained when sanitizing %r', src, exc_info=True)
cleaned = '<p>Unknown error when sanitizing</p>'
# this is ugly, but lxml/etree tostring wants to put everything in a 'div' that breaks the editor -> remove that
if cleaned.startswith('<div>') and cleaned.endswith('</div>'):
cleaned = cleaned[5:-6]
return cleaned
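# Illustrative sketch (the input string is an assumption): tags listed in
# tags_to_kill, such as <script>, are removed together with their content while
# ordinary markup is preserved.
#
#   html_sanitize('<p>Hello<script>alert(1)</script></p>')
#   # -> roughly '<p>Hello</p>' (the cleaner may normalise the markup slightly)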
#----------------------------------------------------------
# HTML Cleaner
#----------------------------------------------------------
def html_email_clean(html, remove=False, shorten=False, max_length=300, expand_options=None,
protect_sections=False):
""" html_email_clean: clean the html by doing the following steps:
- try to strip email quotes, by removing blockquotes or having some client-
specific heuristics
- try to strip signatures
- shorten the html to a maximum number of characters if requested
Some specific use case:
- MsOffice: ``div.style = border-top:solid;`` delimits the beginning of
a quote; detected by finding WordSection1 or MsoNormal
- Hotmail: ``hr.stopSpelling`` delimits the beginning of a quote; detect
Hotmail by finding ``SkyDrivePlaceholder``
:param string html: sanitized html; tags like html or head should not
be present in the html string. This method therefore
takes as input html code coming from a sanitized source,
like fields.html.
:param boolean remove: remove the html code that is unwanted; otherwise it
is only flagged and tagged
:param boolean shorten: shorten the html; any content exceeding
max_length will be flagged for removal
:param int max_length: if shortening, maximum number of characters before
shortening
:param dict expand_options: options for the read more link when shortening
the content. The available keys are the following:
- oe_expand_container_tag: class applied to the
container of the whole read more link
- oe_expand_container_class: class applied to the
link container (default: oe_mail_expand)
- oe_expand_container_content: content of the
container (default: ...)
- oe_expand_separator_node: optional separator, like
adding ... <br /><br /> <a ...>read more</a> (default: void)
- oe_expand_a_href: href of the read more link itself
(default: #)
- oe_expand_a_class: class applied to the <a> containing
the link itself (default: oe_mail_expand)
- oe_expand_a_content: content of the <a> (default: read more)
The formatted read more link is the following:
<cont_tag class="oe_expand_container_class">
oe_expand_container_content
if expand_options.get('oe_expand_separator_node'):
<oe_expand_separator_node/>
<a href="oe_expand_a_href" class="oe_expand_a_class">
oe_expand_a_content
</a>
</span>
"""
def _replace_matching_regex(regex, source, replace=''):
""" Replace all matching expressions in source by replace """
if not source:
return source
dest = ''
idx = 0
for item in re.finditer(regex, source):
dest += source[idx:item.start()] + replace
idx = item.end()
dest += source[idx:]
return dest
def _create_node(tag, text, tail=None, attrs={}):
new_node = etree.Element(tag)
new_node.text = text
new_node.tail = tail
for key, val in attrs.iteritems():
new_node.set(key, val)
return new_node
def _insert_new_node(node, index, new_node_tag, new_node_text, new_node_tail=None, new_node_attrs={}):
new_node = _create_node(new_node_tag, new_node_text, new_node_tail, new_node_attrs)
node.insert(index, new_node)
return new_node
def _tag_matching_regex_in_text(regex, node, new_node_tag='span', new_node_attrs={}):
text = node.text or ''
if not re.search(regex, text):
return
cur_node = node
node.text = ''
idx, iteration = 0, 0
for item in re.finditer(regex, text):
if iteration == 0:
cur_node.text = text[idx:item.start()]
else:
_insert_new_node(node, (iteration - 1) * 2 + 1, new_node_tag, text[idx:item.start()])
new_node = _insert_new_node(node, iteration * 2, new_node_tag, text[item.start():item.end()], None, new_node_attrs)
cur_node = new_node
idx = item.end()
iteration += 1
new_node = _insert_new_node(node, -1, new_node_tag, text[idx:] + (cur_node.tail or ''), None, {})
def _truncate_node(node, position, simplify_whitespaces=True):
""" Truncate a node text at a given position. This algorithm will shorten
at the end of the word whose ending character exceeds position.
:param bool simplify_whitespaces: whether to try to count all successive
whitespaces as one character. This
option should not be True when trying
to keep 'pre' consistency.
"""
if node.text is None:
node.text = ''
truncate_idx = -1
if simplify_whitespaces:
cur_char_nbr = 0
word = None
node_words = node.text.strip(' \t\r\n').split()
for word in node_words:
cur_char_nbr += len(word)
if cur_char_nbr >= position:
break
if word:
truncate_idx = node.text.find(word) + len(word)
else:
truncate_idx = position
if truncate_idx == -1 or truncate_idx > len(node.text):
truncate_idx = len(node.text)
# compose new text bits
innertext = node.text[0:truncate_idx]
outertext = node.text[truncate_idx:]
node.text = innertext
# create <span> ... <a href="#">read more</a></span> node
read_more_node = _create_node(
expand_options.get('oe_expand_container_tag', 'span'),
expand_options.get('oe_expand_container_content', ' ... '),
None,
{'class': expand_options.get('oe_expand_container_class', 'oe_mail_expand')}
)
if expand_options.get('oe_expand_separator_node'):
read_more_separator_node = _create_node(
expand_options.get('oe_expand_separator_node'),
'',
None,
{}
)
read_more_node.append(read_more_separator_node)
read_more_link_node = _create_node(
'a',
expand_options.get('oe_expand_a_content', 'read more'),
None,
{
'href': expand_options.get('oe_expand_a_href', '#'),
'class': expand_options.get('oe_expand_a_class', 'oe_mail_expand'),
}
)
read_more_node.append(read_more_link_node)
# create outertext node
overtext_node = _create_node('span', outertext)
# tag node
overtext_node.set('in_overlength', '1')
# add newly created nodes in dom
node.append(read_more_node)
node.append(overtext_node)
if expand_options is None:
expand_options = {}
if not html or not isinstance(html, basestring):
return html
html = ustr(html)
# Pre processing
# ------------------------------------------------------------
# TDE TODO: --- MAIL ORIGINAL ---: '[\-]{4,}([^\-]*)[\-]{4,}'
# html: remove encoding attribute inside tags
doctype = re.compile(r'(<[^>]*\s)(encoding=(["\'][^"\']*?["\']|[^\s\n\r>]+)(\s[^>]*|/)?>)', re.IGNORECASE | re.DOTALL)
html = doctype.sub(r"", html)
# html: ClEditor seems to love using <div><br /><div> -> replace with <br />
br_div_tags = re.compile(r'(<div>\s*<br\s*\/>\s*<\/div>)', re.IGNORECASE)
html = _replace_matching_regex(br_div_tags, html, '<br />')
# form a tree
root = lxml.html.fromstring(html)
if not len(root) and root.text is None and root.tail is None:
html = '<div>%s</div>' % html
root = lxml.html.fromstring(html)
quote_tags = re.compile(r'(\n(>)+[^\n\r]*)')
signature = re.compile(r'([-]{2,}[\s]?[\r\n]{1,2}[\s\S]+)')
for node in root.iter():
# remove all tails and replace them by a span element, because managing text and tails can be a pain in the ass
if node.tail:
tail_node = _create_node('span', node.tail)
node.tail = None
node.addnext(tail_node)
# form node and tag text-based quotes and signature
_tag_matching_regex_in_text(quote_tags, node, 'span', {'text_quote': '1'})
_tag_matching_regex_in_text(signature, node, 'span', {'text_signature': '1'})
# Processing
# ------------------------------------------------------------
# tree: tag nodes
# signature_begin = False # try dynamic signature recognition
quote_begin = False
overlength = False
overlength_section_id = None
overlength_section_count = 0
cur_char_nbr = 0
for node in root.iter():
# comments do not need processing
# note: bug in node.get(value, default) for HtmlComments, default never returned
if node.tag == etree.Comment:
continue
# do not take into account multiple spaces that are displayed as max 1 space in html
node_text = ' '.join((node.text and node.text.strip(' \t\r\n') or '').split())
# root: try to tag the client used to write the html
if 'WordSection1' in node.get('class', '') or 'MsoNormal' in node.get('class', ''):
root.set('msoffice', '1')
if 'SkyDrivePlaceholder' in node.get('class', '') or 'SkyDrivePlaceholder' in node.get('id', ''):
root.set('hotmail', '1')
# protect sections by tagging section limits and blocks contained inside sections, using an increasing id to re-find them later
if node.tag == 'section':
overlength_section_count += 1
node.set('section_closure', str(overlength_section_count))
if node.getparent() is not None and (node.getparent().get('section_closure') or node.getparent().get('section_inner')):
node.set('section_inner', str(overlength_section_count))
# state of the parsing: flag quotes and tails to remove
if quote_begin:
node.set('in_quote', '1')
node.set('tail_remove', '1')
# state of the parsing: flag when being in over-length content, depending on section content if defined (only when having protect_sections)
if overlength:
if not overlength_section_id or int(node.get('section_inner', overlength_section_count + 1)) > overlength_section_count:
node.set('in_overlength', '1')
node.set('tail_remove', '1')
# find quote in msoffice / hotmail / blockquote / text quote and signatures
if root.get('msoffice') and node.tag == 'div' and 'border-top:solid' in node.get('style', ''):
quote_begin = True
node.set('in_quote', '1')
node.set('tail_remove', '1')
if root.get('hotmail') and node.tag == 'hr' and ('stopSpelling' in node.get('class', '') or 'stopSpelling' in node.get('id', '')):
quote_begin = True
node.set('in_quote', '1')
node.set('tail_remove', '1')
if node.tag == 'blockquote' or node.get('text_quote') or node.get('text_signature'):
# here no quote_begin because we want to be able to remove some quoted
# text without removing all the remaining context
node.set('in_quote', '1')
if node.getparent() is not None and node.getparent().get('in_quote'):
# inside a block of removed text but not in quote_begin (see above)
node.set('in_quote', '1')
# shorten:
# if protect section:
# 1/ find the first parent not being inside a section
# 2/ add the read more link
# else:
# 1/ truncate the text at the next available space
# 2/ create a 'read more' node, next to current node
# 3/ add the truncated text in a new node, next to 'read more' node
node_text = (node.text or '').strip().strip('\n').strip()
if shorten and not overlength and cur_char_nbr + len(node_text) > max_length:
node_to_truncate = node
while node_to_truncate.getparent() is not None:
if node_to_truncate.get('in_quote'):
node_to_truncate = node_to_truncate.getparent()
elif protect_sections and (node_to_truncate.getparent().get('section_inner') or node_to_truncate.getparent().get('section_closure')):
node_to_truncate = node_to_truncate.getparent()
overlength_section_id = node_to_truncate.get('section_closure')
else:
break
overlength = True
node_to_truncate.set('truncate', '1')
if node_to_truncate == node:
node_to_truncate.set('truncate_position', str(max_length - cur_char_nbr))
else:
node_to_truncate.set('truncate_position', str(len(node.text or '')))
cur_char_nbr += len(node_text)
# Tree modification
# ------------------------------------------------------------
for node in root.iter():
if node.get('truncate'):
_truncate_node(node, int(node.get('truncate_position', '0')), node.tag != 'pre')
# Post processing
# ------------------------------------------------------------
to_remove = []
for node in root.iter():
if node.get('in_quote') or node.get('in_overlength'):
# copy the node tail into parent text
if node.tail and not node.get('tail_remove'):
parent = node.getparent()
parent.tail = node.tail + (parent.tail or '')
to_remove.append(node)
if node.get('tail_remove'):
node.tail = ''
# clean node
for attribute_name in ['in_quote', 'tail_remove', 'in_overlength', 'msoffice', 'hotmail', 'truncate', 'truncate_position']:
node.attrib.pop(attribute_name, None)
for node in to_remove:
if remove:
node.getparent().remove(node)
else:
if not expand_options.get('oe_expand_a_class', 'oe_mail_expand') in node.get('class', ''): # trick: read more link should be displayed even if it's in overlength
node_class = node.get('class', '') + ' oe_mail_cleaned'
node.set('class', node_class)
# html: \n that were tail of elements have been encapsulated into <span> -> back to \n
html = etree.tostring(root, pretty_print=False)
linebreaks = re.compile(r'<span[^>]*>([\s]*[\r\n]+[\s]*)<\/span>', re.IGNORECASE | re.DOTALL)
html = _replace_matching_regex(linebreaks, html, '\n')
return html
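# Illustrative sketch (the snippet is an assumption): with remove=False quoted
# content is kept but tagged, so the caller can collapse it in the UI.
#
#   html_email_clean('<div>Hello<blockquote>older reply</blockquote></div>',
#                    remove=False)
#   # the <blockquote> subtree gains the 'oe_mail_cleaned' class instead of being dropped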
#----------------------------------------------------------
# HTML/Text management
#----------------------------------------------------------
def html2plaintext(html, body_id=None, encoding='utf-8'):
""" From an HTML text, convert the HTML to plain text.
If @param body_id is provided then this is the tag where the
body (not necessarily <body>) starts.
"""
## (c) Fry-IT, www.fry-it.com, 2007
## <peter@fry-it.com>
## download here: http://www.peterbe.com/plog/html2plaintext
html = ustr(html)
tree = etree.fromstring(html, parser=etree.HTMLParser())
if body_id is not None:
source = tree.xpath('//*[@id=%s]' % (body_id,))
else:
source = tree.xpath('//body')
if len(source):
tree = source[0]
url_index = []
i = 0
for link in tree.findall('.//a'):
url = link.get('href')
if url:
i += 1
link.tag = 'span'
link.text = '%s [%s]' % (link.text, i)
url_index.append(url)
html = ustr(etree.tostring(tree, encoding=encoding))
# \r char is converted into , must remove it
html = html.replace(' ', '')
html = html.replace('<strong>', '*').replace('</strong>', '*')
html = html.replace('<b>', '*').replace('</b>', '*')
html = html.replace('<h3>', '*').replace('</h3>', '*')
html = html.replace('<h2>', '**').replace('</h2>', '**')
html = html.replace('<h1>', '**').replace('</h1>', '**')
html = html.replace('<em>', '/').replace('</em>', '/')
html = html.replace('<tr>', '\n')
html = html.replace('</p>', '\n')
html = re.sub('<br\s*/?>', '\n', html)
html = re.sub('<.*?>', ' ', html)
html = html.replace(' ' * 2, ' ')
html = html.replace('>', '>')
html = html.replace('<', '<')
html = html.replace('&', '&')
# strip all lines
html = '\n'.join([x.strip() for x in html.splitlines()])
html = html.replace('\n' * 2, '\n')
for i, url in enumerate(url_index):
if i == 0:
html += '\n\n'
html += ustr('[%s] %s\n') % (i + 1, url)
return html
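# Illustrative sketch (the markup is an assumption): links are replaced by an
# index and listed at the end, basic emphasis becomes '*' / '/'.
#
#   html2plaintext('<div><b>Hi</b> <a href="http://example.com">docs</a></div>')
#   # -> roughly '*Hi* docs [1]\n\n[1] http://example.com'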
def plaintext2html(text, container_tag=False):
""" Convert plaintext into html. Content of the text is escaped to manage
html entities, using cgi.escape().
- all \n,\r are replaced by <br />
- enclose content into <p>
- 2 or more consecutive <br /> are considered as paragraph breaks
:param string container_tag: container of the html; by default the
content is embedded into a <div>
"""
text = cgi.escape(ustr(text))
# 1. replace \n and \r
text = text.replace('\n', '<br/>')
text = text.replace('\r', '<br/>')
# 2-3: form paragraphs
idx = 0
final = '<p>'
br_tags = re.compile(r'(([<]\s*[bB][rR]\s*\/?[>]\s*){2,})')
for item in re.finditer(br_tags, text):
final += text[idx:item.start()] + '</p><p>'
idx = item.end()
final += text[idx:] + '</p>'
# 4. container
if container_tag:
final = '<%s>%s</%s>' % (container_tag, final, container_tag)
return ustr(final)
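# Illustrative sketch (the input is an assumption): blank lines become paragraph
# breaks and the text is HTML-escaped before wrapping.
#
#   plaintext2html('line one\n\nline two & more', container_tag='div')
#   # -> approximately '<div><p>line one</p><p>line two &amp; more</p></div>'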
def append_content_to_html(html, content, plaintext=True, preserve=False, container_tag=False):
""" Append extra content at the end of an HTML snippet, trying
to locate the end of the HTML document (</body>, </html>, or
EOF), and converting the provided content in html unless ``plaintext``
is False.
Content conversion can be done in two ways:
- wrapping it into a pre (preserve=True)
- use plaintext2html (preserve=False, using container_tag to wrap the
whole content)
A side-effect of this method is to coerce all HTML tags to
lowercase in ``html``, and strip enclosing <html> or <body> tags in
content if ``plaintext`` is False.
:param str html: html tagsoup (doesn't have to be XHTML)
:param str content: extra content to append
:param bool plaintext: whether content is plaintext and should
be wrapped in a <pre/> tag.
:param bool preserve: if content is plaintext, wrap it into a <pre>
instead of converting it into html
"""
html = ustr(html)
if plaintext and preserve:
content = u'\n<pre>%s</pre>\n' % ustr(content)
elif plaintext:
content = '\n%s\n' % plaintext2html(content, container_tag)
else:
content = re.sub(r'(?i)(</?(?:html|body|head|!\s*DOCTYPE)[^>]*>)', '', content)
content = u'\n%s\n' % ustr(content)
# Force all tags to lowercase
html = re.sub(r'(</?)\W*(\w+)([ >])',
lambda m: '%s%s%s' % (m.group(1), m.group(2).lower(), m.group(3)), html)
insert_location = html.find('</body>')
if insert_location == -1:
insert_location = html.find('</html>')
if insert_location == -1:
return '%s%s' % (html, content)
return '%s%s%s' % (html[:insert_location], content, html[insert_location:])
#----------------------------------------------------------
# Emails
#----------------------------------------------------------
# matches any email in a body of text
email_re = re.compile(r"""([a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,6})""", re.VERBOSE)
# matches a string containing only one email
single_email_re = re.compile(r"""^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,6}$""", re.VERBOSE)
res_re = re.compile(r"\[([0-9]+)\]", re.UNICODE)
command_re = re.compile("^Set-([a-z]+) *: *(.+)$", re.I + re.UNICODE)
# Updated in 7.0 to match the model name as well
# Typical form of references is <timestamp-openerp-record_id-model_name@domain>
# group(1) = the record ID ; group(2) = the model (if any) ; group(3) = the domain
reference_re = re.compile("<.*-open(?:object|erp)-(\\d+)(?:-([\w.]+))?[^>]*@([^>]*)>", re.UNICODE)
def generate_tracking_message_id(res_id):
"""Returns a string that can be used in the Message-ID RFC822 header field
Used to track the replies related to a given object thanks to the "In-Reply-To"
or "References" fields that Mail User Agents will set.
"""
try:
rnd = random.SystemRandom().random()
except NotImplementedError:
rnd = random.random()
rndstr = ("%.15f" % rnd)[2:]
return "<%.15f.%s-openerp-%s@%s>" % (time.time(), rndstr, res_id, socket.gethostname())
def email_send(email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False,
attachments=None, message_id=None, references=None, openobject_id=False, debug=False, subtype='plain', headers=None,
smtp_server=None, smtp_port=None, ssl=False, smtp_user=None, smtp_password=None, cr=None, uid=None):
"""Low-level function for sending an email (deprecated).
:deprecate: since OpenERP 6.1, please use ir.mail_server.send_email() instead.
:param email_from: A string used to fill the `From` header, if falsy,
config['email_from'] is used instead. Also used for
the `Reply-To` header if `reply_to` is not provided
:param email_to: a sequence of addresses to send the mail to.
"""
# If not cr, get cr from current thread database
local_cr = None
if not cr:
db_name = getattr(threading.currentThread(), 'dbname', None)
if db_name:
local_cr = cr = openerp.registry(db_name).cursor()
else:
raise Exception("No database cursor found, please pass one explicitly")
# Send Email
try:
mail_server_pool = openerp.registry(cr.dbname)['ir.mail_server']
res = False
# Pack Message into MIME Object
email_msg = mail_server_pool.build_email(email_from, email_to, subject, body, email_cc, email_bcc, reply_to,
attachments, message_id, references, openobject_id, subtype, headers=headers)
res = mail_server_pool.send_email(cr, uid or 1, email_msg, mail_server_id=None,
smtp_server=smtp_server, smtp_port=smtp_port, smtp_user=smtp_user, smtp_password=smtp_password,
smtp_encryption=('ssl' if ssl else None), smtp_debug=debug)
except Exception:
_logger.exception("tools.email_send failed to deliver email")
return False
finally:
if local_cr:
cr.close()
return res
def email_split(text):
""" Return a list of the email addresses found in ``text`` """
if not text:
return []
return [addr[1] for addr in getaddresses([text])
# getaddresses() returns '' when email parsing fails, and
# sometimes returns emails without at least '@'. The '@'
# is strictly required in RFC2822's `addr-spec`.
if addr[1]
if '@' in addr[1]]
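# Illustrative sketch (addresses are assumptions): anything getaddresses() cannot
# parse into a real addr-spec is dropped.
#
#   email_split('Bob <bob@example.com>, alice@example.com, not-an-address')
#   # -> ['bob@example.com', 'alice@example.com']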
|
nicolargo/intellij-community
|
refs/heads/master
|
python/testData/completion/exceptName.after.py
|
83
|
try:
import foo
except ImportError, bar:
|
amyvmiwei/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/sre_constants.py
|
106
|
#
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20031017
from _sre import MAXREPEAT
# SRE standard exception (access as sre.error)
# should this really be here?
class error(Exception):
pass
# operators
FAILURE = "failure"
SUCCESS = "success"
ANY = "any"
ANY_ALL = "any_all"
ASSERT = "assert"
ASSERT_NOT = "assert_not"
AT = "at"
BIGCHARSET = "bigcharset"
BRANCH = "branch"
CALL = "call"
CATEGORY = "category"
CHARSET = "charset"
GROUPREF = "groupref"
GROUPREF_IGNORE = "groupref_ignore"
GROUPREF_EXISTS = "groupref_exists"
IN = "in"
IN_IGNORE = "in_ignore"
INFO = "info"
JUMP = "jump"
LITERAL = "literal"
LITERAL_IGNORE = "literal_ignore"
MARK = "mark"
MAX_REPEAT = "max_repeat"
MAX_UNTIL = "max_until"
MIN_REPEAT = "min_repeat"
MIN_UNTIL = "min_until"
NEGATE = "negate"
NOT_LITERAL = "not_literal"
NOT_LITERAL_IGNORE = "not_literal_ignore"
RANGE = "range"
REPEAT = "repeat"
REPEAT_ONE = "repeat_one"
SUBPATTERN = "subpattern"
MIN_REPEAT_ONE = "min_repeat_one"
# positions
AT_BEGINNING = "at_beginning"
AT_BEGINNING_LINE = "at_beginning_line"
AT_BEGINNING_STRING = "at_beginning_string"
AT_BOUNDARY = "at_boundary"
AT_NON_BOUNDARY = "at_non_boundary"
AT_END = "at_end"
AT_END_LINE = "at_end_line"
AT_END_STRING = "at_end_string"
AT_LOC_BOUNDARY = "at_loc_boundary"
AT_LOC_NON_BOUNDARY = "at_loc_non_boundary"
AT_UNI_BOUNDARY = "at_uni_boundary"
AT_UNI_NON_BOUNDARY = "at_uni_non_boundary"
# categories
CATEGORY_DIGIT = "category_digit"
CATEGORY_NOT_DIGIT = "category_not_digit"
CATEGORY_SPACE = "category_space"
CATEGORY_NOT_SPACE = "category_not_space"
CATEGORY_WORD = "category_word"
CATEGORY_NOT_WORD = "category_not_word"
CATEGORY_LINEBREAK = "category_linebreak"
CATEGORY_NOT_LINEBREAK = "category_not_linebreak"
CATEGORY_LOC_WORD = "category_loc_word"
CATEGORY_LOC_NOT_WORD = "category_loc_not_word"
CATEGORY_UNI_DIGIT = "category_uni_digit"
CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit"
CATEGORY_UNI_SPACE = "category_uni_space"
CATEGORY_UNI_NOT_SPACE = "category_uni_not_space"
CATEGORY_UNI_WORD = "category_uni_word"
CATEGORY_UNI_NOT_WORD = "category_uni_not_word"
CATEGORY_UNI_LINEBREAK = "category_uni_linebreak"
CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak"
OPCODES = [
# failure=0 success=1 (just because it looks better that way :-)
FAILURE, SUCCESS,
ANY, ANY_ALL,
ASSERT, ASSERT_NOT,
AT,
BRANCH,
CALL,
CATEGORY,
CHARSET, BIGCHARSET,
GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE,
IN, IN_IGNORE,
INFO,
JUMP,
LITERAL, LITERAL_IGNORE,
MARK,
MAX_UNTIL,
MIN_UNTIL,
NOT_LITERAL, NOT_LITERAL_IGNORE,
NEGATE,
RANGE,
REPEAT,
REPEAT_ONE,
SUBPATTERN,
MIN_REPEAT_ONE
]
ATCODES = [
AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY,
AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING,
AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY,
AT_UNI_NON_BOUNDARY
]
CHCODES = [
CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE,
CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD,
CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD,
CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT,
CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD,
CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK,
CATEGORY_UNI_NOT_LINEBREAK
]
def makedict(list):
d = {}
i = 0
for item in list:
d[item] = i
i = i + 1
return d
OPCODES = makedict(OPCODES)
ATCODES = makedict(ATCODES)
CHCODES = makedict(CHCODES)
# replacement operations for "ignore case" mode
OP_IGNORE = {
GROUPREF: GROUPREF_IGNORE,
IN: IN_IGNORE,
LITERAL: LITERAL_IGNORE,
NOT_LITERAL: NOT_LITERAL_IGNORE
}
AT_MULTILINE = {
AT_BEGINNING: AT_BEGINNING_LINE,
AT_END: AT_END_LINE
}
AT_LOCALE = {
AT_BOUNDARY: AT_LOC_BOUNDARY,
AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}
AT_UNICODE = {
AT_BOUNDARY: AT_UNI_BOUNDARY,
AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}
CH_LOCALE = {
CATEGORY_DIGIT: CATEGORY_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
CATEGORY_WORD: CATEGORY_LOC_WORD,
CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}
CH_UNICODE = {
CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_UNI_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
CATEGORY_WORD: CATEGORY_UNI_WORD,
CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}
# flags
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode "locale"
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
SRE_FLAG_ASCII = 256 # use ascii "locale"
# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set
if __name__ == "__main__":
def dump(f, d, prefix):
items = sorted(d.items(), key=lambda a: a[1])
for k, v in items:
f.write("#define %s_%s %s\n" % (prefix, k.upper(), v))
f = open("sre_constants.h", "w")
f.write("""\
/*
* Secret Labs' Regular Expression Engine
*
* regular expression matching engine
*
* NOTE: This file is generated by sre_constants.py. If you need
* to change anything in here, edit sre_constants.py and run it.
*
* Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
*
* See the _sre.c file for information on usage and redistribution.
*/
""")
f.write("#define SRE_MAGIC %d\n" % MAGIC)
dump(f, OPCODES, "SRE_OP")
dump(f, ATCODES, "SRE")
dump(f, CHCODES, "SRE")
f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
f.write("#define SRE_FLAG_DEBUG %d\n" % SRE_FLAG_DEBUG)
f.write("#define SRE_FLAG_ASCII %d\n" % SRE_FLAG_ASCII)
f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
f.close()
print("done")
|
HyperManTT/ECommerceSaleor
|
refs/heads/master
|
saleor/registration/views.py
|
2
|
from django.conf import settings
from django.contrib import messages, auth
from django.contrib.auth import views as django_views
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from django.template.response import TemplateResponse
from saleor.cart.utils import find_and_assign_anonymous_cart
from .forms import LoginForm, SignupForm, SetPasswordForm
@find_and_assign_anonymous_cart()
def login(request):
kwargs = {
'template_name': 'account/login.html', 'authentication_form': LoginForm}
return django_views.login(request, **kwargs)
@login_required
def logout(request):
auth.logout(request)
messages.success(request, _('You have been successfully logged out.'))
return redirect(settings.LOGIN_REDIRECT_URL)
def signup(request):
form = SignupForm(request.POST or None)
if form.is_valid():
form.save(request=request)
messages.success(request, _('User has been created'))
return redirect(settings.LOGIN_REDIRECT_URL)
ctx = {'form': form}
return TemplateResponse(request, 'account/signup.html', ctx)
def password_reset(request):
template_name = 'account/password_reset.html'
post_reset_redirect = 'account_reset_password_done'
email_template_name = 'account/email/password_reset_message.txt'
subject_template_name = 'account/email/password_reset_subject.txt'
return django_views.password_reset(
request, template_name=template_name,
post_reset_redirect=post_reset_redirect,
email_template_name=email_template_name,
subject_template_name=subject_template_name)
def password_reset_confirm(request, uidb64=None, token=None):
template_name = 'account/password_reset_from_key.html'
post_reset_redirect = 'account_reset_password_complete'
set_password_form = SetPasswordForm
return django_views.password_reset_confirm(
request, uidb64=uidb64, token=token, template_name=template_name,
post_reset_redirect=post_reset_redirect,
set_password_form=set_password_form)
|
hdmetor/scikit-learn
|
refs/heads/master
|
sklearn/kernel_approximation.py
|
258
|
"""
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
It implements a variant of Random Kitchen Sinks.[1]
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://www.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X < 0).any():
raise ValueError("X may not contain entries smaller than zero.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
Samples the Fourier transform of the kernel characteristic function
at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
space is transformed into 2*sample_steps+1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
shape = (n_samples, n_features * (2*sample_steps + 1))
Whether the return value is an array or sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
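# --- A minimal usage sketch (not part of scikit-learn itself). It uses toy data
# invented for illustration and compares the exact RBF kernel with the RBFSampler
# and Nystroem approximations defined above. Run it as
# ``python -m sklearn.kernel_approximation`` so the relative imports resolve.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.rand(50, 4)          # 50 toy samples, 4 features
    gamma_demo = 1.0

    # Exact RBF kernel matrix: K(x, y) = exp(-gamma * ||x - y||^2)
    sq_dists = ((X_demo[:, None, :] - X_demo[None, :, :]) ** 2).sum(axis=-1)
    K_exact = np.exp(-gamma_demo * sq_dists)

    # Random Fourier features: K is approximated by Z.dot(Z.T)
    Z = RBFSampler(gamma=gamma_demo, n_components=500,
                   random_state=0).fit_transform(X_demo)
    print("RBFSampler mean abs error:", np.abs(Z.dot(Z.T) - K_exact).mean())

    # Nystroem: data-dependent low-rank approximation of the same kernel
    Zn = Nystroem(kernel="rbf", gamma=gamma_demo, n_components=25,
                  random_state=0).fit_transform(X_demo)
    print("Nystroem mean abs error:", np.abs(Zn.dot(Zn.T) - K_exact).mean())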
|
openstack/tooz
|
refs/heads/master
|
examples/coordinator.py
|
1
|
# Copyright (C) 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tooz import coordination
coordinator = coordination.get_coordinator('zake://', b'host-1')
coordinator.start()
coordinator.stop()
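# A slightly fuller sketch along the same lines, with a hypothetical group name.
# Group calls on the coordinator return asynchronous results, so each call is
# resolved with .get() before moving on.
coordinator = coordination.get_coordinator('zake://', b'host-1')
coordinator.start()
coordinator.create_group(b'demo-group').get()
coordinator.join_group(b'demo-group').get()
members = coordinator.get_members(b'demo-group').get()
print(members)
coordinator.leave_group(b'demo-group').get()
coordinator.stop()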
|
sjlehtin/django
|
refs/heads/master
|
django/contrib/gis/gdal/raster/source.py
|
9
|
import json
import os
import sys
import uuid
from ctypes import (
addressof, byref, c_buffer, c_char_p, c_double, c_int, c_void_p, string_at,
)
from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import raster as capi
from django.contrib.gis.gdal.raster.band import BandList
from django.contrib.gis.gdal.raster.base import GDALRasterBase
from django.contrib.gis.gdal.raster.const import (
GDAL_RESAMPLE_ALGORITHMS, VSI_DELETE_BUFFER_ON_READ,
VSI_FILESYSTEM_BASE_PATH, VSI_TAKE_BUFFER_OWNERSHIP,
)
from django.contrib.gis.gdal.srs import SpatialReference, SRSException
from django.contrib.gis.geometry.regex import json_regex
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import cached_property
class TransformPoint(list):
indices = {
'origin': (0, 3),
'scale': (1, 5),
'skew': (2, 4),
}
def __init__(self, raster, prop):
x = raster.geotransform[self.indices[prop][0]]
y = raster.geotransform[self.indices[prop][1]]
list.__init__(self, [x, y])
self._raster = raster
self._prop = prop
@property
def x(self):
return self[0]
@x.setter
def x(self, value):
gtf = self._raster.geotransform
gtf[self.indices[self._prop][0]] = value
self._raster.geotransform = gtf
@property
def y(self):
return self[1]
@y.setter
def y(self, value):
gtf = self._raster.geotransform
gtf[self.indices[self._prop][1]] = value
self._raster.geotransform = gtf
class GDALRaster(GDALRasterBase):
"""
Wrap a raster GDAL Data Source object.
"""
destructor = capi.close_ds
def __init__(self, ds_input, write=False):
self._write = 1 if write else 0
Driver.ensure_registered()
# Preprocess json inputs. This converts json strings to dictionaries,
# which are parsed below the same way as direct dictionary inputs.
if isinstance(ds_input, str) and json_regex.match(ds_input):
ds_input = json.loads(ds_input)
# If input is a valid file path, try setting file as source.
if isinstance(ds_input, str):
try:
# GDALOpen will auto-detect the data source type.
self._ptr = capi.open_ds(force_bytes(ds_input), self._write)
except GDALException as err:
raise GDALException('Could not open the datasource at "{}" ({}).'.format(ds_input, err))
elif isinstance(ds_input, bytes):
# Create a new raster in write mode.
self._write = 1
# Get size of buffer.
size = sys.getsizeof(ds_input)
# Pass data to ctypes, keeping a reference to the ctypes object so
# that the vsimem file remains available until the GDALRaster is
# deleted.
self._ds_input = c_buffer(ds_input)
# Create random name to reference in vsimem filesystem.
vsi_path = os.path.join(VSI_FILESYSTEM_BASE_PATH, str(uuid.uuid4()))
# Create vsimem file from buffer.
capi.create_vsi_file_from_mem_buffer(
force_bytes(vsi_path),
byref(self._ds_input),
size,
VSI_TAKE_BUFFER_OWNERSHIP,
)
# Open the new vsimem file as a GDALRaster.
try:
self._ptr = capi.open_ds(force_bytes(vsi_path), self._write)
except GDALException:
# Remove the broken file from the VSI filesystem.
capi.unlink_vsi_file(force_bytes(vsi_path))
raise GDALException('Failed creating VSI raster from the input buffer.')
elif isinstance(ds_input, dict):
# A new raster needs to be created in write mode
self._write = 1
# Create driver (in memory by default)
driver = Driver(ds_input.get('driver', 'MEM'))
# For out of memory drivers, check filename argument
if driver.name != 'MEM' and 'name' not in ds_input:
raise GDALException('Specify name for creation of raster with driver "{}".'.format(driver.name))
# Check if width and height were specified
if 'width' not in ds_input or 'height' not in ds_input:
raise GDALException('Specify width and height attributes for JSON or dict input.')
# Check if srid was specified
if 'srid' not in ds_input:
raise GDALException('Specify srid for JSON or dict input.')
# Create null terminated gdal options array.
papsz_options = []
for key, val in ds_input.get('papsz_options', {}).items():
option = '{}={}'.format(key, val)
papsz_options.append(option.upper().encode())
papsz_options.append(None)
# Convert papszlist to ctypes array.
papsz_options = (c_char_p * len(papsz_options))(*papsz_options)
# Create GDAL Raster
self._ptr = capi.create_ds(
driver._ptr,
force_bytes(ds_input.get('name', '')),
ds_input['width'],
ds_input['height'],
ds_input.get('nr_of_bands', len(ds_input.get('bands', []))),
ds_input.get('datatype', 6),
byref(papsz_options),
)
# Set band data if provided
for i, band_input in enumerate(ds_input.get('bands', [])):
band = self.bands[i]
if 'nodata_value' in band_input:
band.nodata_value = band_input['nodata_value']
# Instantiate band filled with nodata values if only
# partial input data has been provided.
if band.nodata_value is not None and (
'data' not in band_input or
'size' in band_input or
'shape' in band_input):
band.data(data=(band.nodata_value,), shape=(1, 1))
# Set band data values from input.
band.data(
data=band_input.get('data'),
size=band_input.get('size'),
shape=band_input.get('shape'),
offset=band_input.get('offset'),
)
# Set SRID
self.srs = ds_input.get('srid')
# Set additional properties if provided
if 'origin' in ds_input:
self.origin.x, self.origin.y = ds_input['origin']
if 'scale' in ds_input:
self.scale.x, self.scale.y = ds_input['scale']
if 'skew' in ds_input:
self.skew.x, self.skew.y = ds_input['skew']
elif isinstance(ds_input, c_void_p):
# Instantiate the object using an existing pointer to a gdal raster.
self._ptr = ds_input
else:
raise GDALException('Invalid data source input type: "{}".'.format(type(ds_input)))
def __del__(self):
if self.is_vsi_based:
# Remove the temporary file from the VSI in-memory filesystem.
capi.unlink_vsi_file(force_bytes(self.name))
super().__del__()
def __str__(self):
return self.name
def __repr__(self):
"""
Short-hand representation because WKB may be very large.
"""
return '<Raster object at %s>' % hex(addressof(self._ptr))
def _flush(self):
"""
Flush all data from memory into the source file if it exists.
The data that needs flushing are geotransforms, coordinate systems,
nodata_values and pixel values. This function will be called
automatically wherever it is needed.
"""
# Raise an Exception if the value is being changed in read mode.
if not self._write:
raise GDALException('Raster needs to be opened in write mode to change values.')
capi.flush_ds(self._ptr)
@property
def vsi_buffer(self):
if not self.is_vsi_based:
return None
# Prepare an integer that will contain the buffer length.
out_length = c_int()
# Get the data using the vsi file name.
dat = capi.get_mem_buffer_from_vsi_file(
force_bytes(self.name),
byref(out_length),
VSI_DELETE_BUFFER_ON_READ,
)
# Read the full buffer pointer.
return string_at(dat, out_length.value)
@cached_property
def is_vsi_based(self):
return self.name.startswith(VSI_FILESYSTEM_BASE_PATH)
@property
def name(self):
"""
Return the name of this raster. Corresponds to filename
for file-based rasters.
"""
return force_text(capi.get_ds_description(self._ptr))
@cached_property
def driver(self):
"""
Return the GDAL Driver used for this raster.
"""
ds_driver = capi.get_ds_driver(self._ptr)
return Driver(ds_driver)
@property
def width(self):
"""
Width (X axis) in pixels.
"""
return capi.get_ds_xsize(self._ptr)
@property
def height(self):
"""
Height (Y axis) in pixels.
"""
return capi.get_ds_ysize(self._ptr)
@property
def srs(self):
"""
Return the SpatialReference used in this GDALRaster.
"""
try:
wkt = capi.get_ds_projection_ref(self._ptr)
if not wkt:
return None
return SpatialReference(wkt, srs_type='wkt')
except SRSException:
return None
@srs.setter
def srs(self, value):
"""
Set the spatial reference used in this GDALRaster. The input can be
a SpatialReference or any parameter accepted by the SpatialReference
constructor.
"""
if isinstance(value, SpatialReference):
srs = value
elif isinstance(value, (int, str)):
srs = SpatialReference(value)
else:
raise ValueError('Could not create a SpatialReference from input.')
capi.set_ds_projection_ref(self._ptr, srs.wkt.encode())
self._flush()
@property
def srid(self):
"""
Shortcut to access the srid of this GDALRaster.
"""
return self.srs.srid
@srid.setter
def srid(self, value):
"""
Shortcut to set this GDALRaster's srs from an srid.
"""
self.srs = value
@property
def geotransform(self):
"""
Return the geotransform of the data source.
Return the default geotransform if it does not exist or has not been
set previously. The default is [0.0, 1.0, 0.0, 0.0, 0.0, -1.0].
"""
# Create empty ctypes double array for data
gtf = (c_double * 6)()
capi.get_ds_geotransform(self._ptr, byref(gtf))
return list(gtf)
@geotransform.setter
def geotransform(self, values):
"Set the geotransform for the data source."
if len(values) != 6 or not all(isinstance(x, (int, float)) for x in values):
raise ValueError('Geotransform must consist of 6 numeric values.')
# Create ctypes double array with input and write data
values = (c_double * 6)(*values)
capi.set_ds_geotransform(self._ptr, byref(values))
self._flush()
@property
def origin(self):
"""
Coordinates of the raster origin.
"""
return TransformPoint(self, 'origin')
@property
def scale(self):
"""
Pixel scale in units of the raster projection.
"""
return TransformPoint(self, 'scale')
@property
def skew(self):
"""
Skew of pixels (rotation parameters).
"""
return TransformPoint(self, 'skew')
@property
def extent(self):
"""
Return the extent as a 4-tuple (xmin, ymin, xmax, ymax).
"""
# Calculate boundary values based on scale and size
xval = self.origin.x + self.scale.x * self.width
yval = self.origin.y + self.scale.y * self.height
# Calculate min and max values
xmin = min(xval, self.origin.x)
xmax = max(xval, self.origin.x)
ymin = min(yval, self.origin.y)
ymax = max(yval, self.origin.y)
return xmin, ymin, xmax, ymax
@property
def bands(self):
return BandList(self)
def warp(self, ds_input, resampling='NearestNeighbour', max_error=0.0):
"""
Return a warped GDALRaster with the given input characteristics.
The input is expected to be a dictionary containing the parameters
of the target raster. Allowed values are width, height, SRID, origin,
scale, skew, datatype, driver, and name (filename).
By default, the warp function keeps all parameters equal to the values
of the original source raster. For the name of the target raster, the
name of the source raster will be used and appended with
_copy. + source_driver_name.
In addition, the resampling algorithm can be specified with the "resampling"
input parameter. The default is NearestNeighbour. For a list of all options
consult the GDAL_RESAMPLE_ALGORITHMS constant.
"""
# Get the parameters defining the geotransform, srid, and size of the raster
if 'width' not in ds_input:
ds_input['width'] = self.width
if 'height' not in ds_input:
ds_input['height'] = self.height
if 'srid' not in ds_input:
ds_input['srid'] = self.srs.srid
if 'origin' not in ds_input:
ds_input['origin'] = self.origin
if 'scale' not in ds_input:
ds_input['scale'] = self.scale
if 'skew' not in ds_input:
ds_input['skew'] = self.skew
# Get the driver, name, and datatype of the target raster
if 'driver' not in ds_input:
ds_input['driver'] = self.driver.name
if 'name' not in ds_input:
ds_input['name'] = self.name + '_copy.' + self.driver.name
if 'datatype' not in ds_input:
ds_input['datatype'] = self.bands[0].datatype()
# Instantiate raster bands filled with nodata values.
ds_input['bands'] = [{'nodata_value': bnd.nodata_value} for bnd in self.bands]
# Create target raster
target = GDALRaster(ds_input, write=True)
# Select resampling algorithm
algorithm = GDAL_RESAMPLE_ALGORITHMS[resampling]
# Reproject image
capi.reproject_image(
self._ptr, self.srs.wkt.encode(),
target._ptr, target.srs.wkt.encode(),
algorithm, 0.0, max_error,
c_void_p(), c_void_p(), c_void_p()
)
# Make sure all data is written to file
target._flush()
return target
def transform(self, srid, driver=None, name=None, resampling='NearestNeighbour',
max_error=0.0):
"""
Return a copy of this raster reprojected into the given SRID.
"""
# Convert the resampling algorithm name into an algorithm id
algorithm = GDAL_RESAMPLE_ALGORITHMS[resampling]
# Instantiate target spatial reference system
target_srs = SpatialReference(srid)
# Create warped virtual dataset in the target reference system
target = capi.auto_create_warped_vrt(
self._ptr, self.srs.wkt.encode(), target_srs.wkt.encode(),
algorithm, max_error, c_void_p()
)
target = GDALRaster(target)
# Construct the target warp dictionary from the virtual raster
data = {
'srid': srid,
'width': target.width,
'height': target.height,
'origin': [target.origin.x, target.origin.y],
'scale': [target.scale.x, target.scale.y],
'skew': [target.skew.x, target.skew.y],
}
# Set the driver and filepath if provided
if driver:
data['driver'] = driver
if name:
data['name'] = name
# Warp the raster into new srid
return self.warp(data, resampling=resampling, max_error=max_error)
@property
def info(self):
"""
Return information about this raster in a string format equivalent
to the output of the gdalinfo command line utility.
"""
if not capi.get_ds_info:
raise ValueError('GDAL ≥ 2.1 is required for using the info property.')
return capi.get_ds_info(self.ptr, None).decode()
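# A minimal usage sketch (not part of Django): it exercises the dict branch of
# GDALRaster.__init__ above with invented values, creating a 2x2 in-memory
# raster. It assumes the GDAL library itself is installed and loadable.
if __name__ == '__main__':
    demo = GDALRaster({
        'srid': 4326,
        'width': 2,
        'height': 2,
        'origin': [0.0, 0.0],
        'scale': [1.0, -1.0],
        'bands': [{'data': [0, 1, 2, 3], 'nodata_value': 255}],
    })
    print(demo.width, demo.height, demo.srid)   # 2 2 4326
    print(demo.extent)                          # (0.0, -2.0, 2.0, 0.0)
    print(list(demo.bands[0].data()))           # band values as a flat sequence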
|
darkwing/kuma
|
refs/heads/master
|
vendor/packages/pygments/lexers/graph.py
|
72
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.graph
~~~~~~~~~~~~~~~~~~~~~
Lexers for graph query languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, this
from pygments.token import Keyword, Punctuation, Comment, Operator, Name,\
String, Number, Whitespace
__all__ = ['CypherLexer']
class CypherLexer(RegexLexer):
"""
For `Cypher Query Language
<http://docs.neo4j.org/chunked/milestone/cypher-query-lang.html>`_
For the Cypher version in Neo4J 2.0
.. versionadded:: 2.0
"""
name = 'Cypher'
aliases = ['cypher']
filenames = ['*.cyp', '*.cypher']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
include('comment'),
include('keywords'),
include('clauses'),
include('relations'),
include('strings'),
include('whitespace'),
include('barewords'),
],
'comment': [
(r'^.*//.*\n', Comment.Single),
],
'keywords': [
(r'(create|order|match|limit|set|skip|start|return|with|where|'
r'delete|foreach|not|by)\b', Keyword),
],
'clauses': [
# TODO: many missing ones, see http://docs.neo4j.org/refcard/2.0/
(r'(all|any|as|asc|create|create\s+unique|delete|'
r'desc|distinct|foreach|in|is\s+null|limit|match|none|'
r'order\s+by|return|set|skip|single|start|union|where|with)\b',
Keyword),
],
'relations': [
(r'(-\[)(.*?)(\]->)', bygroups(Operator, using(this), Operator)),
(r'(<-\[)(.*?)(\]-)', bygroups(Operator, using(this), Operator)),
(r'-->|<--|\[|\]', Operator),
(r'<|>|<>|=|<=|=>|\(|\)|\||:|,|;', Punctuation),
(r'[.*{}]', Punctuation),
],
'strings': [
(r'"(?:\\[tbnrf\'"\\]|[^\\"])*"', String),
(r'`(?:``|[^`])+`', Name.Variable),
],
'whitespace': [
(r'\s+', Whitespace),
],
'barewords': [
(r'[a-z]\w*', Name),
(r'\d+', Number),
],
}
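# A short usage sketch with a hypothetical query string: the lexer defined above
# plugs into the standard pygments highlight() pipeline like any other lexer.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    demo_query = "MATCH (n)-[r]->(m) WHERE n.name = 'Neo' RETURN n, r, m LIMIT 10"
    print(highlight(demo_query, CypherLexer(), HtmlFormatter()))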
|
adafruit/Adafruit_Python_SSD1306
|
refs/heads/master
|
setup.py
|
1
|
import os
import io
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
classifiers = ['Development Status :: 4 - Beta',
'Operating System :: POSIX :: Linux',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
'Topic :: System :: Hardware']
setup(name = 'Adafruit_SSD1306',
version = '1.6.2',
author = 'Tony DiCola',
author_email = 'tdicola@adafruit.com',
description = 'Python library to use SSD1306-based 128x64 or 128x32 pixel OLED displays with a Raspberry Pi or Beaglebone Black.',
long_description = long_description,
license = 'MIT',
classifiers = classifiers,
url = 'https://github.com/adafruit/Adafruit_Python_SSD1306/',
dependency_links = ['https://github.com/adafruit/Adafruit_Python_GPIO/tarball/master#egg=Adafruit-GPIO-0.6.5'],
install_requires = ['Adafruit-GPIO>=0.6.5'],
packages = find_packages())
|
ohnoimdead/horizon
|
refs/heads/master
|
django-openstack/django_openstack/tests/view_tests/dash/port_tests.py
|
5
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import http
from django.contrib import messages
from django.core.urlresolvers import reverse
from django_openstack import api
from django_openstack.tests.view_tests import base
from mox import IgnoreArg, IsA
import quantum.client
class PortViewTests(base.BaseViewTests):
def setUp(self):
super(PortViewTests, self).setUp()
def test_port_create(self):
self.mox.StubOutWithMock(api, "quantum_create_port")
api.quantum_create_port(IsA(http.HttpRequest), 'n1').AndReturn(True)
formData = {'ports_num': 1,
'network': 'n1',
'method': 'CreatePort'}
self.mox.StubOutWithMock(messages, 'success')
messages.success(IgnoreArg(), IsA(basestring))
res = self.client.post(reverse('dash_ports_create',
args=[self.request.user.tenant_id, "n1"]),
formData)
self.assertRedirectsNoFollow(res, reverse('dash_networks_detail',
args=[self.request.user.tenant_id,
"n1"]))
def test_port_delete(self):
self.mox.StubOutWithMock(api, "quantum_delete_port")
api.quantum_delete_port(IsA(http.HttpRequest),
'n1', 'p1').AndReturn(True)
formData = {'port': 'p1',
'network': 'n1',
'method': 'DeletePort'}
self.mox.StubOutWithMock(messages, 'success')
messages.success(IgnoreArg(), IsA(basestring))
res = self.client.post(reverse('dash_networks_detail',
args=[self.request.user.tenant_id, "n1"]),
formData)
def test_port_attach(self):
self.mox.StubOutWithMock(api, "quantum_attach_port")
api.quantum_attach_port(IsA(http.HttpRequest),
'n1', 'p1', dict).AndReturn(True)
formData = {'port': 'p1',
'network': 'n1',
'vif_id': 'v1',
'method': 'AttachPort'}
self.mox.StubOutWithMock(messages, 'success')
messages.success(IgnoreArg(), IsA(basestring))
res = self.client.post(reverse('dash_ports_attach',
args=[self.request.user.tenant_id, "n1", "p1"]),
formData)
self.assertRedirectsNoFollow(res, reverse('dash_networks_detail',
args=[self.request.user.tenant_id,
"n1"]))
def test_port_detach(self):
self.mox.StubOutWithMock(api, "quantum_detach_port")
api.quantum_detach_port(IsA(http.HttpRequest),
'n1', 'p1').AndReturn(True)
formData = {'port': 'p1',
'network': 'n1',
'method': 'DetachPort'}
self.mox.StubOutWithMock(messages, 'success')
messages.success(IgnoreArg(), IsA(basestring))
res = self.client.post(reverse('dash_networks_detail',
args=[self.request.user.tenant_id, "n1"]),
formData)
|
samrocketman/ekeyfinder
|
refs/heads/main
|
docs/chm/generate_doc.py
|
1
|
#!/usr/bin/env python
'''
Created by Sam Gleske
Created 02-28-2011 (mm-dd-yyyy)
MIT License (http://www.opensource.org/licenses/mit-license.php)
This python script parses mediawiki xml schema exported from the
MediaWiki wiki interface (admin function). During parsing it
utilizes documentation.py to generate html documentation.
Generate the wikixml file using only latest revisions without
history.
Why did I do this? Because I'm awesome and lazy.
Usage: ./wikidoc.py
will generate all the html files needed for the chm to compile
'''
import re,sys,os.path
from documentation import *
#here's some cross platform settings
htm_ext = ".html"
if __name__ == '__main__':
#necessary regular expressions, parsing xml quick and dirty
page_regex = re.compile(r'<page>(.*?)</page>', re.MULTILINE|re.DOTALL)
wikidoc_regex = re.compile(r'<text.*?>(.*?)</text>', re.MULTILINE|re.DOTALL)
f=open("documentation/wikidoc.xml", 'r')
wikixml = f.read()
f.close()
hits = re.findall(page_regex, wikixml)
#print hits[0]
for hit in hits:
title = re.findall(r'<title>(.*?)</title>',hit)[0]
wikidoc = re.findall(wikidoc_regex,hit)[0]
htmlfile = title.replace(" ","_").lower() + htm_ext
myobj = PageFormatter(wikidoc,title)
f=open(htmlfile,'w')
f.write(myobj.htmloutput())
f.close()
print "Generated:",htmlfile
|
anandpdoshi/frappe
|
refs/heads/develop
|
frappe/model/docfield.py
|
61
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""docfield utililtes"""
import frappe
def rename(doctype, fieldname, newname):
"""rename docfield"""
df = frappe.db.sql("""select * from tabDocField where parent=%s and fieldname=%s""",
(doctype, fieldname), as_dict=1)
if not df:
return
df = df[0]
if frappe.db.get_value('DocType', doctype, 'issingle'):
update_single(df, newname)
else:
update_table(df, newname)
update_parent_field(df, newname)
def update_single(f, new):
"""update in tabSingles"""
frappe.db.begin()
frappe.db.sql("""update tabSingles set field=%s where doctype=%s and field=%s""",
(new, f['parent'], f['fieldname']))
frappe.db.commit()
def update_table(f, new):
"""update table"""
query = get_change_column_query(f, new)
if query:
frappe.db.sql(query)
def update_parent_field(f, new):
"""update 'parentfield' in tables"""
if f['fieldtype']=='Table':
frappe.db.begin()
frappe.db.sql("""update `tab%s` set parentfield=%s where parentfield=%s""" \
% (f['options'], '%s', '%s'), (new, f['fieldname']))
frappe.db.commit()
def get_change_column_query(f, new):
"""generate change fieldname query"""
desc = frappe.db.sql("desc `tab%s`" % f['parent'])
for d in desc:
if d[0]== f['fieldname']:
return 'alter table `tab%s` change `%s` `%s` %s' % \
(f['parent'], f['fieldname'], new, d[1])
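# A minimal usage sketch with hypothetical DocType/field names. It assumes a
# configured Frappe bench site; rename() above picks the single vs. table code
# path automatically and also fixes child-table parentfield references.
if __name__ == '__main__':
    frappe.init(site='example.local')   # hypothetical site name
    frappe.connect()
    rename('Task', 'old_fieldname', 'new_fieldname')
    frappe.db.commit()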
|
oktie/linkedct
|
refs/heads/master
|
ctdjango/geosearch/views.py
|
1
|
import math
from django.shortcuts import render_to_response
from django.conf import settings
from linkedct.models import *
import databrowse
from databrowse.datastructures import *
from geopy import geocoders, distance
def map_view(request):
databrowse.site.root_url = settings.CONFIG['ROOT']
countries = Country.objects.all().order_by('country_name')
country = 'Canada'
city = 'Toronto'
dist = '5'
condition = ''
input_error = False
geo_error = False
over_max = False
no_result = False
error = False
inputs = request.GET
if ('country' in inputs) and ('city' in inputs) and ('distance' in inputs) and ('condition' in inputs):
country = inputs.get('country')
city = inputs.get('city')
dist = inputs.get('distance')
condition = inputs.get('condition')
if not country or not city or not dist or not condition:
input_error = True
elif float(dist) > 50 or float(dist) < 0:
over_max = True
else:
g = geocoders.GoogleV3()
try:
_, (lat, lng) = g.geocode(city+','+country, exactly_one=False)[0]
except:
geo_error = True
error = True
radius = float(dist)
if not error:
lat_diff = radius/69
lng_diff = radius/abs(math.cos(math.radians(lat))*69)
lat1 = str(lat - lat_diff)
lat2 = str(lat + lat_diff)
lng1 = str(lng - lng_diff)
lng2 = str(lng + lng_diff)
coords = Coordinates.objects.select_related('address', 'latitude', 'longitude', 'address__country__country_name').\
filter(address__country__country_name = country,
latitude__range=(lat1, lat2),
longitude__range=(lng1,lng2)).values_list('address', 'latitude', 'longitude')
within = []
for c in coords:
if distance.distance((lat,lng), (float(c[1]),float(c[2]))).miles < radius:
within.append(c)
if not within:
no_result = True
error = True
if not error:
conds = Condition.objects.select_related('label', 'slug').filter(label__icontains=condition).values_list('slug', flat=True)
trials = Trial.objects.only('conditions__slug').filter(conditions__slug__in=conds).distinct()
if not trials:
no_result = True
error = True
trials_dict = {}
if not error:
for c in within:
ts = trials.filter(locations__facility__address__in = [c[0]])[:4].count()
if ts:
trials_dict[c] = ts
if not trials_dict:
no_result = True
error = True
if not error:
return render_to_response('geosearch/map_results.html',
{'coordinates': (lat, lng), 'trials': trials_dict.items(), 'countries': countries,
'country':country, 'city':city, 'distance':dist, 'condition':condition, 'root_url': databrowse.site.root_url})
return render_to_response('geosearch/map.html',
{'input_error': input_error, 'geo_error': geo_error, 'over_max': over_max, 'no_result': no_result,
'countries': countries, 'country':country, 'city':city, 'distance':dist,
'condition':condition, 'root_url': databrowse.site.root_url})
def map_search_result_view(request):
error = False
databrowse.site.root_url = settings.CONFIG['ROOT']
inputs = request.GET
if ('location' in inputs) and ('condition' in inputs):
location_addr = inputs.get('location')
condition = inputs.get('condition')
if not location_addr or not condition:
error = True
else:
trial_m = EasyModel(databrowse.site, Trial)
cond_list = Condition.objects.filter(label__icontains=condition).values_list('slug', flat=True)
trials = Trial.objects.only('locations__facility__address','conditions__slug').filter(locations__facility__address__in=[location_addr],\
conditions__slug__in=cond_list).distinct()
facilities = []
for t in trials:
for l in t.locations.only('facility', 'facility__address').filter(facility__address=location_addr).distinct():
if l.facility.facility_name not in facilities:
facilities.append(l.facility.facility_name)
facility_dict = {}
for f in facilities:
trial_dict = {}
for t in trials.only('locations__facility', 'conditions').filter(locations__facility__facility_name=f):
trial_dict[EasyInstance(trial_m, t)] = t.conditions.all()
facility_dict[f] = trial_dict
return render_to_response('geosearch/map_search_result.html',
{'trials':facility_dict,
'address': Address.objects.get(slug = location_addr),
'condition': condition,
'root_url': databrowse.site.root_url})
return render_to_response('geosearch/map_search_result.html',
{'error':error, 'root_url': databrowse.site.root_url})
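# A small helper sketch (not used by the views above) that isolates the
# bounding-box arithmetic from map_view: one degree of latitude is roughly 69
# miles, and a degree of longitude shrinks by cos(latitude).
def bounding_box(lat, lng, radius_miles):
    lat_diff = radius_miles / 69.0
    lng_diff = radius_miles / abs(math.cos(math.radians(lat)) * 69.0)
    return (lat - lat_diff, lat + lat_diff, lng - lng_diff, lng + lng_diff)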
|
alexissmirnov/donomo
|
refs/heads/master
|
donomo_archive/lib/offlineimap/imapserver.py
|
1
|
# IMAP server support
# Copyright (C) 2002 - 2007 John Goerzen
# <jgoerzen@complete.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import imaplib
from offlineimap import imaplibutil, imaputil, threadutil
from offlineimap.ui import UIBase
from threading import *
import thread, hmac, os, time
import base64
from StringIO import StringIO
from platform import system
try:
# do we have a recent pykerberos?
have_gss = False
import kerberos
if 'authGSSClientWrap' in dir(kerberos):
have_gss = True
except ImportError:
pass
class UsefulIMAPMixIn:
def getstate(self):
return self.state
def getselectedfolder(self):
if self.getstate() == 'SELECTED':
return self.selectedfolder
return None
def select(self, mailbox='INBOX', readonly=None, force = 0):
if (not force) and self.getselectedfolder() == mailbox \
and self.is_readonly == readonly:
# No change; return.
return
result = self.__class__.__bases__[1].select(self, mailbox, readonly)
if result[0] != 'OK':
raise ValueError, "Error from select: %s" % str(result)
if self.getstate() == 'SELECTED':
self.selectedfolder = mailbox
else:
self.selectedfolder = None
def _mesg(self, s, secs=None):
imaplibutil.new_mesg(self, s, secs)
class UsefulIMAP4(UsefulIMAPMixIn, imaplib.IMAP4):
def open(self, host = '', port = imaplib.IMAP4_PORT):
imaplibutil.new_open(self, host, port)
# This is a hack around Darwin's implementation of realloc() (which
# Python uses inside the socket code). On Darwin, we split the
# message into 100k chunks, which should be small enough - smaller
# might start seriously hurting performance ...
def read(self, size):
if (system() == 'Darwin') and (size>0) :
read = 0
io = StringIO()
while read < size:
data = imaplib.IMAP4.read (self, min(size-read,8192))
read += len(data)
io.write(data)
return io.getvalue()
else:
return imaplib.IMAP4.read (self, size)
class UsefulIMAP4_SSL(UsefulIMAPMixIn, imaplibutil.WrappedIMAP4_SSL):
def open(self, host = '', port = imaplib.IMAP4_SSL_PORT):
imaplibutil.new_open_ssl(self, host, port)
# This is the same hack as above, to be used in the case of an SSL
# connexion.
def read(self, size):
if (system() == 'Darwin') and (size>0) :
read = 0
io = StringIO()
while read < size:
data = imaplibutil.WrappedIMAP4_SSL.read (self, min(size-read,8192))
read += len(data)
io.write(data)
return io.getvalue()
else:
return imaplibutil.WrappedIMAP4_SSL.read (self,size)
class UsefulIMAP4_Tunnel(UsefulIMAPMixIn, imaplibutil.IMAP4_Tunnel): pass
class IMAPServer:
GSS_STATE_STEP = 0
GSS_STATE_WRAP = 1
def __init__(self, config, reposname,
username = None, password = None, hostname = None,
port = None, ssl = 1, maxconnections = 1, tunnel = None,
reference = '""', sslclientcert = None, sslclientkey = None):
self.reposname = reposname
self.config = config
self.username = username
self.password = password
self.passworderror = None
self.goodpassword = None
self.hostname = hostname
self.tunnel = tunnel
self.port = port
self.usessl = ssl
self.sslclientcert = sslclientcert
self.sslclientkey = sslclientkey
self.delim = None
self.root = None
if port == None:
if ssl:
self.port = 993
else:
self.port = 143
self.maxconnections = maxconnections
self.availableconnections = []
self.assignedconnections = []
self.lastowner = {}
self.semaphore = BoundedSemaphore(self.maxconnections)
self.connectionlock = Lock()
self.reference = reference
self.gss_step = self.GSS_STATE_STEP
self.gss_vc = None
self.gssapi = False
def getpassword(self):
if self.goodpassword != None:
return self.goodpassword
if self.password != None and self.passworderror == None:
return self.password
self.password = UIBase.getglobalui().getpass(self.reposname,
self.config,
self.passworderror)
self.passworderror = None
return self.password
def getdelim(self):
"""Returns this server's folder delimiter. Can only be called
after one or more calls to acquireconnection."""
return self.delim
def getroot(self):
"""Returns this server's folder root. Can only be called after one
or more calls to acquireconnection."""
return self.root
def releaseconnection(self, connection):
"""Releases a connection, returning it to the pool."""
self.connectionlock.acquire()
self.assignedconnections.remove(connection)
self.availableconnections.append(connection)
self.connectionlock.release()
self.semaphore.release()
def md5handler(self, response):
ui = UIBase.getglobalui()
challenge = response.strip()
ui.debug('imap', 'md5handler: got challenge %s' % challenge)
passwd = self.repos.getpassword()
retval = self.username + ' ' + hmac.new(passwd, challenge).hexdigest()
ui.debug('imap', 'md5handler: returning %s' % retval)
return retval
def plainauth(self, imapobj):
UIBase.getglobalui().debug('imap',
'Attempting plain authentication for %s' % self.username)
imapobj.login(self.username, self.repos.getpassword())
def gssauth(self, response):
data = base64.b64encode(response)
try:
if self.gss_step == self.GSS_STATE_STEP:
if not self.gss_vc:
rc, self.gss_vc = kerberos.authGSSClientInit('imap@' +
self.hostname)
response = kerberos.authGSSClientResponse(self.gss_vc)
rc = kerberos.authGSSClientStep(self.gss_vc, data)
if rc != kerberos.AUTH_GSS_CONTINUE:
self.gss_step = self.GSS_STATE_WRAP
elif self.gss_step == self.GSS_STATE_WRAP:
rc = kerberos.authGSSClientUnwrap(self.gss_vc, data)
response = kerberos.authGSSClientResponse(self.gss_vc)
rc = kerberos.authGSSClientWrap(self.gss_vc, response,
self.username)
response = kerberos.authGSSClientResponse(self.gss_vc)
except kerberos.GSSError, err:
# Kerberos errored out on us, respond with None to cancel the
# authentication
UIBase.getglobalui().debug('imap',
'%s: %s' % (err[0][0], err[1][0]))
return None
if not response:
response = ''
return base64.b64decode(response)
def acquireconnection(self):
"""Fetches a connection from the pool, making sure to create a new one
if needed, to obey the maximum connection limits, etc.
Opens a connection to the server and returns an appropriate
object."""
self.semaphore.acquire()
self.connectionlock.acquire()
imapobj = None
if len(self.availableconnections): # One is available.
# Try to find one that previously belonged to this thread
# as an optimization. Start from the back since that's where
# they're popped on.
threadid = thread.get_ident()
imapobj = None
for i in range(len(self.availableconnections) - 1, -1, -1):
tryobj = self.availableconnections[i]
if self.lastowner[tryobj] == threadid:
imapobj = tryobj
del(self.availableconnections[i])
break
if not imapobj:
imapobj = self.availableconnections[0]
del(self.availableconnections[0])
self.assignedconnections.append(imapobj)
self.lastowner[imapobj] = thread.get_ident()
self.connectionlock.release()
return imapobj
self.connectionlock.release() # Release until need to modify data
""" Must be careful here that if we fail we should bail out gracefully
and release locks / threads so that the next attempt can try...
"""
success = 0
try:
while not success:
# Generate a new connection.
if self.tunnel:
UIBase.getglobalui().connecting('tunnel', self.tunnel)
imapobj = UsefulIMAP4_Tunnel(self.tunnel)
success = 1
elif self.usessl:
UIBase.getglobalui().connecting(self.hostname, self.port)
imapobj = UsefulIMAP4_SSL(self.hostname, self.port,
self.sslclientkey, self.sslclientcert)
else:
UIBase.getglobalui().connecting(self.hostname, self.port)
imapobj = UsefulIMAP4(self.hostname, self.port)
imapobj.mustquote = imaplibutil.mustquote
if not self.tunnel:
try:
# Try GSSAPI and continue if it fails
if 'AUTH=GSSAPI' in imapobj.capabilities and have_gss:
UIBase.getglobalui().debug('imap',
'Attempting GSSAPI authentication')
try:
imapobj.authenticate('GSSAPI', self.gssauth)
except imapobj.error, val:
self.gssapi = False
UIBase.getglobalui().debug('imap',
'GSSAPI Authentication failed')
else:
self.gssapi = True
#if we do self.password = None then the next attempt cannot try...
#self.password = None
if not self.gssapi:
if 'AUTH=CRAM-MD5' in imapobj.capabilities:
UIBase.getglobalui().debug('imap',
'Attempting CRAM-MD5 authentication')
try:
imapobj.authenticate('CRAM-MD5', self.md5handler)
except imapobj.error, val:
self.plainauth(imapobj)
else:
self.plainauth(imapobj)
# Would bail by here if there was a failure.
success = 1
self.goodpassword = self.password
except imapobj.error, val:
self.passworderror = str(val)
raise
#self.password = None
if self.delim == None:
listres = imapobj.list(self.reference, '""')[1]
if listres == [None] or listres == None:
# Some buggy IMAP servers do not respond well to LIST "" ""
# Work around them.
listres = imapobj.list(self.reference, '"*"')[1]
self.delim, self.root = \
imaputil.imapsplit(listres[0])[1:]
self.delim = imaputil.dequote(self.delim)
self.root = imaputil.dequote(self.root)
self.connectionlock.acquire()
self.assignedconnections.append(imapobj)
self.lastowner[imapobj] = thread.get_ident()
self.connectionlock.release()
return imapobj
except:
"""If we are here then we did not succeed in getting a connection -
we should clean up and then re-raise the error..."""
self.semaphore.release()
#Make sure that this can be retried the next time...
self.passworderror = None
if(self.connectionlock.locked()):
self.connectionlock.release()
raise
def connectionwait(self):
"""Waits until there is a connection available. Note that between
the time that a connection becomes available and the time it is
requested, another thread may have grabbed it. This function is
mainly present as a way to avoid spawning thousands of threads
to copy messages, then have them all wait for 3 available connections.
It's OK if we have maxconnections + 1 or 2 threads, which is what
this will help us do."""
threadutil.semaphorewait(self.semaphore)
def close(self):
# Make sure I own all the semaphores. Let the threads finish
# their stuff. This is a blocking method.
self.connectionlock.acquire()
threadutil.semaphorereset(self.semaphore, self.maxconnections)
for imapobj in self.assignedconnections + self.availableconnections:
imapobj.logout()
self.assignedconnections = []
self.availableconnections = []
self.lastowner = {}
# reset kerberos state
self.gss_step = self.GSS_STATE_STEP
self.gss_vc = None
self.gssapi = False
self.connectionlock.release()
def keepalive(self, timeout, event):
"""Sends a NOOP to each connection recorded. It will wait a maximum
of timeout seconds between doing this, and will continue to do so
until the Event object passed in is set. This method is expected
to be invoked in a separate thread, which should be join()'d after
the event is set."""
ui = UIBase.getglobalui()
ui.debug('imap', 'keepalive thread started')
while 1:
ui.debug('imap', 'keepalive: top of loop')
time.sleep(timeout)
ui.debug('imap', 'keepalive: after wait')
if event.isSet():
ui.debug('imap', 'keepalive: event is set; exiting')
return
ui.debug('imap', 'keepalive: acquiring connectionlock')
self.connectionlock.acquire()
numconnections = len(self.assignedconnections) + \
len(self.availableconnections)
self.connectionlock.release()
ui.debug('imap', 'keepalive: connectionlock released')
threads = []
imapobjs = []
for i in range(numconnections):
ui.debug('imap', 'keepalive: processing connection %d of %d' % (i, numconnections))
imapobj = self.acquireconnection()
ui.debug('imap', 'keepalive: connection %d acquired' % i)
imapobjs.append(imapobj)
thr = threadutil.ExitNotifyThread(target = imapobj.noop)
thr.setDaemon(1)
thr.start()
threads.append(thr)
ui.debug('imap', 'keepalive: thread started')
ui.debug('imap', 'keepalive: joining threads')
for thr in threads:
# Make sure all the commands have completed.
thr.join()
ui.debug('imap', 'keepalive: releasing connections')
for imapobj in imapobjs:
self.releaseconnection(imapobj)
ui.debug('imap', 'keepalive: bottom of loop')
class ConfigedIMAPServer(IMAPServer):
"""This class is designed for easier initialization given a ConfigParser
object and an account name. The passwordhash is used if
passwords for certain accounts are known. If the password for this
account is listed, it will be obtained from there."""
def __init__(self, repository, passwordhash = {}):
"""Initialize the object. If the account is not a tunnel,
the password is required."""
self.repos = repository
self.config = self.repos.getconfig()
usetunnel = self.repos.getpreauthtunnel()
if not usetunnel:
host = self.repos.gethost()
user = self.repos.getuser()
port = self.repos.getport()
ssl = self.repos.getssl()
sslclientcert = self.repos.getsslclientcert()
sslclientkey = self.repos.getsslclientkey()
reference = self.repos.getreference()
server = None
password = None
if repository.getname() in passwordhash:
password = passwordhash[repository.getname()]
# Connect to the remote server.
if usetunnel:
IMAPServer.__init__(self, self.config, self.repos.getname(),
tunnel = usetunnel,
reference = reference,
maxconnections = self.repos.getmaxconnections())
else:
if not password:
password = self.repos.getpassword()
IMAPServer.__init__(self, self.config, self.repos.getname(),
user, password, host, port, ssl,
self.repos.getmaxconnections(),
reference = reference,
sslclientcert = sslclientcert,
sslclientkey = sslclientkey)
|
egabancho/invenio
|
refs/heads/pu
|
invenio/modules/previewer/previewerext/zip.py
|
1
|
## This file is part of Invenio.
## Copyright (C) 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Simple ZIP archive previewer.
Previewer needs to be enabled by setting the following config variable.
.. code-block:: python
CFG_PREVIEW_PREFERENCE = {'.zip': ['zip']}
"""
import os
import zipfile
from flask import render_template, request
def make_tree(archive_name):
"""Create tree structure from ZIP archive."""
zf = zipfile.ZipFile(archive_name)
tree = {'type': 'folder', 'id': -1, 'children': {}}
for i, info in enumerate(zf.infolist()):
comps = info.filename.split(os.sep)
node = tree
for c in comps:
if c not in node['children']:
if c == '':
node['type'] = 'folder'
continue
node['children'][c] = {
'name': c, 'type': 'item', 'id': 'item%s' % i,
'children': {}}
node = node['children'][c]
node['size'] = info.file_size
return tree
def children_to_list(node):
"""Organize children structure."""
if node['type'] == 'item' and len(node['children']) == 0:
del node['children']
else:
node['type'] = 'folder'
node['children'] = list(node['children'].values())
node['children'].sort(key=lambda x: x['name'])
node['children'] = map(children_to_list, node['children'])
return node
def can_preview(f):
"""Return True if filetype can be previewed."""
return f.superformat.lower() == '.zip'
def preview(f):
"""Return appropiate template and pass the file and an embed flag."""
tree = children_to_list(make_tree(f.get_full_path()))['children']
return render_template("previewer/zip.html", f=f, tree=tree,
embed=request.args.get('embed', type=bool))
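# A minimal sketch outside the Invenio request cycle, using an in-memory ZIP with
# invented file names: make_tree() accepts anything zipfile.ZipFile accepts, and
# children_to_list() turns the nested dicts into the sorted lists the template
# expects.
if __name__ == '__main__':
    from io import BytesIO
    buf = BytesIO()
    with zipfile.ZipFile(buf, 'w') as zf:
        zf.writestr('docs/readme.txt', 'hello')
        zf.writestr('docs/img/logo.png', 'not really a png')
    buf.seek(0)
    print(children_to_list(make_tree(buf))['children'])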
|
sudhirmohanraj/python_koans
|
refs/heads/master
|
python2/libs/colorama/ansi.py
|
527
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
'''
This module generates ANSI character codes for printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
'''
CSI = '\033['
def code_to_chars(code):
return CSI + str(code) + 'm'
class AnsiCodes(object):
def __init__(self, codes):
for name in dir(codes):
if not name.startswith('_'):
value = getattr(codes, name)
setattr(self, name, code_to_chars(value))
class AnsiFore:
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
RESET = 39
class AnsiBack:
BLACK = 40
RED = 41
GREEN = 42
YELLOW = 43
BLUE = 44
MAGENTA = 45
CYAN = 46
WHITE = 47
RESET = 49
class AnsiStyle:
BRIGHT = 1
DIM = 2
NORMAL = 22
RESET_ALL = 0
Fore = AnsiCodes( AnsiFore )
Back = AnsiCodes( AnsiBack )
Style = AnsiCodes( AnsiStyle )
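# Illustrative usage sketch, not part of the original module: each constant on
# Fore, Back and Style expands to a full ANSI escape sequence, so concatenating
# them around a string colours it on ANSI-capable terminals (on Windows,
# colorama's init() is normally needed to translate the sequences).
if __name__ == '__main__':
    print(Fore.RED + Style.BRIGHT + 'error: something went wrong' + Style.RESET_ALL)
    print(Back.GREEN + Fore.BLACK + 'highlighted' + Style.RESET_ALL)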
|
WSDC-NITWarangal/django
|
refs/heads/master
|
django/db/backends/utils.py
|
430
|
from __future__ import unicode_literals
import datetime
import decimal
import hashlib
import logging
from time import time
from django.conf import settings
from django.utils.encoding import force_bytes
from django.utils.timezone import utc
logger = logging.getLogger('django.db.backends')
class CursorWrapper(object):
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db
WRAP_ERROR_ATTRS = frozenset(['fetchone', 'fetchmany', 'fetchall', 'nextset'])
def __getattr__(self, attr):
cursor_attr = getattr(self.cursor, attr)
if attr in CursorWrapper.WRAP_ERROR_ATTRS:
return self.db.wrap_database_errors(cursor_attr)
else:
return cursor_attr
def __iter__(self):
with self.db.wrap_database_errors:
for item in self.cursor:
yield item
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Ticket #17671 - Close instead of passing thru to avoid backend
# specific behavior. Catch errors liberally because errors in cleanup
# code aren't useful.
try:
self.close()
except self.db.Database.Error:
pass
# The following methods cannot be implemented in __getattr__, because the
# code must run when the method is invoked, not just when it is accessed.
def callproc(self, procname, params=None):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
if params is None:
return self.cursor.callproc(procname)
else:
return self.cursor.callproc(procname, params)
def execute(self, sql, params=None):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
if params is None:
return self.cursor.execute(sql)
else:
return self.cursor.execute(sql, params)
def executemany(self, sql, param_list):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
return self.cursor.executemany(sql, param_list)
class CursorDebugWrapper(CursorWrapper):
# XXX callproc isn't instrumented at this time.
def execute(self, sql, params=None):
start = time()
try:
return super(CursorDebugWrapper, self).execute(sql, params)
finally:
stop = time()
duration = stop - start
sql = self.db.ops.last_executed_query(self.cursor, sql, params)
self.db.queries_log.append({
'sql': sql,
'time': "%.3f" % duration,
})
logger.debug('(%.3f) %s; args=%s' % (duration, sql, params),
extra={'duration': duration, 'sql': sql, 'params': params}
)
def executemany(self, sql, param_list):
start = time()
try:
return super(CursorDebugWrapper, self).executemany(sql, param_list)
finally:
stop = time()
duration = stop - start
try:
times = len(param_list)
except TypeError: # param_list could be an iterator
times = '?'
self.db.queries_log.append({
'sql': '%s times: %s' % (times, sql),
'time': "%.3f" % duration,
})
logger.debug('(%.3f) %s; args=%s' % (duration, sql, param_list),
extra={'duration': duration, 'sql': sql, 'params': param_list}
)
###############################################
# Converters from database (string) to Python #
###############################################
def typecast_date(s):
return datetime.date(*map(int, s.split('-'))) if s else None # returns None if s is null
def typecast_time(s): # does NOT store time zone information
if not s:
return None
hour, minutes, seconds = s.split(':')
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
return datetime.time(int(hour), int(minutes), int(seconds), int(float('.' + microseconds) * 1000000))
def typecast_timestamp(s): # does NOT store time zone information
# "2005-07-29 15:48:00.590358-05"
# "2005-07-29 09:56:00-05"
if not s:
return None
if ' ' not in s:
return typecast_date(s)
d, t = s.split()
# Extract timezone information, if it exists. Currently we just throw
# it away, but in the future we may make use of it.
if '-' in t:
t, tz = t.split('-', 1)
tz = '-' + tz
elif '+' in t:
t, tz = t.split('+', 1)
tz = '+' + tz
else:
tz = ''
dates = d.split('-')
times = t.split(':')
seconds = times[2]
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
tzinfo = utc if settings.USE_TZ else None
return datetime.datetime(int(dates[0]), int(dates[1]), int(dates[2]),
int(times[0]), int(times[1]), int(seconds),
int((microseconds + '000000')[:6]), tzinfo)
def typecast_decimal(s):
if s is None or s == '':
return None
return decimal.Decimal(s)
###############################################
# Converters from Python to database (string) #
###############################################
def rev_typecast_decimal(d):
if d is None:
return None
return str(d)
def truncate_name(name, length=None, hash_len=4):
"""Shortens a string to a repeatable mangled version with the given length.
"""
if length is None or len(name) <= length:
return name
hsh = hashlib.md5(force_bytes(name)).hexdigest()[:hash_len]
return '%s%s' % (name[:length - hash_len], hsh)
def format_number(value, max_digits, decimal_places):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
if value is None:
return None
if isinstance(value, decimal.Decimal):
context = decimal.getcontext().copy()
if max_digits is not None:
context.prec = max_digits
if decimal_places is not None:
value = value.quantize(decimal.Decimal(".1") ** decimal_places, context=context)
else:
context.traps[decimal.Rounded] = 1
value = context.create_decimal(value)
return "{:f}".format(value)
if decimal_places is not None:
return "%.*f" % (decimal_places, value)
return "{:f}".format(value)
|
seem-sky/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/xml/sax/saxutils.py
|
76
|
"""\
A library of useful helper classes to the SAX classes, for the
convenience of application and driver writers.
"""
import os, urllib.parse, urllib.request
import io
import codecs
from . import handler
from . import xmlreader
def __dict_replace(s, d):
"""Replace substrings of a string using a dictionary."""
for key, value in d.items():
s = s.replace(key, value)
return s
def escape(data, entities={}):
"""Escape &, <, and > in a string of data.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
# must do ampersand first
data = data.replace("&", "&")
data = data.replace(">", ">")
data = data.replace("<", "<")
if entities:
data = __dict_replace(data, entities)
return data
def unescape(data, entities={}):
"""Unescape &, <, and > in a string of data.
You can unescape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
data = data.replace("<", "<")
data = data.replace(">", ">")
if entities:
data = __dict_replace(data, entities)
# must do ampersand last
return data.replace("&", "&")
def quoteattr(data, entities={}):
"""Escape and quote an attribute value.
Escape &, <, and > in a string of data, then quote it for use as
an attribute value. The \" character will be escaped as well, if
necessary.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
entities = entities.copy()
    entities.update({'\n': '&#10;', '\r': '&#13;', '\t':'&#9;'})
data = escape(data, entities)
if '"' in data:
if "'" in data:
data = '"%s"' % data.replace('"', """)
else:
data = "'%s'" % data
else:
data = '"%s"' % data
return data
def _gettextwriter(out, encoding):
if out is None:
import sys
return sys.stdout
if isinstance(out, io.TextIOBase):
# use a text writer as is
return out
if isinstance(out, (codecs.StreamWriter, codecs.StreamReaderWriter)):
# use a codecs stream writer as is
return out
# wrap a binary writer with TextIOWrapper
if isinstance(out, io.RawIOBase):
# Keep the original file open when the TextIOWrapper is
# destroyed
class _wrapper:
__class__ = out.__class__
def __getattr__(self, name):
return getattr(out, name)
buffer = _wrapper()
buffer.close = lambda: None
else:
# This is to handle passed objects that aren't in the
# IOBase hierarchy, but just have a write method
buffer = io.BufferedIOBase()
buffer.writable = lambda: True
buffer.write = out.write
try:
            # TextIOWrapper uses these methods to determine
# if BOM (for UTF-16, etc) should be added
buffer.seekable = out.seekable
buffer.tell = out.tell
except AttributeError:
pass
return io.TextIOWrapper(buffer, encoding=encoding,
errors='xmlcharrefreplace',
newline='\n',
write_through=True)
class XMLGenerator(handler.ContentHandler):
def __init__(self, out=None, encoding="iso-8859-1", short_empty_elements=False):
handler.ContentHandler.__init__(self)
out = _gettextwriter(out, encoding)
self._write = out.write
self._flush = out.flush
self._ns_contexts = [{}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self._undeclared_ns_maps = []
self._encoding = encoding
self._short_empty_elements = short_empty_elements
self._pending_start_element = False
def _qname(self, name):
"""Builds a qualified name from a (ns_url, localname) pair"""
if name[0]:
# Per http://www.w3.org/XML/1998/namespace, The 'xml' prefix is
# bound by definition to http://www.w3.org/XML/1998/namespace. It
# does not need to be declared and will not usually be found in
# self._current_context.
if 'http://www.w3.org/XML/1998/namespace' == name[0]:
return 'xml:' + name[1]
# The name is in a non-empty namespace
prefix = self._current_context[name[0]]
if prefix:
# If it is not the default namespace, prepend the prefix
return prefix + ":" + name[1]
# Return the unqualified name
return name[1]
def _finish_pending_start_element(self,endElement=False):
if self._pending_start_element:
self._write('>')
self._pending_start_element = False
# ContentHandler methods
def startDocument(self):
self._write('<?xml version="1.0" encoding="%s"?>\n' %
self._encoding)
def endDocument(self):
self._flush()
def startPrefixMapping(self, prefix, uri):
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix
self._undeclared_ns_maps.append((prefix, uri))
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts[-1]
del self._ns_contexts[-1]
def startElement(self, name, attrs):
self._finish_pending_start_element()
self._write('<' + name)
for (name, value) in attrs.items():
self._write(' %s=%s' % (name, quoteattr(value)))
if self._short_empty_elements:
self._pending_start_element = True
else:
self._write(">")
def endElement(self, name):
if self._pending_start_element:
self._write('/>')
self._pending_start_element = False
else:
self._write('</%s>' % name)
def startElementNS(self, name, qname, attrs):
self._finish_pending_start_element()
self._write('<' + self._qname(name))
for prefix, uri in self._undeclared_ns_maps:
if prefix:
self._write(' xmlns:%s="%s"' % (prefix, uri))
else:
self._write(' xmlns="%s"' % uri)
self._undeclared_ns_maps = []
for (name, value) in attrs.items():
self._write(' %s=%s' % (self._qname(name), quoteattr(value)))
if self._short_empty_elements:
self._pending_start_element = True
else:
self._write(">")
def endElementNS(self, name, qname):
if self._pending_start_element:
self._write('/>')
self._pending_start_element = False
else:
self._write('</%s>' % self._qname(name))
def characters(self, content):
if content:
self._finish_pending_start_element()
if not isinstance(content, str):
content = str(content, self._encoding)
self._write(escape(content))
def ignorableWhitespace(self, content):
if content:
self._finish_pending_start_element()
if not isinstance(content, str):
content = str(content, self._encoding)
self._write(content)
def processingInstruction(self, target, data):
self._finish_pending_start_element()
self._write('<?%s %s?>' % (target, data))
class XMLFilterBase(xmlreader.XMLReader):
"""This class is designed to sit between an XMLReader and the
client application's event handlers. By default, it does nothing
but pass requests up to the reader and events on to the handlers
unmodified, but subclasses can override specific methods to modify
the event stream or the configuration requests as they pass
through."""
def __init__(self, parent = None):
xmlreader.XMLReader.__init__(self)
self._parent = parent
# ErrorHandler methods
def error(self, exception):
self._err_handler.error(exception)
def fatalError(self, exception):
self._err_handler.fatalError(exception)
def warning(self, exception):
self._err_handler.warning(exception)
# ContentHandler methods
def setDocumentLocator(self, locator):
self._cont_handler.setDocumentLocator(locator)
def startDocument(self):
self._cont_handler.startDocument()
def endDocument(self):
self._cont_handler.endDocument()
def startPrefixMapping(self, prefix, uri):
self._cont_handler.startPrefixMapping(prefix, uri)
def endPrefixMapping(self, prefix):
self._cont_handler.endPrefixMapping(prefix)
def startElement(self, name, attrs):
self._cont_handler.startElement(name, attrs)
def endElement(self, name):
self._cont_handler.endElement(name)
def startElementNS(self, name, qname, attrs):
self._cont_handler.startElementNS(name, qname, attrs)
def endElementNS(self, name, qname):
self._cont_handler.endElementNS(name, qname)
def characters(self, content):
self._cont_handler.characters(content)
def ignorableWhitespace(self, chars):
self._cont_handler.ignorableWhitespace(chars)
def processingInstruction(self, target, data):
self._cont_handler.processingInstruction(target, data)
def skippedEntity(self, name):
self._cont_handler.skippedEntity(name)
# DTDHandler methods
def notationDecl(self, name, publicId, systemId):
self._dtd_handler.notationDecl(name, publicId, systemId)
def unparsedEntityDecl(self, name, publicId, systemId, ndata):
self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)
# EntityResolver methods
def resolveEntity(self, publicId, systemId):
return self._ent_handler.resolveEntity(publicId, systemId)
# XMLReader methods
def parse(self, source):
self._parent.setContentHandler(self)
self._parent.setErrorHandler(self)
self._parent.setEntityResolver(self)
self._parent.setDTDHandler(self)
self._parent.parse(source)
def setLocale(self, locale):
self._parent.setLocale(locale)
def getFeature(self, name):
return self._parent.getFeature(name)
def setFeature(self, name, state):
self._parent.setFeature(name, state)
def getProperty(self, name):
return self._parent.getProperty(name)
def setProperty(self, name, value):
self._parent.setProperty(name, value)
# XMLFilter methods
def getParent(self):
return self._parent
def setParent(self, parent):
self._parent = parent
# --- Utility functions
def prepare_input_source(source, base=""):
"""This function takes an InputSource and an optional base URL and
returns a fully resolved InputSource object ready for reading."""
if isinstance(source, str):
source = xmlreader.InputSource(source)
elif hasattr(source, "read"):
f = source
source = xmlreader.InputSource()
source.setByteStream(f)
if hasattr(f, "name"):
source.setSystemId(f.name)
if source.getByteStream() is None:
sysid = source.getSystemId()
basehead = os.path.dirname(os.path.normpath(base))
sysidfilename = os.path.join(basehead, sysid)
if os.path.isfile(sysidfilename):
source.setSystemId(sysidfilename)
f = open(sysidfilename, "rb")
else:
source.setSystemId(urllib.parse.urljoin(base, sysid))
f = urllib.request.urlopen(source.getSystemId())
source.setByteStream(f)
return source
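# Illustrative sketch, not part of the original module: driving XMLGenerator by
# hand to serialise a tiny document into an in-memory text buffer. A plain dict
# stands in for the usual Attributes object, and the element/attribute names
# are assumptions made for the example.
if __name__ == '__main__':
    buf = io.StringIO()
    gen = XMLGenerator(buf, encoding='utf-8', short_empty_elements=True)
    gen.startDocument()
    gen.startElement('greeting', {'lang': 'en'})
    gen.characters('hello & goodbye')  # '&' is escaped to '&amp;' on output
    gen.endElement('greeting')
    gen.endDocument()
    print(buf.getvalue())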
|
adamjmcgrath/glancydesign
|
refs/heads/master
|
django/views/generic/detail.py
|
154
|
import re
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.http import Http404
from django.utils.encoding import smart_str
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateResponseMixin, View
class SingleObjectMixin(object):
"""
Provides the ability to retrieve a single object for further manipulation.
"""
model = None
queryset = None
slug_field = 'slug'
context_object_name = None
def get_object(self, queryset=None):
"""
Returns the object the view is displaying.
By default this requires `self.queryset` and a `pk` or `slug` argument
in the URLconf, but subclasses can override this to return any object.
"""
# Use a custom queryset if provided; this is required for subclasses
# like DateDetailView
if queryset is None:
queryset = self.get_queryset()
# Next, try looking up by primary key.
pk = self.kwargs.get('pk', None)
slug = self.kwargs.get('slug', None)
if pk is not None:
queryset = queryset.filter(pk=pk)
# Next, try looking up by slug.
elif slug is not None:
slug_field = self.get_slug_field()
queryset = queryset.filter(**{slug_field: slug})
# If none of those are defined, it's an error.
else:
raise AttributeError(u"Generic detail view %s must be called with "
u"either an object pk or a slug."
% self.__class__.__name__)
try:
obj = queryset.get()
except ObjectDoesNotExist:
raise Http404(_(u"No %(verbose_name)s found matching the query") %
{'verbose_name': queryset.model._meta.verbose_name})
return obj
def get_queryset(self):
"""
Get the queryset to look an object up against. May not be called if
`get_object` is overridden.
"""
if self.queryset is None:
if self.model:
return self.model._default_manager.all()
else:
raise ImproperlyConfigured(u"%(cls)s is missing a queryset. Define "
u"%(cls)s.model, %(cls)s.queryset, or override "
u"%(cls)s.get_object()." % {
'cls': self.__class__.__name__
})
return self.queryset._clone()
def get_slug_field(self):
"""
Get the name of a slug field to be used to look up by slug.
"""
return self.slug_field
def get_context_object_name(self, obj):
"""
Get the name to use for the object.
"""
if self.context_object_name:
return self.context_object_name
elif hasattr(obj, '_meta'):
return smart_str(obj._meta.object_name.lower())
else:
return None
def get_context_data(self, **kwargs):
context = kwargs
context_object_name = self.get_context_object_name(self.object)
if context_object_name:
context[context_object_name] = self.object
return context
class BaseDetailView(SingleObjectMixin, View):
def get(self, request, **kwargs):
self.object = self.get_object()
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
class SingleObjectTemplateResponseMixin(TemplateResponseMixin):
template_name_field = None
template_name_suffix = '_detail'
def get_template_names(self):
"""
Return a list of template names to be used for the request. Must return
a list. May not be called if get_template is overridden.
"""
try:
names = super(SingleObjectTemplateResponseMixin, self).get_template_names()
except ImproperlyConfigured:
# If template_name isn't specified, it's not a problem --
# we just start with an empty list.
names = []
# If self.template_name_field is set, grab the value of the field
# of that name from the object; this is the most specific template
# name, if given.
if self.object and self.template_name_field:
name = getattr(self.object, self.template_name_field, None)
if name:
names.insert(0, name)
# The least-specific option is the default <app>/<model>_detail.html;
# only use this if the object in question is a model.
if hasattr(self.object, '_meta'):
names.append("%s/%s%s.html" % (
self.object._meta.app_label,
self.object._meta.object_name.lower(),
self.template_name_suffix
))
elif hasattr(self, 'model') and hasattr(self.model, '_meta'):
names.append("%s/%s%s.html" % (
self.model._meta.app_label,
self.model._meta.object_name.lower(),
self.template_name_suffix
))
return names
class DetailView(SingleObjectTemplateResponseMixin, BaseDetailView):
"""
Render a "detail" view of an object.
By default this is a model instance looked up from `self.queryset`, but the
view will support display of *any* object by overriding `self.get_object()`.
"""
|
GPNMilano/PyPRPImporter
|
refs/heads/master
|
PyPRPImport/__init__.py
|
12133432
|